Merge pull request #3696 from karalabe/contextual-logger
Contextual logger
This commit is contained in:
commit
357732a840
@ -22,26 +22,26 @@ import (
|
|||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
)
|
)
|
||||||
|
|
||||||
// WaitMined waits for tx to be mined on the blockchain.
|
// WaitMined waits for tx to be mined on the blockchain.
|
||||||
// It stops waiting when the context is canceled.
|
// It stops waiting when the context is canceled.
|
||||||
func WaitMined(ctx context.Context, b DeployBackend, tx *types.Transaction) (*types.Receipt, error) {
|
func WaitMined(ctx context.Context, b DeployBackend, tx *types.Transaction) (*types.Receipt, error) {
|
||||||
queryTicker := time.NewTicker(1 * time.Second)
|
queryTicker := time.NewTicker(time.Second)
|
||||||
defer queryTicker.Stop()
|
defer queryTicker.Stop()
|
||||||
loghash := tx.Hash().Hex()[:8]
|
|
||||||
|
logger := log.New("hash", tx.Hash().Hex()[:8])
|
||||||
for {
|
for {
|
||||||
receipt, err := b.TransactionReceipt(ctx, tx.Hash())
|
receipt, err := b.TransactionReceipt(ctx, tx.Hash())
|
||||||
if receipt != nil {
|
if receipt != nil {
|
||||||
return receipt, nil
|
return receipt, nil
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Detail).Infof("tx %x error: %v", loghash, err)
|
logger.Trace("Receipt retrieval failed", "error", err)
|
||||||
} else {
|
} else {
|
||||||
glog.V(logger.Detail).Infof("tx %x not yet mined...", loghash)
|
logger.Trace("Transaction not yet mined")
|
||||||
}
|
}
|
||||||
// Wait for the next round.
|
// Wait for the next round.
|
||||||
select {
|
select {
|
||||||
|
@ -30,8 +30,7 @@ import (
|
|||||||
|
|
||||||
"github.com/ethereum/go-ethereum/accounts"
|
"github.com/ethereum/go-ethereum/accounts"
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Minimum amount of time between cache reloads. This limit applies if the platform does
|
// Minimum amount of time between cache reloads. This limit applies if the platform does
|
||||||
@ -210,8 +209,8 @@ func (ac *accountCache) close() {
|
|||||||
// Callers must hold ac.mu.
|
// Callers must hold ac.mu.
|
||||||
func (ac *accountCache) reload() {
|
func (ac *accountCache) reload() {
|
||||||
accounts, err := ac.scan()
|
accounts, err := ac.scan()
|
||||||
if err != nil && glog.V(logger.Debug) {
|
if err != nil {
|
||||||
glog.Errorf("can't load keys: %v", err)
|
log.Debug("Failed to reload keystore contents", "error", err)
|
||||||
}
|
}
|
||||||
ac.all = accounts
|
ac.all = accounts
|
||||||
sort.Sort(ac.all)
|
sort.Sort(ac.all)
|
||||||
@ -225,7 +224,7 @@ func (ac *accountCache) reload() {
|
|||||||
case ac.notify <- struct{}{}:
|
case ac.notify <- struct{}{}:
|
||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
glog.V(logger.Debug).Infof("reloaded keys, cache has %d accounts", len(ac.all))
|
log.Debug("Reloaded keystore contents", "accounts", len(ac.all))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ac *accountCache) scan() ([]accounts.Account, error) {
|
func (ac *accountCache) scan() ([]accounts.Account, error) {
|
||||||
@ -244,12 +243,14 @@ func (ac *accountCache) scan() ([]accounts.Account, error) {
|
|||||||
for _, fi := range files {
|
for _, fi := range files {
|
||||||
path := filepath.Join(ac.keydir, fi.Name())
|
path := filepath.Join(ac.keydir, fi.Name())
|
||||||
if skipKeyFile(fi) {
|
if skipKeyFile(fi) {
|
||||||
glog.V(logger.Detail).Infof("ignoring file %s", path)
|
log.Trace("Ignoring file on account scan", "path", path)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
logger := log.New("path", path)
|
||||||
|
|
||||||
fd, err := os.Open(path)
|
fd, err := os.Open(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Detail).Infoln(err)
|
logger.Trace("Failed to open keystore file", "error", err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
buf.Reset(fd)
|
buf.Reset(fd)
|
||||||
@ -259,9 +260,9 @@ func (ac *accountCache) scan() ([]accounts.Account, error) {
|
|||||||
addr := common.HexToAddress(keyJSON.Address)
|
addr := common.HexToAddress(keyJSON.Address)
|
||||||
switch {
|
switch {
|
||||||
case err != nil:
|
case err != nil:
|
||||||
glog.V(logger.Debug).Infof("can't decode key %s: %v", path, err)
|
logger.Debug("Failed to decode keystore key", "error", err)
|
||||||
case (addr == common.Address{}):
|
case (addr == common.Address{}):
|
||||||
glog.V(logger.Debug).Infof("can't decode key %s: missing or zero address", path)
|
logger.Debug("Failed to decode keystore key", "error", "missing or zero address")
|
||||||
default:
|
default:
|
||||||
addrs = append(addrs, accounts.Account{Address: addr, URL: accounts.URL{Scheme: KeyStoreScheme, Path: path}})
|
addrs = append(addrs, accounts.Account{Address: addr, URL: accounts.URL{Scheme: KeyStoreScheme, Path: path}})
|
||||||
}
|
}
|
||||||
|
@ -21,8 +21,7 @@ package keystore
|
|||||||
import (
|
import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/rjeczalik/notify"
|
"github.com/rjeczalik/notify"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -64,15 +63,16 @@ func (w *watcher) loop() {
|
|||||||
w.starting = false
|
w.starting = false
|
||||||
w.ac.mu.Unlock()
|
w.ac.mu.Unlock()
|
||||||
}()
|
}()
|
||||||
|
logger := log.New("path", w.ac.keydir)
|
||||||
|
|
||||||
err := notify.Watch(w.ac.keydir, w.ev, notify.All)
|
if err := notify.Watch(w.ac.keydir, w.ev, notify.All); err != nil {
|
||||||
if err != nil {
|
logger.Trace("Failed to watch keystore folder", "error", err)
|
||||||
glog.V(logger.Detail).Infof("can't watch %s: %v", w.ac.keydir, err)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
defer notify.Stop(w.ev)
|
defer notify.Stop(w.ev)
|
||||||
glog.V(logger.Detail).Infof("now watching %s", w.ac.keydir)
|
|
||||||
defer glog.V(logger.Detail).Infof("no longer watching %s", w.ac.keydir)
|
logger.Trace("Started watching keystore folder")
|
||||||
|
defer logger.Trace("Stopped watching keystore folder")
|
||||||
|
|
||||||
w.ac.mu.Lock()
|
w.ac.mu.Lock()
|
||||||
w.running = true
|
w.running = true
|
||||||
|
@ -27,6 +27,7 @@ import (
|
|||||||
|
|
||||||
"github.com/ethereum/go-ethereum/accounts"
|
"github.com/ethereum/go-ethereum/accounts"
|
||||||
"github.com/ethereum/go-ethereum/event"
|
"github.com/ethereum/go-ethereum/event"
|
||||||
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/karalabe/hid"
|
"github.com/karalabe/hid"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -120,7 +121,7 @@ func (hub *LedgerHub) refreshWallets() {
|
|||||||
}
|
}
|
||||||
// If there are no more wallets or the device is before the next, wrap new wallet
|
// If there are no more wallets or the device is before the next, wrap new wallet
|
||||||
if len(hub.wallets) == 0 || hub.wallets[0].URL().Cmp(url) > 0 {
|
if len(hub.wallets) == 0 || hub.wallets[0].URL().Cmp(url) > 0 {
|
||||||
wallet := &ledgerWallet{url: &url, info: ledger}
|
wallet := &ledgerWallet{url: &url, info: ledger, logger: log.New("url", url)}
|
||||||
|
|
||||||
events = append(events, accounts.WalletEvent{Wallet: wallet, Arrive: true})
|
events = append(events, accounts.WalletEvent{Wallet: wallet, Arrive: true})
|
||||||
wallets = append(wallets, wallet)
|
wallets = append(wallets, wallet)
|
||||||
|
@ -33,9 +33,9 @@ import (
|
|||||||
ethereum "github.com/ethereum/go-ethereum"
|
ethereum "github.com/ethereum/go-ethereum"
|
||||||
"github.com/ethereum/go-ethereum/accounts"
|
"github.com/ethereum/go-ethereum/accounts"
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"github.com/karalabe/hid"
|
"github.com/karalabe/hid"
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
@ -123,6 +123,8 @@ type ledgerWallet struct {
|
|||||||
// must only ever hold a *read* lock to stateLock.
|
// must only ever hold a *read* lock to stateLock.
|
||||||
commsLock chan struct{} // Mutex (buf=1) for the USB comms without keeping the state locked
|
commsLock chan struct{} // Mutex (buf=1) for the USB comms without keeping the state locked
|
||||||
stateLock sync.RWMutex // Protects read and write access to the wallet struct fields
|
stateLock sync.RWMutex // Protects read and write access to the wallet struct fields
|
||||||
|
|
||||||
|
logger log.Logger // Contextual logger to tag the ledger with its id
|
||||||
}
|
}
|
||||||
|
|
||||||
// URL implements accounts.Wallet, returning the URL of the Ledger device.
|
// URL implements accounts.Wallet, returning the URL of the Ledger device.
|
||||||
@ -220,8 +222,8 @@ func (w *ledgerWallet) Open(passphrase string) error {
|
|||||||
// - libusb on Windows doesn't support hotplug, so we can't detect USB unplugs
|
// - libusb on Windows doesn't support hotplug, so we can't detect USB unplugs
|
||||||
// - communication timeout on the Ledger requires a device power cycle to fix
|
// - communication timeout on the Ledger requires a device power cycle to fix
|
||||||
func (w *ledgerWallet) heartbeat() {
|
func (w *ledgerWallet) heartbeat() {
|
||||||
glog.V(logger.Debug).Infof("%s health-check started", w.url.String())
|
w.logger.Debug("Ledger health-check started")
|
||||||
defer glog.V(logger.Debug).Infof("%s health-check stopped", w.url.String())
|
defer w.logger.Debug("Ledger health-check stopped")
|
||||||
|
|
||||||
// Execute heartbeat checks until termination or error
|
// Execute heartbeat checks until termination or error
|
||||||
var (
|
var (
|
||||||
@ -260,7 +262,7 @@ func (w *ledgerWallet) heartbeat() {
|
|||||||
}
|
}
|
||||||
// In case of error, wait for termination
|
// In case of error, wait for termination
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Debug).Infof("%s health-check failed: %v", w.url.String(), err)
|
w.logger.Debug("Ledger health-check failed", "error", err)
|
||||||
errc = <-w.healthQuit
|
errc = <-w.healthQuit
|
||||||
}
|
}
|
||||||
errc <- err
|
errc <- err
|
||||||
@ -348,8 +350,8 @@ func (w *ledgerWallet) Accounts() []accounts.Account {
|
|||||||
// selfDerive is an account derivation loop that upon request attempts to find
|
// selfDerive is an account derivation loop that upon request attempts to find
|
||||||
// new non-zero accounts.
|
// new non-zero accounts.
|
||||||
func (w *ledgerWallet) selfDerive() {
|
func (w *ledgerWallet) selfDerive() {
|
||||||
glog.V(logger.Debug).Infof("%s self-derivation started", w.url.String())
|
w.logger.Debug("Ledger self-derivation started")
|
||||||
defer glog.V(logger.Debug).Infof("%s self-derivation stopped", w.url.String())
|
defer w.logger.Debug("Ledger self-derivation stopped")
|
||||||
|
|
||||||
// Execute self-derivations until termination or error
|
// Execute self-derivations until termination or error
|
||||||
var (
|
var (
|
||||||
@ -394,7 +396,7 @@ func (w *ledgerWallet) selfDerive() {
|
|||||||
// Retrieve the next derived Ethereum account
|
// Retrieve the next derived Ethereum account
|
||||||
if nextAddr == (common.Address{}) {
|
if nextAddr == (common.Address{}) {
|
||||||
if nextAddr, err = w.ledgerDerive(nextPath); err != nil {
|
if nextAddr, err = w.ledgerDerive(nextPath); err != nil {
|
||||||
glog.V(logger.Warn).Infof("%s self-derivation failed: %v", w.url.String(), err)
|
w.logger.Warn("Ledger account derivation failed", "error", err)
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -405,12 +407,12 @@ func (w *ledgerWallet) selfDerive() {
|
|||||||
)
|
)
|
||||||
balance, err = w.deriveChain.BalanceAt(context, nextAddr, nil)
|
balance, err = w.deriveChain.BalanceAt(context, nextAddr, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Warn).Infof("%s self-derivation balance retrieval failed: %v", w.url.String(), err)
|
w.logger.Warn("Ledger balance retrieval failed", "error", err)
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
nonce, err = w.deriveChain.NonceAt(context, nextAddr, nil)
|
nonce, err = w.deriveChain.NonceAt(context, nextAddr, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Warn).Infof("%s self-derivation nonce retrieval failed: %v", w.url.String(), err)
|
w.logger.Warn("Ledger nonce retrieval failed", "error", err)
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
// If the next account is empty, stop self-derivation, but add it nonetheless
|
// If the next account is empty, stop self-derivation, but add it nonetheless
|
||||||
@ -430,7 +432,7 @@ func (w *ledgerWallet) selfDerive() {
|
|||||||
|
|
||||||
// Display a log message to the user for new (or previously empty accounts)
|
// Display a log message to the user for new (or previously empty accounts)
|
||||||
if _, known := w.paths[nextAddr]; !known || (!empty && nextAddr == w.deriveNextAddr) {
|
if _, known := w.paths[nextAddr]; !known || (!empty && nextAddr == w.deriveNextAddr) {
|
||||||
glog.V(logger.Info).Infof("%s discovered %s (balance %22v, nonce %4d) at %s", w.url.String(), nextAddr.Hex(), balance, nonce, path)
|
w.logger.Info("Ledger discovered new account", "address", nextAddr.Hex(), "path", path, "balance", balance, "nonce", nonce)
|
||||||
}
|
}
|
||||||
// Fetch the next potential account
|
// Fetch the next potential account
|
||||||
if !empty {
|
if !empty {
|
||||||
@ -469,7 +471,7 @@ func (w *ledgerWallet) selfDerive() {
|
|||||||
}
|
}
|
||||||
// In case of error, wait for termination
|
// In case of error, wait for termination
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Debug).Infof("%s self-derivation failed: %s", w.url.String(), err)
|
w.logger.Debug("Ledger self-derivation failed", "error", err)
|
||||||
errc = <-w.deriveQuit
|
errc = <-w.deriveQuit
|
||||||
}
|
}
|
||||||
errc <- err
|
errc <- err
|
||||||
@ -849,9 +851,7 @@ func (w *ledgerWallet) ledgerExchange(opcode ledgerOpcode, p1 ledgerParam1, p2 l
|
|||||||
apdu = nil
|
apdu = nil
|
||||||
}
|
}
|
||||||
// Send over to the device
|
// Send over to the device
|
||||||
if glog.V(logger.Detail) {
|
w.logger.Trace("Data chunk sent to the Ledger", "chunk", hexutil.Bytes(chunk))
|
||||||
glog.Infof("-> %s: %x", w.device.Path, chunk)
|
|
||||||
}
|
|
||||||
if _, err := w.device.Write(chunk); err != nil {
|
if _, err := w.device.Write(chunk); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -864,9 +864,8 @@ func (w *ledgerWallet) ledgerExchange(opcode ledgerOpcode, p1 ledgerParam1, p2 l
|
|||||||
if _, err := io.ReadFull(w.device, chunk); err != nil {
|
if _, err := io.ReadFull(w.device, chunk); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if glog.V(logger.Detail) {
|
w.logger.Trace("Data chunk received from the Ledger", "chunk", hexutil.Bytes(chunk))
|
||||||
glog.Infof("<- %s: %x", w.device.Path, chunk)
|
|
||||||
}
|
|
||||||
// Make sure the transport header matches
|
// Make sure the transport header matches
|
||||||
if chunk[0] != 0x01 || chunk[1] != 0x01 || chunk[2] != 0x05 {
|
if chunk[0] != 0x01 || chunk[1] != 0x01 || chunk[2] != 0x05 {
|
||||||
return nil, errReplyInvalidHeader
|
return nil, errReplyInvalidHeader
|
||||||
|
@ -47,7 +47,7 @@ var (
|
|||||||
// boring stuff
|
// boring stuff
|
||||||
"vendor/", "tests/files/", "build/",
|
"vendor/", "tests/files/", "build/",
|
||||||
// don't relicense vendored sources
|
// don't relicense vendored sources
|
||||||
"crypto/sha3/", "crypto/ecies/", "logger/glog/",
|
"crypto/sha3/", "crypto/ecies/", "log/",
|
||||||
"crypto/secp256k1/curve.go",
|
"crypto/secp256k1/curve.go",
|
||||||
// don't license generated files
|
// don't license generated files
|
||||||
"contracts/chequebook/contract/",
|
"contracts/chequebook/contract/",
|
||||||
|
@ -25,7 +25,7 @@ import (
|
|||||||
|
|
||||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||||
"github.com/ethereum/go-ethereum/p2p/discv5"
|
"github.com/ethereum/go-ethereum/p2p/discv5"
|
||||||
"github.com/ethereum/go-ethereum/p2p/nat"
|
"github.com/ethereum/go-ethereum/p2p/nat"
|
||||||
@ -42,15 +42,19 @@ func main() {
|
|||||||
natdesc = flag.String("nat", "none", "port mapping mechanism (any|none|upnp|pmp|extip:<IP>)")
|
natdesc = flag.String("nat", "none", "port mapping mechanism (any|none|upnp|pmp|extip:<IP>)")
|
||||||
netrestrict = flag.String("netrestrict", "", "restrict network communication to the given IP networks (CIDR masks)")
|
netrestrict = flag.String("netrestrict", "", "restrict network communication to the given IP networks (CIDR masks)")
|
||||||
runv5 = flag.Bool("v5", false, "run a v5 topic discovery bootnode")
|
runv5 = flag.Bool("v5", false, "run a v5 topic discovery bootnode")
|
||||||
|
verbosity = flag.Int("verbosity", int(log.LvlInfo), "log verbosity (0-9)")
|
||||||
|
vmodule = flag.String("vmodule", "", "log verbosity pattern")
|
||||||
|
|
||||||
nodeKey *ecdsa.PrivateKey
|
nodeKey *ecdsa.PrivateKey
|
||||||
err error
|
err error
|
||||||
)
|
)
|
||||||
flag.Var(glog.GetVerbosity(), "verbosity", "log verbosity (0-9)")
|
|
||||||
flag.Var(glog.GetVModule(), "vmodule", "log verbosity pattern")
|
|
||||||
glog.SetToStderr(true)
|
|
||||||
flag.Parse()
|
flag.Parse()
|
||||||
|
|
||||||
|
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat()))
|
||||||
|
glogger.Verbosity(log.Lvl(*verbosity))
|
||||||
|
glogger.Vmodule(*vmodule)
|
||||||
|
log.Root().SetHandler(glogger)
|
||||||
|
|
||||||
natm, err := nat.Parse(*natdesc)
|
natm, err := nat.Parse(*natdesc)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatalf("-nat: %v", err)
|
utils.Fatalf("-nat: %v", err)
|
||||||
|
@ -25,7 +25,7 @@ import (
|
|||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/tests"
|
"github.com/ethereum/go-ethereum/tests"
|
||||||
"gopkg.in/urfave/cli.v1"
|
"gopkg.in/urfave/cli.v1"
|
||||||
@ -70,7 +70,8 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func runTestWithReader(test string, r io.Reader) error {
|
func runTestWithReader(test string, r io.Reader) error {
|
||||||
glog.Infoln("runTest", test)
|
log.Info("Running test", "test", test)
|
||||||
|
|
||||||
var err error
|
var err error
|
||||||
switch strings.ToLower(test) {
|
switch strings.ToLower(test) {
|
||||||
case "bk", "block", "blocktest", "blockchaintest", "blocktests", "blockchaintests":
|
case "bk", "block", "blocktest", "blockchaintest", "blocktests", "blockchaintests":
|
||||||
@ -92,7 +93,8 @@ func runTestWithReader(test string, r io.Reader) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func getFiles(path string) ([]string, error) {
|
func getFiles(path string) ([]string, error) {
|
||||||
glog.Infoln("getFiles", path)
|
log.Info("Listing files", "path", path)
|
||||||
|
|
||||||
var files []string
|
var files []string
|
||||||
f, err := os.Open(path)
|
f, err := os.Open(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -113,7 +115,7 @@ func getFiles(path string) ([]string, error) {
|
|||||||
// only go 1 depth and leave directory entires blank
|
// only go 1 depth and leave directory entires blank
|
||||||
if !v.IsDir() && v.Name()[len(v.Name())-len(testExtension):len(v.Name())] == testExtension {
|
if !v.IsDir() && v.Name()[len(v.Name())-len(testExtension):len(v.Name())] == testExtension {
|
||||||
files[i] = filepath.Join(path, v.Name())
|
files[i] = filepath.Join(path, v.Name())
|
||||||
glog.Infoln("Found file", files[i])
|
log.Info("Found test file", "file", files[i])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
case mode.IsRegular():
|
case mode.IsRegular():
|
||||||
@ -134,7 +136,9 @@ func runSuite(test, file string) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, curTest := range tests {
|
for _, curTest := range tests {
|
||||||
glog.Infoln("runSuite", curTest, file)
|
suiteLogger := log.New("suite", file, "test", curTest)
|
||||||
|
suiteLogger.Info("Running test suite")
|
||||||
|
|
||||||
var err error
|
var err error
|
||||||
var files []string
|
var files []string
|
||||||
if test == defaultTest {
|
if test == defaultTest {
|
||||||
@ -149,30 +153,31 @@ func runSuite(test, file string) {
|
|||||||
files, err = getFiles(file)
|
files, err = getFiles(file)
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.Fatalln(err)
|
suiteLogger.Crit("Failed to gather files", "error", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(files) == 0 {
|
if len(files) == 0 {
|
||||||
glog.Warningln("No files matched path")
|
suiteLogger.Warn("No files matched path")
|
||||||
}
|
}
|
||||||
for _, curFile := range files {
|
for _, curFile := range files {
|
||||||
// Skip blank entries
|
// Skip blank entries
|
||||||
if len(curFile) == 0 {
|
if len(curFile) == 0 {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
testLogger := suiteLogger.New("file", curFile)
|
||||||
|
|
||||||
r, err := os.Open(curFile)
|
r, err := os.Open(curFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.Fatalln(err)
|
testLogger.Crit("Failed to open file")
|
||||||
}
|
}
|
||||||
defer r.Close()
|
defer r.Close()
|
||||||
|
|
||||||
err = runTestWithReader(curTest, r)
|
err = runTestWithReader(curTest, r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if continueOnError {
|
if continueOnError {
|
||||||
glog.Errorln(err)
|
testLogger.Error("Test failed, continuing", "error", err)
|
||||||
} else {
|
} else {
|
||||||
glog.Fatalln(err)
|
testLogger.Crit("Test failed, aborting", "error", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -189,15 +194,13 @@ func setupApp(c *cli.Context) error {
|
|||||||
if !useStdIn {
|
if !useStdIn {
|
||||||
runSuite(flagTest, flagFile)
|
runSuite(flagTest, flagFile)
|
||||||
} else {
|
} else {
|
||||||
if err := runTestWithReader(flagTest, os.Stdin); err != nil {
|
return runTestWithReader(flagTest, os.Stdin)
|
||||||
glog.Fatalln(err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
glog.SetToStderr(true)
|
log.Root().SetHandler(log.StreamHandler(os.Stderr, log.TerminalFormat()))
|
||||||
|
|
||||||
app := cli.NewApp()
|
app := cli.NewApp()
|
||||||
app.Name = "ethtest"
|
app.Name = "ethtest"
|
||||||
@ -216,7 +219,6 @@ func main() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if err := app.Run(os.Args); err != nil {
|
if err := app.Run(os.Args); err != nil {
|
||||||
glog.Fatalln(err)
|
log.Crit("Failed to run the tester", "error", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -30,7 +30,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/core/vm"
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
"github.com/ethereum/go-ethereum/core/vm/runtime"
|
"github.com/ethereum/go-ethereum/core/vm/runtime"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"gopkg.in/urfave/cli.v1"
|
"gopkg.in/urfave/cli.v1"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -111,8 +111,9 @@ func init() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func run(ctx *cli.Context) error {
|
func run(ctx *cli.Context) error {
|
||||||
glog.SetToStderr(true)
|
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat()))
|
||||||
glog.SetV(ctx.GlobalInt(VerbosityFlag.Name))
|
glogger.Verbosity(log.Lvl(ctx.GlobalInt(VerbosityFlag.Name)))
|
||||||
|
log.Root().SetHandler(glogger)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
db, _ = ethdb.NewMemDatabase()
|
db, _ = ethdb.NewMemDatabase()
|
||||||
|
@ -25,8 +25,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||||
"github.com/ethereum/go-ethereum/console"
|
"github.com/ethereum/go-ethereum/console"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"gopkg.in/urfave/cli.v1"
|
"gopkg.in/urfave/cli.v1"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -203,11 +202,11 @@ func unlockAccount(ctx *cli.Context, ks *keystore.KeyStore, address string, i in
|
|||||||
password := getPassPhrase(prompt, false, i, passwords)
|
password := getPassPhrase(prompt, false, i, passwords)
|
||||||
err = ks.Unlock(account, password)
|
err = ks.Unlock(account, password)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
glog.V(logger.Info).Infof("Unlocked account %x", account.Address)
|
log.Info("Unlocked account", "address", account.Address.Hex())
|
||||||
return account, password
|
return account, password
|
||||||
}
|
}
|
||||||
if err, ok := err.(*keystore.AmbiguousAddrError); ok {
|
if err, ok := err.(*keystore.AmbiguousAddrError); ok {
|
||||||
glog.V(logger.Info).Infof("Unlocked account %x", account.Address)
|
log.Info("Unlocked account", "address", account.Address.Hex())
|
||||||
return ambiguousAddrRecovery(ks, err, password), password
|
return ambiguousAddrRecovery(ks, err, password), password
|
||||||
}
|
}
|
||||||
if err != keystore.ErrDecrypt {
|
if err != keystore.ErrDecrypt {
|
||||||
@ -217,6 +216,7 @@ func unlockAccount(ctx *cli.Context, ks *keystore.KeyStore, address string, i in
|
|||||||
}
|
}
|
||||||
// All trials expended to unlock account, bail out
|
// All trials expended to unlock account, bail out
|
||||||
utils.Fatalf("Failed to unlock account %s (%v)", address, err)
|
utils.Fatalf("Failed to unlock account %s (%v)", address, err)
|
||||||
|
|
||||||
return accounts.Account{}, ""
|
return accounts.Account{}, ""
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -145,7 +145,8 @@ Passphrase: {{.InputLine "foobar"}}
|
|||||||
geth.expectExit()
|
geth.expectExit()
|
||||||
|
|
||||||
wantMessages := []string{
|
wantMessages := []string{
|
||||||
"Unlocked account f466859ead1932d743d622cb74fc058882e8648a",
|
"Unlocked account",
|
||||||
|
"=0xf466859ead1932d743d622cb74fc058882e8648a",
|
||||||
}
|
}
|
||||||
for _, m := range wantMessages {
|
for _, m := range wantMessages {
|
||||||
if !strings.Contains(geth.stderrText(), m) {
|
if !strings.Contains(geth.stderrText(), m) {
|
||||||
@ -189,8 +190,9 @@ Passphrase: {{.InputLine "foobar"}}
|
|||||||
geth.expectExit()
|
geth.expectExit()
|
||||||
|
|
||||||
wantMessages := []string{
|
wantMessages := []string{
|
||||||
"Unlocked account 7ef5a6135f1fd6a02593eedc869c6d41d934aef8",
|
"Unlocked account",
|
||||||
"Unlocked account 289d485d9771714cce91d3393d764e1311907acc",
|
"=0x7ef5a6135f1fd6a02593eedc869c6d41d934aef8",
|
||||||
|
"=0x289d485d9771714cce91d3393d764e1311907acc",
|
||||||
}
|
}
|
||||||
for _, m := range wantMessages {
|
for _, m := range wantMessages {
|
||||||
if !strings.Contains(geth.stderrText(), m) {
|
if !strings.Contains(geth.stderrText(), m) {
|
||||||
@ -208,8 +210,9 @@ func TestUnlockFlagPasswordFile(t *testing.T) {
|
|||||||
geth.expectExit()
|
geth.expectExit()
|
||||||
|
|
||||||
wantMessages := []string{
|
wantMessages := []string{
|
||||||
"Unlocked account 7ef5a6135f1fd6a02593eedc869c6d41d934aef8",
|
"Unlocked account",
|
||||||
"Unlocked account 289d485d9771714cce91d3393d764e1311907acc",
|
"=0x7ef5a6135f1fd6a02593eedc869c6d41d934aef8",
|
||||||
|
"=0x289d485d9771714cce91d3393d764e1311907acc",
|
||||||
}
|
}
|
||||||
for _, m := range wantMessages {
|
for _, m := range wantMessages {
|
||||||
if !strings.Contains(geth.stderrText(), m) {
|
if !strings.Contains(geth.stderrText(), m) {
|
||||||
@ -257,7 +260,8 @@ In order to avoid this warning, you need to remove the following duplicate key f
|
|||||||
geth.expectExit()
|
geth.expectExit()
|
||||||
|
|
||||||
wantMessages := []string{
|
wantMessages := []string{
|
||||||
"Unlocked account f466859ead1932d743d622cb74fc058882e8648a",
|
"Unlocked account",
|
||||||
|
"=0xf466859ead1932d743d622cb74fc058882e8648a",
|
||||||
}
|
}
|
||||||
for _, m := range wantMessages {
|
for _, m := range wantMessages {
|
||||||
if !strings.Contains(geth.stderrText(), m) {
|
if !strings.Contains(geth.stderrText(), m) {
|
||||||
|
@ -32,8 +32,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/core/state"
|
"github.com/ethereum/go-ethereum/core/state"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/trie"
|
"github.com/ethereum/go-ethereum/trie"
|
||||||
"github.com/syndtr/goleveldb/leveldb/util"
|
"github.com/syndtr/goleveldb/leveldb/util"
|
||||||
"gopkg.in/urfave/cli.v1"
|
"gopkg.in/urfave/cli.v1"
|
||||||
@ -129,7 +128,7 @@ func initGenesis(ctx *cli.Context) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatalf("failed to write genesis block: %v", err)
|
utils.Fatalf("failed to write genesis block: %v", err)
|
||||||
}
|
}
|
||||||
glog.V(logger.Info).Infof("successfully wrote genesis block and/or chain rule set: %x", block.Hash())
|
log.Info(fmt.Sprintf("successfully wrote genesis block and/or chain rule set: %x", block.Hash()))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -257,7 +256,7 @@ func removeDB(ctx *cli.Context) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func upgradeDB(ctx *cli.Context) error {
|
func upgradeDB(ctx *cli.Context) error {
|
||||||
glog.Infoln("Upgrading blockchain database")
|
log.Info(fmt.Sprint("Upgrading blockchain database"))
|
||||||
|
|
||||||
stack := utils.MakeNode(ctx, clientIdentifier, gitCommit)
|
stack := utils.MakeNode(ctx, clientIdentifier, gitCommit)
|
||||||
chain, chainDb := utils.MakeChain(ctx, stack)
|
chain, chainDb := utils.MakeChain(ctx, stack)
|
||||||
@ -286,7 +285,7 @@ func upgradeDB(ctx *cli.Context) error {
|
|||||||
utils.Fatalf("Import error %v (a backup is made in %s, use the import command to import it)", err, exportFile)
|
utils.Fatalf("Import error %v (a backup is made in %s, use the import command to import it)", err, exportFile)
|
||||||
} else {
|
} else {
|
||||||
os.Remove(exportFile)
|
os.Remove(exportFile)
|
||||||
glog.Infoln("Import finished")
|
log.Info(fmt.Sprint("Import finished"))
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -34,8 +34,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/eth"
|
"github.com/ethereum/go-ethereum/eth"
|
||||||
"github.com/ethereum/go-ethereum/ethclient"
|
"github.com/ethereum/go-ethereum/ethclient"
|
||||||
"github.com/ethereum/go-ethereum/internal/debug"
|
"github.com/ethereum/go-ethereum/internal/debug"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/metrics"
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
"github.com/ethereum/go-ethereum/node"
|
"github.com/ethereum/go-ethereum/node"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
@ -204,11 +203,11 @@ func makeFullNode(ctx *cli.Context) *node.Node {
|
|||||||
}{uint(params.VersionMajor<<16 | params.VersionMinor<<8 | params.VersionPatch), clientIdentifier, runtime.Version(), runtime.GOOS}
|
}{uint(params.VersionMajor<<16 | params.VersionMinor<<8 | params.VersionPatch), clientIdentifier, runtime.Version(), runtime.GOOS}
|
||||||
extra, err := rlp.EncodeToBytes(clientInfo)
|
extra, err := rlp.EncodeToBytes(clientInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Warn).Infoln("error setting canonical miner information:", err)
|
log.Warn(fmt.Sprint("error setting canonical miner information:", err))
|
||||||
}
|
}
|
||||||
if uint64(len(extra)) > params.MaximumExtraDataSize {
|
if uint64(len(extra)) > params.MaximumExtraDataSize {
|
||||||
glog.V(logger.Warn).Infoln("error setting canonical miner information: extra exceeds", params.MaximumExtraDataSize)
|
log.Warn(fmt.Sprint("error setting canonical miner information: extra exceeds", params.MaximumExtraDataSize))
|
||||||
glog.V(logger.Debug).Infof("extra: %x\n", extra)
|
log.Debug(fmt.Sprintf("extra: %x\n", extra))
|
||||||
extra = nil
|
extra = nil
|
||||||
}
|
}
|
||||||
stack := utils.MakeNode(ctx, clientIdentifier, gitCommit)
|
stack := utils.MakeNode(ctx, clientIdentifier, gitCommit)
|
||||||
@ -273,7 +272,7 @@ func startNode(ctx *cli.Context, stack *node.Node) {
|
|||||||
// Open and self derive any wallets already attached
|
// Open and self derive any wallets already attached
|
||||||
for _, wallet := range stack.AccountManager().Wallets() {
|
for _, wallet := range stack.AccountManager().Wallets() {
|
||||||
if err := wallet.Open(""); err != nil {
|
if err := wallet.Open(""); err != nil {
|
||||||
glog.V(logger.Warn).Infof("Failed to open wallet %s: %v", wallet.URL(), err)
|
log.Warn(fmt.Sprintf("Failed to open wallet %s: %v", wallet.URL(), err))
|
||||||
} else {
|
} else {
|
||||||
wallet.SelfDerive(accounts.DefaultBaseDerivationPath, stateReader)
|
wallet.SelfDerive(accounts.DefaultBaseDerivationPath, stateReader)
|
||||||
}
|
}
|
||||||
@ -282,13 +281,13 @@ func startNode(ctx *cli.Context, stack *node.Node) {
|
|||||||
for event := range events {
|
for event := range events {
|
||||||
if event.Arrive {
|
if event.Arrive {
|
||||||
if err := event.Wallet.Open(""); err != nil {
|
if err := event.Wallet.Open(""); err != nil {
|
||||||
glog.V(logger.Info).Infof("New wallet appeared: %s, failed to open: %s", event.Wallet.URL(), err)
|
log.Info(fmt.Sprintf("New wallet appeared: %s, failed to open: %s", event.Wallet.URL(), err))
|
||||||
} else {
|
} else {
|
||||||
glog.V(logger.Info).Infof("New wallet appeared: %s, %s", event.Wallet.URL(), event.Wallet.Status())
|
log.Info(fmt.Sprintf("New wallet appeared: %s, %s", event.Wallet.URL(), event.Wallet.Status()))
|
||||||
event.Wallet.SelfDerive(accounts.DefaultBaseDerivationPath, stateReader)
|
event.Wallet.SelfDerive(accounts.DefaultBaseDerivationPath, stateReader)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
glog.V(logger.Info).Infof("Old wallet dropped: %s", event.Wallet.URL())
|
log.Info(fmt.Sprintf("Old wallet dropped: %s", event.Wallet.URL()))
|
||||||
event.Wallet.Close()
|
event.Wallet.Close()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -19,7 +19,7 @@ package main
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"flag"
|
"flag"
|
||||||
"log"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
|
|
||||||
@ -27,7 +27,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/eth"
|
"github.com/ethereum/go-ethereum/eth"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/node"
|
"github.com/ethereum/go-ethereum/node"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/tests"
|
"github.com/ethereum/go-ethereum/tests"
|
||||||
@ -46,35 +46,34 @@ func main() {
|
|||||||
flag.Parse()
|
flag.Parse()
|
||||||
|
|
||||||
// Enable logging errors, we really do want to see those
|
// Enable logging errors, we really do want to see those
|
||||||
glog.SetV(2)
|
log.Root().SetHandler(log.LvlFilterHandler(log.LvlError, log.StreamHandler(os.Stderr, log.TerminalFormat())))
|
||||||
glog.SetToStderr(true)
|
|
||||||
|
|
||||||
// Load the test suite to run the RPC against
|
// Load the test suite to run the RPC against
|
||||||
tests, err := tests.LoadBlockTests(*testFile)
|
tests, err := tests.LoadBlockTests(*testFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("Failed to load test suite: %v", err)
|
log.Crit(fmt.Sprintf("Failed to load test suite: %v", err))
|
||||||
}
|
}
|
||||||
test, found := tests[*testName]
|
test, found := tests[*testName]
|
||||||
if !found {
|
if !found {
|
||||||
log.Fatalf("Requested test (%s) not found within suite", *testName)
|
log.Crit(fmt.Sprintf("Requested test (%s) not found within suite", *testName))
|
||||||
}
|
}
|
||||||
|
|
||||||
stack, err := MakeSystemNode(*testKey, test)
|
stack, err := MakeSystemNode(*testKey, test)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("Failed to assemble test stack: %v", err)
|
log.Crit(fmt.Sprintf("Failed to assemble test stack: %v", err))
|
||||||
}
|
}
|
||||||
if err := stack.Start(); err != nil {
|
if err := stack.Start(); err != nil {
|
||||||
log.Fatalf("Failed to start test node: %v", err)
|
log.Crit(fmt.Sprintf("Failed to start test node: %v", err))
|
||||||
}
|
}
|
||||||
defer stack.Stop()
|
defer stack.Stop()
|
||||||
|
|
||||||
log.Println("Test node started...")
|
log.Info("Test node started...")
|
||||||
|
|
||||||
// Make sure the tests contained within the suite pass
|
// Make sure the tests contained within the suite pass
|
||||||
if err := RunTest(stack, test); err != nil {
|
if err := RunTest(stack, test); err != nil {
|
||||||
log.Fatalf("Failed to run the pre-configured test: %v", err)
|
log.Crit(fmt.Sprintf("Failed to run the pre-configured test: %v", err))
|
||||||
}
|
}
|
||||||
log.Println("Initial test suite passed...")
|
log.Info("Initial test suite passed...")
|
||||||
|
|
||||||
quit := make(chan os.Signal, 1)
|
quit := make(chan os.Signal, 1)
|
||||||
signal.Notify(quit, os.Interrupt)
|
signal.Notify(quit, os.Interrupt)
|
||||||
|
@ -35,8 +35,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/ethclient"
|
"github.com/ethereum/go-ethereum/ethclient"
|
||||||
"github.com/ethereum/go-ethereum/internal/debug"
|
"github.com/ethereum/go-ethereum/internal/debug"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/node"
|
"github.com/ethereum/go-ethereum/node"
|
||||||
"github.com/ethereum/go-ethereum/p2p"
|
"github.com/ethereum/go-ethereum/p2p"
|
||||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||||
@ -278,7 +277,7 @@ func bzzd(ctx *cli.Context) error {
|
|||||||
signal.Notify(sigc, syscall.SIGTERM)
|
signal.Notify(sigc, syscall.SIGTERM)
|
||||||
defer signal.Stop(sigc)
|
defer signal.Stop(sigc)
|
||||||
<-sigc
|
<-sigc
|
||||||
glog.V(logger.Info).Infoln("Got sigterm, shutting down...")
|
log.Info(fmt.Sprint("Got sigterm, shutting down..."))
|
||||||
stack.Stop()
|
stack.Stop()
|
||||||
}()
|
}()
|
||||||
networkId := ctx.GlobalUint64(SwarmNetworkIdFlag.Name)
|
networkId := ctx.GlobalUint64(SwarmNetworkIdFlag.Name)
|
||||||
@ -343,7 +342,7 @@ func getAccount(ctx *cli.Context, stack *node.Node) *ecdsa.PrivateKey {
|
|||||||
}
|
}
|
||||||
// Try to load the arg as a hex key file.
|
// Try to load the arg as a hex key file.
|
||||||
if key, err := crypto.LoadECDSA(keyid); err == nil {
|
if key, err := crypto.LoadECDSA(keyid); err == nil {
|
||||||
glog.V(logger.Info).Infof("swarm account key loaded: %#x", crypto.PubkeyToAddress(key.PublicKey))
|
log.Info(fmt.Sprintf("swarm account key loaded: %#x", crypto.PubkeyToAddress(key.PublicKey)))
|
||||||
return key
|
return key
|
||||||
}
|
}
|
||||||
// Otherwise try getting it from the keystore.
|
// Otherwise try getting it from the keystore.
|
||||||
@ -400,7 +399,7 @@ func injectBootnodes(srv *p2p.Server, nodes []string) {
|
|||||||
for _, url := range nodes {
|
for _, url := range nodes {
|
||||||
n, err := discover.ParseNode(url)
|
n, err := discover.ParseNode(url)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.Errorf("invalid bootnode %q", err)
|
log.Error(fmt.Sprintf("invalid bootnode %q", err))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
srv.AddPeer(n)
|
srv.AddPeer(n)
|
||||||
|
@ -18,13 +18,14 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"gopkg.in/urfave/cli.v1"
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
"mime"
|
"mime"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
"fmt"
|
|
||||||
"encoding/json"
|
"gopkg.in/urfave/cli.v1"
|
||||||
)
|
)
|
||||||
|
|
||||||
func add(ctx *cli.Context) {
|
func add(ctx *cli.Context) {
|
||||||
@ -35,23 +36,22 @@ func add(ctx *cli.Context) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
mhash = args[0]
|
mhash = args[0]
|
||||||
path = args[1]
|
path = args[1]
|
||||||
hash = args[2]
|
hash = args[2]
|
||||||
|
|
||||||
ctype string
|
ctype string
|
||||||
wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
|
wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
|
||||||
mroot manifest
|
mroot manifest
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
if len(args) > 3 {
|
if len(args) > 3 {
|
||||||
ctype = args[3]
|
ctype = args[3]
|
||||||
} else {
|
} else {
|
||||||
ctype = mime.TypeByExtension(filepath.Ext(path))
|
ctype = mime.TypeByExtension(filepath.Ext(path))
|
||||||
}
|
}
|
||||||
|
|
||||||
newManifest := addEntryToManifest (ctx, mhash, path, hash, ctype)
|
newManifest := addEntryToManifest(ctx, mhash, path, hash, ctype)
|
||||||
fmt.Println(newManifest)
|
fmt.Println(newManifest)
|
||||||
|
|
||||||
if !wantManifest {
|
if !wantManifest {
|
||||||
@ -70,13 +70,13 @@ func update(ctx *cli.Context) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
mhash = args[0]
|
mhash = args[0]
|
||||||
path = args[1]
|
path = args[1]
|
||||||
hash = args[2]
|
hash = args[2]
|
||||||
|
|
||||||
ctype string
|
ctype string
|
||||||
wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
|
wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
|
||||||
mroot manifest
|
mroot manifest
|
||||||
)
|
)
|
||||||
if len(args) > 3 {
|
if len(args) > 3 {
|
||||||
ctype = args[3]
|
ctype = args[3]
|
||||||
@ -84,7 +84,7 @@ func update(ctx *cli.Context) {
|
|||||||
ctype = mime.TypeByExtension(filepath.Ext(path))
|
ctype = mime.TypeByExtension(filepath.Ext(path))
|
||||||
}
|
}
|
||||||
|
|
||||||
newManifest := updateEntryInManifest (ctx, mhash, path, hash, ctype)
|
newManifest := updateEntryInManifest(ctx, mhash, path, hash, ctype)
|
||||||
fmt.Println(newManifest)
|
fmt.Println(newManifest)
|
||||||
|
|
||||||
if !wantManifest {
|
if !wantManifest {
|
||||||
@ -102,14 +102,14 @@ func remove(ctx *cli.Context) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
mhash = args[0]
|
mhash = args[0]
|
||||||
path = args[1]
|
path = args[1]
|
||||||
|
|
||||||
wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
|
wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
|
||||||
mroot manifest
|
mroot manifest
|
||||||
)
|
)
|
||||||
|
|
||||||
newManifest := removeEntryFromManifest (ctx, mhash, path)
|
newManifest := removeEntryFromManifest(ctx, mhash, path)
|
||||||
fmt.Println(newManifest)
|
fmt.Println(newManifest)
|
||||||
|
|
||||||
if !wantManifest {
|
if !wantManifest {
|
||||||
@ -120,15 +120,15 @@ func remove(ctx *cli.Context) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func addEntryToManifest(ctx *cli.Context, mhash , path, hash , ctype string) string {
|
func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) string {
|
||||||
|
|
||||||
var (
|
var (
|
||||||
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
|
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
|
||||||
client = &client{api: bzzapi}
|
client = &client{api: bzzapi}
|
||||||
longestPathEntry = manifestEntry{
|
longestPathEntry = manifestEntry{
|
||||||
Path: "",
|
Path: "",
|
||||||
Hash: "",
|
Hash: "",
|
||||||
ContentType: "",
|
ContentType: "",
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -143,12 +143,11 @@ func addEntryToManifest(ctx *cli.Context, mhash , path, hash , ctype string) st
|
|||||||
log.Fatalln("hash to add is not present:", err)
|
log.Fatalln("hash to add is not present:", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
// See if we path is in this Manifest or do we have to dig deeper
|
// See if we path is in this Manifest or do we have to dig deeper
|
||||||
for _, entry := range mroot.Entries {
|
for _, entry := range mroot.Entries {
|
||||||
if path == entry.Path {
|
if path == entry.Path {
|
||||||
log.Fatal(path, "Already present, not adding anything")
|
log.Fatal(path, "Already present, not adding anything")
|
||||||
}else {
|
} else {
|
||||||
if entry.ContentType == "application/bzz-manifest+json" {
|
if entry.ContentType == "application/bzz-manifest+json" {
|
||||||
prfxlen := strings.HasPrefix(path, entry.Path)
|
prfxlen := strings.HasPrefix(path, entry.Path)
|
||||||
if prfxlen && len(path) > len(longestPathEntry.Path) {
|
if prfxlen && len(path) > len(longestPathEntry.Path) {
|
||||||
@ -161,7 +160,7 @@ func addEntryToManifest(ctx *cli.Context, mhash , path, hash , ctype string) st
|
|||||||
if longestPathEntry.Path != "" {
|
if longestPathEntry.Path != "" {
|
||||||
// Load the child Manifest add the entry there
|
// Load the child Manifest add the entry there
|
||||||
newPath := path[len(longestPathEntry.Path):]
|
newPath := path[len(longestPathEntry.Path):]
|
||||||
newHash := addEntryToManifest (ctx, longestPathEntry.Hash, newPath, hash, ctype)
|
newHash := addEntryToManifest(ctx, longestPathEntry.Hash, newPath, hash, ctype)
|
||||||
|
|
||||||
// Replace the hash for parent Manifests
|
// Replace the hash for parent Manifests
|
||||||
newMRoot := manifest{}
|
newMRoot := manifest{}
|
||||||
@ -182,31 +181,28 @@ func addEntryToManifest(ctx *cli.Context, mhash , path, hash , ctype string) st
|
|||||||
mroot.Entries = append(mroot.Entries, newEntry)
|
mroot.Entries = append(mroot.Entries, newEntry)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
newManifestHash, err := client.uploadManifest(mroot)
|
newManifestHash, err := client.uploadManifest(mroot)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalln("manifest upload failed:", err)
|
log.Fatalln("manifest upload failed:", err)
|
||||||
}
|
}
|
||||||
return newManifestHash
|
return newManifestHash
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func updateEntryInManifest(ctx *cli.Context, mhash , path, hash , ctype string) string {
|
func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) string {
|
||||||
|
|
||||||
var (
|
var (
|
||||||
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
|
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
|
||||||
client = &client{api: bzzapi}
|
client = &client{api: bzzapi}
|
||||||
newEntry = manifestEntry{
|
newEntry = manifestEntry{
|
||||||
Path: "",
|
Path: "",
|
||||||
Hash: "",
|
Hash: "",
|
||||||
ContentType: "",
|
ContentType: "",
|
||||||
}
|
}
|
||||||
longestPathEntry = manifestEntry{
|
longestPathEntry = manifestEntry{
|
||||||
Path: "",
|
Path: "",
|
||||||
Hash: "",
|
Hash: "",
|
||||||
ContentType: "",
|
ContentType: "",
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -217,12 +213,11 @@ func updateEntryInManifest(ctx *cli.Context, mhash , path, hash , ctype string)
|
|||||||
|
|
||||||
//TODO: check if the "hash" with which to update is valid and present in swarm
|
//TODO: check if the "hash" with which to update is valid and present in swarm
|
||||||
|
|
||||||
|
|
||||||
// See if we path is in this Manifest or do we have to dig deeper
|
// See if we path is in this Manifest or do we have to dig deeper
|
||||||
for _, entry := range mroot.Entries {
|
for _, entry := range mroot.Entries {
|
||||||
if path == entry.Path {
|
if path == entry.Path {
|
||||||
newEntry = entry
|
newEntry = entry
|
||||||
}else {
|
} else {
|
||||||
if entry.ContentType == "application/bzz-manifest+json" {
|
if entry.ContentType == "application/bzz-manifest+json" {
|
||||||
prfxlen := strings.HasPrefix(path, entry.Path)
|
prfxlen := strings.HasPrefix(path, entry.Path)
|
||||||
if prfxlen && len(path) > len(longestPathEntry.Path) {
|
if prfxlen && len(path) > len(longestPathEntry.Path) {
|
||||||
@ -239,7 +234,7 @@ func updateEntryInManifest(ctx *cli.Context, mhash , path, hash , ctype string)
|
|||||||
if longestPathEntry.Path != "" {
|
if longestPathEntry.Path != "" {
|
||||||
// Load the child Manifest add the entry there
|
// Load the child Manifest add the entry there
|
||||||
newPath := path[len(longestPathEntry.Path):]
|
newPath := path[len(longestPathEntry.Path):]
|
||||||
newHash := updateEntryInManifest (ctx, longestPathEntry.Hash, newPath, hash, ctype)
|
newHash := updateEntryInManifest(ctx, longestPathEntry.Hash, newPath, hash, ctype)
|
||||||
|
|
||||||
// Replace the hash for parent Manifests
|
// Replace the hash for parent Manifests
|
||||||
newMRoot := manifest{}
|
newMRoot := manifest{}
|
||||||
@ -271,7 +266,6 @@ func updateEntryInManifest(ctx *cli.Context, mhash , path, hash , ctype string)
|
|||||||
mroot = newMRoot
|
mroot = newMRoot
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
newManifestHash, err := client.uploadManifest(mroot)
|
newManifestHash, err := client.uploadManifest(mroot)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalln("manifest upload failed:", err)
|
log.Fatalln("manifest upload failed:", err)
|
||||||
@ -279,20 +273,20 @@ func updateEntryInManifest(ctx *cli.Context, mhash , path, hash , ctype string)
|
|||||||
return newManifestHash
|
return newManifestHash
|
||||||
}
|
}
|
||||||
|
|
||||||
func removeEntryFromManifest(ctx *cli.Context, mhash , path string) string {
|
func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string {
|
||||||
|
|
||||||
var (
|
var (
|
||||||
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
|
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
|
||||||
client = &client{api: bzzapi}
|
client = &client{api: bzzapi}
|
||||||
entryToRemove = manifestEntry{
|
entryToRemove = manifestEntry{
|
||||||
Path: "",
|
Path: "",
|
||||||
Hash: "",
|
Hash: "",
|
||||||
ContentType: "",
|
ContentType: "",
|
||||||
}
|
}
|
||||||
longestPathEntry = manifestEntry{
|
longestPathEntry = manifestEntry{
|
||||||
Path: "",
|
Path: "",
|
||||||
Hash: "",
|
Hash: "",
|
||||||
ContentType: "",
|
ContentType: "",
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -301,13 +295,11 @@ func removeEntryFromManifest(ctx *cli.Context, mhash , path string) string {
|
|||||||
log.Fatalln("manifest download failed:", err)
|
log.Fatalln("manifest download failed:", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
// See if we path is in this Manifest or do we have to dig deeper
|
// See if we path is in this Manifest or do we have to dig deeper
|
||||||
for _, entry := range mroot.Entries {
|
for _, entry := range mroot.Entries {
|
||||||
if path == entry.Path {
|
if path == entry.Path {
|
||||||
entryToRemove = entry
|
entryToRemove = entry
|
||||||
}else {
|
} else {
|
||||||
if entry.ContentType == "application/bzz-manifest+json" {
|
if entry.ContentType == "application/bzz-manifest+json" {
|
||||||
prfxlen := strings.HasPrefix(path, entry.Path)
|
prfxlen := strings.HasPrefix(path, entry.Path)
|
||||||
if prfxlen && len(path) > len(longestPathEntry.Path) {
|
if prfxlen && len(path) > len(longestPathEntry.Path) {
|
||||||
@ -324,7 +316,7 @@ func removeEntryFromManifest(ctx *cli.Context, mhash , path string) string {
|
|||||||
if longestPathEntry.Path != "" {
|
if longestPathEntry.Path != "" {
|
||||||
// Load the child Manifest remove the entry there
|
// Load the child Manifest remove the entry there
|
||||||
newPath := path[len(longestPathEntry.Path):]
|
newPath := path[len(longestPathEntry.Path):]
|
||||||
newHash := removeEntryFromManifest (ctx, longestPathEntry.Hash, newPath)
|
newHash := removeEntryFromManifest(ctx, longestPathEntry.Hash, newPath)
|
||||||
|
|
||||||
// Replace the hash for parent Manifests
|
// Replace the hash for parent Manifests
|
||||||
newMRoot := manifest{}
|
newMRoot := manifest{}
|
||||||
@ -348,13 +340,10 @@ func removeEntryFromManifest(ctx *cli.Context, mhash , path string) string {
|
|||||||
mroot = newMRoot
|
mroot = newMRoot
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
newManifestHash, err := client.uploadManifest(mroot)
|
newManifestHash, err := client.uploadManifest(mroot)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalln("manifest upload failed:", err)
|
log.Fatalln("manifest upload failed:", err)
|
||||||
}
|
}
|
||||||
return newManifestHash
|
return newManifestHash
|
||||||
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -233,7 +233,7 @@ func (c *client) postRaw(mimetype string, size int64, body io.ReadCloser) (strin
|
|||||||
func (c *client) downloadManifest(mhash string) (manifest, error) {
|
func (c *client) downloadManifest(mhash string) (manifest, error) {
|
||||||
|
|
||||||
mroot := manifest{}
|
mroot := manifest{}
|
||||||
req, err := http.NewRequest("GET", c.api + "/bzzr:/" + mhash, nil)
|
req, err := http.NewRequest("GET", c.api+"/bzzr:/"+mhash, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return mroot, err
|
return mroot, err
|
||||||
}
|
}
|
||||||
@ -254,4 +254,4 @@ func (c *client) downloadManifest(mhash string) (manifest, error) {
|
|||||||
return mroot, fmt.Errorf("Manifest %v is malformed: %v", mhash, err)
|
return mroot, fmt.Errorf("Manifest %v is malformed: %v", mhash, err)
|
||||||
}
|
}
|
||||||
return mroot, err
|
return mroot, err
|
||||||
}
|
}
|
||||||
|
@ -31,8 +31,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/core"
|
"github.com/ethereum/go-ethereum/core"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/internal/debug"
|
"github.com/ethereum/go-ethereum/internal/debug"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/node"
|
"github.com/ethereum/go-ethereum/node"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
)
|
)
|
||||||
@ -41,15 +40,6 @@ const (
|
|||||||
importBatchSize = 2500
|
importBatchSize = 2500
|
||||||
)
|
)
|
||||||
|
|
||||||
func openLogFile(Datadir string, filename string) *os.File {
|
|
||||||
path := common.AbsolutePath(Datadir, filename)
|
|
||||||
file, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
|
|
||||||
if err != nil {
|
|
||||||
panic(fmt.Sprintf("error opening log file '%s': %v", filename, err))
|
|
||||||
}
|
|
||||||
return file
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fatalf formats a message to standard error and exits the program.
|
// Fatalf formats a message to standard error and exits the program.
|
||||||
// The message is also printed to standard output if standard error
|
// The message is also printed to standard output if standard error
|
||||||
// is redirected to a different file.
|
// is redirected to a different file.
|
||||||
@ -79,12 +69,12 @@ func StartNode(stack *node.Node) {
|
|||||||
signal.Notify(sigc, os.Interrupt)
|
signal.Notify(sigc, os.Interrupt)
|
||||||
defer signal.Stop(sigc)
|
defer signal.Stop(sigc)
|
||||||
<-sigc
|
<-sigc
|
||||||
glog.V(logger.Info).Infoln("Got interrupt, shutting down...")
|
log.Info(fmt.Sprint("Got interrupt, shutting down..."))
|
||||||
go stack.Stop()
|
go stack.Stop()
|
||||||
for i := 10; i > 0; i-- {
|
for i := 10; i > 0; i-- {
|
||||||
<-sigc
|
<-sigc
|
||||||
if i > 1 {
|
if i > 1 {
|
||||||
glog.V(logger.Info).Infof("Already shutting down, interrupt %d more times for panic.", i-1)
|
log.Info(fmt.Sprintf("Already shutting down, interrupt %d more times for panic.", i-1))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
debug.Exit() // ensure trace and CPU profile data is flushed.
|
debug.Exit() // ensure trace and CPU profile data is flushed.
|
||||||
@ -115,7 +105,7 @@ func ImportChain(chain *core.BlockChain, fn string) error {
|
|||||||
defer close(interrupt)
|
defer close(interrupt)
|
||||||
go func() {
|
go func() {
|
||||||
if _, ok := <-interrupt; ok {
|
if _, ok := <-interrupt; ok {
|
||||||
glog.Info("caught interrupt during import, will stop at next batch")
|
log.Info(fmt.Sprint("caught interrupt during import, will stop at next batch"))
|
||||||
}
|
}
|
||||||
close(stop)
|
close(stop)
|
||||||
}()
|
}()
|
||||||
@ -128,7 +118,7 @@ func ImportChain(chain *core.BlockChain, fn string) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
glog.Infoln("Importing blockchain ", fn)
|
log.Info(fmt.Sprint("Importing blockchain ", fn))
|
||||||
fh, err := os.Open(fn)
|
fh, err := os.Open(fn)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -176,8 +166,8 @@ func ImportChain(chain *core.BlockChain, fn string) error {
|
|||||||
return fmt.Errorf("interrupted")
|
return fmt.Errorf("interrupted")
|
||||||
}
|
}
|
||||||
if hasAllBlocks(chain, blocks[:i]) {
|
if hasAllBlocks(chain, blocks[:i]) {
|
||||||
glog.Infof("skipping batch %d, all blocks present [%x / %x]",
|
log.Info(fmt.Sprintf("skipping batch %d, all blocks present [%x / %x]",
|
||||||
batch, blocks[0].Hash().Bytes()[:4], blocks[i-1].Hash().Bytes()[:4])
|
batch, blocks[0].Hash().Bytes()[:4], blocks[i-1].Hash().Bytes()[:4]))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -198,7 +188,7 @@ func hasAllBlocks(chain *core.BlockChain, bs []*types.Block) bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func ExportChain(blockchain *core.BlockChain, fn string) error {
|
func ExportChain(blockchain *core.BlockChain, fn string) error {
|
||||||
glog.Infoln("Exporting blockchain to ", fn)
|
log.Info(fmt.Sprint("Exporting blockchain to ", fn))
|
||||||
fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
|
fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -214,13 +204,13 @@ func ExportChain(blockchain *core.BlockChain, fn string) error {
|
|||||||
if err := blockchain.Export(writer); err != nil {
|
if err := blockchain.Export(writer); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
glog.Infoln("Exported blockchain to ", fn)
|
log.Info(fmt.Sprint("Exported blockchain to ", fn))
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, last uint64) error {
|
func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, last uint64) error {
|
||||||
glog.Infoln("Exporting blockchain to ", fn)
|
log.Info(fmt.Sprint("Exporting blockchain to ", fn))
|
||||||
// TODO verify mode perms
|
// TODO verify mode perms
|
||||||
fh, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm)
|
fh, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -237,6 +227,6 @@ func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, las
|
|||||||
if err := blockchain.ExportN(writer, first, last); err != nil {
|
if err := blockchain.ExportN(writer, first, last); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
glog.Infoln("Exported blockchain to ", fn)
|
log.Info(fmt.Sprint("Exported blockchain to ", fn))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -41,8 +41,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/ethstats"
|
"github.com/ethereum/go-ethereum/ethstats"
|
||||||
"github.com/ethereum/go-ethereum/event"
|
"github.com/ethereum/go-ethereum/event"
|
||||||
"github.com/ethereum/go-ethereum/les"
|
"github.com/ethereum/go-ethereum/les"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/metrics"
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
"github.com/ethereum/go-ethereum/node"
|
"github.com/ethereum/go-ethereum/node"
|
||||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||||
@ -493,7 +492,7 @@ func MakeBootstrapNodes(ctx *cli.Context) []*discover.Node {
|
|||||||
for _, url := range urls {
|
for _, url := range urls {
|
||||||
node, err := discover.ParseNode(url)
|
node, err := discover.ParseNode(url)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Error).Infof("Bootstrap URL %s: %v\n", url, err)
|
log.Error(fmt.Sprintf("Bootstrap URL %s: %v\n", url, err))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
bootnodes = append(bootnodes, node)
|
bootnodes = append(bootnodes, node)
|
||||||
@ -513,7 +512,7 @@ func MakeBootstrapNodesV5(ctx *cli.Context) []*discv5.Node {
|
|||||||
for _, url := range urls {
|
for _, url := range urls {
|
||||||
node, err := discv5.ParseNode(url)
|
node, err := discv5.ParseNode(url)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Error).Infof("Bootstrap URL %s: %v\n", url, err)
|
log.Error(fmt.Sprintf("Bootstrap URL %s: %v\n", url, err))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
bootnodes = append(bootnodes, node)
|
bootnodes = append(bootnodes, node)
|
||||||
@ -610,7 +609,7 @@ func MakeAddress(ks *keystore.KeyStore, account string) (accounts.Account, error
|
|||||||
func MakeEtherbase(ks *keystore.KeyStore, ctx *cli.Context) common.Address {
|
func MakeEtherbase(ks *keystore.KeyStore, ctx *cli.Context) common.Address {
|
||||||
accounts := ks.Accounts()
|
accounts := ks.Accounts()
|
||||||
if !ctx.GlobalIsSet(EtherbaseFlag.Name) && len(accounts) == 0 {
|
if !ctx.GlobalIsSet(EtherbaseFlag.Name) && len(accounts) == 0 {
|
||||||
glog.V(logger.Error).Infoln("WARNING: No etherbase set and no accounts found as default")
|
log.Error(fmt.Sprint("WARNING: No etherbase set and no accounts found as default"))
|
||||||
return common.Address{}
|
return common.Address{}
|
||||||
}
|
}
|
||||||
etherbase := ctx.GlobalString(EtherbaseFlag.Name)
|
etherbase := ctx.GlobalString(EtherbaseFlag.Name)
|
||||||
@ -913,10 +912,9 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
|
|||||||
if ctx.GlobalBool(TestNetFlag.Name) {
|
if ctx.GlobalBool(TestNetFlag.Name) {
|
||||||
_, err := core.WriteTestNetGenesisBlock(chainDb)
|
_, err := core.WriteTestNetGenesisBlock(chainDb)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.Fatalln(err)
|
Fatalf("Failed to write testnet genesis: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
chainConfig := MakeChainConfigFromDb(ctx, chainDb)
|
chainConfig := MakeChainConfigFromDb(ctx, chainDb)
|
||||||
|
|
||||||
pow := pow.PoW(core.FakePow{})
|
pow := pow.PoW(core.FakePow{})
|
||||||
|
@ -36,8 +36,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/console"
|
"github.com/ethereum/go-ethereum/console"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/p2p"
|
"github.com/ethereum/go-ethereum/p2p"
|
||||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||||
"github.com/ethereum/go-ethereum/p2p/nat"
|
"github.com/ethereum/go-ethereum/p2p/nat"
|
||||||
@ -82,7 +81,7 @@ var (
|
|||||||
testMode = flag.Bool("t", false, "use of predefined parameters for diagnostics")
|
testMode = flag.Bool("t", false, "use of predefined parameters for diagnostics")
|
||||||
generateKey = flag.Bool("k", false, "generate and show the private key")
|
generateKey = flag.Bool("k", false, "generate and show the private key")
|
||||||
|
|
||||||
argVerbosity = flag.Int("verbosity", logger.Warn, "log verbosity level")
|
argVerbosity = flag.Int("verbosity", int(log.LvlWarn), "log verbosity level")
|
||||||
argTTL = flag.Uint("ttl", 30, "time-to-live for messages in seconds")
|
argTTL = flag.Uint("ttl", 30, "time-to-live for messages in seconds")
|
||||||
argWorkTime = flag.Uint("work", 5, "work time in seconds")
|
argWorkTime = flag.Uint("work", 5, "work time in seconds")
|
||||||
argPoW = flag.Float64("pow", whisper.MinimumPoW, "PoW for normal messages in float format (e.g. 2.7)")
|
argPoW = flag.Float64("pow", whisper.MinimumPoW, "PoW for normal messages in float format (e.g. 2.7)")
|
||||||
@ -153,8 +152,7 @@ func echo() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func initialize() {
|
func initialize() {
|
||||||
glog.SetV(*argVerbosity)
|
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*argVerbosity), log.StreamHandler(os.Stderr, log.TerminalFormat())))
|
||||||
glog.SetToStderr(true)
|
|
||||||
|
|
||||||
done = make(chan struct{})
|
done = make(chan struct{})
|
||||||
var peers []*discover.Node
|
var peers []*discover.Node
|
||||||
|
@ -22,8 +22,7 @@ import (
|
|||||||
"io"
|
"io"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/rpc"
|
"github.com/ethereum/go-ethereum/rpc"
|
||||||
"github.com/robertkrimen/otto"
|
"github.com/robertkrimen/otto"
|
||||||
)
|
)
|
||||||
@ -306,7 +305,7 @@ func setError(resp *otto.Object, code int, msg string) {
|
|||||||
func throwJSException(msg interface{}) otto.Value {
|
func throwJSException(msg interface{}) otto.Value {
|
||||||
val, err := otto.ToValue(msg)
|
val, err := otto.ToValue(msg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Error).Infof("Failed to serialize JavaScript exception %v: %v", msg, err)
|
log.Error(fmt.Sprintf("Failed to serialize JavaScript exception %v: %v", msg, err))
|
||||||
}
|
}
|
||||||
panic(val)
|
panic(val)
|
||||||
}
|
}
|
||||||
|
@ -40,8 +40,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/contracts/chequebook/contract"
|
"github.com/ethereum/go-ethereum/contracts/chequebook/contract"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/swarm/services/swap/swap"
|
"github.com/ethereum/go-ethereum/swarm/services/swap/swap"
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
)
|
)
|
||||||
@ -140,7 +139,7 @@ func NewChequebook(path string, contractAddr common.Address, prvKey *ecdsa.Priva
|
|||||||
|
|
||||||
if (contractAddr != common.Address{}) {
|
if (contractAddr != common.Address{}) {
|
||||||
self.setBalanceFromBlockChain()
|
self.setBalanceFromBlockChain()
|
||||||
glog.V(logger.Detail).Infof("new chequebook initialised from %s (owner: %v, balance: %s)", contractAddr.Hex(), self.owner.Hex(), self.balance.String())
|
log.Trace(fmt.Sprintf("new chequebook initialised from %s (owner: %v, balance: %s)", contractAddr.Hex(), self.owner.Hex(), self.balance.String()))
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -148,7 +147,7 @@ func NewChequebook(path string, contractAddr common.Address, prvKey *ecdsa.Priva
|
|||||||
func (self *Chequebook) setBalanceFromBlockChain() {
|
func (self *Chequebook) setBalanceFromBlockChain() {
|
||||||
balance, err := self.backend.BalanceAt(context.TODO(), self.contractAddr, nil)
|
balance, err := self.backend.BalanceAt(context.TODO(), self.contractAddr, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Error).Infof("can't get balance: %v", err)
|
log.Error(fmt.Sprintf("can't get balance: %v", err))
|
||||||
} else {
|
} else {
|
||||||
self.balance.Set(balance)
|
self.balance.Set(balance)
|
||||||
}
|
}
|
||||||
@ -172,7 +171,7 @@ func LoadChequebook(path string, prvKey *ecdsa.PrivateKey, backend Backend, chec
|
|||||||
self.setBalanceFromBlockChain()
|
self.setBalanceFromBlockChain()
|
||||||
}
|
}
|
||||||
|
|
||||||
glog.V(logger.Detail).Infof("loaded chequebook (%s, owner: %v, balance: %v) initialised from %v", self.contractAddr.Hex(), self.owner.Hex(), self.balance, path)
|
log.Trace(fmt.Sprintf("loaded chequebook (%s, owner: %v, balance: %v) initialised from %v", self.contractAddr.Hex(), self.owner.Hex(), self.balance, path))
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -227,7 +226,7 @@ func (self *Chequebook) Save() (err error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
glog.V(logger.Detail).Infof("saving chequebook (%s) to %v", self.contractAddr.Hex(), self.path)
|
log.Trace(fmt.Sprintf("saving chequebook (%s) to %v", self.contractAddr.Hex(), self.path))
|
||||||
|
|
||||||
return ioutil.WriteFile(self.path, data, os.ModePerm)
|
return ioutil.WriteFile(self.path, data, os.ModePerm)
|
||||||
}
|
}
|
||||||
@ -340,12 +339,12 @@ func (self *Chequebook) deposit(amount *big.Int) (string, error) {
|
|||||||
chbookRaw := &contract.ChequebookRaw{Contract: self.contract}
|
chbookRaw := &contract.ChequebookRaw{Contract: self.contract}
|
||||||
tx, err := chbookRaw.Transfer(depositTransactor)
|
tx, err := chbookRaw.Transfer(depositTransactor)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Warn).Infof("error depositing %d wei to chequebook (%s, balance: %v, target: %v): %v", amount, self.contractAddr.Hex(), self.balance, self.buffer, err)
|
log.Warn(fmt.Sprintf("error depositing %d wei to chequebook (%s, balance: %v, target: %v): %v", amount, self.contractAddr.Hex(), self.balance, self.buffer, err))
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
// assume that transaction is actually successful, we add the amount to balance right away
|
// assume that transaction is actually successful, we add the amount to balance right away
|
||||||
self.balance.Add(self.balance, amount)
|
self.balance.Add(self.balance, amount)
|
||||||
glog.V(logger.Detail).Infof("deposited %d wei to chequebook (%s, balance: %v, target: %v)", amount, self.contractAddr.Hex(), self.balance, self.buffer)
|
log.Trace(fmt.Sprintf("deposited %d wei to chequebook (%s, balance: %v, target: %v)", amount, self.contractAddr.Hex(), self.balance, self.buffer))
|
||||||
return tx.Hash().Hex(), nil
|
return tx.Hash().Hex(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -469,7 +468,7 @@ func NewInbox(prvKey *ecdsa.PrivateKey, contractAddr, beneficiary common.Address
|
|||||||
session: session,
|
session: session,
|
||||||
cashed: new(big.Int).Set(common.Big0),
|
cashed: new(big.Int).Set(common.Big0),
|
||||||
}
|
}
|
||||||
glog.V(logger.Detail).Infof("initialised inbox (%s -> %s) expected signer: %x", self.contract.Hex(), self.beneficiary.Hex(), crypto.FromECDSAPub(signer))
|
log.Trace(fmt.Sprintf("initialised inbox (%s -> %s) expected signer: %x", self.contract.Hex(), self.beneficiary.Hex(), crypto.FromECDSAPub(signer)))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -491,7 +490,7 @@ func (self *Inbox) Stop() {
|
|||||||
func (self *Inbox) Cash() (txhash string, err error) {
|
func (self *Inbox) Cash() (txhash string, err error) {
|
||||||
if self.cheque != nil {
|
if self.cheque != nil {
|
||||||
txhash, err = self.cheque.Cash(self.session)
|
txhash, err = self.cheque.Cash(self.session)
|
||||||
glog.V(logger.Detail).Infof("cashing cheque (total: %v) on chequebook (%s) sending to %v", self.cheque.Amount, self.contract.Hex(), self.beneficiary.Hex())
|
log.Trace(fmt.Sprintf("cashing cheque (total: %v) on chequebook (%s) sending to %v", self.cheque.Amount, self.contract.Hex(), self.beneficiary.Hex()))
|
||||||
self.cashed = self.cheque.Amount
|
self.cashed = self.cheque.Amount
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
@ -575,7 +574,7 @@ func (self *Inbox) Receive(promise swap.Promise) (*big.Int, error) {
|
|||||||
self.Cash()
|
self.Cash()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
glog.V(logger.Detail).Infof("received cheque of %v wei in inbox (%s, uncashed: %v)", amount, self.contract.Hex(), uncashed)
|
log.Trace(fmt.Sprintf("received cheque of %v wei in inbox (%s, uncashed: %v)", amount, self.contract.Hex(), uncashed))
|
||||||
}
|
}
|
||||||
|
|
||||||
return amount, err
|
return amount, err
|
||||||
@ -583,7 +582,7 @@ func (self *Inbox) Receive(promise swap.Promise) (*big.Int, error) {
|
|||||||
|
|
||||||
// Verify verifies cheque for signer, contract, beneficiary, amount, valid signature.
|
// Verify verifies cheque for signer, contract, beneficiary, amount, valid signature.
|
||||||
func (self *Cheque) Verify(signerKey *ecdsa.PublicKey, contract, beneficiary common.Address, sum *big.Int) (*big.Int, error) {
|
func (self *Cheque) Verify(signerKey *ecdsa.PublicKey, contract, beneficiary common.Address, sum *big.Int) (*big.Int, error) {
|
||||||
glog.V(logger.Detail).Infof("verify cheque: %v - sum: %v", self, sum)
|
log.Trace(fmt.Sprintf("verify cheque: %v - sum: %v", self, sum))
|
||||||
if sum == nil {
|
if sum == nil {
|
||||||
return nil, fmt.Errorf("invalid amount")
|
return nil, fmt.Errorf("invalid amount")
|
||||||
}
|
}
|
||||||
|
@ -29,8 +29,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/eth"
|
"github.com/ethereum/go-ethereum/eth"
|
||||||
"github.com/ethereum/go-ethereum/internal/ethapi"
|
"github.com/ethereum/go-ethereum/internal/ethapi"
|
||||||
"github.com/ethereum/go-ethereum/les"
|
"github.com/ethereum/go-ethereum/les"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/node"
|
"github.com/ethereum/go-ethereum/node"
|
||||||
"github.com/ethereum/go-ethereum/p2p"
|
"github.com/ethereum/go-ethereum/p2p"
|
||||||
"github.com/ethereum/go-ethereum/rpc"
|
"github.com/ethereum/go-ethereum/rpc"
|
||||||
@ -128,10 +127,10 @@ func (r *ReleaseService) checker() {
|
|||||||
version, err := r.oracle.CurrentVersion(opts)
|
version, err := r.oracle.CurrentVersion(opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err == bind.ErrNoCode {
|
if err == bind.ErrNoCode {
|
||||||
glog.V(logger.Debug).Infof("Release oracle not found at %x", r.config.Oracle)
|
log.Debug(fmt.Sprintf("Release oracle not found at %x", r.config.Oracle))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
glog.V(logger.Error).Infof("Failed to retrieve current release: %v", err)
|
log.Error(fmt.Sprintf("Failed to retrieve current release: %v", err))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
// Version was successfully retrieved, notify if newer than ours
|
// Version was successfully retrieved, notify if newer than ours
|
||||||
@ -144,13 +143,13 @@ func (r *ReleaseService) checker() {
|
|||||||
howtofix := fmt.Sprintf("Please check https://github.com/ethereum/go-ethereum/releases for new releases")
|
howtofix := fmt.Sprintf("Please check https://github.com/ethereum/go-ethereum/releases for new releases")
|
||||||
separator := strings.Repeat("-", len(warning))
|
separator := strings.Repeat("-", len(warning))
|
||||||
|
|
||||||
glog.V(logger.Warn).Info(separator)
|
log.Warn(fmt.Sprint(separator))
|
||||||
glog.V(logger.Warn).Info(warning)
|
log.Warn(fmt.Sprint(warning))
|
||||||
glog.V(logger.Warn).Info(howtofix)
|
log.Warn(fmt.Sprint(howtofix))
|
||||||
glog.V(logger.Warn).Info(separator)
|
log.Warn(fmt.Sprint(separator))
|
||||||
} else {
|
} else {
|
||||||
glog.V(logger.Debug).Infof("Client v%d.%d.%d-%x seems up to date with upstream v%d.%d.%d-%x",
|
log.Debug(fmt.Sprintf("Client v%d.%d.%d-%x seems up to date with upstream v%d.%d.%d-%x",
|
||||||
r.config.Major, r.config.Minor, r.config.Patch, r.config.Commit[:4], version.Major, version.Minor, version.Patch, version.Commit[:4])
|
r.config.Major, r.config.Minor, r.config.Patch, r.config.Commit[:4], version.Major, version.Minor, version.Patch, version.Commit[:4]))
|
||||||
}
|
}
|
||||||
|
|
||||||
// If termination was requested, return
|
// If termination was requested, return
|
||||||
|
@ -24,7 +24,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core/state"
|
"github.com/ethereum/go-ethereum/core/state"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/pow"
|
"github.com/ethereum/go-ethereum/pow"
|
||||||
"gopkg.in/fatih/set.v0"
|
"gopkg.in/fatih/set.v0"
|
||||||
@ -169,7 +169,7 @@ func (v *BlockValidator) VerifyUncles(block, parent *types.Block) error {
|
|||||||
for h := range ancestors {
|
for h := range ancestors {
|
||||||
branch += fmt.Sprintf(" O - %x\n |\n", h)
|
branch += fmt.Sprintf(" O - %x\n |\n", h)
|
||||||
}
|
}
|
||||||
glog.Infoln(branch)
|
log.Info(fmt.Sprint(branch))
|
||||||
return UncleError("uncle[%d](%x) is ancestor", i, hash[:4])
|
return UncleError("uncle[%d](%x) is ancestor", i, hash[:4])
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -36,8 +36,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/event"
|
"github.com/ethereum/go-ethereum/event"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/metrics"
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/pow"
|
"github.com/ethereum/go-ethereum/pow"
|
||||||
@ -161,9 +160,9 @@ func NewBlockChain(chainDb ethdb.Database, config *params.ChainConfig, pow pow.P
|
|||||||
headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
|
headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
|
||||||
// make sure the headerByNumber (if present) is in our current canonical chain
|
// make sure the headerByNumber (if present) is in our current canonical chain
|
||||||
if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
|
if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
|
||||||
glog.V(logger.Error).Infof("Found bad hash, rewinding chain to block #%d [%x…]", header.Number, header.ParentHash[:4])
|
log.Error(fmt.Sprintf("Found bad hash, rewinding chain to block #%d [%x…]", header.Number, header.ParentHash[:4]))
|
||||||
bc.SetHead(header.Number.Uint64() - 1)
|
bc.SetHead(header.Number.Uint64() - 1)
|
||||||
glog.V(logger.Error).Infoln("Chain rewind was successful, resuming normal operation")
|
log.Error(fmt.Sprint("Chain rewind was successful, resuming normal operation"))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -220,9 +219,9 @@ func (self *BlockChain) loadLastState() error {
|
|||||||
blockTd := self.GetTd(self.currentBlock.Hash(), self.currentBlock.NumberU64())
|
blockTd := self.GetTd(self.currentBlock.Hash(), self.currentBlock.NumberU64())
|
||||||
fastTd := self.GetTd(self.currentFastBlock.Hash(), self.currentFastBlock.NumberU64())
|
fastTd := self.GetTd(self.currentFastBlock.Hash(), self.currentFastBlock.NumberU64())
|
||||||
|
|
||||||
glog.V(logger.Info).Infof("Last header: #%d [%x…] TD=%v", currentHeader.Number, currentHeader.Hash().Bytes()[:4], headerTd)
|
log.Info(fmt.Sprintf("Last header: #%d [%x…] TD=%v", currentHeader.Number, currentHeader.Hash().Bytes()[:4], headerTd))
|
||||||
glog.V(logger.Info).Infof("Last block: #%d [%x…] TD=%v", self.currentBlock.Number(), self.currentBlock.Hash().Bytes()[:4], blockTd)
|
log.Info(fmt.Sprintf("Last block: #%d [%x…] TD=%v", self.currentBlock.Number(), self.currentBlock.Hash().Bytes()[:4], blockTd))
|
||||||
glog.V(logger.Info).Infof("Fast block: #%d [%x…] TD=%v", self.currentFastBlock.Number(), self.currentFastBlock.Hash().Bytes()[:4], fastTd)
|
log.Info(fmt.Sprintf("Fast block: #%d [%x…] TD=%v", self.currentFastBlock.Number(), self.currentFastBlock.Hash().Bytes()[:4], fastTd))
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -263,10 +262,10 @@ func (bc *BlockChain) SetHead(head uint64) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if err := WriteHeadBlockHash(bc.chainDb, bc.currentBlock.Hash()); err != nil {
|
if err := WriteHeadBlockHash(bc.chainDb, bc.currentBlock.Hash()); err != nil {
|
||||||
glog.Fatalf("failed to reset head block hash: %v", err)
|
log.Crit(fmt.Sprintf("failed to reset head block hash: %v", err))
|
||||||
}
|
}
|
||||||
if err := WriteHeadFastBlockHash(bc.chainDb, bc.currentFastBlock.Hash()); err != nil {
|
if err := WriteHeadFastBlockHash(bc.chainDb, bc.currentFastBlock.Hash()); err != nil {
|
||||||
glog.Fatalf("failed to reset head fast block hash: %v", err)
|
log.Crit(fmt.Sprintf("failed to reset head fast block hash: %v", err))
|
||||||
}
|
}
|
||||||
bc.loadLastState()
|
bc.loadLastState()
|
||||||
}
|
}
|
||||||
@ -287,7 +286,7 @@ func (self *BlockChain) FastSyncCommitHead(hash common.Hash) error {
|
|||||||
self.currentBlock = block
|
self.currentBlock = block
|
||||||
self.mu.Unlock()
|
self.mu.Unlock()
|
||||||
|
|
||||||
glog.V(logger.Info).Infof("committed block #%d [%x…] as new head", block.Number(), hash[:4])
|
log.Info(fmt.Sprintf("committed block #%d [%x…] as new head", block.Number(), hash[:4]))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -391,10 +390,10 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) {
|
|||||||
|
|
||||||
// Prepare the genesis block and reinitialise the chain
|
// Prepare the genesis block and reinitialise the chain
|
||||||
if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
|
if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
|
||||||
glog.Fatalf("failed to write genesis block TD: %v", err)
|
log.Crit(fmt.Sprintf("failed to write genesis block TD: %v", err))
|
||||||
}
|
}
|
||||||
if err := WriteBlock(bc.chainDb, genesis); err != nil {
|
if err := WriteBlock(bc.chainDb, genesis); err != nil {
|
||||||
glog.Fatalf("failed to write genesis block: %v", err)
|
log.Crit(fmt.Sprintf("failed to write genesis block: %v", err))
|
||||||
}
|
}
|
||||||
bc.genesisBlock = genesis
|
bc.genesisBlock = genesis
|
||||||
bc.insert(bc.genesisBlock)
|
bc.insert(bc.genesisBlock)
|
||||||
@ -418,7 +417,7 @@ func (self *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
|
|||||||
return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
|
return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
|
||||||
}
|
}
|
||||||
|
|
||||||
glog.V(logger.Info).Infof("exporting %d blocks...\n", last-first+1)
|
log.Info(fmt.Sprintf("exporting %d blocks...\n", last-first+1))
|
||||||
|
|
||||||
for nr := first; nr <= last; nr++ {
|
for nr := first; nr <= last; nr++ {
|
||||||
block := self.GetBlockByNumber(nr)
|
block := self.GetBlockByNumber(nr)
|
||||||
@ -446,10 +445,10 @@ func (bc *BlockChain) insert(block *types.Block) {
|
|||||||
|
|
||||||
// Add the block to the canonical chain number scheme and mark as the head
|
// Add the block to the canonical chain number scheme and mark as the head
|
||||||
if err := WriteCanonicalHash(bc.chainDb, block.Hash(), block.NumberU64()); err != nil {
|
if err := WriteCanonicalHash(bc.chainDb, block.Hash(), block.NumberU64()); err != nil {
|
||||||
glog.Fatalf("failed to insert block number: %v", err)
|
log.Crit(fmt.Sprintf("failed to insert block number: %v", err))
|
||||||
}
|
}
|
||||||
if err := WriteHeadBlockHash(bc.chainDb, block.Hash()); err != nil {
|
if err := WriteHeadBlockHash(bc.chainDb, block.Hash()); err != nil {
|
||||||
glog.Fatalf("failed to insert head block hash: %v", err)
|
log.Crit(fmt.Sprintf("failed to insert head block hash: %v", err))
|
||||||
}
|
}
|
||||||
bc.currentBlock = block
|
bc.currentBlock = block
|
||||||
|
|
||||||
@ -458,7 +457,7 @@ func (bc *BlockChain) insert(block *types.Block) {
|
|||||||
bc.hc.SetCurrentHeader(block.Header())
|
bc.hc.SetCurrentHeader(block.Header())
|
||||||
|
|
||||||
if err := WriteHeadFastBlockHash(bc.chainDb, block.Hash()); err != nil {
|
if err := WriteHeadFastBlockHash(bc.chainDb, block.Hash()); err != nil {
|
||||||
glog.Fatalf("failed to insert head fast block hash: %v", err)
|
log.Crit(fmt.Sprintf("failed to insert head fast block hash: %v", err))
|
||||||
}
|
}
|
||||||
bc.currentFastBlock = block
|
bc.currentFastBlock = block
|
||||||
}
|
}
|
||||||
@ -590,7 +589,7 @@ func (bc *BlockChain) Stop() {
|
|||||||
|
|
||||||
bc.wg.Wait()
|
bc.wg.Wait()
|
||||||
|
|
||||||
glog.V(logger.Info).Infoln("Chain manager stopped")
|
log.Info(fmt.Sprint("Chain manager stopped"))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (self *BlockChain) procFutureBlocks() {
|
func (self *BlockChain) procFutureBlocks() {
|
||||||
@ -687,7 +686,7 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
|
|||||||
failure := fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
|
failure := fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
|
||||||
blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
|
blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
|
||||||
|
|
||||||
glog.V(logger.Error).Info(failure.Error())
|
log.Error(fmt.Sprint(failure.Error()))
|
||||||
return 0, failure
|
return 0, failure
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -735,31 +734,31 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
|
|||||||
if err := WriteBody(self.chainDb, block.Hash(), block.NumberU64(), block.Body()); err != nil {
|
if err := WriteBody(self.chainDb, block.Hash(), block.NumberU64(), block.Body()); err != nil {
|
||||||
errs[index] = fmt.Errorf("failed to write block body: %v", err)
|
errs[index] = fmt.Errorf("failed to write block body: %v", err)
|
||||||
atomic.AddInt32(&failed, 1)
|
atomic.AddInt32(&failed, 1)
|
||||||
glog.Fatal(errs[index])
|
log.Crit(fmt.Sprint(errs[index]))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if err := WriteBlockReceipts(self.chainDb, block.Hash(), block.NumberU64(), receipts); err != nil {
|
if err := WriteBlockReceipts(self.chainDb, block.Hash(), block.NumberU64(), receipts); err != nil {
|
||||||
errs[index] = fmt.Errorf("failed to write block receipts: %v", err)
|
errs[index] = fmt.Errorf("failed to write block receipts: %v", err)
|
||||||
atomic.AddInt32(&failed, 1)
|
atomic.AddInt32(&failed, 1)
|
||||||
glog.Fatal(errs[index])
|
log.Crit(fmt.Sprint(errs[index]))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if err := WriteMipmapBloom(self.chainDb, block.NumberU64(), receipts); err != nil {
|
if err := WriteMipmapBloom(self.chainDb, block.NumberU64(), receipts); err != nil {
|
||||||
errs[index] = fmt.Errorf("failed to write log blooms: %v", err)
|
errs[index] = fmt.Errorf("failed to write log blooms: %v", err)
|
||||||
atomic.AddInt32(&failed, 1)
|
atomic.AddInt32(&failed, 1)
|
||||||
glog.Fatal(errs[index])
|
log.Crit(fmt.Sprint(errs[index]))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if err := WriteTransactions(self.chainDb, block); err != nil {
|
if err := WriteTransactions(self.chainDb, block); err != nil {
|
||||||
errs[index] = fmt.Errorf("failed to write individual transactions: %v", err)
|
errs[index] = fmt.Errorf("failed to write individual transactions: %v", err)
|
||||||
atomic.AddInt32(&failed, 1)
|
atomic.AddInt32(&failed, 1)
|
||||||
glog.Fatal(errs[index])
|
log.Crit(fmt.Sprint(errs[index]))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if err := WriteReceipts(self.chainDb, receipts); err != nil {
|
if err := WriteReceipts(self.chainDb, receipts); err != nil {
|
||||||
errs[index] = fmt.Errorf("failed to write individual receipts: %v", err)
|
errs[index] = fmt.Errorf("failed to write individual receipts: %v", err)
|
||||||
atomic.AddInt32(&failed, 1)
|
atomic.AddInt32(&failed, 1)
|
||||||
glog.Fatal(errs[index])
|
log.Crit(fmt.Sprint(errs[index]))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
atomic.AddInt32(&stats.processed, 1)
|
atomic.AddInt32(&stats.processed, 1)
|
||||||
@ -785,7 +784,7 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
if atomic.LoadInt32(&self.procInterrupt) == 1 {
|
if atomic.LoadInt32(&self.procInterrupt) == 1 {
|
||||||
glog.V(logger.Debug).Infoln("premature abort during receipt chain processing")
|
log.Debug(fmt.Sprint("premature abort during receipt chain processing"))
|
||||||
return 0, nil
|
return 0, nil
|
||||||
}
|
}
|
||||||
// Update the head fast sync block if better
|
// Update the head fast sync block if better
|
||||||
@ -793,7 +792,7 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
|
|||||||
head := blockChain[len(errs)-1]
|
head := blockChain[len(errs)-1]
|
||||||
if self.GetTd(self.currentFastBlock.Hash(), self.currentFastBlock.NumberU64()).Cmp(self.GetTd(head.Hash(), head.NumberU64())) < 0 {
|
if self.GetTd(self.currentFastBlock.Hash(), self.currentFastBlock.NumberU64()).Cmp(self.GetTd(head.Hash(), head.NumberU64())) < 0 {
|
||||||
if err := WriteHeadFastBlockHash(self.chainDb, head.Hash()); err != nil {
|
if err := WriteHeadFastBlockHash(self.chainDb, head.Hash()); err != nil {
|
||||||
glog.Fatalf("failed to update head fast block hash: %v", err)
|
log.Crit(fmt.Sprintf("failed to update head fast block hash: %v", err))
|
||||||
}
|
}
|
||||||
self.currentFastBlock = head
|
self.currentFastBlock = head
|
||||||
}
|
}
|
||||||
@ -806,7 +805,7 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
|
|||||||
if stats.ignored > 0 {
|
if stats.ignored > 0 {
|
||||||
ignored = fmt.Sprintf(" (%d ignored)", stats.ignored)
|
ignored = fmt.Sprintf(" (%d ignored)", stats.ignored)
|
||||||
}
|
}
|
||||||
glog.V(logger.Info).Infof("imported %4d receipts in %9v. #%d [%x… / %x…]%s", stats.processed, common.PrettyDuration(time.Since(start)), last.Number(), first.Hash().Bytes()[:4], last.Hash().Bytes()[:4], ignored)
|
log.Info(fmt.Sprintf("imported %4d receipts in %9v. #%d [%x… / %x…]%s", stats.processed, common.PrettyDuration(time.Since(start)), last.Number(), first.Hash().Bytes()[:4], last.Hash().Bytes()[:4], ignored))
|
||||||
|
|
||||||
return 0, nil
|
return 0, nil
|
||||||
}
|
}
|
||||||
@ -830,10 +829,10 @@ func (self *BlockChain) WriteBlock(block *types.Block) (status WriteStatus, err
|
|||||||
|
|
||||||
// Irrelevant of the canonical status, write the block itself to the database
|
// Irrelevant of the canonical status, write the block itself to the database
|
||||||
if err := self.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
|
if err := self.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
|
||||||
glog.Fatalf("failed to write block total difficulty: %v", err)
|
log.Crit(fmt.Sprintf("failed to write block total difficulty: %v", err))
|
||||||
}
|
}
|
||||||
if err := WriteBlock(self.chainDb, block); err != nil {
|
if err := WriteBlock(self.chainDb, block); err != nil {
|
||||||
glog.Fatalf("failed to write block contents: %v", err)
|
log.Crit(fmt.Sprintf("failed to write block contents: %v", err))
|
||||||
}
|
}
|
||||||
|
|
||||||
// If the total difficulty is higher than our known, add it to the canonical chain
|
// If the total difficulty is higher than our known, add it to the canonical chain
|
||||||
@ -867,7 +866,7 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
|
|||||||
failure := fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])",
|
failure := fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])",
|
||||||
i-1, chain[i-1].NumberU64(), chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
|
i-1, chain[i-1].NumberU64(), chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
|
||||||
|
|
||||||
glog.V(logger.Error).Info(failure.Error())
|
log.Error(fmt.Sprint(failure.Error()))
|
||||||
return 0, failure
|
return 0, failure
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -894,7 +893,7 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
|
|||||||
|
|
||||||
for i, block := range chain {
|
for i, block := range chain {
|
||||||
if atomic.LoadInt32(&self.procInterrupt) == 1 {
|
if atomic.LoadInt32(&self.procInterrupt) == 1 {
|
||||||
glog.V(logger.Debug).Infoln("Premature abort during block chain processing")
|
log.Debug(fmt.Sprint("Premature abort during block chain processing"))
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
bstart := time.Now()
|
bstart := time.Now()
|
||||||
@ -991,9 +990,9 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
|
|||||||
|
|
||||||
switch status {
|
switch status {
|
||||||
case CanonStatTy:
|
case CanonStatTy:
|
||||||
if glog.V(logger.Debug) {
|
log.Debug("", "msg", log.Lazy{Fn: func() string {
|
||||||
glog.Infof("inserted block #%d [%x…] in %9v: %3d txs %7v gas %d uncles.", block.Number(), block.Hash().Bytes()[0:4], common.PrettyDuration(time.Since(bstart)), len(block.Transactions()), block.GasUsed(), len(block.Uncles()))
|
return fmt.Sprintf("inserted block #%d [%x…] in %9v: %3d txs %7v gas %d uncles.", block.Number(), block.Hash().Bytes()[0:4], common.PrettyDuration(time.Since(bstart)), len(block.Transactions()), block.GasUsed(), len(block.Uncles()))
|
||||||
}
|
}})
|
||||||
blockInsertTimer.UpdateSince(bstart)
|
blockInsertTimer.UpdateSince(bstart)
|
||||||
events = append(events, ChainEvent{block, block.Hash(), logs})
|
events = append(events, ChainEvent{block, block.Hash(), logs})
|
||||||
|
|
||||||
@ -1014,9 +1013,9 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
|
|||||||
return i, err
|
return i, err
|
||||||
}
|
}
|
||||||
case SideStatTy:
|
case SideStatTy:
|
||||||
if glog.V(logger.Detail) {
|
log.Trace("", "msg", log.Lazy{Fn: func() string {
|
||||||
glog.Infof("inserted forked block #%d [%x…] (TD=%v) in %9v: %3d txs %d uncles.", block.Number(), block.Hash().Bytes()[0:4], block.Difficulty(), common.PrettyDuration(time.Since(bstart)), len(block.Transactions()), len(block.Uncles()))
|
return fmt.Sprintf("inserted forked block #%d [%x…] (TD=%v) in %9v: %3d txs %d uncles.", block.Number(), block.Hash().Bytes()[0:4], block.Difficulty(), common.PrettyDuration(time.Since(bstart)), len(block.Transactions()), len(block.Uncles()))
|
||||||
}
|
}})
|
||||||
blockInsertTimer.UpdateSince(bstart)
|
blockInsertTimer.UpdateSince(bstart)
|
||||||
events = append(events, ChainSideEvent{block})
|
events = append(events, ChainSideEvent{block})
|
||||||
|
|
||||||
@ -1025,10 +1024,8 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
stats.processed++
|
stats.processed++
|
||||||
if glog.V(logger.Info) {
|
stats.usedGas += usedGas.Uint64()
|
||||||
stats.usedGas += usedGas.Uint64()
|
stats.report(chain, i)
|
||||||
stats.report(chain, i)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
go self.postChainEvents(events, coalescedLogs)
|
go self.postChainEvents(events, coalescedLogs)
|
||||||
@ -1070,7 +1067,7 @@ func (st *insertStats) report(chain []*types.Block, index int) {
|
|||||||
} else {
|
} else {
|
||||||
hashes = fmt.Sprintf("%x…", end.Hash().Bytes()[:4])
|
hashes = fmt.Sprintf("%x…", end.Hash().Bytes()[:4])
|
||||||
}
|
}
|
||||||
glog.Infof("imported %4d blocks, %5d txs (%7.3f Mg) in %9v (%6.3f Mg/s). #%v [%s]%s", st.processed, txcount, float64(st.usedGas)/1000000, common.PrettyDuration(elapsed), float64(st.usedGas)*1000/float64(elapsed), end.Number(), hashes, extra)
|
log.Info(fmt.Sprintf("imported %4d blocks, %5d txs (%7.3f Mg) in %9v (%6.3f Mg/s). #%v [%s]%s", st.processed, txcount, float64(st.usedGas)/1000000, common.PrettyDuration(elapsed), float64(st.usedGas)*1000/float64(elapsed), end.Number(), hashes, extra))
|
||||||
|
|
||||||
*st = insertStats{startTime: now, lastIndex: index}
|
*st = insertStats{startTime: now, lastIndex: index}
|
||||||
}
|
}
|
||||||
@ -1150,21 +1147,24 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
|
|||||||
return fmt.Errorf("Invalid new chain")
|
return fmt.Errorf("Invalid new chain")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
// Ensure the user sees large reorgs
|
||||||
|
logFn := log.Debug
|
||||||
|
if len(oldChain) > 63 {
|
||||||
|
logFn = log.Warn
|
||||||
|
}
|
||||||
|
logFn("", "msg", log.Lazy{Fn: func() string {
|
||||||
|
oldLen, newLen := len(oldChain), len(newChain)
|
||||||
|
newLast, newFirst := newChain[0], newChain[newLen-1]
|
||||||
|
oldLast, oldFirst := oldChain[0], oldChain[oldLen-1]
|
||||||
|
|
||||||
if oldLen := len(oldChain); oldLen > 63 || glog.V(logger.Debug) {
|
return fmt.Sprintf("Chain split detected after #%v [%x…]. Reorganising chain (-%v +%v blocks), rejecting #%v-#%v [%x…/%x…] in favour of #%v-#%v [%x…/%x…]",
|
||||||
newLen := len(newChain)
|
|
||||||
newLast := newChain[0]
|
|
||||||
newFirst := newChain[newLen-1]
|
|
||||||
oldLast := oldChain[0]
|
|
||||||
oldFirst := oldChain[oldLen-1]
|
|
||||||
glog.Infof("Chain split detected after #%v [%x…]. Reorganising chain (-%v +%v blocks), rejecting #%v-#%v [%x…/%x…] in favour of #%v-#%v [%x…/%x…]",
|
|
||||||
commonBlock.Number(), commonBlock.Hash().Bytes()[:4],
|
commonBlock.Number(), commonBlock.Hash().Bytes()[:4],
|
||||||
oldLen, newLen,
|
oldLen, newLen,
|
||||||
oldFirst.Number(), oldLast.Number(),
|
oldFirst.Number(), oldLast.Number(),
|
||||||
oldFirst.Hash().Bytes()[:4], oldLast.Hash().Bytes()[:4],
|
oldFirst.Hash().Bytes()[:4], oldLast.Hash().Bytes()[:4],
|
||||||
newFirst.Number(), newLast.Number(),
|
newFirst.Number(), newLast.Number(),
|
||||||
newFirst.Hash().Bytes()[:4], newLast.Hash().Bytes()[:4])
|
newFirst.Hash().Bytes()[:4], newLast.Hash().Bytes()[:4])
|
||||||
}
|
}})
|
||||||
|
|
||||||
var addedTxs types.Transactions
|
var addedTxs types.Transactions
|
||||||
// insert blocks. Order does not matter. Last block will be written in ImportChain itself which creates the new head properly
|
// insert blocks. Order does not matter. Last block will be written in ImportChain itself which creates the new head properly
|
||||||
@ -1271,12 +1271,12 @@ func (bc *BlockChain) addBadBlock(block *types.Block) {
|
|||||||
// reportBlock logs a bad block error.
|
// reportBlock logs a bad block error.
|
||||||
func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
|
func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
|
||||||
bc.addBadBlock(block)
|
bc.addBadBlock(block)
|
||||||
if glog.V(logger.Error) {
|
log.Error("", "msg", log.Lazy{Fn: func() string {
|
||||||
var receiptString string
|
var receiptString string
|
||||||
for _, receipt := range receipts {
|
for _, receipt := range receipts {
|
||||||
receiptString += fmt.Sprintf("\t%v\n", receipt)
|
receiptString += fmt.Sprintf("\t%v\n", receipt)
|
||||||
}
|
}
|
||||||
glog.Errorf(`
|
return fmt.Sprintf(`
|
||||||
########## BAD BLOCK #########
|
########## BAD BLOCK #########
|
||||||
Chain config: %v
|
Chain config: %v
|
||||||
|
|
||||||
@ -1287,7 +1287,7 @@ Hash: 0x%x
|
|||||||
Error: %v
|
Error: %v
|
||||||
##############################
|
##############################
|
||||||
`, bc.config, block.Number(), block.Hash(), receiptString, err)
|
`, bc.config, block.Number(), block.Hash(), receiptString, err)
|
||||||
}
|
}})
|
||||||
}
|
}
|
||||||
|
|
||||||
// InsertHeaderChain attempts to insert the given header chain in to the local
|
// InsertHeaderChain attempts to insert the given header chain in to the local
|
||||||
|
@ -28,8 +28,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/metrics"
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
@ -107,7 +106,7 @@ func GetBlockNumber(db ethdb.Database, hash common.Hash) uint64 {
|
|||||||
}
|
}
|
||||||
header := new(types.Header)
|
header := new(types.Header)
|
||||||
if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
|
if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
|
||||||
glog.Fatalf("failed to decode block header: %v", err)
|
log.Crit(fmt.Sprintf("failed to decode block header: %v", err))
|
||||||
}
|
}
|
||||||
return header.Number.Uint64()
|
return header.Number.Uint64()
|
||||||
}
|
}
|
||||||
@ -167,7 +166,7 @@ func GetHeader(db ethdb.Database, hash common.Hash, number uint64) *types.Header
|
|||||||
}
|
}
|
||||||
header := new(types.Header)
|
header := new(types.Header)
|
||||||
if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
|
if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
|
||||||
glog.V(logger.Error).Infof("invalid block header RLP for hash %x: %v", hash, err)
|
log.Error(fmt.Sprintf("invalid block header RLP for hash %x: %v", hash, err))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return header
|
return header
|
||||||
@ -191,7 +190,7 @@ func GetBody(db ethdb.Database, hash common.Hash, number uint64) *types.Body {
|
|||||||
}
|
}
|
||||||
body := new(types.Body)
|
body := new(types.Body)
|
||||||
if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
|
if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
|
||||||
glog.V(logger.Error).Infof("invalid block body RLP for hash %x: %v", hash, err)
|
log.Error(fmt.Sprintf("invalid block body RLP for hash %x: %v", hash, err))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return body
|
return body
|
||||||
@ -209,7 +208,7 @@ func GetTd(db ethdb.Database, hash common.Hash, number uint64) *big.Int {
|
|||||||
}
|
}
|
||||||
td := new(big.Int)
|
td := new(big.Int)
|
||||||
if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
|
if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
|
||||||
glog.V(logger.Error).Infof("invalid block total difficulty RLP for hash %x: %v", hash, err)
|
log.Error(fmt.Sprintf("invalid block total difficulty RLP for hash %x: %v", hash, err))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return td
|
return td
|
||||||
@ -247,7 +246,7 @@ func GetBlockReceipts(db ethdb.Database, hash common.Hash, number uint64) types.
|
|||||||
}
|
}
|
||||||
storageReceipts := []*types.ReceiptForStorage{}
|
storageReceipts := []*types.ReceiptForStorage{}
|
||||||
if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
|
if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
|
||||||
glog.V(logger.Error).Infof("invalid receipt array RLP for hash %x: %v", hash, err)
|
log.Error(fmt.Sprintf("invalid receipt array RLP for hash %x: %v", hash, err))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
receipts := make(types.Receipts, len(storageReceipts))
|
receipts := make(types.Receipts, len(storageReceipts))
|
||||||
@ -294,7 +293,7 @@ func GetReceipt(db ethdb.Database, txHash common.Hash) *types.Receipt {
|
|||||||
var receipt types.ReceiptForStorage
|
var receipt types.ReceiptForStorage
|
||||||
err := rlp.DecodeBytes(data, &receipt)
|
err := rlp.DecodeBytes(data, &receipt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Debug).Infoln("GetReceipt err:", err)
|
log.Debug(fmt.Sprint("GetReceipt err:", err))
|
||||||
}
|
}
|
||||||
return (*types.Receipt)(&receipt)
|
return (*types.Receipt)(&receipt)
|
||||||
}
|
}
|
||||||
@ -303,7 +302,7 @@ func GetReceipt(db ethdb.Database, txHash common.Hash) *types.Receipt {
|
|||||||
func WriteCanonicalHash(db ethdb.Database, hash common.Hash, number uint64) error {
|
func WriteCanonicalHash(db ethdb.Database, hash common.Hash, number uint64) error {
|
||||||
key := append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...)
|
key := append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...)
|
||||||
if err := db.Put(key, hash.Bytes()); err != nil {
|
if err := db.Put(key, hash.Bytes()); err != nil {
|
||||||
glog.Fatalf("failed to store number to hash mapping into database: %v", err)
|
log.Crit(fmt.Sprintf("failed to store number to hash mapping into database: %v", err))
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -311,7 +310,7 @@ func WriteCanonicalHash(db ethdb.Database, hash common.Hash, number uint64) erro
|
|||||||
// WriteHeadHeaderHash stores the head header's hash.
|
// WriteHeadHeaderHash stores the head header's hash.
|
||||||
func WriteHeadHeaderHash(db ethdb.Database, hash common.Hash) error {
|
func WriteHeadHeaderHash(db ethdb.Database, hash common.Hash) error {
|
||||||
if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
|
if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
|
||||||
glog.Fatalf("failed to store last header's hash into database: %v", err)
|
log.Crit(fmt.Sprintf("failed to store last header's hash into database: %v", err))
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -319,7 +318,7 @@ func WriteHeadHeaderHash(db ethdb.Database, hash common.Hash) error {
|
|||||||
// WriteHeadBlockHash stores the head block's hash.
|
// WriteHeadBlockHash stores the head block's hash.
|
||||||
func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error {
|
func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error {
|
||||||
if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
|
if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
|
||||||
glog.Fatalf("failed to store last block's hash into database: %v", err)
|
log.Crit(fmt.Sprintf("failed to store last block's hash into database: %v", err))
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -327,7 +326,7 @@ func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error {
|
|||||||
// WriteHeadFastBlockHash stores the fast head block's hash.
|
// WriteHeadFastBlockHash stores the fast head block's hash.
|
||||||
func WriteHeadFastBlockHash(db ethdb.Database, hash common.Hash) error {
|
func WriteHeadFastBlockHash(db ethdb.Database, hash common.Hash) error {
|
||||||
if err := db.Put(headFastKey, hash.Bytes()); err != nil {
|
if err := db.Put(headFastKey, hash.Bytes()); err != nil {
|
||||||
glog.Fatalf("failed to store last fast block's hash into database: %v", err)
|
log.Crit(fmt.Sprintf("failed to store last fast block's hash into database: %v", err))
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -343,13 +342,13 @@ func WriteHeader(db ethdb.Database, header *types.Header) error {
|
|||||||
encNum := encodeBlockNumber(num)
|
encNum := encodeBlockNumber(num)
|
||||||
key := append(blockHashPrefix, hash...)
|
key := append(blockHashPrefix, hash...)
|
||||||
if err := db.Put(key, encNum); err != nil {
|
if err := db.Put(key, encNum); err != nil {
|
||||||
glog.Fatalf("failed to store hash to number mapping into database: %v", err)
|
log.Crit(fmt.Sprintf("failed to store hash to number mapping into database: %v", err))
|
||||||
}
|
}
|
||||||
key = append(append(headerPrefix, encNum...), hash...)
|
key = append(append(headerPrefix, encNum...), hash...)
|
||||||
if err := db.Put(key, data); err != nil {
|
if err := db.Put(key, data); err != nil {
|
||||||
glog.Fatalf("failed to store header into database: %v", err)
|
log.Crit(fmt.Sprintf("failed to store header into database: %v", err))
|
||||||
}
|
}
|
||||||
glog.V(logger.Debug).Infof("stored header #%v [%x…]", header.Number, hash[:4])
|
log.Debug(fmt.Sprintf("stored header #%v [%x…]", header.Number, hash[:4]))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -366,9 +365,9 @@ func WriteBody(db ethdb.Database, hash common.Hash, number uint64, body *types.B
|
|||||||
func WriteBodyRLP(db ethdb.Database, hash common.Hash, number uint64, rlp rlp.RawValue) error {
|
func WriteBodyRLP(db ethdb.Database, hash common.Hash, number uint64, rlp rlp.RawValue) error {
|
||||||
key := append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
|
key := append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
|
||||||
if err := db.Put(key, rlp); err != nil {
|
if err := db.Put(key, rlp); err != nil {
|
||||||
glog.Fatalf("failed to store block body into database: %v", err)
|
log.Crit(fmt.Sprintf("failed to store block body into database: %v", err))
|
||||||
}
|
}
|
||||||
glog.V(logger.Debug).Infof("stored block body [%x…]", hash.Bytes()[:4])
|
log.Debug(fmt.Sprintf("stored block body [%x…]", hash.Bytes()[:4]))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -380,9 +379,9 @@ func WriteTd(db ethdb.Database, hash common.Hash, number uint64, td *big.Int) er
|
|||||||
}
|
}
|
||||||
key := append(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...), tdSuffix...)
|
key := append(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...), tdSuffix...)
|
||||||
if err := db.Put(key, data); err != nil {
|
if err := db.Put(key, data); err != nil {
|
||||||
glog.Fatalf("failed to store block total difficulty into database: %v", err)
|
log.Crit(fmt.Sprintf("failed to store block total difficulty into database: %v", err))
|
||||||
}
|
}
|
||||||
glog.V(logger.Debug).Infof("stored block total difficulty [%x…]: %v", hash.Bytes()[:4], td)
|
log.Debug(fmt.Sprintf("stored block total difficulty [%x…]: %v", hash.Bytes()[:4], td))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -415,9 +414,9 @@ func WriteBlockReceipts(db ethdb.Database, hash common.Hash, number uint64, rece
|
|||||||
// Store the flattened receipt slice
|
// Store the flattened receipt slice
|
||||||
key := append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
|
key := append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
|
||||||
if err := db.Put(key, bytes); err != nil {
|
if err := db.Put(key, bytes); err != nil {
|
||||||
glog.Fatalf("failed to store block receipts into database: %v", err)
|
log.Crit(fmt.Sprintf("failed to store block receipts into database: %v", err))
|
||||||
}
|
}
|
||||||
glog.V(logger.Debug).Infof("stored block receipts [%x…]", hash.Bytes()[:4])
|
log.Debug(fmt.Sprintf("stored block receipts [%x…]", hash.Bytes()[:4]))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -458,7 +457,7 @@ func WriteTransactions(db ethdb.Database, block *types.Block) error {
|
|||||||
}
|
}
|
||||||
// Write the scheduled data into the database
|
// Write the scheduled data into the database
|
||||||
if err := batch.Write(); err != nil {
|
if err := batch.Write(); err != nil {
|
||||||
glog.Fatalf("failed to store transactions into database: %v", err)
|
log.Crit(fmt.Sprintf("failed to store transactions into database: %v", err))
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -490,7 +489,7 @@ func WriteReceipts(db ethdb.Database, receipts types.Receipts) error {
|
|||||||
}
|
}
|
||||||
// Write the scheduled data into the database
|
// Write the scheduled data into the database
|
||||||
if err := batch.Write(); err != nil {
|
if err := batch.Write(); err != nil {
|
||||||
glog.Fatalf("failed to store receipts into database: %v", err)
|
log.Crit(fmt.Sprintf("failed to store receipts into database: %v", err))
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -552,7 +551,7 @@ func GetBlockByHashOld(db ethdb.Database, hash common.Hash) *types.Block {
|
|||||||
}
|
}
|
||||||
var block types.StorageBlock
|
var block types.StorageBlock
|
||||||
if err := rlp.Decode(bytes.NewReader(data), &block); err != nil {
|
if err := rlp.Decode(bytes.NewReader(data), &block); err != nil {
|
||||||
glog.V(logger.Error).Infof("invalid block RLP for hash %x: %v", hash, err)
|
log.Error(fmt.Sprintf("invalid block RLP for hash %x: %v", hash, err))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return (*types.Block)(&block)
|
return (*types.Block)(&block)
|
||||||
@ -623,7 +622,7 @@ func WritePreimages(db ethdb.Database, number uint64, preimages map[common.Hash]
|
|||||||
if err := batch.Write(); err != nil {
|
if err := batch.Write(); err != nil {
|
||||||
return fmt.Errorf("preimage write fail for block %d: %v", number, err)
|
return fmt.Errorf("preimage write fail for block %d: %v", number, err)
|
||||||
}
|
}
|
||||||
glog.V(logger.Debug).Infof("%d preimages in block %d, including %d new", len(preimages), number, hitCount)
|
log.Debug(fmt.Sprintf("%d preimages in block %d, including %d new", len(preimages), number, hitCount))
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -31,8 +31,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/core/state"
|
"github.com/ethereum/go-ethereum/core/state"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -92,7 +91,7 @@ func WriteGenesisBlock(chainDb ethdb.Database, reader io.Reader) (*types.Block,
|
|||||||
}, nil, nil, nil)
|
}, nil, nil, nil)
|
||||||
|
|
||||||
if block := GetBlock(chainDb, block.Hash(), block.NumberU64()); block != nil {
|
if block := GetBlock(chainDb, block.Hash(), block.NumberU64()); block != nil {
|
||||||
glog.V(logger.Info).Infoln("Genesis block already in chain. Writing canonical number")
|
log.Info(fmt.Sprint("Genesis block already in chain. Writing canonical number"))
|
||||||
err := WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
|
err := WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -30,8 +30,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/pow"
|
"github.com/ethereum/go-ethereum/pow"
|
||||||
"github.com/hashicorp/golang-lru"
|
"github.com/hashicorp/golang-lru"
|
||||||
@ -102,7 +101,7 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, getValid
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
glog.V(logger.Info).Infoln("WARNING: Wrote default ethereum genesis block")
|
log.Info(fmt.Sprint("WARNING: Wrote default ethereum genesis block"))
|
||||||
hc.genesisHeader = genesisBlock.Header()
|
hc.genesisHeader = genesisBlock.Header()
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -155,10 +154,10 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
|
|||||||
|
|
||||||
// Irrelevant of the canonical status, write the td and header to the database
|
// Irrelevant of the canonical status, write the td and header to the database
|
||||||
if err := hc.WriteTd(hash, number, externTd); err != nil {
|
if err := hc.WriteTd(hash, number, externTd); err != nil {
|
||||||
glog.Fatalf("failed to write header total difficulty: %v", err)
|
log.Crit(fmt.Sprintf("failed to write header total difficulty: %v", err))
|
||||||
}
|
}
|
||||||
if err := WriteHeader(hc.chainDb, header); err != nil {
|
if err := WriteHeader(hc.chainDb, header); err != nil {
|
||||||
glog.Fatalf("failed to write header contents: %v", err)
|
log.Crit(fmt.Sprintf("failed to write header contents: %v", err))
|
||||||
}
|
}
|
||||||
|
|
||||||
// If the total difficulty is higher than our known, add it to the canonical chain
|
// If the total difficulty is higher than our known, add it to the canonical chain
|
||||||
@ -189,10 +188,10 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
|
|||||||
|
|
||||||
// Extend the canonical chain with the new header
|
// Extend the canonical chain with the new header
|
||||||
if err := WriteCanonicalHash(hc.chainDb, hash, number); err != nil {
|
if err := WriteCanonicalHash(hc.chainDb, hash, number); err != nil {
|
||||||
glog.Fatalf("failed to insert header number: %v", err)
|
log.Crit(fmt.Sprintf("failed to insert header number: %v", err))
|
||||||
}
|
}
|
||||||
if err := WriteHeadHeaderHash(hc.chainDb, hash); err != nil {
|
if err := WriteHeadHeaderHash(hc.chainDb, hash); err != nil {
|
||||||
glog.Fatalf("failed to insert head header hash: %v", err)
|
log.Crit(fmt.Sprintf("failed to insert head header hash: %v", err))
|
||||||
}
|
}
|
||||||
|
|
||||||
hc.currentHeaderHash, hc.currentHeader = hash, types.CopyHeader(header)
|
hc.currentHeaderHash, hc.currentHeader = hash, types.CopyHeader(header)
|
||||||
@ -231,7 +230,7 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, checkFreq int, w
|
|||||||
failure := fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])",
|
failure := fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])",
|
||||||
i-1, chain[i-1].Number.Uint64(), chain[i-1].Hash().Bytes()[:4], i, chain[i].Number.Uint64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash.Bytes()[:4])
|
i-1, chain[i-1].Number.Uint64(), chain[i-1].Hash().Bytes()[:4], i, chain[i].Number.Uint64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash.Bytes()[:4])
|
||||||
|
|
||||||
glog.V(logger.Error).Info(failure.Error())
|
log.Error(fmt.Sprint(failure.Error()))
|
||||||
return 0, failure
|
return 0, failure
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -317,7 +316,7 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, checkFreq int, w
|
|||||||
for i, header := range chain {
|
for i, header := range chain {
|
||||||
// Short circuit insertion if shutting down
|
// Short circuit insertion if shutting down
|
||||||
if hc.procInterrupt() {
|
if hc.procInterrupt() {
|
||||||
glog.V(logger.Debug).Infoln("premature abort during header chain processing")
|
log.Debug(fmt.Sprint("premature abort during header chain processing"))
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
hash := header.Hash()
|
hash := header.Hash()
|
||||||
@ -339,7 +338,7 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, checkFreq int, w
|
|||||||
if stats.ignored > 0 {
|
if stats.ignored > 0 {
|
||||||
ignored = fmt.Sprintf(" (%d ignored)", stats.ignored)
|
ignored = fmt.Sprintf(" (%d ignored)", stats.ignored)
|
||||||
}
|
}
|
||||||
glog.V(logger.Info).Infof("imported %4d headers%s in %9v. #%v [%x… / %x…]", stats.processed, ignored, common.PrettyDuration(time.Since(start)), last.Number, first.Hash().Bytes()[:4], last.Hash().Bytes()[:4])
|
log.Info(fmt.Sprintf("imported %4d headers%s in %9v. #%v [%x… / %x…]", stats.processed, ignored, common.PrettyDuration(time.Since(start)), last.Number, first.Hash().Bytes()[:4], last.Hash().Bytes()[:4]))
|
||||||
|
|
||||||
return 0, nil
|
return 0, nil
|
||||||
}
|
}
|
||||||
@ -446,7 +445,7 @@ func (hc *HeaderChain) CurrentHeader() *types.Header {
|
|||||||
// SetCurrentHeader sets the current head header of the canonical chain.
|
// SetCurrentHeader sets the current head header of the canonical chain.
|
||||||
func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {
|
func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {
|
||||||
if err := WriteHeadHeaderHash(hc.chainDb, head.Hash()); err != nil {
|
if err := WriteHeadHeaderHash(hc.chainDb, head.Hash()); err != nil {
|
||||||
glog.Fatalf("failed to insert head header hash: %v", err)
|
log.Crit(fmt.Sprintf("failed to insert head header hash: %v", err))
|
||||||
}
|
}
|
||||||
hc.currentHeader = head
|
hc.currentHeader = head
|
||||||
hc.currentHeaderHash = head.Hash()
|
hc.currentHeaderHash = head.Hash()
|
||||||
@ -489,7 +488,7 @@ func (hc *HeaderChain) SetHead(head uint64, delFn DeleteCallback) {
|
|||||||
hc.currentHeaderHash = hc.currentHeader.Hash()
|
hc.currentHeaderHash = hc.currentHeader.Hash()
|
||||||
|
|
||||||
if err := WriteHeadHeaderHash(hc.chainDb, hc.currentHeaderHash); err != nil {
|
if err := WriteHeadHeaderHash(hc.chainDb, hc.currentHeaderHash); err != nil {
|
||||||
glog.Fatalf("failed to reset head header hash: %v", err)
|
log.Crit(fmt.Sprintf("failed to reset head header hash: %v", err))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -24,8 +24,7 @@ import (
|
|||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"github.com/ethereum/go-ethereum/trie"
|
"github.com/ethereum/go-ethereum/trie"
|
||||||
)
|
)
|
||||||
@ -135,9 +134,9 @@ func (self *stateObject) markSuicided() {
|
|||||||
self.onDirty(self.Address())
|
self.onDirty(self.Address())
|
||||||
self.onDirty = nil
|
self.onDirty = nil
|
||||||
}
|
}
|
||||||
if glog.V(logger.Debug) {
|
log.Debug("", "msg", log.Lazy{Fn: func() string {
|
||||||
glog.Infof("%x: #%d %v X\n", self.Address(), self.Nonce(), self.Balance())
|
return fmt.Sprintf("%x: #%d %v X\n", self.Address(), self.Nonce(), self.Balance())
|
||||||
}
|
}})
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *stateObject) touch() {
|
func (c *stateObject) touch() {
|
||||||
@ -253,9 +252,9 @@ func (c *stateObject) AddBalance(amount *big.Int) {
|
|||||||
}
|
}
|
||||||
c.SetBalance(new(big.Int).Add(c.Balance(), amount))
|
c.SetBalance(new(big.Int).Add(c.Balance(), amount))
|
||||||
|
|
||||||
if glog.V(logger.Debug) {
|
log.Debug("", "msg", log.Lazy{Fn: func() string {
|
||||||
glog.Infof("%x: #%d %v (+ %v)\n", c.Address(), c.Nonce(), c.Balance(), amount)
|
return fmt.Sprintf("%x: #%d %v (+ %v)\n", c.Address(), c.Nonce(), c.Balance(), amount)
|
||||||
}
|
}})
|
||||||
}
|
}
|
||||||
|
|
||||||
// SubBalance removes amount from c's balance.
|
// SubBalance removes amount from c's balance.
|
||||||
@ -266,9 +265,9 @@ func (c *stateObject) SubBalance(amount *big.Int) {
|
|||||||
}
|
}
|
||||||
c.SetBalance(new(big.Int).Sub(c.Balance(), amount))
|
c.SetBalance(new(big.Int).Sub(c.Balance(), amount))
|
||||||
|
|
||||||
if glog.V(logger.Debug) {
|
log.Debug("", "msg", log.Lazy{Fn: func() string {
|
||||||
glog.Infof("%x: #%d %v (- %v)\n", c.Address(), c.Nonce(), c.Balance(), amount)
|
return fmt.Sprintf("%x: #%d %v (- %v)\n", c.Address(), c.Nonce(), c.Balance(), amount)
|
||||||
}
|
}})
|
||||||
}
|
}
|
||||||
|
|
||||||
func (self *stateObject) SetBalance(amount *big.Int) {
|
func (self *stateObject) SetBalance(amount *big.Int) {
|
||||||
|
@ -27,8 +27,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"github.com/ethereum/go-ethereum/trie"
|
"github.com/ethereum/go-ethereum/trie"
|
||||||
lru "github.com/hashicorp/golang-lru"
|
lru "github.com/hashicorp/golang-lru"
|
||||||
@ -411,7 +410,7 @@ func (self *StateDB) getStateObject(addr common.Address) (stateObject *stateObje
|
|||||||
}
|
}
|
||||||
var data Account
|
var data Account
|
||||||
if err := rlp.DecodeBytes(enc, &data); err != nil {
|
if err := rlp.DecodeBytes(enc, &data); err != nil {
|
||||||
glog.Errorf("can't decode object at %x: %v", addr[:], err)
|
log.Error(fmt.Sprintf("can't decode object at %x: %v", addr[:], err))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
// Insert into the live set.
|
// Insert into the live set.
|
||||||
@ -446,9 +445,9 @@ func (self *StateDB) createObject(addr common.Address) (newobj, prev *stateObjec
|
|||||||
newobj = newObject(self, addr, Account{}, self.MarkStateObjectDirty)
|
newobj = newObject(self, addr, Account{}, self.MarkStateObjectDirty)
|
||||||
newobj.setNonce(0) // sets the object to dirty
|
newobj.setNonce(0) // sets the object to dirty
|
||||||
if prev == nil {
|
if prev == nil {
|
||||||
if glog.V(logger.Debug) {
|
log.Debug("", "msg", log.Lazy{Fn: func() string {
|
||||||
glog.Infof("(+) %x\n", addr)
|
return fmt.Sprintf("(+) %x\n", addr)
|
||||||
}
|
}})
|
||||||
self.journal = append(self.journal, createObjectChange{account: &addr})
|
self.journal = append(self.journal, createObjectChange{account: &addr})
|
||||||
} else {
|
} else {
|
||||||
self.journal = append(self.journal, resetObjectChange{prev: prev})
|
self.journal = append(self.journal, resetObjectChange{prev: prev})
|
||||||
@ -617,7 +616,7 @@ func (s *StateDB) CommitBatch(deleteEmptyObjects bool) (root common.Hash, batch
|
|||||||
batch = s.db.NewBatch()
|
batch = s.db.NewBatch()
|
||||||
root, _ = s.commit(batch, deleteEmptyObjects)
|
root, _ = s.commit(batch, deleteEmptyObjects)
|
||||||
|
|
||||||
glog.V(logger.Debug).Infof("Trie cache stats: %d misses, %d unloads", trie.CacheMisses(), trie.CacheUnloads())
|
log.Debug(fmt.Sprintf("Trie cache stats: %d misses, %d unloads", trie.CacheMisses(), trie.CacheUnloads()))
|
||||||
return root, batch
|
return root, batch
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -17,14 +17,14 @@
|
|||||||
package core
|
package core
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
"math/big"
|
"math/big"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/core/state"
|
"github.com/ethereum/go-ethereum/core/state"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/core/vm"
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -122,7 +122,7 @@ func ApplyTransaction(config *params.ChainConfig, bc *BlockChain, gp *GasPool, s
|
|||||||
receipt.Logs = statedb.GetLogs(tx.Hash())
|
receipt.Logs = statedb.GetLogs(tx.Hash())
|
||||||
receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
|
receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
|
||||||
|
|
||||||
glog.V(logger.Debug).Infoln(receipt)
|
log.Debug(fmt.Sprint(receipt))
|
||||||
|
|
||||||
return receipt, gas, err
|
return receipt, gas, err
|
||||||
}
|
}
|
||||||
|
@ -18,12 +18,12 @@ package core
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
|
"fmt"
|
||||||
"math/big"
|
"math/big"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core/vm"
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -255,7 +255,7 @@ func (self *StateTransition) TransitionDb() (ret []byte, requiredGas, usedGas *b
|
|||||||
ret, self.gas, vmerr = evm.Call(sender, self.to().Address(), self.data, self.gas, self.value)
|
ret, self.gas, vmerr = evm.Call(sender, self.to().Address(), self.data, self.gas, self.value)
|
||||||
}
|
}
|
||||||
if vmerr != nil {
|
if vmerr != nil {
|
||||||
glog.V(logger.Debug).Infoln("vm returned with error:", err)
|
log.Debug(fmt.Sprint("vm returned with error:", err))
|
||||||
// The only possible consensus-error would be if there wasn't
|
// The only possible consensus-error would be if there wasn't
|
||||||
// sufficient balance to make the transfer happen. The first
|
// sufficient balance to make the transfer happen. The first
|
||||||
// balance transfer may never fail.
|
// balance transfer may never fail.
|
||||||
|
@ -28,8 +28,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/core/state"
|
"github.com/ethereum/go-ethereum/core/state"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/event"
|
"github.com/ethereum/go-ethereum/event"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/metrics"
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"gopkg.in/karalabe/cookiejar.v2/collections/prque"
|
"gopkg.in/karalabe/cookiejar.v2/collections/prque"
|
||||||
@ -163,12 +162,12 @@ func (pool *TxPool) eventLoop() {
|
|||||||
func (pool *TxPool) resetState() {
|
func (pool *TxPool) resetState() {
|
||||||
currentState, err := pool.currentState()
|
currentState, err := pool.currentState()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Error).Infof("Failed to get current state: %v", err)
|
log.Error(fmt.Sprintf("Failed to get current state: %v", err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
managedState := state.ManageState(currentState)
|
managedState := state.ManageState(currentState)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Error).Infof("Failed to get managed state: %v", err)
|
log.Error(fmt.Sprintf("Failed to get managed state: %v", err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
pool.pendingState = managedState
|
pool.pendingState = managedState
|
||||||
@ -193,7 +192,7 @@ func (pool *TxPool) Stop() {
|
|||||||
pool.events.Unsubscribe()
|
pool.events.Unsubscribe()
|
||||||
close(pool.quit)
|
close(pool.quit)
|
||||||
pool.wg.Wait()
|
pool.wg.Wait()
|
||||||
glog.V(logger.Info).Infoln("Transaction pool stopped")
|
log.Info(fmt.Sprint("Transaction pool stopped"))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pool *TxPool) State() *state.ManagedState {
|
func (pool *TxPool) State() *state.ManagedState {
|
||||||
@ -334,14 +333,14 @@ func (pool *TxPool) add(tx *types.Transaction) error {
|
|||||||
pool.enqueueTx(hash, tx)
|
pool.enqueueTx(hash, tx)
|
||||||
|
|
||||||
// Print a log message if low enough level is set
|
// Print a log message if low enough level is set
|
||||||
if glog.V(logger.Debug) {
|
log.Debug("", "msg", log.Lazy{Fn: func() string {
|
||||||
rcpt := "[NEW_CONTRACT]"
|
rcpt := "[NEW_CONTRACT]"
|
||||||
if to := tx.To(); to != nil {
|
if to := tx.To(); to != nil {
|
||||||
rcpt = common.Bytes2Hex(to[:4])
|
rcpt = common.Bytes2Hex(to[:4])
|
||||||
}
|
}
|
||||||
from, _ := types.Sender(pool.signer, tx) // from already verified during tx validation
|
from, _ := types.Sender(pool.signer, tx) // from already verified during tx validation
|
||||||
glog.Infof("(t) 0x%x => %s (%v) %x\n", from[:4], rcpt, tx.Value, hash)
|
return fmt.Sprintf("(t) 0x%x => %s (%v) %x\n", from[:4], rcpt, tx.Value(), hash)
|
||||||
}
|
}})
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -423,7 +422,7 @@ func (pool *TxPool) AddBatch(txs []*types.Transaction) error {
|
|||||||
|
|
||||||
for _, tx := range txs {
|
for _, tx := range txs {
|
||||||
if err := pool.add(tx); err != nil {
|
if err := pool.add(tx); err != nil {
|
||||||
glog.V(logger.Debug).Infoln("tx error:", err)
|
log.Debug(fmt.Sprint("tx error:", err))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -514,32 +513,32 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
|
|||||||
for addr, list := range pool.queue {
|
for addr, list := range pool.queue {
|
||||||
// Drop all transactions that are deemed too old (low nonce)
|
// Drop all transactions that are deemed too old (low nonce)
|
||||||
for _, tx := range list.Forward(state.GetNonce(addr)) {
|
for _, tx := range list.Forward(state.GetNonce(addr)) {
|
||||||
if glog.V(logger.Debug) {
|
log.Debug("", "msg", log.Lazy{Fn: func() string {
|
||||||
glog.Infof("Removed old queued transaction: %v", tx)
|
return fmt.Sprintf("Removed old queued transaction: %v", tx)
|
||||||
}
|
}})
|
||||||
delete(pool.all, tx.Hash())
|
delete(pool.all, tx.Hash())
|
||||||
}
|
}
|
||||||
// Drop all transactions that are too costly (low balance)
|
// Drop all transactions that are too costly (low balance)
|
||||||
drops, _ := list.Filter(state.GetBalance(addr))
|
drops, _ := list.Filter(state.GetBalance(addr))
|
||||||
for _, tx := range drops {
|
for _, tx := range drops {
|
||||||
if glog.V(logger.Debug) {
|
log.Debug("", "msg", log.Lazy{Fn: func() string {
|
||||||
glog.Infof("Removed unpayable queued transaction: %v", tx)
|
return fmt.Sprintf("Removed unpayable queued transaction: %v", tx)
|
||||||
}
|
}})
|
||||||
delete(pool.all, tx.Hash())
|
delete(pool.all, tx.Hash())
|
||||||
queuedNofundsCounter.Inc(1)
|
queuedNofundsCounter.Inc(1)
|
||||||
}
|
}
|
||||||
// Gather all executable transactions and promote them
|
// Gather all executable transactions and promote them
|
||||||
for _, tx := range list.Ready(pool.pendingState.GetNonce(addr)) {
|
for _, tx := range list.Ready(pool.pendingState.GetNonce(addr)) {
|
||||||
if glog.V(logger.Debug) {
|
log.Debug("", "msg", log.Lazy{Fn: func() string {
|
||||||
glog.Infof("Promoting queued transaction: %v", tx)
|
return fmt.Sprintf("Promoting queued transaction: %v", tx)
|
||||||
}
|
}})
|
||||||
pool.promoteTx(addr, tx.Hash(), tx)
|
pool.promoteTx(addr, tx.Hash(), tx)
|
||||||
}
|
}
|
||||||
// Drop all transactions over the allowed limit
|
// Drop all transactions over the allowed limit
|
||||||
for _, tx := range list.Cap(int(maxQueuedPerAccount)) {
|
for _, tx := range list.Cap(int(maxQueuedPerAccount)) {
|
||||||
if glog.V(logger.Debug) {
|
log.Debug("", "msg", log.Lazy{Fn: func() string {
|
||||||
glog.Infof("Removed cap-exceeding queued transaction: %v", tx)
|
return fmt.Sprintf("Removed cap-exceeding queued transaction: %v", tx)
|
||||||
}
|
}})
|
||||||
delete(pool.all, tx.Hash())
|
delete(pool.all, tx.Hash())
|
||||||
queuedRLCounter.Inc(1)
|
queuedRLCounter.Inc(1)
|
||||||
}
|
}
|
||||||
@ -651,24 +650,24 @@ func (pool *TxPool) demoteUnexecutables(state *state.StateDB) {
|
|||||||
|
|
||||||
// Drop all transactions that are deemed too old (low nonce)
|
// Drop all transactions that are deemed too old (low nonce)
|
||||||
for _, tx := range list.Forward(nonce) {
|
for _, tx := range list.Forward(nonce) {
|
||||||
if glog.V(logger.Debug) {
|
log.Debug("", "msg", log.Lazy{Fn: func() string {
|
||||||
glog.Infof("Removed old pending transaction: %v", tx)
|
return fmt.Sprintf("Removed old pending transaction: %v", tx)
|
||||||
}
|
}})
|
||||||
delete(pool.all, tx.Hash())
|
delete(pool.all, tx.Hash())
|
||||||
}
|
}
|
||||||
// Drop all transactions that are too costly (low balance), and queue any invalids back for later
|
// Drop all transactions that are too costly (low balance), and queue any invalids back for later
|
||||||
drops, invalids := list.Filter(state.GetBalance(addr))
|
drops, invalids := list.Filter(state.GetBalance(addr))
|
||||||
for _, tx := range drops {
|
for _, tx := range drops {
|
||||||
if glog.V(logger.Debug) {
|
log.Debug("", "msg", log.Lazy{Fn: func() string {
|
||||||
glog.Infof("Removed unpayable pending transaction: %v", tx)
|
return fmt.Sprintf("Removed unpayable pending transaction: %v", tx)
|
||||||
}
|
}})
|
||||||
delete(pool.all, tx.Hash())
|
delete(pool.all, tx.Hash())
|
||||||
pendingNofundsCounter.Inc(1)
|
pendingNofundsCounter.Inc(1)
|
||||||
}
|
}
|
||||||
for _, tx := range invalids {
|
for _, tx := range invalids {
|
||||||
if glog.V(logger.Debug) {
|
log.Debug("", "msg", log.Lazy{Fn: func() string {
|
||||||
glog.Infof("Demoting pending transaction: %v", tx)
|
return fmt.Sprintf("Demoting pending transaction: %v", tx)
|
||||||
}
|
}})
|
||||||
pool.enqueueTx(tx.Hash(), tx)
|
pool.enqueueTx(tx.Hash(), tx)
|
||||||
}
|
}
|
||||||
// Delete the entire queue entry if it became empty.
|
// Delete the entire queue entry if it became empty.
|
||||||
|
@ -18,11 +18,11 @@ package vm
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto/sha256"
|
"crypto/sha256"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"golang.org/x/crypto/ripemd160"
|
"golang.org/x/crypto/ripemd160"
|
||||||
)
|
)
|
||||||
@ -75,14 +75,14 @@ func (c *ecrecover) Run(in []byte) []byte {
|
|||||||
|
|
||||||
// tighter sig s values in homestead only apply to tx sigs
|
// tighter sig s values in homestead only apply to tx sigs
|
||||||
if common.Bytes2Big(in[32:63]).BitLen() > 0 || !crypto.ValidateSignatureValues(v, r, s, false) {
|
if common.Bytes2Big(in[32:63]).BitLen() > 0 || !crypto.ValidateSignatureValues(v, r, s, false) {
|
||||||
glog.V(logger.Detail).Infof("ECRECOVER error: v, r or s value invalid")
|
log.Trace(fmt.Sprintf("ECRECOVER error: v, r or s value invalid"))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
// v needs to be at the end for libsecp256k1
|
// v needs to be at the end for libsecp256k1
|
||||||
pubKey, err := crypto.Ecrecover(in[:32], append(in[64:128], v))
|
pubKey, err := crypto.Ecrecover(in[:32], append(in[64:128], v))
|
||||||
// make sure the public key is a valid one
|
// make sure the public key is a valid one
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Detail).Infoln("ECRECOVER error: ", err)
|
log.Trace(fmt.Sprint("ECRECOVER error: ", err))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -25,8 +25,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/common/math"
|
"github.com/ethereum/go-ethereum/common/math"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -124,13 +123,13 @@ func (evm *Interpreter) Run(contract *Contract, input []byte) (ret []byte, err e
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
if glog.V(logger.Debug) {
|
log.Debug("", "msg", log.Lazy{Fn: func() string {
|
||||||
glog.Infof("evm running: %x\n", codehash[:4])
|
return fmt.Sprintf("evm running: %x\n", codehash[:4])
|
||||||
tstart := time.Now()
|
}})
|
||||||
defer func() {
|
tstart := time.Now()
|
||||||
glog.Infof("evm done: %x. time: %v\n", codehash[:4], time.Since(tstart))
|
defer log.Debug("", "msg", log.Lazy{Fn: func() string {
|
||||||
}()
|
return fmt.Sprintf("evm done: %x. time: %v\n", codehash[:4], time.Since(tstart))
|
||||||
}
|
}})
|
||||||
|
|
||||||
// The Interpreter main run loop (contextual). This loop runs until either an
|
// The Interpreter main run loop (contextual). This loop runs until either an
|
||||||
// explicit STOP, RETURN or SELFDESTRUCT is executed, an error occurred during
|
// explicit STOP, RETURN or SELFDESTRUCT is executed, an error occurred during
|
||||||
|
@ -16,11 +16,7 @@
|
|||||||
|
|
||||||
package errs
|
package errs
|
||||||
|
|
||||||
import (
|
import "fmt"
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
)
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Errors implements an error handler providing standardised errors for a package.
|
Errors implements an error handler providing standardised errors for a package.
|
||||||
@ -80,9 +76,3 @@ func (self Error) Error() (message string) {
|
|||||||
}
|
}
|
||||||
return self.message
|
return self.message
|
||||||
}
|
}
|
||||||
|
|
||||||
func (self Error) Log(v glog.Verbose) {
|
|
||||||
if v {
|
|
||||||
v.Infoln(self)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
@ -37,8 +37,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/core/vm"
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
"github.com/ethereum/go-ethereum/internal/ethapi"
|
"github.com/ethereum/go-ethereum/internal/ethapi"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/miner"
|
"github.com/ethereum/go-ethereum/miner"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
@ -113,7 +112,7 @@ func (s *PublicMinerAPI) GetWork() (work [3]string, err error) {
|
|||||||
if work, err = s.agent.GetWork(); err == nil {
|
if work, err = s.agent.GetWork(); err == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
glog.V(logger.Debug).Infof("%v", err)
|
log.Debug(fmt.Sprintf("%v", err))
|
||||||
return work, fmt.Errorf("mining not ready")
|
return work, fmt.Errorf("mining not ready")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -40,8 +40,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/event"
|
"github.com/ethereum/go-ethereum/event"
|
||||||
"github.com/ethereum/go-ethereum/internal/ethapi"
|
"github.com/ethereum/go-ethereum/internal/ethapi"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/miner"
|
"github.com/ethereum/go-ethereum/miner"
|
||||||
"github.com/ethereum/go-ethereum/node"
|
"github.com/ethereum/go-ethereum/node"
|
||||||
"github.com/ethereum/go-ethereum/p2p"
|
"github.com/ethereum/go-ethereum/p2p"
|
||||||
@ -184,7 +183,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
glog.V(logger.Info).Infof("Protocol Versions: %v, Network Id: %v", ProtocolVersions, config.NetworkId)
|
log.Info(fmt.Sprintf("Protocol Versions: %v, Network Id: %v", ProtocolVersions, config.NetworkId))
|
||||||
|
|
||||||
if !config.SkipBcVersionCheck {
|
if !config.SkipBcVersionCheck {
|
||||||
bcVersion := core.GetBlockChainVersion(chainDb)
|
bcVersion := core.GetBlockChainVersion(chainDb)
|
||||||
@ -202,7 +201,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
glog.V(logger.Info).Infoln("WARNING: Wrote default ethereum genesis block")
|
log.Info(fmt.Sprint("WARNING: Wrote default ethereum genesis block"))
|
||||||
}
|
}
|
||||||
|
|
||||||
if config.ChainConfig == nil {
|
if config.ChainConfig == nil {
|
||||||
@ -212,7 +211,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
|
|||||||
|
|
||||||
eth.chainConfig = config.ChainConfig
|
eth.chainConfig = config.ChainConfig
|
||||||
|
|
||||||
glog.V(logger.Info).Infoln("Chain config:", eth.chainConfig)
|
log.Info(fmt.Sprint("Chain config:", eth.chainConfig))
|
||||||
|
|
||||||
eth.blockchain, err = core.NewBlockChain(chainDb, eth.chainConfig, eth.pow, eth.EventMux(), vm.Config{EnablePreimageRecording: config.EnablePreimageRecording})
|
eth.blockchain, err = core.NewBlockChain(chainDb, eth.chainConfig, eth.pow, eth.EventMux(), vm.Config{EnablePreimageRecording: config.EnablePreimageRecording})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -273,7 +272,7 @@ func SetupGenesisBlock(chainDb *ethdb.Database, config *Config) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
glog.V(logger.Info).Infof("Successfully wrote custom genesis block: %x", block.Hash())
|
log.Info(fmt.Sprintf("Successfully wrote custom genesis block: %x", block.Hash()))
|
||||||
}
|
}
|
||||||
// Load up a test setup if directly injected
|
// Load up a test setup if directly injected
|
||||||
if config.TestGenesisState != nil {
|
if config.TestGenesisState != nil {
|
||||||
@ -292,13 +291,13 @@ func SetupGenesisBlock(chainDb *ethdb.Database, config *Config) error {
|
|||||||
func CreatePoW(config *Config) (pow.PoW, error) {
|
func CreatePoW(config *Config) (pow.PoW, error) {
|
||||||
switch {
|
switch {
|
||||||
case config.PowFake:
|
case config.PowFake:
|
||||||
glog.V(logger.Info).Infof("ethash used in fake mode")
|
log.Info(fmt.Sprintf("ethash used in fake mode"))
|
||||||
return pow.PoW(core.FakePow{}), nil
|
return pow.PoW(core.FakePow{}), nil
|
||||||
case config.PowTest:
|
case config.PowTest:
|
||||||
glog.V(logger.Info).Infof("ethash used in test mode")
|
log.Info(fmt.Sprintf("ethash used in test mode"))
|
||||||
return ethash.NewForTesting()
|
return ethash.NewForTesting()
|
||||||
case config.PowShared:
|
case config.PowShared:
|
||||||
glog.V(logger.Info).Infof("ethash used in shared mode")
|
log.Info(fmt.Sprintf("ethash used in shared mode"))
|
||||||
return ethash.NewShared(), nil
|
return ethash.NewShared(), nil
|
||||||
default:
|
default:
|
||||||
return ethash.New(), nil
|
return ethash.New(), nil
|
||||||
@ -382,7 +381,7 @@ func (s *Ethereum) StartMining(threads int) error {
|
|||||||
eb, err := s.Etherbase()
|
eb, err := s.Etherbase()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
err = fmt.Errorf("Cannot start mining without etherbase address: %v", err)
|
err = fmt.Errorf("Cannot start mining without etherbase address: %v", err)
|
||||||
glog.V(logger.Error).Infoln(err)
|
log.Error(fmt.Sprint(err))
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
go s.miner.Start(eb, threads)
|
go s.miner.Start(eb, threads)
|
||||||
@ -470,14 +469,14 @@ func (self *Ethereum) StartAutoDAG() {
|
|||||||
return // already started
|
return // already started
|
||||||
}
|
}
|
||||||
go func() {
|
go func() {
|
||||||
glog.V(logger.Info).Infof("Automatic pregeneration of ethash DAG ON (ethash dir: %s)", ethash.DefaultDir)
|
log.Info(fmt.Sprintf("Automatic pregeneration of ethash DAG ON (ethash dir: %s)", ethash.DefaultDir))
|
||||||
var nextEpoch uint64
|
var nextEpoch uint64
|
||||||
timer := time.After(0)
|
timer := time.After(0)
|
||||||
self.autodagquit = make(chan bool)
|
self.autodagquit = make(chan bool)
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-timer:
|
case <-timer:
|
||||||
glog.V(logger.Info).Infof("checking DAG (ethash dir: %s)", ethash.DefaultDir)
|
log.Info(fmt.Sprintf("checking DAG (ethash dir: %s)", ethash.DefaultDir))
|
||||||
currentBlock := self.BlockChain().CurrentBlock().NumberU64()
|
currentBlock := self.BlockChain().CurrentBlock().NumberU64()
|
||||||
thisEpoch := currentBlock / epochLength
|
thisEpoch := currentBlock / epochLength
|
||||||
if nextEpoch <= thisEpoch {
|
if nextEpoch <= thisEpoch {
|
||||||
@ -486,19 +485,19 @@ func (self *Ethereum) StartAutoDAG() {
|
|||||||
previousDag, previousDagFull := dagFiles(thisEpoch - 1)
|
previousDag, previousDagFull := dagFiles(thisEpoch - 1)
|
||||||
os.Remove(filepath.Join(ethash.DefaultDir, previousDag))
|
os.Remove(filepath.Join(ethash.DefaultDir, previousDag))
|
||||||
os.Remove(filepath.Join(ethash.DefaultDir, previousDagFull))
|
os.Remove(filepath.Join(ethash.DefaultDir, previousDagFull))
|
||||||
glog.V(logger.Info).Infof("removed DAG for epoch %d (%s)", thisEpoch-1, previousDag)
|
log.Info(fmt.Sprintf("removed DAG for epoch %d (%s)", thisEpoch-1, previousDag))
|
||||||
}
|
}
|
||||||
nextEpoch = thisEpoch + 1
|
nextEpoch = thisEpoch + 1
|
||||||
dag, _ := dagFiles(nextEpoch)
|
dag, _ := dagFiles(nextEpoch)
|
||||||
if _, err := os.Stat(dag); os.IsNotExist(err) {
|
if _, err := os.Stat(dag); os.IsNotExist(err) {
|
||||||
glog.V(logger.Info).Infof("Pregenerating DAG for epoch %d (%s)", nextEpoch, dag)
|
log.Info(fmt.Sprintf("Pregenerating DAG for epoch %d (%s)", nextEpoch, dag))
|
||||||
err := ethash.MakeDAG(nextEpoch*epochLength, "") // "" -> ethash.DefaultDir
|
err := ethash.MakeDAG(nextEpoch*epochLength, "") // "" -> ethash.DefaultDir
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Error).Infof("Error generating DAG for epoch %d (%s)", nextEpoch, dag)
|
log.Error(fmt.Sprintf("Error generating DAG for epoch %d (%s)", nextEpoch, dag))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
glog.V(logger.Error).Infof("DAG for epoch %d (%s)", nextEpoch, dag)
|
log.Error(fmt.Sprintf("DAG for epoch %d (%s)", nextEpoch, dag))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -516,7 +515,7 @@ func (self *Ethereum) StopAutoDAG() {
|
|||||||
close(self.autodagquit)
|
close(self.autodagquit)
|
||||||
self.autodagquit = nil
|
self.autodagquit = nil
|
||||||
}
|
}
|
||||||
glog.V(logger.Info).Infof("Automatic pregeneration of ethash DAG OFF (ethash dir: %s)", ethash.DefaultDir)
|
log.Info(fmt.Sprintf("Automatic pregeneration of ethash DAG OFF (ethash dir: %s)", ethash.DefaultDir))
|
||||||
}
|
}
|
||||||
|
|
||||||
// dagFiles(epoch) returns the two alternative DAG filenames (not a path)
|
// dagFiles(epoch) returns the two alternative DAG filenames (not a path)
|
||||||
|
@ -25,8 +25,7 @@ import (
|
|||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -66,9 +65,9 @@ func sendBadBlockReport(block *types.Block, err error) {
|
|||||||
client := http.Client{Timeout: 8 * time.Second}
|
client := http.Client{Timeout: 8 * time.Second}
|
||||||
resp, err := client.Post(badBlocksURL, "application/json", bytes.NewReader(jsonStr))
|
resp, err := client.Post(badBlocksURL, "application/json", bytes.NewReader(jsonStr))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Debug).Infoln(err)
|
log.Debug(fmt.Sprint(err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
glog.V(logger.Debug).Infof("Bad Block Report posted (%d)", resp.StatusCode)
|
log.Debug(fmt.Sprintf("Bad Block Report posted (%d)", resp.StatusCode))
|
||||||
resp.Body.Close()
|
resp.Body.Close()
|
||||||
}
|
}
|
||||||
|
@ -28,8 +28,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/core"
|
"github.com/ethereum/go-ethereum/core"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -50,7 +49,7 @@ func upgradeSequentialKeys(db ethdb.Database) (stopFn func()) {
|
|||||||
return nil // empty database, nothing to do
|
return nil // empty database, nothing to do
|
||||||
}
|
}
|
||||||
|
|
||||||
glog.V(logger.Info).Infof("Upgrading chain database to use sequential keys")
|
log.Info(fmt.Sprintf("Upgrading chain database to use sequential keys"))
|
||||||
|
|
||||||
stopChn := make(chan struct{})
|
stopChn := make(chan struct{})
|
||||||
stoppedChn := make(chan struct{})
|
stoppedChn := make(chan struct{})
|
||||||
@ -73,11 +72,11 @@ func upgradeSequentialKeys(db ethdb.Database) (stopFn func()) {
|
|||||||
err, stopped = upgradeSequentialOrphanedReceipts(db, stopFn)
|
err, stopped = upgradeSequentialOrphanedReceipts(db, stopFn)
|
||||||
}
|
}
|
||||||
if err == nil && !stopped {
|
if err == nil && !stopped {
|
||||||
glog.V(logger.Info).Infof("Database conversion successful")
|
log.Info(fmt.Sprintf("Database conversion successful"))
|
||||||
db.Put(useSequentialKeys, []byte{42})
|
db.Put(useSequentialKeys, []byte{42})
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Error).Infof("Database conversion failed: %v", err)
|
log.Error(fmt.Sprintf("Database conversion failed: %v", err))
|
||||||
}
|
}
|
||||||
close(stoppedChn)
|
close(stoppedChn)
|
||||||
}()
|
}()
|
||||||
@ -106,7 +105,7 @@ func upgradeSequentialCanonicalNumbers(db ethdb.Database, stopFn func() bool) (e
|
|||||||
it.Release()
|
it.Release()
|
||||||
it = db.(*ethdb.LDBDatabase).NewIterator()
|
it = db.(*ethdb.LDBDatabase).NewIterator()
|
||||||
it.Seek(keyPtr)
|
it.Seek(keyPtr)
|
||||||
glog.V(logger.Info).Infof("converting %d canonical numbers...", cnt)
|
log.Info(fmt.Sprintf("converting %d canonical numbers...", cnt))
|
||||||
}
|
}
|
||||||
number := big.NewInt(0).SetBytes(keyPtr[10:]).Uint64()
|
number := big.NewInt(0).SetBytes(keyPtr[10:]).Uint64()
|
||||||
newKey := []byte("h12345678n")
|
newKey := []byte("h12345678n")
|
||||||
@ -125,7 +124,7 @@ func upgradeSequentialCanonicalNumbers(db ethdb.Database, stopFn func() bool) (e
|
|||||||
it.Next()
|
it.Next()
|
||||||
}
|
}
|
||||||
if cnt > 0 {
|
if cnt > 0 {
|
||||||
glog.V(logger.Info).Infof("converted %d canonical numbers...", cnt)
|
log.Info(fmt.Sprintf("converted %d canonical numbers...", cnt))
|
||||||
}
|
}
|
||||||
return nil, false
|
return nil, false
|
||||||
}
|
}
|
||||||
@ -149,7 +148,7 @@ func upgradeSequentialBlocks(db ethdb.Database, stopFn func() bool) (error, bool
|
|||||||
it.Release()
|
it.Release()
|
||||||
it = db.(*ethdb.LDBDatabase).NewIterator()
|
it = db.(*ethdb.LDBDatabase).NewIterator()
|
||||||
it.Seek(keyPtr)
|
it.Seek(keyPtr)
|
||||||
glog.V(logger.Info).Infof("converting %d blocks...", cnt)
|
log.Info(fmt.Sprintf("converting %d blocks...", cnt))
|
||||||
}
|
}
|
||||||
// convert header, body, td and block receipts
|
// convert header, body, td and block receipts
|
||||||
var keyPrefix [38]byte
|
var keyPrefix [38]byte
|
||||||
@ -177,7 +176,7 @@ func upgradeSequentialBlocks(db ethdb.Database, stopFn func() bool) (error, bool
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
if cnt > 0 {
|
if cnt > 0 {
|
||||||
glog.V(logger.Info).Infof("converted %d blocks...", cnt)
|
log.Info(fmt.Sprintf("converted %d blocks...", cnt))
|
||||||
}
|
}
|
||||||
return nil, false
|
return nil, false
|
||||||
}
|
}
|
||||||
@ -204,7 +203,7 @@ func upgradeSequentialOrphanedReceipts(db ethdb.Database, stopFn func() bool) (e
|
|||||||
it.Next()
|
it.Next()
|
||||||
}
|
}
|
||||||
if cnt > 0 {
|
if cnt > 0 {
|
||||||
glog.V(logger.Info).Infof("removed %d orphaned block receipts...", cnt)
|
log.Info(fmt.Sprintf("removed %d orphaned block receipts...", cnt))
|
||||||
}
|
}
|
||||||
return nil, false
|
return nil, false
|
||||||
}
|
}
|
||||||
@ -267,7 +266,7 @@ func upgradeChainDatabase(db ethdb.Database) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
// At least some of the database is still the old format, upgrade (skip the head block!)
|
// At least some of the database is still the old format, upgrade (skip the head block!)
|
||||||
glog.V(logger.Info).Info("Old database detected, upgrading...")
|
log.Info(fmt.Sprint("Old database detected, upgrading..."))
|
||||||
|
|
||||||
if db, ok := db.(*ethdb.LDBDatabase); ok {
|
if db, ok := db.(*ethdb.LDBDatabase); ok {
|
||||||
blockPrefix := []byte("block-hash-")
|
blockPrefix := []byte("block-hash-")
|
||||||
@ -343,7 +342,7 @@ func addMipmapBloomBins(db ethdb.Database) (err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
tstart := time.Now()
|
tstart := time.Now()
|
||||||
glog.V(logger.Info).Infoln("upgrading db log bloom bins")
|
log.Info(fmt.Sprint("upgrading db log bloom bins"))
|
||||||
for i := uint64(0); i <= latestBlock.NumberU64(); i++ {
|
for i := uint64(0); i <= latestBlock.NumberU64(); i++ {
|
||||||
hash := core.GetCanonicalHash(db, i)
|
hash := core.GetCanonicalHash(db, i)
|
||||||
if (hash == common.Hash{}) {
|
if (hash == common.Hash{}) {
|
||||||
@ -351,6 +350,6 @@ func addMipmapBloomBins(db ethdb.Database) (err error) {
|
|||||||
}
|
}
|
||||||
core.WriteMipmapBloom(db, i, core.GetBlockReceipts(db, hash, i))
|
core.WriteMipmapBloom(db, i, core.GetBlockReceipts(db, hash, i))
|
||||||
}
|
}
|
||||||
glog.V(logger.Info).Infoln("upgrade completed in", time.Since(tstart))
|
log.Info(fmt.Sprint("upgrade completed in", time.Since(tstart)))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -33,8 +33,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/event"
|
"github.com/ethereum/go-ethereum/event"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/trie"
|
"github.com/ethereum/go-ethereum/trie"
|
||||||
"github.com/rcrowley/go-metrics"
|
"github.com/rcrowley/go-metrics"
|
||||||
@ -249,9 +248,9 @@ func (d *Downloader) RegisterPeer(id string, version int, currentHead currentHea
|
|||||||
getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn,
|
getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn,
|
||||||
getReceipts receiptFetcherFn, getNodeData stateFetcherFn) error {
|
getReceipts receiptFetcherFn, getNodeData stateFetcherFn) error {
|
||||||
|
|
||||||
glog.V(logger.Detail).Infoln("Registering peer", id)
|
log.Trace(fmt.Sprint("Registering peer", id))
|
||||||
if err := d.peers.Register(newPeer(id, version, currentHead, getRelHeaders, getAbsHeaders, getBlockBodies, getReceipts, getNodeData)); err != nil {
|
if err := d.peers.Register(newPeer(id, version, currentHead, getRelHeaders, getAbsHeaders, getBlockBodies, getReceipts, getNodeData)); err != nil {
|
||||||
glog.V(logger.Error).Infoln("Register failed:", err)
|
log.Error(fmt.Sprint("Register failed:", err))
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
d.qosReduceConfidence()
|
d.qosReduceConfidence()
|
||||||
@ -264,9 +263,9 @@ func (d *Downloader) RegisterPeer(id string, version int, currentHead currentHea
|
|||||||
// the queue.
|
// the queue.
|
||||||
func (d *Downloader) UnregisterPeer(id string) error {
|
func (d *Downloader) UnregisterPeer(id string) error {
|
||||||
// Unregister the peer from the active peer set and revoke any fetch tasks
|
// Unregister the peer from the active peer set and revoke any fetch tasks
|
||||||
glog.V(logger.Detail).Infoln("Unregistering peer", id)
|
log.Trace(fmt.Sprint("Unregistering peer", id))
|
||||||
if err := d.peers.Unregister(id); err != nil {
|
if err := d.peers.Unregister(id); err != nil {
|
||||||
glog.V(logger.Error).Infoln("Unregister failed:", err)
|
log.Error(fmt.Sprint("Unregister failed:", err))
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
d.queue.Revoke(id)
|
d.queue.Revoke(id)
|
||||||
@ -285,24 +284,24 @@ func (d *Downloader) UnregisterPeer(id string) error {
|
|||||||
// Synchronise tries to sync up our local block chain with a remote peer, both
|
// Synchronise tries to sync up our local block chain with a remote peer, both
|
||||||
// adding various sanity checks as well as wrapping it with various log entries.
|
// adding various sanity checks as well as wrapping it with various log entries.
|
||||||
func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error {
|
func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error {
|
||||||
glog.V(logger.Detail).Infof("Attempting synchronisation: %v, head [%x…], TD %v", id, head[:4], td)
|
log.Trace(fmt.Sprintf("Attempting synchronisation: %v, head [%x…], TD %v", id, head[:4], td))
|
||||||
|
|
||||||
err := d.synchronise(id, head, td, mode)
|
err := d.synchronise(id, head, td, mode)
|
||||||
switch err {
|
switch err {
|
||||||
case nil:
|
case nil:
|
||||||
glog.V(logger.Detail).Infof("Synchronisation completed")
|
log.Trace(fmt.Sprintf("Synchronisation completed"))
|
||||||
|
|
||||||
case errBusy:
|
case errBusy:
|
||||||
glog.V(logger.Detail).Infof("Synchronisation already in progress")
|
log.Trace(fmt.Sprintf("Synchronisation already in progress"))
|
||||||
|
|
||||||
case errTimeout, errBadPeer, errStallingPeer,
|
case errTimeout, errBadPeer, errStallingPeer,
|
||||||
errEmptyHeaderSet, errPeersUnavailable, errTooOld,
|
errEmptyHeaderSet, errPeersUnavailable, errTooOld,
|
||||||
errInvalidAncestor, errInvalidChain:
|
errInvalidAncestor, errInvalidChain:
|
||||||
glog.V(logger.Debug).Infof("Removing peer %v: %v", id, err)
|
log.Debug(fmt.Sprintf("Removing peer %v: %v", id, err))
|
||||||
d.dropPeer(id)
|
d.dropPeer(id)
|
||||||
|
|
||||||
default:
|
default:
|
||||||
glog.V(logger.Warn).Infof("Synchronisation failed: %v", err)
|
log.Warn(fmt.Sprintf("Synchronisation failed: %v", err))
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -323,7 +322,7 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode
|
|||||||
|
|
||||||
// Post a user notification of the sync (only once per session)
|
// Post a user notification of the sync (only once per session)
|
||||||
if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
|
if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
|
||||||
glog.V(logger.Info).Infoln("Block synchronisation started")
|
log.Info(fmt.Sprint("Block synchronisation started"))
|
||||||
}
|
}
|
||||||
// Reset the queue, peer set and wake channels to clean any internal leftover state
|
// Reset the queue, peer set and wake channels to clean any internal leftover state
|
||||||
d.queue.Reset()
|
d.queue.Reset()
|
||||||
@ -388,9 +387,9 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
|
|||||||
return errTooOld
|
return errTooOld
|
||||||
}
|
}
|
||||||
|
|
||||||
glog.V(logger.Debug).Infof("Synchronising with the network using: %s [eth/%d]", p.id, p.version)
|
log.Debug(fmt.Sprintf("Synchronising with the network using: %s [eth/%d]", p.id, p.version))
|
||||||
defer func(start time.Time) {
|
defer func(start time.Time) {
|
||||||
glog.V(logger.Debug).Infof("Synchronisation terminated after %v", time.Since(start))
|
log.Debug(fmt.Sprintf("Synchronisation terminated after %v", time.Since(start)))
|
||||||
}(time.Now())
|
}(time.Now())
|
||||||
|
|
||||||
// Look up the sync boundaries: the common ancestor and the target block
|
// Look up the sync boundaries: the common ancestor and the target block
|
||||||
@ -438,7 +437,7 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
|
|||||||
origin = 0
|
origin = 0
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
glog.V(logger.Debug).Infof("Fast syncing until pivot block #%d", pivot)
|
log.Debug(fmt.Sprintf("Fast syncing until pivot block #%d", pivot))
|
||||||
}
|
}
|
||||||
d.queue.Prepare(origin+1, d.mode, pivot, latest)
|
d.queue.Prepare(origin+1, d.mode, pivot, latest)
|
||||||
if d.syncInitHook != nil {
|
if d.syncInitHook != nil {
|
||||||
@ -523,7 +522,7 @@ func (d *Downloader) Terminate() {
|
|||||||
// fetchHeight retrieves the head header of the remote peer to aid in estimating
|
// fetchHeight retrieves the head header of the remote peer to aid in estimating
|
||||||
// the total time a pending synchronisation would take.
|
// the total time a pending synchronisation would take.
|
||||||
func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) {
|
func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) {
|
||||||
glog.V(logger.Debug).Infof("%v: retrieving remote chain height", p)
|
log.Debug(fmt.Sprintf("%v: retrieving remote chain height", p))
|
||||||
|
|
||||||
// Request the advertised remote head block and wait for the response
|
// Request the advertised remote head block and wait for the response
|
||||||
head, _ := p.currentHead()
|
head, _ := p.currentHead()
|
||||||
@ -538,19 +537,19 @@ func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) {
|
|||||||
case packet := <-d.headerCh:
|
case packet := <-d.headerCh:
|
||||||
// Discard anything not from the origin peer
|
// Discard anything not from the origin peer
|
||||||
if packet.PeerId() != p.id {
|
if packet.PeerId() != p.id {
|
||||||
glog.V(logger.Debug).Infof("Received headers from incorrect peer(%s)", packet.PeerId())
|
log.Debug(fmt.Sprintf("Received headers from incorrect peer(%s)", packet.PeerId()))
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
// Make sure the peer actually gave something valid
|
// Make sure the peer actually gave something valid
|
||||||
headers := packet.(*headerPack).headers
|
headers := packet.(*headerPack).headers
|
||||||
if len(headers) != 1 {
|
if len(headers) != 1 {
|
||||||
glog.V(logger.Debug).Infof("%v: invalid number of head headers: %d != 1", p, len(headers))
|
log.Debug(fmt.Sprintf("%v: invalid number of head headers: %d != 1", p, len(headers)))
|
||||||
return nil, errBadPeer
|
return nil, errBadPeer
|
||||||
}
|
}
|
||||||
return headers[0], nil
|
return headers[0], nil
|
||||||
|
|
||||||
case <-timeout:
|
case <-timeout:
|
||||||
glog.V(logger.Debug).Infof("%v: head header timeout", p)
|
log.Debug(fmt.Sprintf("%v: head header timeout", p))
|
||||||
return nil, errTimeout
|
return nil, errTimeout
|
||||||
|
|
||||||
case <-d.bodyCh:
|
case <-d.bodyCh:
|
||||||
@ -567,7 +566,7 @@ func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) {
|
|||||||
// In the rare scenario when we ended up on a long reorganisation (i.e. none of
|
// In the rare scenario when we ended up on a long reorganisation (i.e. none of
|
||||||
// the head links match), we do a binary search to find the common ancestor.
|
// the head links match), we do a binary search to find the common ancestor.
|
||||||
func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
|
func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
|
||||||
glog.V(logger.Debug).Infof("%v: looking for common ancestor (remote height %d)", p, height)
|
log.Debug(fmt.Sprintf("%v: looking for common ancestor (remote height %d)", p, height))
|
||||||
|
|
||||||
// Figure out the valid ancestor range to prevent rewrite attacks
|
// Figure out the valid ancestor range to prevent rewrite attacks
|
||||||
floor, ceil := int64(-1), d.headHeader().Number.Uint64()
|
floor, ceil := int64(-1), d.headHeader().Number.Uint64()
|
||||||
@ -608,19 +607,19 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
|
|||||||
case packet := <-d.headerCh:
|
case packet := <-d.headerCh:
|
||||||
// Discard anything not from the origin peer
|
// Discard anything not from the origin peer
|
||||||
if packet.PeerId() != p.id {
|
if packet.PeerId() != p.id {
|
||||||
glog.V(logger.Debug).Infof("Received headers from incorrect peer(%s)", packet.PeerId())
|
log.Debug(fmt.Sprintf("Received headers from incorrect peer(%s)", packet.PeerId()))
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
// Make sure the peer actually gave something valid
|
// Make sure the peer actually gave something valid
|
||||||
headers := packet.(*headerPack).headers
|
headers := packet.(*headerPack).headers
|
||||||
if len(headers) == 0 {
|
if len(headers) == 0 {
|
||||||
glog.V(logger.Warn).Infof("%v: empty head header set", p)
|
log.Warn(fmt.Sprintf("%v: empty head header set", p))
|
||||||
return 0, errEmptyHeaderSet
|
return 0, errEmptyHeaderSet
|
||||||
}
|
}
|
||||||
// Make sure the peer's reply conforms to the request
|
// Make sure the peer's reply conforms to the request
|
||||||
for i := 0; i < len(headers); i++ {
|
for i := 0; i < len(headers); i++ {
|
||||||
if number := headers[i].Number.Int64(); number != from+int64(i)*16 {
|
if number := headers[i].Number.Int64(); number != from+int64(i)*16 {
|
||||||
glog.V(logger.Warn).Infof("%v: head header set (item %d) broke chain ordering: requested %d, got %d", p, i, from+int64(i)*16, number)
|
log.Warn(fmt.Sprintf("%v: head header set (item %d) broke chain ordering: requested %d, got %d", p, i, from+int64(i)*16, number))
|
||||||
return 0, errInvalidChain
|
return 0, errInvalidChain
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -637,7 +636,7 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
|
|||||||
|
|
||||||
// If every header is known, even future ones, the peer straight out lied about its head
|
// If every header is known, even future ones, the peer straight out lied about its head
|
||||||
if number > height && i == limit-1 {
|
if number > height && i == limit-1 {
|
||||||
glog.V(logger.Warn).Infof("%v: lied about chain head: reported %d, found above %d", p, height, number)
|
log.Warn(fmt.Sprintf("%v: lied about chain head: reported %d, found above %d", p, height, number))
|
||||||
return 0, errStallingPeer
|
return 0, errStallingPeer
|
||||||
}
|
}
|
||||||
break
|
break
|
||||||
@ -645,7 +644,7 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
case <-timeout:
|
case <-timeout:
|
||||||
glog.V(logger.Debug).Infof("%v: head header timeout", p)
|
log.Debug(fmt.Sprintf("%v: head header timeout", p))
|
||||||
return 0, errTimeout
|
return 0, errTimeout
|
||||||
|
|
||||||
case <-d.bodyCh:
|
case <-d.bodyCh:
|
||||||
@ -657,10 +656,10 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
|
|||||||
// If the head fetch already found an ancestor, return
|
// If the head fetch already found an ancestor, return
|
||||||
if !common.EmptyHash(hash) {
|
if !common.EmptyHash(hash) {
|
||||||
if int64(number) <= floor {
|
if int64(number) <= floor {
|
||||||
glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, number, hash[:4], floor)
|
log.Warn(fmt.Sprintf("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, number, hash[:4], floor))
|
||||||
return 0, errInvalidAncestor
|
return 0, errInvalidAncestor
|
||||||
}
|
}
|
||||||
glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, number, hash[:4])
|
log.Debug(fmt.Sprintf("%v: common ancestor: #%d [%x…]", p, number, hash[:4]))
|
||||||
return number, nil
|
return number, nil
|
||||||
}
|
}
|
||||||
// Ancestor not found, we need to binary search over our chain
|
// Ancestor not found, we need to binary search over our chain
|
||||||
@ -684,13 +683,13 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
|
|||||||
case packer := <-d.headerCh:
|
case packer := <-d.headerCh:
|
||||||
// Discard anything not from the origin peer
|
// Discard anything not from the origin peer
|
||||||
if packer.PeerId() != p.id {
|
if packer.PeerId() != p.id {
|
||||||
glog.V(logger.Debug).Infof("Received headers from incorrect peer(%s)", packer.PeerId())
|
log.Debug(fmt.Sprintf("Received headers from incorrect peer(%s)", packer.PeerId()))
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
// Make sure the peer actually gave something valid
|
// Make sure the peer actually gave something valid
|
||||||
headers := packer.(*headerPack).headers
|
headers := packer.(*headerPack).headers
|
||||||
if len(headers) != 1 {
|
if len(headers) != 1 {
|
||||||
glog.V(logger.Debug).Infof("%v: invalid search header set (%d)", p, len(headers))
|
log.Debug(fmt.Sprintf("%v: invalid search header set (%d)", p, len(headers)))
|
||||||
return 0, errBadPeer
|
return 0, errBadPeer
|
||||||
}
|
}
|
||||||
arrived = true
|
arrived = true
|
||||||
@ -702,13 +701,13 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
|
|||||||
}
|
}
|
||||||
header := d.getHeader(headers[0].Hash()) // Independent of sync mode, header surely exists
|
header := d.getHeader(headers[0].Hash()) // Independent of sync mode, header surely exists
|
||||||
if header.Number.Uint64() != check {
|
if header.Number.Uint64() != check {
|
||||||
glog.V(logger.Debug).Infof("%v: non requested header #%d [%x…], instead of #%d", p, header.Number, header.Hash().Bytes()[:4], check)
|
log.Debug(fmt.Sprintf("%v: non requested header #%d [%x…], instead of #%d", p, header.Number, header.Hash().Bytes()[:4], check))
|
||||||
return 0, errBadPeer
|
return 0, errBadPeer
|
||||||
}
|
}
|
||||||
start = check
|
start = check
|
||||||
|
|
||||||
case <-timeout:
|
case <-timeout:
|
||||||
glog.V(logger.Debug).Infof("%v: search header timeout", p)
|
log.Debug(fmt.Sprintf("%v: search header timeout", p))
|
||||||
return 0, errTimeout
|
return 0, errTimeout
|
||||||
|
|
||||||
case <-d.bodyCh:
|
case <-d.bodyCh:
|
||||||
@ -720,10 +719,10 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
|
|||||||
}
|
}
|
||||||
// Ensure valid ancestry and return
|
// Ensure valid ancestry and return
|
||||||
if int64(start) <= floor {
|
if int64(start) <= floor {
|
||||||
glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, start, hash[:4], floor)
|
log.Warn(fmt.Sprintf("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, start, hash[:4], floor))
|
||||||
return 0, errInvalidAncestor
|
return 0, errInvalidAncestor
|
||||||
}
|
}
|
||||||
glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, start, hash[:4])
|
log.Debug(fmt.Sprintf("%v: common ancestor: #%d [%x…]", p, start, hash[:4]))
|
||||||
return start, nil
|
return start, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -736,8 +735,8 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
|
|||||||
// can fill in the skeleton - not even the origin peer - it's assumed invalid and
|
// can fill in the skeleton - not even the origin peer - it's assumed invalid and
|
||||||
// the origin is dropped.
|
// the origin is dropped.
|
||||||
func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
|
func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
|
||||||
glog.V(logger.Debug).Infof("%v: directing header downloads from #%d", p, from)
|
log.Debug(fmt.Sprintf("%v: directing header downloads from #%d", p, from))
|
||||||
defer glog.V(logger.Debug).Infof("%v: header download terminated", p)
|
defer log.Debug(fmt.Sprintf("%v: header download terminated", p))
|
||||||
|
|
||||||
// Create a timeout timer, and the associated header fetcher
|
// Create a timeout timer, and the associated header fetcher
|
||||||
skeleton := true // Skeleton assembly phase or finishing up
|
skeleton := true // Skeleton assembly phase or finishing up
|
||||||
@ -751,10 +750,10 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
|
|||||||
timeout.Reset(d.requestTTL())
|
timeout.Reset(d.requestTTL())
|
||||||
|
|
||||||
if skeleton {
|
if skeleton {
|
||||||
glog.V(logger.Detail).Infof("%v: fetching %d skeleton headers from #%d", p, MaxHeaderFetch, from)
|
log.Trace(fmt.Sprintf("%v: fetching %d skeleton headers from #%d", p, MaxHeaderFetch, from))
|
||||||
go p.getAbsHeaders(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false)
|
go p.getAbsHeaders(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false)
|
||||||
} else {
|
} else {
|
||||||
glog.V(logger.Detail).Infof("%v: fetching %d full headers from #%d", p, MaxHeaderFetch, from)
|
log.Trace(fmt.Sprintf("%v: fetching %d full headers from #%d", p, MaxHeaderFetch, from))
|
||||||
go p.getAbsHeaders(from, MaxHeaderFetch, 0, false)
|
go p.getAbsHeaders(from, MaxHeaderFetch, 0, false)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -769,7 +768,7 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
|
|||||||
case packet := <-d.headerCh:
|
case packet := <-d.headerCh:
|
||||||
// Make sure the active peer is giving us the skeleton headers
|
// Make sure the active peer is giving us the skeleton headers
|
||||||
if packet.PeerId() != p.id {
|
if packet.PeerId() != p.id {
|
||||||
glog.V(logger.Debug).Infof("Received skeleton headers from incorrect peer (%s)", packet.PeerId())
|
log.Debug(fmt.Sprintf("Received skeleton headers from incorrect peer (%s)", packet.PeerId()))
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
headerReqTimer.UpdateSince(request)
|
headerReqTimer.UpdateSince(request)
|
||||||
@ -783,7 +782,7 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
|
|||||||
}
|
}
|
||||||
// If no more headers are inbound, notify the content fetchers and return
|
// If no more headers are inbound, notify the content fetchers and return
|
||||||
if packet.Items() == 0 {
|
if packet.Items() == 0 {
|
||||||
glog.V(logger.Debug).Infof("%v: no available headers", p)
|
log.Debug(fmt.Sprintf("%v: no available headers", p))
|
||||||
select {
|
select {
|
||||||
case d.headerProcCh <- nil:
|
case d.headerProcCh <- nil:
|
||||||
return nil
|
return nil
|
||||||
@ -797,7 +796,7 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
|
|||||||
if skeleton {
|
if skeleton {
|
||||||
filled, proced, err := d.fillHeaderSkeleton(from, headers)
|
filled, proced, err := d.fillHeaderSkeleton(from, headers)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Debug).Infof("%v: skeleton chain invalid: %v", p, err)
|
log.Debug(fmt.Sprintf("%v: skeleton chain invalid: %v", p, err))
|
||||||
return errInvalidChain
|
return errInvalidChain
|
||||||
}
|
}
|
||||||
headers = filled[proced:]
|
headers = filled[proced:]
|
||||||
@ -805,7 +804,7 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
|
|||||||
}
|
}
|
||||||
// Insert all the new headers and fetch the next batch
|
// Insert all the new headers and fetch the next batch
|
||||||
if len(headers) > 0 {
|
if len(headers) > 0 {
|
||||||
glog.V(logger.Detail).Infof("%v: schedule %d headers from #%d", p, len(headers), from)
|
log.Trace(fmt.Sprintf("%v: schedule %d headers from #%d", p, len(headers), from))
|
||||||
select {
|
select {
|
||||||
case d.headerProcCh <- headers:
|
case d.headerProcCh <- headers:
|
||||||
case <-d.cancelCh:
|
case <-d.cancelCh:
|
||||||
@ -817,7 +816,7 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
|
|||||||
|
|
||||||
case <-timeout.C:
|
case <-timeout.C:
|
||||||
// Header retrieval timed out, consider the peer bad and drop
|
// Header retrieval timed out, consider the peer bad and drop
|
||||||
glog.V(logger.Debug).Infof("%v: header request timed out", p)
|
log.Debug(fmt.Sprintf("%v: header request timed out", p))
|
||||||
headerTimeoutMeter.Mark(1)
|
headerTimeoutMeter.Mark(1)
|
||||||
d.dropPeer(p.id)
|
d.dropPeer(p.id)
|
||||||
|
|
||||||
@ -847,7 +846,7 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
|
|||||||
// The method returs the entire filled skeleton and also the number of headers
|
// The method returs the entire filled skeleton and also the number of headers
|
||||||
// already forwarded for processing.
|
// already forwarded for processing.
|
||||||
func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) {
|
func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) {
|
||||||
glog.V(logger.Debug).Infof("Filling up skeleton from #%d", from)
|
log.Debug(fmt.Sprintf("Filling up skeleton from #%d", from))
|
||||||
d.queue.ScheduleSkeleton(from, skeleton)
|
d.queue.ScheduleSkeleton(from, skeleton)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -868,7 +867,7 @@ func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) (
|
|||||||
d.queue.PendingHeaders, d.queue.InFlightHeaders, throttle, reserve,
|
d.queue.PendingHeaders, d.queue.InFlightHeaders, throttle, reserve,
|
||||||
nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "Header")
|
nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "Header")
|
||||||
|
|
||||||
glog.V(logger.Debug).Infof("Skeleton fill terminated: %v", err)
|
log.Debug(fmt.Sprintf("Skeleton fill terminated: %v", err))
|
||||||
|
|
||||||
filled, proced := d.queue.RetrieveHeaders()
|
filled, proced := d.queue.RetrieveHeaders()
|
||||||
return filled, proced, err
|
return filled, proced, err
|
||||||
@ -878,7 +877,7 @@ func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) (
|
|||||||
// available peers, reserving a chunk of blocks for each, waiting for delivery
|
// available peers, reserving a chunk of blocks for each, waiting for delivery
|
||||||
// and also periodically checking for timeouts.
|
// and also periodically checking for timeouts.
|
||||||
func (d *Downloader) fetchBodies(from uint64) error {
|
func (d *Downloader) fetchBodies(from uint64) error {
|
||||||
glog.V(logger.Debug).Infof("Downloading block bodies from #%d", from)
|
log.Debug(fmt.Sprintf("Downloading block bodies from #%d", from))
|
||||||
|
|
||||||
var (
|
var (
|
||||||
deliver = func(packet dataPack) (int, error) {
|
deliver = func(packet dataPack) (int, error) {
|
||||||
@ -894,7 +893,7 @@ func (d *Downloader) fetchBodies(from uint64) error {
|
|||||||
d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ShouldThrottleBlocks, d.queue.ReserveBodies,
|
d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ShouldThrottleBlocks, d.queue.ReserveBodies,
|
||||||
d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "Body")
|
d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "Body")
|
||||||
|
|
||||||
glog.V(logger.Debug).Infof("Block body download terminated: %v", err)
|
log.Debug(fmt.Sprintf("Block body download terminated: %v", err))
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -902,7 +901,7 @@ func (d *Downloader) fetchBodies(from uint64) error {
|
|||||||
// available peers, reserving a chunk of receipts for each, waiting for delivery
|
// available peers, reserving a chunk of receipts for each, waiting for delivery
|
||||||
// and also periodically checking for timeouts.
|
// and also periodically checking for timeouts.
|
||||||
func (d *Downloader) fetchReceipts(from uint64) error {
|
func (d *Downloader) fetchReceipts(from uint64) error {
|
||||||
glog.V(logger.Debug).Infof("Downloading receipts from #%d", from)
|
log.Debug(fmt.Sprintf("Downloading receipts from #%d", from))
|
||||||
|
|
||||||
var (
|
var (
|
||||||
deliver = func(packet dataPack) (int, error) {
|
deliver = func(packet dataPack) (int, error) {
|
||||||
@ -918,7 +917,7 @@ func (d *Downloader) fetchReceipts(from uint64) error {
|
|||||||
d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ShouldThrottleReceipts, d.queue.ReserveReceipts,
|
d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ShouldThrottleReceipts, d.queue.ReserveReceipts,
|
||||||
d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "Receipt")
|
d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "Receipt")
|
||||||
|
|
||||||
glog.V(logger.Debug).Infof("Receipt download terminated: %v", err)
|
log.Debug(fmt.Sprintf("Receipt download terminated: %v", err))
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -926,7 +925,7 @@ func (d *Downloader) fetchReceipts(from uint64) error {
|
|||||||
// available peers, reserving a chunk of nodes for each, waiting for delivery and
|
// available peers, reserving a chunk of nodes for each, waiting for delivery and
|
||||||
// also periodically checking for timeouts.
|
// also periodically checking for timeouts.
|
||||||
func (d *Downloader) fetchNodeData() error {
|
func (d *Downloader) fetchNodeData() error {
|
||||||
glog.V(logger.Debug).Infof("Downloading node state data")
|
log.Debug(fmt.Sprintf("Downloading node state data"))
|
||||||
|
|
||||||
var (
|
var (
|
||||||
deliver = func(packet dataPack) (int, error) {
|
deliver = func(packet dataPack) (int, error) {
|
||||||
@ -934,12 +933,12 @@ func (d *Downloader) fetchNodeData() error {
|
|||||||
return d.queue.DeliverNodeData(packet.PeerId(), packet.(*statePack).states, func(delivered int, progressed bool, err error) {
|
return d.queue.DeliverNodeData(packet.PeerId(), packet.(*statePack).states, func(delivered int, progressed bool, err error) {
|
||||||
// If the peer returned old-requested data, forgive
|
// If the peer returned old-requested data, forgive
|
||||||
if err == trie.ErrNotRequested {
|
if err == trie.ErrNotRequested {
|
||||||
glog.V(logger.Debug).Infof("peer %s: replied to stale state request, forgiving", packet.PeerId())
|
log.Debug(fmt.Sprintf("peer %s: replied to stale state request, forgiving", packet.PeerId()))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// If the node data processing failed, the root hash is very wrong, abort
|
// If the node data processing failed, the root hash is very wrong, abort
|
||||||
glog.V(logger.Error).Infof("peer %d: state processing failed: %v", packet.PeerId(), err)
|
log.Error(fmt.Sprintf("peer %s: state processing failed: %v", packet.PeerId(), err))
|
||||||
d.cancel()
|
d.cancel()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -958,12 +957,12 @@ func (d *Downloader) fetchNodeData() error {
|
|||||||
|
|
||||||
// If real database progress was made, reset any fast-sync pivot failure
|
// If real database progress was made, reset any fast-sync pivot failure
|
||||||
if progressed && atomic.LoadUint32(&d.fsPivotFails) > 1 {
|
if progressed && atomic.LoadUint32(&d.fsPivotFails) > 1 {
|
||||||
glog.V(logger.Debug).Infof("fast-sync progressed, resetting fail counter from %d", atomic.LoadUint32(&d.fsPivotFails))
|
log.Debug(fmt.Sprintf("fast-sync progressed, resetting fail counter from %d", atomic.LoadUint32(&d.fsPivotFails)))
|
||||||
atomic.StoreUint32(&d.fsPivotFails, 1) // Don't ever reset to 0, as that will unlock the pivot block
|
atomic.StoreUint32(&d.fsPivotFails, 1) // Don't ever reset to 0, as that will unlock the pivot block
|
||||||
}
|
}
|
||||||
// Log a message to the user and return
|
// Log a message to the user and return
|
||||||
if delivered > 0 {
|
if delivered > 0 {
|
||||||
glog.V(logger.Info).Infof("imported %3d state entries in %9v: processed %d, pending at least %d", delivered, common.PrettyDuration(time.Since(start)), syncStatsStateDone, pending)
|
log.Info(fmt.Sprintf("imported %3d state entries in %9v: processed %d, pending at least %d", delivered, common.PrettyDuration(time.Since(start)), syncStatsStateDone, pending))
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@ -980,7 +979,7 @@ func (d *Downloader) fetchNodeData() error {
|
|||||||
d.queue.PendingNodeData, d.queue.InFlightNodeData, throttle, reserve, nil, fetch,
|
d.queue.PendingNodeData, d.queue.InFlightNodeData, throttle, reserve, nil, fetch,
|
||||||
d.queue.CancelNodeData, capacity, d.peers.NodeDataIdlePeers, setIdle, "State")
|
d.queue.CancelNodeData, capacity, d.peers.NodeDataIdlePeers, setIdle, "State")
|
||||||
|
|
||||||
glog.V(logger.Debug).Infof("Node state data download terminated: %v", err)
|
log.Debug(fmt.Sprintf("Node state data download terminated: %v", err))
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1045,11 +1044,11 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
|
|||||||
// Issue a log to the user to see what's going on
|
// Issue a log to the user to see what's going on
|
||||||
switch {
|
switch {
|
||||||
case err == nil && packet.Items() == 0:
|
case err == nil && packet.Items() == 0:
|
||||||
glog.V(logger.Detail).Infof("%s: no %s delivered", peer, strings.ToLower(kind))
|
log.Trace(fmt.Sprintf("%s: no %s delivered", peer, strings.ToLower(kind)))
|
||||||
case err == nil:
|
case err == nil:
|
||||||
glog.V(logger.Detail).Infof("%s: delivered %s %s(s)", peer, packet.Stats(), strings.ToLower(kind))
|
log.Trace(fmt.Sprintf("%s: delivered %s %s(s)", peer, packet.Stats(), strings.ToLower(kind)))
|
||||||
default:
|
default:
|
||||||
glog.V(logger.Detail).Infof("%s: %s delivery failed: %v", peer, strings.ToLower(kind), err)
|
log.Trace(fmt.Sprintf("%s: %s delivery failed: %v", peer, strings.ToLower(kind), err))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Blocks assembled, try to update the progress
|
// Blocks assembled, try to update the progress
|
||||||
@ -1092,10 +1091,10 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
|
|||||||
// and latency of a peer separately, which requires pushing the measures capacity a bit and seeing
|
// and latency of a peer separately, which requires pushing the measures capacity a bit and seeing
|
||||||
// how response times reacts, to it always requests one more than the minimum (i.e. min 2).
|
// how response times reacts, to it always requests one more than the minimum (i.e. min 2).
|
||||||
if fails > 2 {
|
if fails > 2 {
|
||||||
glog.V(logger.Detail).Infof("%s: %s delivery timeout", peer, strings.ToLower(kind))
|
log.Trace(fmt.Sprintf("%s: %s delivery timeout", peer, strings.ToLower(kind)))
|
||||||
setIdle(peer, 0)
|
setIdle(peer, 0)
|
||||||
} else {
|
} else {
|
||||||
glog.V(logger.Debug).Infof("%s: stalling %s delivery, dropping", peer, strings.ToLower(kind))
|
log.Debug(fmt.Sprintf("%s: stalling %s delivery, dropping", peer, strings.ToLower(kind)))
|
||||||
d.dropPeer(pid)
|
d.dropPeer(pid)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1103,7 +1102,7 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
|
|||||||
// If there's nothing more to fetch, wait or terminate
|
// If there's nothing more to fetch, wait or terminate
|
||||||
if pending() == 0 {
|
if pending() == 0 {
|
||||||
if !inFlight() && finished {
|
if !inFlight() && finished {
|
||||||
glog.V(logger.Debug).Infof("%s fetching completed", kind)
|
log.Debug(fmt.Sprintf("%s fetching completed", kind))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
break
|
break
|
||||||
@ -1131,15 +1130,15 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
|
|||||||
if request == nil {
|
if request == nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if glog.V(logger.Detail) {
|
log.Trace("", "msg", log.Lazy{Fn: func() string {
|
||||||
if request.From > 0 {
|
if request.From > 0 {
|
||||||
glog.Infof("%s: requesting %s(s) from #%d", peer, strings.ToLower(kind), request.From)
|
return fmt.Sprintf("%s: requesting %s(s) from #%d", peer, strings.ToLower(kind), request.From)
|
||||||
} else if len(request.Headers) > 0 {
|
} else if len(request.Headers) > 0 {
|
||||||
glog.Infof("%s: requesting %d %s(s), first at #%d", peer, len(request.Headers), strings.ToLower(kind), request.Headers[0].Number)
|
return fmt.Sprintf("%s: requesting %d %s(s), first at #%d", peer, len(request.Headers), strings.ToLower(kind), request.Headers[0].Number)
|
||||||
} else {
|
} else {
|
||||||
glog.Infof("%s: requesting %d %s(s)", peer, len(request.Hashes), strings.ToLower(kind))
|
return fmt.Sprintf("%s: requesting %d %s(s)", peer, len(request.Hashes), strings.ToLower(kind))
|
||||||
}
|
}
|
||||||
}
|
}})
|
||||||
// Fetch the chunk and make sure any errors return the hashes to the queue
|
// Fetch the chunk and make sure any errors return the hashes to the queue
|
||||||
if fetchHook != nil {
|
if fetchHook != nil {
|
||||||
fetchHook(request.Headers)
|
fetchHook(request.Headers)
|
||||||
@ -1194,8 +1193,8 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
|
|||||||
if d.headBlock != nil {
|
if d.headBlock != nil {
|
||||||
curBlock = d.headBlock().Number()
|
curBlock = d.headBlock().Number()
|
||||||
}
|
}
|
||||||
glog.V(logger.Warn).Infof("Rolled back %d headers (LH: %d->%d, FB: %d->%d, LB: %d->%d)",
|
log.Warn(fmt.Sprintf("Rolled back %d headers (LH: %d->%d, FB: %d->%d, LB: %d->%d)",
|
||||||
len(hashes), lastHeader, d.headHeader().Number, lastFastBlock, curFastBlock, lastBlock, curBlock)
|
len(hashes), lastHeader, d.headHeader().Number, lastFastBlock, curFastBlock, lastBlock, curBlock))
|
||||||
|
|
||||||
// If we're already past the pivot point, this could be an attack, thread carefully
|
// If we're already past the pivot point, this could be an attack, thread carefully
|
||||||
if rollback[len(rollback)-1].Number.Uint64() > pivot {
|
if rollback[len(rollback)-1].Number.Uint64() > pivot {
|
||||||
@ -1203,7 +1202,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
|
|||||||
if atomic.LoadUint32(&d.fsPivotFails) == 0 {
|
if atomic.LoadUint32(&d.fsPivotFails) == 0 {
|
||||||
for _, header := range rollback {
|
for _, header := range rollback {
|
||||||
if header.Number.Uint64() == pivot {
|
if header.Number.Uint64() == pivot {
|
||||||
glog.V(logger.Warn).Infof("Fast-sync critical section failure, locked pivot to header #%d [%x…]", pivot, header.Hash().Bytes()[:4])
|
log.Warn(fmt.Sprintf("Fast-sync critical section failure, locked pivot to header #%d [%x…]", pivot, header.Hash().Bytes()[:4]))
|
||||||
d.fsPivotLock = header
|
d.fsPivotLock = header
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1299,7 +1298,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
|
|||||||
if n > 0 {
|
if n > 0 {
|
||||||
rollback = append(rollback, chunk[:n]...)
|
rollback = append(rollback, chunk[:n]...)
|
||||||
}
|
}
|
||||||
glog.V(logger.Debug).Infof("invalid header #%d [%x…]: %v", chunk[n].Number, chunk[n].Hash().Bytes()[:4], err)
|
log.Debug(fmt.Sprintf("invalid header #%d [%x…]: %v", chunk[n].Number, chunk[n].Hash().Bytes()[:4], err))
|
||||||
return errInvalidChain
|
return errInvalidChain
|
||||||
}
|
}
|
||||||
// All verifications passed, store newly found uncertain headers
|
// All verifications passed, store newly found uncertain headers
|
||||||
@ -1311,7 +1310,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
|
|||||||
// If we're fast syncing and just pulled in the pivot, make sure it's the one locked in
|
// If we're fast syncing and just pulled in the pivot, make sure it's the one locked in
|
||||||
if d.mode == FastSync && d.fsPivotLock != nil && chunk[0].Number.Uint64() <= pivot && chunk[len(chunk)-1].Number.Uint64() >= pivot {
|
if d.mode == FastSync && d.fsPivotLock != nil && chunk[0].Number.Uint64() <= pivot && chunk[len(chunk)-1].Number.Uint64() >= pivot {
|
||||||
if pivot := chunk[int(pivot-chunk[0].Number.Uint64())]; pivot.Hash() != d.fsPivotLock.Hash() {
|
if pivot := chunk[int(pivot-chunk[0].Number.Uint64())]; pivot.Hash() != d.fsPivotLock.Hash() {
|
||||||
glog.V(logger.Warn).Infof("Pivot doesn't match locked in version: have #%v [%x…], want #%v [%x…]", pivot.Number, pivot.Hash().Bytes()[:4], d.fsPivotLock.Number, d.fsPivotLock.Hash().Bytes()[:4])
|
log.Warn(fmt.Sprintf("Pivot doesn't match locked in version: have #%v [%x…], want #%v [%x…]", pivot.Number, pivot.Hash().Bytes()[:4], d.fsPivotLock.Number, d.fsPivotLock.Hash().Bytes()[:4]))
|
||||||
return errInvalidChain
|
return errInvalidChain
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1328,7 +1327,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
|
|||||||
// Otherwise insert the headers for content retrieval
|
// Otherwise insert the headers for content retrieval
|
||||||
inserts := d.queue.Schedule(chunk, origin)
|
inserts := d.queue.Schedule(chunk, origin)
|
||||||
if len(inserts) != len(chunk) {
|
if len(inserts) != len(chunk) {
|
||||||
glog.V(logger.Debug).Infof("stale headers")
|
log.Debug(fmt.Sprintf("stale headers"))
|
||||||
return errBadPeer
|
return errBadPeer
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1359,10 +1358,10 @@ func (d *Downloader) processContent() error {
|
|||||||
d.chainInsertHook(results)
|
d.chainInsertHook(results)
|
||||||
}
|
}
|
||||||
// Actually import the blocks
|
// Actually import the blocks
|
||||||
if glog.V(logger.Debug) {
|
log.Debug("", "msg", log.Lazy{Fn: func() string {
|
||||||
first, last := results[0].Header, results[len(results)-1].Header
|
first, last := results[0].Header, results[len(results)-1].Header
|
||||||
glog.Infof("Inserting chain with %d items (#%d [%x…] - #%d [%x…])", len(results), first.Number, first.Hash().Bytes()[:4], last.Number, last.Hash().Bytes()[:4])
|
return fmt.Sprintf("Inserting chain with %d items (#%d [%x…] - #%d [%x…])", len(results), first.Number, first.Hash().Bytes()[:4], last.Number, last.Hash().Bytes()[:4])
|
||||||
}
|
}})
|
||||||
for len(results) != 0 {
|
for len(results) != 0 {
|
||||||
// Check for any termination requests
|
// Check for any termination requests
|
||||||
select {
|
select {
|
||||||
@ -1396,14 +1395,14 @@ func (d *Downloader) processContent() error {
|
|||||||
case len(receipts) > 0:
|
case len(receipts) > 0:
|
||||||
index, err = d.insertReceipts(blocks, receipts)
|
index, err = d.insertReceipts(blocks, receipts)
|
||||||
if err == nil && blocks[len(blocks)-1].NumberU64() == pivot {
|
if err == nil && blocks[len(blocks)-1].NumberU64() == pivot {
|
||||||
glog.V(logger.Debug).Infof("Committing block #%d [%x…] as the new head", blocks[len(blocks)-1].Number(), blocks[len(blocks)-1].Hash().Bytes()[:4])
|
log.Debug(fmt.Sprintf("Committing block #%d [%x…] as the new head", blocks[len(blocks)-1].Number(), blocks[len(blocks)-1].Hash().Bytes()[:4]))
|
||||||
index, err = len(blocks)-1, d.commitHeadBlock(blocks[len(blocks)-1].Hash())
|
index, err = len(blocks)-1, d.commitHeadBlock(blocks[len(blocks)-1].Hash())
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
index, err = d.insertBlocks(blocks)
|
index, err = d.insertBlocks(blocks)
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Debug).Infof("Result #%d [%x…] processing failed: %v", results[index].Header.Number, results[index].Header.Hash().Bytes()[:4], err)
|
log.Debug(fmt.Sprintf("Result #%d [%x…] processing failed: %v", results[index].Header.Number, results[index].Header.Hash().Bytes()[:4], err))
|
||||||
return errInvalidChain
|
return errInvalidChain
|
||||||
}
|
}
|
||||||
// Shift the results to the next batch
|
// Shift the results to the next batch
|
||||||
@ -1471,7 +1470,7 @@ func (d *Downloader) qosTuner() {
|
|||||||
atomic.StoreUint64(&d.rttConfidence, conf)
|
atomic.StoreUint64(&d.rttConfidence, conf)
|
||||||
|
|
||||||
// Log the new QoS values and sleep until the next RTT
|
// Log the new QoS values and sleep until the next RTT
|
||||||
glog.V(logger.Debug).Infof("Quality of service: rtt %v, conf %.3f, ttl %v", rtt, float64(conf)/1000000.0, d.requestTTL())
|
log.Debug(fmt.Sprintf("Quality of service: rtt %v, conf %.3f, ttl %v", rtt, float64(conf)/1000000.0, d.requestTTL()))
|
||||||
select {
|
select {
|
||||||
case <-d.quitCh:
|
case <-d.quitCh:
|
||||||
return
|
return
|
||||||
@ -1501,7 +1500,7 @@ func (d *Downloader) qosReduceConfidence() {
|
|||||||
atomic.StoreUint64(&d.rttConfidence, conf)
|
atomic.StoreUint64(&d.rttConfidence, conf)
|
||||||
|
|
||||||
rtt := time.Duration(atomic.LoadUint64(&d.rttEstimate))
|
rtt := time.Duration(atomic.LoadUint64(&d.rttEstimate))
|
||||||
glog.V(logger.Debug).Infof("Quality of service: rtt %v, conf %.3f, ttl %v", rtt, float64(conf)/1000000.0, d.requestTTL())
|
log.Debug(fmt.Sprintf("Quality of service: rtt %v, conf %.3f, ttl %v", rtt, float64(conf)/1000000.0, d.requestTTL()))
|
||||||
}
|
}
|
||||||
|
|
||||||
// requestRTT returns the current target round trip time for a download request
|
// requestRTT returns the current target round trip time for a download request
|
||||||
|
@ -30,8 +30,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/trie"
|
"github.com/ethereum/go-ethereum/trie"
|
||||||
"github.com/rcrowley/go-metrics"
|
"github.com/rcrowley/go-metrics"
|
||||||
"gopkg.in/karalabe/cookiejar.v2/collections/prque"
|
"gopkg.in/karalabe/cookiejar.v2/collections/prque"
|
||||||
@ -365,20 +364,20 @@ func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
|
|||||||
// Make sure chain order is honoured and preserved throughout
|
// Make sure chain order is honoured and preserved throughout
|
||||||
hash := header.Hash()
|
hash := header.Hash()
|
||||||
if header.Number == nil || header.Number.Uint64() != from {
|
if header.Number == nil || header.Number.Uint64() != from {
|
||||||
glog.V(logger.Warn).Infof("Header #%v [%x…] broke chain ordering, expected %d", header.Number, hash[:4], from)
|
log.Warn(fmt.Sprintf("Header #%v [%x…] broke chain ordering, expected %d", header.Number, hash[:4], from))
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash {
|
if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash {
|
||||||
glog.V(logger.Warn).Infof("Header #%v [%x…] broke chain ancestry", header.Number, hash[:4])
|
log.Warn(fmt.Sprintf("Header #%v [%x…] broke chain ancestry", header.Number, hash[:4]))
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
// Make sure no duplicate requests are executed
|
// Make sure no duplicate requests are executed
|
||||||
if _, ok := q.blockTaskPool[hash]; ok {
|
if _, ok := q.blockTaskPool[hash]; ok {
|
||||||
glog.V(logger.Warn).Infof("Header #%d [%x…] already scheduled for block fetch", header.Number.Uint64(), hash[:4])
|
log.Warn(fmt.Sprintf("Header #%d [%x…] already scheduled for block fetch", header.Number.Uint64(), hash[:4]))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if _, ok := q.receiptTaskPool[hash]; ok {
|
if _, ok := q.receiptTaskPool[hash]; ok {
|
||||||
glog.V(logger.Warn).Infof("Header #%d [%x…] already scheduled for receipt fetch", header.Number.Uint64(), hash[:4])
|
log.Warn(fmt.Sprintf("Header #%d [%x…] already scheduled for receipt fetch", header.Number.Uint64(), hash[:4]))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
// Queue the header for content retrieval
|
// Queue the header for content retrieval
|
||||||
@ -392,7 +391,7 @@ func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
|
|||||||
}
|
}
|
||||||
if q.mode == FastSync && header.Number.Uint64() == q.fastSyncPivot {
|
if q.mode == FastSync && header.Number.Uint64() == q.fastSyncPivot {
|
||||||
// Pivoting point of the fast sync, switch the state retrieval to this
|
// Pivoting point of the fast sync, switch the state retrieval to this
|
||||||
glog.V(logger.Debug).Infof("Switching state downloads to %d [%x…]", header.Number.Uint64(), header.Hash().Bytes()[:4])
|
log.Debug(fmt.Sprintf("Switching state downloads to %d [%x…]", header.Number.Uint64(), header.Hash().Bytes()[:4]))
|
||||||
|
|
||||||
q.stateTaskIndex = 0
|
q.stateTaskIndex = 0
|
||||||
q.stateTaskPool = make(map[common.Hash]int)
|
q.stateTaskPool = make(map[common.Hash]int)
|
||||||
@ -873,10 +872,10 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh
|
|||||||
accepted := len(headers) == MaxHeaderFetch
|
accepted := len(headers) == MaxHeaderFetch
|
||||||
if accepted {
|
if accepted {
|
||||||
if headers[0].Number.Uint64() != request.From {
|
if headers[0].Number.Uint64() != request.From {
|
||||||
glog.V(logger.Detail).Infof("Peer %s: first header #%v [%x…] broke chain ordering, expected %d", id, headers[0].Number, headers[0].Hash().Bytes()[:4], request.From)
|
log.Trace(fmt.Sprintf("Peer %s: first header #%v [%x…] broke chain ordering, expected %d", id, headers[0].Number, headers[0].Hash().Bytes()[:4], request.From))
|
||||||
accepted = false
|
accepted = false
|
||||||
} else if headers[len(headers)-1].Hash() != target {
|
} else if headers[len(headers)-1].Hash() != target {
|
||||||
glog.V(logger.Detail).Infof("Peer %s: last header #%v [%x…] broke skeleton structure, expected %x", id, headers[len(headers)-1].Number, headers[len(headers)-1].Hash().Bytes()[:4], target[:4])
|
log.Trace(fmt.Sprintf("Peer %s: last header #%v [%x…] broke skeleton structure, expected %x", id, headers[len(headers)-1].Number, headers[len(headers)-1].Hash().Bytes()[:4], target[:4]))
|
||||||
accepted = false
|
accepted = false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -884,12 +883,12 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh
|
|||||||
for i, header := range headers[1:] {
|
for i, header := range headers[1:] {
|
||||||
hash := header.Hash()
|
hash := header.Hash()
|
||||||
if want := request.From + 1 + uint64(i); header.Number.Uint64() != want {
|
if want := request.From + 1 + uint64(i); header.Number.Uint64() != want {
|
||||||
glog.V(logger.Warn).Infof("Peer %s: header #%v [%x…] broke chain ordering, expected %d", id, header.Number, hash[:4], want)
|
log.Warn(fmt.Sprintf("Peer %s: header #%v [%x…] broke chain ordering, expected %d", id, header.Number, hash[:4], want))
|
||||||
accepted = false
|
accepted = false
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
if headers[i].Hash() != header.ParentHash {
|
if headers[i].Hash() != header.ParentHash {
|
||||||
glog.V(logger.Warn).Infof("Peer %s: header #%v [%x…] broke chain ancestry", id, header.Number, hash[:4])
|
log.Warn(fmt.Sprintf("Peer %s: header #%v [%x…] broke chain ancestry", id, header.Number, hash[:4]))
|
||||||
accepted = false
|
accepted = false
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@ -897,7 +896,7 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh
|
|||||||
}
|
}
|
||||||
// If the batch of headers wasn't accepted, mark as unavailable
|
// If the batch of headers wasn't accepted, mark as unavailable
|
||||||
if !accepted {
|
if !accepted {
|
||||||
glog.V(logger.Detail).Infof("Peer %s: skeleton filling from header #%d not accepted", id, request.From)
|
log.Trace(fmt.Sprintf("Peer %s: skeleton filling from header #%d not accepted", id, request.From))
|
||||||
|
|
||||||
miss := q.headerPeerMiss[id]
|
miss := q.headerPeerMiss[id]
|
||||||
if miss == nil {
|
if miss == nil {
|
||||||
@ -924,7 +923,7 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh
|
|||||||
|
|
||||||
select {
|
select {
|
||||||
case headerProcCh <- process:
|
case headerProcCh <- process:
|
||||||
glog.V(logger.Detail).Infof("%s: pre-scheduled %d headers from #%v", id, len(process), process[0].Number)
|
log.Trace(fmt.Sprintf("%s: pre-scheduled %d headers from #%v", id, len(process), process[0].Number))
|
||||||
q.headerProced += len(process)
|
q.headerProced += len(process)
|
||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
|
@ -26,8 +26,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core"
|
"github.com/ethereum/go-ethereum/core"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"gopkg.in/karalabe/cookiejar.v2/collections/prque"
|
"gopkg.in/karalabe/cookiejar.v2/collections/prque"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -221,7 +220,7 @@ func (f *Fetcher) Enqueue(peer string, block *types.Block) error {
|
|||||||
// FilterHeaders extracts all the headers that were explicitly requested by the fetcher,
|
// FilterHeaders extracts all the headers that were explicitly requested by the fetcher,
|
||||||
// returning those that should be handled differently.
|
// returning those that should be handled differently.
|
||||||
func (f *Fetcher) FilterHeaders(headers []*types.Header, time time.Time) []*types.Header {
|
func (f *Fetcher) FilterHeaders(headers []*types.Header, time time.Time) []*types.Header {
|
||||||
glog.V(logger.Detail).Infof("[eth/62] filtering %d headers", len(headers))
|
log.Trace(fmt.Sprintf("[eth/62] filtering %d headers", len(headers)))
|
||||||
|
|
||||||
// Send the filter channel to the fetcher
|
// Send the filter channel to the fetcher
|
||||||
filter := make(chan *headerFilterTask)
|
filter := make(chan *headerFilterTask)
|
||||||
@ -249,7 +248,7 @@ func (f *Fetcher) FilterHeaders(headers []*types.Header, time time.Time) []*type
|
|||||||
// FilterBodies extracts all the block bodies that were explicitly requested by
|
// FilterBodies extracts all the block bodies that were explicitly requested by
|
||||||
// the fetcher, returning those that should be handled differently.
|
// the fetcher, returning those that should be handled differently.
|
||||||
func (f *Fetcher) FilterBodies(transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) {
|
func (f *Fetcher) FilterBodies(transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) {
|
||||||
glog.V(logger.Detail).Infof("[eth/62] filtering %d:%d bodies", len(transactions), len(uncles))
|
log.Trace(fmt.Sprintf("[eth/62] filtering %d:%d bodies", len(transactions), len(uncles)))
|
||||||
|
|
||||||
// Send the filter channel to the fetcher
|
// Send the filter channel to the fetcher
|
||||||
filter := make(chan *bodyFilterTask)
|
filter := make(chan *bodyFilterTask)
|
||||||
@ -324,14 +323,14 @@ func (f *Fetcher) loop() {
|
|||||||
|
|
||||||
count := f.announces[notification.origin] + 1
|
count := f.announces[notification.origin] + 1
|
||||||
if count > hashLimit {
|
if count > hashLimit {
|
||||||
glog.V(logger.Debug).Infof("Peer %s: exceeded outstanding announces (%d)", notification.origin, hashLimit)
|
log.Debug(fmt.Sprintf("Peer %s: exceeded outstanding announces (%d)", notification.origin, hashLimit))
|
||||||
propAnnounceDOSMeter.Mark(1)
|
propAnnounceDOSMeter.Mark(1)
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
// If we have a valid block number, check that it's potentially useful
|
// If we have a valid block number, check that it's potentially useful
|
||||||
if notification.number > 0 {
|
if notification.number > 0 {
|
||||||
if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
|
if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
|
||||||
glog.V(logger.Debug).Infof("[eth/62] Peer %s: discarded announcement #%d [%x…], distance %d", notification.origin, notification.number, notification.hash[:4], dist)
|
log.Debug(fmt.Sprintf("[eth/62] Peer %s: discarded announcement #%d [%x…], distance %d", notification.origin, notification.number, notification.hash[:4], dist))
|
||||||
propAnnounceDropMeter.Mark(1)
|
propAnnounceDropMeter.Mark(1)
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@ -381,13 +380,15 @@ func (f *Fetcher) loop() {
|
|||||||
}
|
}
|
||||||
// Send out all block header requests
|
// Send out all block header requests
|
||||||
for peer, hashes := range request {
|
for peer, hashes := range request {
|
||||||
if glog.V(logger.Detail) && len(hashes) > 0 {
|
if len(hashes) > 0 {
|
||||||
list := "["
|
log.Trace("", "msg", log.Lazy{Fn: func() string {
|
||||||
for _, hash := range hashes {
|
list := "["
|
||||||
list += fmt.Sprintf("%x…, ", hash[:4])
|
for _, hash := range hashes {
|
||||||
}
|
list += fmt.Sprintf("%x…, ", hash[:4])
|
||||||
list = list[:len(list)-2] + "]"
|
}
|
||||||
glog.V(logger.Detail).Infof("[eth/62] Peer %s: fetching headers %s", peer, list)
|
list = list[:len(list)-2] + "]"
|
||||||
|
return fmt.Sprintf("[eth/62] Peer %s: fetching headers %s", peer, list)
|
||||||
|
}})
|
||||||
}
|
}
|
||||||
// Create a closure of the fetch and schedule in on a new thread
|
// Create a closure of the fetch and schedule in on a new thread
|
||||||
fetchHeader, hashes := f.fetching[hashes[0]].fetchHeader, hashes
|
fetchHeader, hashes := f.fetching[hashes[0]].fetchHeader, hashes
|
||||||
@ -421,14 +422,16 @@ func (f *Fetcher) loop() {
|
|||||||
}
|
}
|
||||||
// Send out all block body requests
|
// Send out all block body requests
|
||||||
for peer, hashes := range request {
|
for peer, hashes := range request {
|
||||||
if glog.V(logger.Detail) && len(hashes) > 0 {
|
if len(hashes) > 0 {
|
||||||
list := "["
|
log.Trace("", "msg", log.Lazy{Fn: func() string {
|
||||||
for _, hash := range hashes {
|
list := "["
|
||||||
list += fmt.Sprintf("%x…, ", hash[:4])
|
for _, hash := range hashes {
|
||||||
}
|
list += fmt.Sprintf("%x…, ", hash[:4])
|
||||||
list = list[:len(list)-2] + "]"
|
}
|
||||||
|
list = list[:len(list)-2] + "]"
|
||||||
|
|
||||||
glog.V(logger.Detail).Infof("[eth/62] Peer %s: fetching bodies %s", peer, list)
|
return fmt.Sprintf("[eth/62] Peer %s: fetching bodies %s", peer, list)
|
||||||
|
}})
|
||||||
}
|
}
|
||||||
// Create a closure of the fetch and schedule in on a new thread
|
// Create a closure of the fetch and schedule in on a new thread
|
||||||
if f.completingHook != nil {
|
if f.completingHook != nil {
|
||||||
@ -462,7 +465,7 @@ func (f *Fetcher) loop() {
|
|||||||
if announce := f.fetching[hash]; announce != nil && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil {
|
if announce := f.fetching[hash]; announce != nil && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil {
|
||||||
// If the delivered header does not match the promised number, drop the announcer
|
// If the delivered header does not match the promised number, drop the announcer
|
||||||
if header.Number.Uint64() != announce.number {
|
if header.Number.Uint64() != announce.number {
|
||||||
glog.V(logger.Detail).Infof("[eth/62] Peer %s: invalid block number for [%x…]: announced %d, provided %d", announce.origin, header.Hash().Bytes()[:4], announce.number, header.Number.Uint64())
|
log.Trace(fmt.Sprintf("[eth/62] Peer %s: invalid block number for [%x…]: announced %d, provided %d", announce.origin, header.Hash().Bytes()[:4], announce.number, header.Number.Uint64()))
|
||||||
f.dropPeer(announce.origin)
|
f.dropPeer(announce.origin)
|
||||||
f.forgetHash(hash)
|
f.forgetHash(hash)
|
||||||
continue
|
continue
|
||||||
@ -474,7 +477,7 @@ func (f *Fetcher) loop() {
|
|||||||
|
|
||||||
// If the block is empty (header only), short circuit into the final import queue
|
// If the block is empty (header only), short circuit into the final import queue
|
||||||
if header.TxHash == types.DeriveSha(types.Transactions{}) && header.UncleHash == types.CalcUncleHash([]*types.Header{}) {
|
if header.TxHash == types.DeriveSha(types.Transactions{}) && header.UncleHash == types.CalcUncleHash([]*types.Header{}) {
|
||||||
glog.V(logger.Detail).Infof("[eth/62] Peer %s: block #%d [%x…] empty, skipping body retrieval", announce.origin, header.Number.Uint64(), header.Hash().Bytes()[:4])
|
log.Trace(fmt.Sprintf("[eth/62] Peer %s: block #%d [%x…] empty, skipping body retrieval", announce.origin, header.Number.Uint64(), header.Hash().Bytes()[:4]))
|
||||||
|
|
||||||
block := types.NewBlockWithHeader(header)
|
block := types.NewBlockWithHeader(header)
|
||||||
block.ReceivedAt = task.time
|
block.ReceivedAt = task.time
|
||||||
@ -486,7 +489,7 @@ func (f *Fetcher) loop() {
|
|||||||
// Otherwise add to the list of blocks needing completion
|
// Otherwise add to the list of blocks needing completion
|
||||||
incomplete = append(incomplete, announce)
|
incomplete = append(incomplete, announce)
|
||||||
} else {
|
} else {
|
||||||
glog.V(logger.Detail).Infof("[eth/62] Peer %s: block #%d [%x…] already imported, discarding header", announce.origin, header.Number.Uint64(), header.Hash().Bytes()[:4])
|
log.Trace(fmt.Sprintf("[eth/62] Peer %s: block #%d [%x…] already imported, discarding header", announce.origin, header.Number.Uint64(), header.Hash().Bytes()[:4]))
|
||||||
f.forgetHash(hash)
|
f.forgetHash(hash)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
@ -617,14 +620,14 @@ func (f *Fetcher) enqueue(peer string, block *types.Block) {
|
|||||||
// Ensure the peer isn't DOSing us
|
// Ensure the peer isn't DOSing us
|
||||||
count := f.queues[peer] + 1
|
count := f.queues[peer] + 1
|
||||||
if count > blockLimit {
|
if count > blockLimit {
|
||||||
glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x…], exceeded allowance (%d)", peer, block.NumberU64(), hash.Bytes()[:4], blockLimit)
|
log.Debug(fmt.Sprintf("Peer %s: discarded block #%d [%x…], exceeded allowance (%d)", peer, block.NumberU64(), hash.Bytes()[:4], blockLimit))
|
||||||
propBroadcastDOSMeter.Mark(1)
|
propBroadcastDOSMeter.Mark(1)
|
||||||
f.forgetHash(hash)
|
f.forgetHash(hash)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// Discard any past or too distant blocks
|
// Discard any past or too distant blocks
|
||||||
if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
|
if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
|
||||||
glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x…], distance %d", peer, block.NumberU64(), hash.Bytes()[:4], dist)
|
log.Debug(fmt.Sprintf("Peer %s: discarded block #%d [%x…], distance %d", peer, block.NumberU64(), hash.Bytes()[:4], dist))
|
||||||
propBroadcastDropMeter.Mark(1)
|
propBroadcastDropMeter.Mark(1)
|
||||||
f.forgetHash(hash)
|
f.forgetHash(hash)
|
||||||
return
|
return
|
||||||
@ -641,9 +644,9 @@ func (f *Fetcher) enqueue(peer string, block *types.Block) {
|
|||||||
if f.queueChangeHook != nil {
|
if f.queueChangeHook != nil {
|
||||||
f.queueChangeHook(op.block.Hash(), true)
|
f.queueChangeHook(op.block.Hash(), true)
|
||||||
}
|
}
|
||||||
if glog.V(logger.Debug) {
|
log.Debug("", "msg", log.Lazy{Fn: func() string {
|
||||||
glog.Infof("Peer %s: queued block #%d [%x…], total %v", peer, block.NumberU64(), hash.Bytes()[:4], f.queue.Size())
|
return fmt.Sprintf("Peer %s: queued block #%d [%x…], total %v", peer, block.NumberU64(), hash.Bytes()[:4], f.queue.Size())
|
||||||
}
|
}})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -654,14 +657,14 @@ func (f *Fetcher) insert(peer string, block *types.Block) {
|
|||||||
hash := block.Hash()
|
hash := block.Hash()
|
||||||
|
|
||||||
// Run the import on a new thread
|
// Run the import on a new thread
|
||||||
glog.V(logger.Debug).Infof("Peer %s: importing block #%d [%x…]", peer, block.NumberU64(), hash[:4])
|
log.Debug(fmt.Sprintf("Peer %s: importing block #%d [%x…]", peer, block.NumberU64(), hash[:4]))
|
||||||
go func() {
|
go func() {
|
||||||
defer func() { f.done <- hash }()
|
defer func() { f.done <- hash }()
|
||||||
|
|
||||||
// If the parent's unknown, abort insertion
|
// If the parent's unknown, abort insertion
|
||||||
parent := f.getBlock(block.ParentHash())
|
parent := f.getBlock(block.ParentHash())
|
||||||
if parent == nil {
|
if parent == nil {
|
||||||
glog.V(logger.Debug).Infof("Peer %s: parent [%x…] of block #%d [%x…] unknown", peer, block.ParentHash().Bytes()[:4], block.NumberU64(), hash[:4])
|
log.Debug(fmt.Sprintf("Peer %s: parent [%x…] of block #%d [%x…] unknown", peer, block.ParentHash().Bytes()[:4], block.NumberU64(), hash[:4]))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// Quickly validate the header and propagate the block if it passes
|
// Quickly validate the header and propagate the block if it passes
|
||||||
@ -676,13 +679,13 @@ func (f *Fetcher) insert(peer string, block *types.Block) {
|
|||||||
|
|
||||||
default:
|
default:
|
||||||
// Something went very wrong, drop the peer
|
// Something went very wrong, drop the peer
|
||||||
glog.V(logger.Debug).Infof("Peer %s: block #%d [%x…] verification failed: %v", peer, block.NumberU64(), hash[:4], err)
|
log.Debug(fmt.Sprintf("Peer %s: block #%d [%x…] verification failed: %v", peer, block.NumberU64(), hash[:4], err))
|
||||||
f.dropPeer(peer)
|
f.dropPeer(peer)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// Run the actual import and log any issues
|
// Run the actual import and log any issues
|
||||||
if _, err := f.insertChain(types.Blocks{block}); err != nil {
|
if _, err := f.insertChain(types.Blocks{block}); err != nil {
|
||||||
glog.V(logger.Warn).Infof("Peer %s: block #%d [%x…] import failed: %v", peer, block.NumberU64(), hash[:4], err)
|
log.Warn(fmt.Sprintf("Peer %s: block #%d [%x…] import failed: %v", peer, block.NumberU64(), hash[:4], err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// If import succeeded, broadcast the block
|
// If import succeeded, broadcast the block
|
||||||
|
@ -17,6 +17,7 @@
|
|||||||
package gasprice
|
package gasprice
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
"math/big"
|
"math/big"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"sync"
|
"sync"
|
||||||
@ -25,8 +26,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/event"
|
"github.com/ethereum/go-ethereum/event"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@ -176,7 +176,7 @@ func (self *GasPriceOracle) processBlock(block *types.Block) {
|
|||||||
self.lastBase = newBase
|
self.lastBase = newBase
|
||||||
self.lastBaseMutex.Unlock()
|
self.lastBaseMutex.Unlock()
|
||||||
|
|
||||||
glog.V(logger.Detail).Infof("Processed block #%v, base price is %v\n", i, newBase.Int64())
|
log.Trace(fmt.Sprintf("Processed block #%v, base price is %v\n", i, newBase.Int64()))
|
||||||
}
|
}
|
||||||
|
|
||||||
// returns the lowers possible price with which a tx was or could have been included
|
// returns the lowers possible price with which a tx was or could have been included
|
||||||
|
@ -33,8 +33,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/eth/fetcher"
|
"github.com/ethereum/go-ethereum/eth/fetcher"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/event"
|
"github.com/ethereum/go-ethereum/event"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/p2p"
|
"github.com/ethereum/go-ethereum/p2p"
|
||||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
@ -116,7 +115,7 @@ func NewProtocolManager(config *params.ChainConfig, fastSync bool, networkId int
|
|||||||
}
|
}
|
||||||
// Figure out whether to allow fast sync or not
|
// Figure out whether to allow fast sync or not
|
||||||
if fastSync && blockchain.CurrentBlock().NumberU64() > 0 {
|
if fastSync && blockchain.CurrentBlock().NumberU64() > 0 {
|
||||||
glog.V(logger.Info).Infof("blockchain not empty, fast sync disabled")
|
log.Info(fmt.Sprintf("blockchain not empty, fast sync disabled"))
|
||||||
fastSync = false
|
fastSync = false
|
||||||
}
|
}
|
||||||
if fastSync {
|
if fastSync {
|
||||||
@ -179,7 +178,7 @@ func NewProtocolManager(config *params.ChainConfig, fastSync bool, networkId int
|
|||||||
manager.fetcher = fetcher.New(blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer)
|
manager.fetcher = fetcher.New(blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer)
|
||||||
|
|
||||||
if blockchain.Genesis().Hash().Hex() == defaultGenesisHash && networkId == 1 {
|
if blockchain.Genesis().Hash().Hex() == defaultGenesisHash && networkId == 1 {
|
||||||
glog.V(logger.Debug).Infoln("Bad Block Reporting is enabled")
|
log.Debug(fmt.Sprint("Bad Block Reporting is enabled"))
|
||||||
manager.badBlockReportingEnabled = true
|
manager.badBlockReportingEnabled = true
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -200,12 +199,12 @@ func (pm *ProtocolManager) removePeer(id string) {
|
|||||||
if peer == nil {
|
if peer == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
glog.V(logger.Debug).Infoln("Removing peer", id)
|
log.Debug(fmt.Sprint("Removing peer", id))
|
||||||
|
|
||||||
// Unregister the peer from the downloader and Ethereum peer set
|
// Unregister the peer from the downloader and Ethereum peer set
|
||||||
pm.downloader.UnregisterPeer(id)
|
pm.downloader.UnregisterPeer(id)
|
||||||
if err := pm.peers.Unregister(id); err != nil {
|
if err := pm.peers.Unregister(id); err != nil {
|
||||||
glog.V(logger.Error).Infoln("Removal failed:", err)
|
log.Error(fmt.Sprint("Removal failed:", err))
|
||||||
}
|
}
|
||||||
// Hard disconnect at the networking layer
|
// Hard disconnect at the networking layer
|
||||||
if peer != nil {
|
if peer != nil {
|
||||||
@ -227,7 +226,7 @@ func (pm *ProtocolManager) Start() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (pm *ProtocolManager) Stop() {
|
func (pm *ProtocolManager) Stop() {
|
||||||
glog.V(logger.Info).Infoln("Stopping ethereum protocol handler...")
|
log.Info(fmt.Sprint("Stopping ethereum protocol handler..."))
|
||||||
|
|
||||||
pm.txSub.Unsubscribe() // quits txBroadcastLoop
|
pm.txSub.Unsubscribe() // quits txBroadcastLoop
|
||||||
pm.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop
|
pm.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop
|
||||||
@ -248,7 +247,7 @@ func (pm *ProtocolManager) Stop() {
|
|||||||
// Wait for all peer handler goroutines and the loops to come down.
|
// Wait for all peer handler goroutines and the loops to come down.
|
||||||
pm.wg.Wait()
|
pm.wg.Wait()
|
||||||
|
|
||||||
glog.V(logger.Info).Infoln("Ethereum protocol handler stopped")
|
log.Info(fmt.Sprint("Ethereum protocol handler stopped"))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
|
func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
|
||||||
@ -262,21 +261,21 @@ func (pm *ProtocolManager) handle(p *peer) error {
|
|||||||
return p2p.DiscTooManyPeers
|
return p2p.DiscTooManyPeers
|
||||||
}
|
}
|
||||||
|
|
||||||
glog.V(logger.Debug).Infof("%v: peer connected [%s]", p, p.Name())
|
log.Debug(fmt.Sprintf("%v: peer connected [%s]", p, p.Name()))
|
||||||
|
|
||||||
// Execute the Ethereum handshake
|
// Execute the Ethereum handshake
|
||||||
td, head, genesis := pm.blockchain.Status()
|
td, head, genesis := pm.blockchain.Status()
|
||||||
if err := p.Handshake(pm.networkId, td, head, genesis); err != nil {
|
if err := p.Handshake(pm.networkId, td, head, genesis); err != nil {
|
||||||
glog.V(logger.Debug).Infof("%v: handshake failed: %v", p, err)
|
log.Debug(fmt.Sprintf("%v: handshake failed: %v", p, err))
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
|
if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
|
||||||
rw.Init(p.version)
|
rw.Init(p.version)
|
||||||
}
|
}
|
||||||
// Register the peer locally
|
// Register the peer locally
|
||||||
glog.V(logger.Detail).Infof("%v: adding peer", p)
|
log.Trace(fmt.Sprintf("%v: adding peer", p))
|
||||||
if err := pm.peers.Register(p); err != nil {
|
if err := pm.peers.Register(p); err != nil {
|
||||||
glog.V(logger.Error).Infof("%v: addition failed: %v", p, err)
|
log.Error(fmt.Sprintf("%v: addition failed: %v", p, err))
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer pm.removePeer(p.id)
|
defer pm.removePeer(p.id)
|
||||||
@ -297,7 +296,7 @@ func (pm *ProtocolManager) handle(p *peer) error {
|
|||||||
}
|
}
|
||||||
// Start a timer to disconnect if the peer doesn't reply in time
|
// Start a timer to disconnect if the peer doesn't reply in time
|
||||||
p.forkDrop = time.AfterFunc(daoChallengeTimeout, func() {
|
p.forkDrop = time.AfterFunc(daoChallengeTimeout, func() {
|
||||||
glog.V(logger.Debug).Infof("%v: timed out DAO fork-check, dropping", p)
|
log.Debug(fmt.Sprintf("%v: timed out DAO fork-check, dropping", p))
|
||||||
pm.removePeer(p.id)
|
pm.removePeer(p.id)
|
||||||
})
|
})
|
||||||
// Make sure it's cleaned up if the peer dies off
|
// Make sure it's cleaned up if the peer dies off
|
||||||
@ -311,7 +310,7 @@ func (pm *ProtocolManager) handle(p *peer) error {
|
|||||||
// main loop. handle incoming messages.
|
// main loop. handle incoming messages.
|
||||||
for {
|
for {
|
||||||
if err := pm.handleMsg(p); err != nil {
|
if err := pm.handleMsg(p); err != nil {
|
||||||
glog.V(logger.Debug).Infof("%v: message handling failed: %v", p, err)
|
log.Debug(fmt.Sprintf("%v: message handling failed: %v", p, err))
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -387,7 +386,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
|||||||
)
|
)
|
||||||
if next <= current {
|
if next <= current {
|
||||||
infos, _ := json.MarshalIndent(p.Peer.Info(), "", " ")
|
infos, _ := json.MarshalIndent(p.Peer.Info(), "", " ")
|
||||||
glog.V(logger.Warn).Infof("%v: GetBlockHeaders skip overflow attack (current %v, skip %v, next %v)\nMalicious peer infos: %s", p, current, query.Skip, next, infos)
|
log.Warn(fmt.Sprintf("%v: GetBlockHeaders skip overflow attack (current %v, skip %v, next %v)\nMalicious peer infos: %s", p, current, query.Skip, next, infos))
|
||||||
unknown = true
|
unknown = true
|
||||||
} else {
|
} else {
|
||||||
if header := pm.blockchain.GetHeaderByNumber(next); header != nil {
|
if header := pm.blockchain.GetHeaderByNumber(next); header != nil {
|
||||||
@ -435,7 +434,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
|||||||
}
|
}
|
||||||
// If we're seemingly on the same chain, disable the drop timer
|
// If we're seemingly on the same chain, disable the drop timer
|
||||||
if verifyDAO {
|
if verifyDAO {
|
||||||
glog.V(logger.Debug).Infof("%v: seems to be on the same side of the DAO fork", p)
|
log.Debug(fmt.Sprintf("%v: seems to be on the same side of the DAO fork", p))
|
||||||
p.forkDrop.Stop()
|
p.forkDrop.Stop()
|
||||||
p.forkDrop = nil
|
p.forkDrop = nil
|
||||||
return nil
|
return nil
|
||||||
@ -452,10 +451,10 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
|||||||
|
|
||||||
// Validate the header and either drop the peer or continue
|
// Validate the header and either drop the peer or continue
|
||||||
if err := core.ValidateDAOHeaderExtraData(pm.chainconfig, headers[0]); err != nil {
|
if err := core.ValidateDAOHeaderExtraData(pm.chainconfig, headers[0]); err != nil {
|
||||||
glog.V(logger.Debug).Infof("%v: verified to be on the other side of the DAO fork, dropping", p)
|
log.Debug(fmt.Sprintf("%v: verified to be on the other side of the DAO fork, dropping", p))
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
glog.V(logger.Debug).Infof("%v: verified to be on the same side of the DAO fork", p)
|
log.Debug(fmt.Sprintf("%v: verified to be on the same side of the DAO fork", p))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
// Irrelevant of the fork checks, send the header to the fetcher just in case
|
// Irrelevant of the fork checks, send the header to the fetcher just in case
|
||||||
@ -464,7 +463,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
|||||||
if len(headers) > 0 || !filter {
|
if len(headers) > 0 || !filter {
|
||||||
err := pm.downloader.DeliverHeaders(p.id, headers)
|
err := pm.downloader.DeliverHeaders(p.id, headers)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Debug).Infoln(err)
|
log.Debug(fmt.Sprint(err))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -517,7 +516,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
|||||||
if len(trasactions) > 0 || len(uncles) > 0 || !filter {
|
if len(trasactions) > 0 || len(uncles) > 0 || !filter {
|
||||||
err := pm.downloader.DeliverBodies(p.id, trasactions, uncles)
|
err := pm.downloader.DeliverBodies(p.id, trasactions, uncles)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Debug).Infoln(err)
|
log.Debug(fmt.Sprint(err))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -556,7 +555,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
|||||||
}
|
}
|
||||||
// Deliver all to the downloader
|
// Deliver all to the downloader
|
||||||
if err := pm.downloader.DeliverNodeData(p.id, data); err != nil {
|
if err := pm.downloader.DeliverNodeData(p.id, data); err != nil {
|
||||||
glog.V(logger.Debug).Infof("failed to deliver node state data: %v", err)
|
log.Debug(fmt.Sprintf("failed to deliver node state data: %v", err))
|
||||||
}
|
}
|
||||||
|
|
||||||
case p.version >= eth63 && msg.Code == GetReceiptsMsg:
|
case p.version >= eth63 && msg.Code == GetReceiptsMsg:
|
||||||
@ -587,7 +586,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
|||||||
}
|
}
|
||||||
// If known, encode and queue for response packet
|
// If known, encode and queue for response packet
|
||||||
if encoded, err := rlp.EncodeToBytes(results); err != nil {
|
if encoded, err := rlp.EncodeToBytes(results); err != nil {
|
||||||
glog.V(logger.Error).Infof("failed to encode receipt: %v", err)
|
log.Error(fmt.Sprintf("failed to encode receipt: %v", err))
|
||||||
} else {
|
} else {
|
||||||
receipts = append(receipts, encoded)
|
receipts = append(receipts, encoded)
|
||||||
bytes += len(encoded)
|
bytes += len(encoded)
|
||||||
@ -603,7 +602,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
|||||||
}
|
}
|
||||||
// Deliver all to the downloader
|
// Deliver all to the downloader
|
||||||
if err := pm.downloader.DeliverReceipts(p.id, receipts); err != nil {
|
if err := pm.downloader.DeliverReceipts(p.id, receipts); err != nil {
|
||||||
glog.V(logger.Debug).Infof("failed to deliver receipts: %v", err)
|
log.Debug(fmt.Sprintf("failed to deliver receipts: %v", err))
|
||||||
}
|
}
|
||||||
|
|
||||||
case msg.Code == NewBlockHashesMsg:
|
case msg.Code == NewBlockHashesMsg:
|
||||||
@ -696,7 +695,7 @@ func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
|
|||||||
if parent := pm.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1); parent != nil {
|
if parent := pm.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1); parent != nil {
|
||||||
td = new(big.Int).Add(block.Difficulty(), pm.blockchain.GetTd(block.ParentHash(), block.NumberU64()-1))
|
td = new(big.Int).Add(block.Difficulty(), pm.blockchain.GetTd(block.ParentHash(), block.NumberU64()-1))
|
||||||
} else {
|
} else {
|
||||||
glog.V(logger.Error).Infof("propagating dangling block #%d [%x]", block.NumberU64(), hash[:4])
|
log.Error(fmt.Sprintf("propagating dangling block #%d [%x]", block.NumberU64(), hash[:4]))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// Send the block to a subset of our peers
|
// Send the block to a subset of our peers
|
||||||
@ -704,14 +703,14 @@ func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
|
|||||||
for _, peer := range transfer {
|
for _, peer := range transfer {
|
||||||
peer.SendNewBlock(block, td)
|
peer.SendNewBlock(block, td)
|
||||||
}
|
}
|
||||||
glog.V(logger.Detail).Infof("propagated block %x to %d peers in %v", hash[:4], len(transfer), time.Since(block.ReceivedAt))
|
log.Trace(fmt.Sprintf("propagated block %x to %d peers in %v", hash[:4], len(transfer), time.Since(block.ReceivedAt)))
|
||||||
}
|
}
|
||||||
// Otherwise if the block is indeed in out own chain, announce it
|
// Otherwise if the block is indeed in out own chain, announce it
|
||||||
if pm.blockchain.HasBlock(hash) {
|
if pm.blockchain.HasBlock(hash) {
|
||||||
for _, peer := range peers {
|
for _, peer := range peers {
|
||||||
peer.SendNewBlockHashes([]common.Hash{hash}, []uint64{block.NumberU64()})
|
peer.SendNewBlockHashes([]common.Hash{hash}, []uint64{block.NumberU64()})
|
||||||
}
|
}
|
||||||
glog.V(logger.Detail).Infof("announced block %x to %d peers in %v", hash[:4], len(peers), time.Since(block.ReceivedAt))
|
log.Trace(fmt.Sprintf("announced block %x to %d peers in %v", hash[:4], len(peers), time.Since(block.ReceivedAt)))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -724,7 +723,7 @@ func (pm *ProtocolManager) BroadcastTx(hash common.Hash, tx *types.Transaction)
|
|||||||
for _, peer := range peers {
|
for _, peer := range peers {
|
||||||
peer.SendTransactions(types.Transactions{tx})
|
peer.SendTransactions(types.Transactions{tx})
|
||||||
}
|
}
|
||||||
glog.V(logger.Detail).Infoln("broadcast tx to", len(peers), "peers")
|
log.Trace(fmt.Sprint("broadcast tx to", len(peers), "peers"))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Mined broadcast loop
|
// Mined broadcast loop
|
||||||
|
15
eth/peer.go
15
eth/peer.go
@ -25,8 +25,7 @@ import (
|
|||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/p2p"
|
"github.com/ethereum/go-ethereum/p2p"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"gopkg.in/fatih/set.v0"
|
"gopkg.in/fatih/set.v0"
|
||||||
@ -192,41 +191,41 @@ func (p *peer) SendReceiptsRLP(receipts []rlp.RawValue) error {
|
|||||||
// RequestHeaders is a wrapper around the header query functions to fetch a
|
// RequestHeaders is a wrapper around the header query functions to fetch a
|
||||||
// single header. It is used solely by the fetcher.
|
// single header. It is used solely by the fetcher.
|
||||||
func (p *peer) RequestOneHeader(hash common.Hash) error {
|
func (p *peer) RequestOneHeader(hash common.Hash) error {
|
||||||
glog.V(logger.Debug).Infof("%v fetching a single header: %x", p, hash)
|
log.Debug(fmt.Sprintf("%v fetching a single header: %x", p, hash))
|
||||||
return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Hash: hash}, Amount: uint64(1), Skip: uint64(0), Reverse: false})
|
return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Hash: hash}, Amount: uint64(1), Skip: uint64(0), Reverse: false})
|
||||||
}
|
}
|
||||||
|
|
||||||
// RequestHeadersByHash fetches a batch of blocks' headers corresponding to the
|
// RequestHeadersByHash fetches a batch of blocks' headers corresponding to the
|
||||||
// specified header query, based on the hash of an origin block.
|
// specified header query, based on the hash of an origin block.
|
||||||
func (p *peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
|
func (p *peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
|
||||||
glog.V(logger.Debug).Infof("%v fetching %d headers from %x, skipping %d (reverse = %v)", p, amount, origin[:4], skip, reverse)
|
log.Debug(fmt.Sprintf("%v fetching %d headers from %x, skipping %d (reverse = %v)", p, amount, origin[:4], skip, reverse))
|
||||||
return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
|
return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
|
||||||
}
|
}
|
||||||
|
|
||||||
// RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the
|
// RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the
|
||||||
// specified header query, based on the number of an origin block.
|
// specified header query, based on the number of an origin block.
|
||||||
func (p *peer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
|
func (p *peer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
|
||||||
glog.V(logger.Debug).Infof("%v fetching %d headers from #%d, skipping %d (reverse = %v)", p, amount, origin, skip, reverse)
|
log.Debug(fmt.Sprintf("%v fetching %d headers from #%d, skipping %d (reverse = %v)", p, amount, origin, skip, reverse))
|
||||||
return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
|
return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
|
||||||
}
|
}
|
||||||
|
|
||||||
// RequestBodies fetches a batch of blocks' bodies corresponding to the hashes
|
// RequestBodies fetches a batch of blocks' bodies corresponding to the hashes
|
||||||
// specified.
|
// specified.
|
||||||
func (p *peer) RequestBodies(hashes []common.Hash) error {
|
func (p *peer) RequestBodies(hashes []common.Hash) error {
|
||||||
glog.V(logger.Debug).Infof("%v fetching %d block bodies", p, len(hashes))
|
log.Debug(fmt.Sprintf("%v fetching %d block bodies", p, len(hashes)))
|
||||||
return p2p.Send(p.rw, GetBlockBodiesMsg, hashes)
|
return p2p.Send(p.rw, GetBlockBodiesMsg, hashes)
|
||||||
}
|
}
|
||||||
|
|
||||||
// RequestNodeData fetches a batch of arbitrary data from a node's known state
|
// RequestNodeData fetches a batch of arbitrary data from a node's known state
|
||||||
// data, corresponding to the specified hashes.
|
// data, corresponding to the specified hashes.
|
||||||
func (p *peer) RequestNodeData(hashes []common.Hash) error {
|
func (p *peer) RequestNodeData(hashes []common.Hash) error {
|
||||||
glog.V(logger.Debug).Infof("%v fetching %v state data", p, len(hashes))
|
log.Debug(fmt.Sprintf("%v fetching %v state data", p, len(hashes)))
|
||||||
return p2p.Send(p.rw, GetNodeDataMsg, hashes)
|
return p2p.Send(p.rw, GetNodeDataMsg, hashes)
|
||||||
}
|
}
|
||||||
|
|
||||||
// RequestReceipts fetches a batch of transaction receipts from a remote node.
|
// RequestReceipts fetches a batch of transaction receipts from a remote node.
|
||||||
func (p *peer) RequestReceipts(hashes []common.Hash) error {
|
func (p *peer) RequestReceipts(hashes []common.Hash) error {
|
||||||
glog.V(logger.Debug).Infof("%v fetching %v receipts", p, len(hashes))
|
log.Debug(fmt.Sprintf("%v fetching %v receipts", p, len(hashes)))
|
||||||
return p2p.Send(p.rw, GetReceiptsMsg, hashes)
|
return p2p.Send(p.rw, GetReceiptsMsg, hashes)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -30,8 +30,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
// glog.SetToStderr(true)
|
// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat())))
|
||||||
// glog.SetV(6)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var testAccount, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
var testAccount, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
||||||
|
10
eth/sync.go
10
eth/sync.go
@ -17,6 +17,7 @@
|
|||||||
package eth
|
package eth
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
@ -24,8 +25,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/eth/downloader"
|
"github.com/ethereum/go-ethereum/eth/downloader"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -87,7 +87,7 @@ func (pm *ProtocolManager) txsyncLoop() {
|
|||||||
delete(pending, s.p.ID())
|
delete(pending, s.p.ID())
|
||||||
}
|
}
|
||||||
// Send the pack in the background.
|
// Send the pack in the background.
|
||||||
glog.V(logger.Detail).Infof("%v: sending %d transactions (%v)", s.p.Peer, len(pack.txs), size)
|
log.Trace(fmt.Sprintf("%v: sending %d transactions (%v)", s.p.Peer, len(pack.txs), size))
|
||||||
sending = true
|
sending = true
|
||||||
go func() { done <- pack.p.SendTransactions(pack.txs) }()
|
go func() { done <- pack.p.SendTransactions(pack.txs) }()
|
||||||
}
|
}
|
||||||
@ -117,7 +117,7 @@ func (pm *ProtocolManager) txsyncLoop() {
|
|||||||
sending = false
|
sending = false
|
||||||
// Stop tracking peers that cause send failures.
|
// Stop tracking peers that cause send failures.
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Debug).Infof("%v: tx send failed: %v", pack.p.Peer, err)
|
log.Debug(fmt.Sprintf("%v: tx send failed: %v", pack.p.Peer, err))
|
||||||
delete(pending, pack.p.ID())
|
delete(pending, pack.p.ID())
|
||||||
}
|
}
|
||||||
// Schedule the next send.
|
// Schedule the next send.
|
||||||
@ -187,7 +187,7 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
|
|||||||
if atomic.LoadUint32(&pm.fastSync) == 1 {
|
if atomic.LoadUint32(&pm.fastSync) == 1 {
|
||||||
// Disable fast sync if we indeed have something in our chain
|
// Disable fast sync if we indeed have something in our chain
|
||||||
if pm.blockchain.CurrentBlock().NumberU64() > 0 {
|
if pm.blockchain.CurrentBlock().NumberU64() > 0 {
|
||||||
glog.V(logger.Info).Infof("fast sync complete, auto disabling")
|
log.Info(fmt.Sprintf("fast sync complete, auto disabling"))
|
||||||
atomic.StoreUint32(&pm.fastSync, 0)
|
atomic.StoreUint32(&pm.fastSync, 0)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -17,14 +17,14 @@
|
|||||||
package ethdb
|
package ethdb
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/metrics"
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
"github.com/syndtr/goleveldb/leveldb"
|
"github.com/syndtr/goleveldb/leveldb"
|
||||||
"github.com/syndtr/goleveldb/leveldb/errors"
|
"github.com/syndtr/goleveldb/leveldb/errors"
|
||||||
@ -80,7 +80,7 @@ func NewLDBDatabase(file string, cache int, handles int) (*LDBDatabase, error) {
|
|||||||
if handles < 16 {
|
if handles < 16 {
|
||||||
handles = 16
|
handles = 16
|
||||||
}
|
}
|
||||||
glog.V(logger.Info).Infof("Allotted %dMB cache and %d file handles to %s", cache, handles, file)
|
log.Info(fmt.Sprintf("Allotted %dMB cache and %d file handles to %s", cache, handles, file))
|
||||||
|
|
||||||
// Open the db and recover any potential corruptions
|
// Open the db and recover any potential corruptions
|
||||||
db, err := leveldb.OpenFile(file, &opt.Options{
|
db, err := leveldb.OpenFile(file, &opt.Options{
|
||||||
@ -167,16 +167,14 @@ func (self *LDBDatabase) Close() {
|
|||||||
errc := make(chan error)
|
errc := make(chan error)
|
||||||
self.quitChan <- errc
|
self.quitChan <- errc
|
||||||
if err := <-errc; err != nil {
|
if err := <-errc; err != nil {
|
||||||
glog.V(logger.Error).Infof("metrics failure in '%s': %v\n", self.fn, err)
|
log.Error(fmt.Sprintf("metrics failure in '%s': %v\n", self.fn, err))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
err := self.db.Close()
|
err := self.db.Close()
|
||||||
if glog.V(logger.Error) {
|
if err == nil {
|
||||||
if err == nil {
|
log.Info(fmt.Sprint("closed db:", self.fn))
|
||||||
glog.Infoln("closed db:", self.fn)
|
} else {
|
||||||
} else {
|
log.Error(fmt.Sprintf("error closing db %s: %v", self.fn, err))
|
||||||
glog.Errorf("error closing db %s: %v", self.fn, err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -231,7 +229,7 @@ func (self *LDBDatabase) meter(refresh time.Duration) {
|
|||||||
// Retrieve the database stats
|
// Retrieve the database stats
|
||||||
stats, err := self.db.GetProperty("leveldb.stats")
|
stats, err := self.db.GetProperty("leveldb.stats")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Error).Infof("failed to read database stats: %v", err)
|
log.Error(fmt.Sprintf("failed to read database stats: %v", err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// Find the compaction table, skip the header
|
// Find the compaction table, skip the header
|
||||||
@ -240,7 +238,7 @@ func (self *LDBDatabase) meter(refresh time.Duration) {
|
|||||||
lines = lines[1:]
|
lines = lines[1:]
|
||||||
}
|
}
|
||||||
if len(lines) <= 3 {
|
if len(lines) <= 3 {
|
||||||
glog.V(logger.Error).Infof("compaction table not found")
|
log.Error(fmt.Sprintf("compaction table not found"))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
lines = lines[3:]
|
lines = lines[3:]
|
||||||
@ -256,7 +254,7 @@ func (self *LDBDatabase) meter(refresh time.Duration) {
|
|||||||
}
|
}
|
||||||
for idx, counter := range parts[3:] {
|
for idx, counter := range parts[3:] {
|
||||||
if value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64); err != nil {
|
if value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64); err != nil {
|
||||||
glog.V(logger.Error).Infof("compaction entry parsing failed: %v", err)
|
log.Error(fmt.Sprintf("compaction entry parsing failed: %v", err))
|
||||||
return
|
return
|
||||||
} else {
|
} else {
|
||||||
counters[i%2][idx] += value
|
counters[i%2][idx] += value
|
||||||
|
@ -34,8 +34,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/eth"
|
"github.com/ethereum/go-ethereum/eth"
|
||||||
"github.com/ethereum/go-ethereum/event"
|
"github.com/ethereum/go-ethereum/event"
|
||||||
"github.com/ethereum/go-ethereum/les"
|
"github.com/ethereum/go-ethereum/les"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/node"
|
"github.com/ethereum/go-ethereum/node"
|
||||||
"github.com/ethereum/go-ethereum/p2p"
|
"github.com/ethereum/go-ethereum/p2p"
|
||||||
"github.com/ethereum/go-ethereum/rpc"
|
"github.com/ethereum/go-ethereum/rpc"
|
||||||
@ -96,13 +95,13 @@ func (s *Service) Start(server *p2p.Server) error {
|
|||||||
s.server = server
|
s.server = server
|
||||||
go s.loop()
|
go s.loop()
|
||||||
|
|
||||||
glog.V(logger.Info).Infoln("Stats daemon started")
|
log.Info(fmt.Sprint("Stats daemon started"))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Stop implements node.Service, terminating the monitoring and reporting daemon.
|
// Stop implements node.Service, terminating the monitoring and reporting daemon.
|
||||||
func (s *Service) Stop() error {
|
func (s *Service) Stop() error {
|
||||||
glog.V(logger.Info).Infoln("Stats daemon stopped")
|
log.Info(fmt.Sprint("Stats daemon stopped"))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -131,7 +130,7 @@ func (s *Service) loop() {
|
|||||||
}
|
}
|
||||||
conn, err := websocket.Dial(url, "", "http://localhost/")
|
conn, err := websocket.Dial(url, "", "http://localhost/")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Warn).Infof("Stats server unreachable: %v", err)
|
log.Warn(fmt.Sprintf("Stats server unreachable: %v", err))
|
||||||
time.Sleep(10 * time.Second)
|
time.Sleep(10 * time.Second)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@ -139,7 +138,7 @@ func (s *Service) loop() {
|
|||||||
out := json.NewEncoder(conn)
|
out := json.NewEncoder(conn)
|
||||||
|
|
||||||
if err = s.login(in, out); err != nil {
|
if err = s.login(in, out); err != nil {
|
||||||
glog.V(logger.Warn).Infof("Stats login failed: %v", err)
|
log.Warn(fmt.Sprintf("Stats login failed: %v", err))
|
||||||
conn.Close()
|
conn.Close()
|
||||||
time.Sleep(10 * time.Second)
|
time.Sleep(10 * time.Second)
|
||||||
continue
|
continue
|
||||||
@ -148,12 +147,12 @@ func (s *Service) loop() {
|
|||||||
|
|
||||||
// Send the initial stats so our node looks decent from the get go
|
// Send the initial stats so our node looks decent from the get go
|
||||||
if err = s.report(out); err != nil {
|
if err = s.report(out); err != nil {
|
||||||
glog.V(logger.Warn).Infof("Initial stats report failed: %v", err)
|
log.Warn(fmt.Sprintf("Initial stats report failed: %v", err))
|
||||||
conn.Close()
|
conn.Close()
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if err = s.reportHistory(out, nil); err != nil {
|
if err = s.reportHistory(out, nil); err != nil {
|
||||||
glog.V(logger.Warn).Infof("History report failed: %v", err)
|
log.Warn(fmt.Sprintf("History report failed: %v", err))
|
||||||
conn.Close()
|
conn.Close()
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@ -164,11 +163,11 @@ func (s *Service) loop() {
|
|||||||
select {
|
select {
|
||||||
case <-fullReport.C:
|
case <-fullReport.C:
|
||||||
if err = s.report(out); err != nil {
|
if err = s.report(out); err != nil {
|
||||||
glog.V(logger.Warn).Infof("Full stats report failed: %v", err)
|
log.Warn(fmt.Sprintf("Full stats report failed: %v", err))
|
||||||
}
|
}
|
||||||
case list := <-s.histCh:
|
case list := <-s.histCh:
|
||||||
if err = s.reportHistory(out, list); err != nil {
|
if err = s.reportHistory(out, list); err != nil {
|
||||||
glog.V(logger.Warn).Infof("Block history report failed: %v", err)
|
log.Warn(fmt.Sprintf("Block history report failed: %v", err))
|
||||||
}
|
}
|
||||||
case head, ok := <-headSub.Chan():
|
case head, ok := <-headSub.Chan():
|
||||||
if !ok { // node stopped
|
if !ok { // node stopped
|
||||||
@ -176,10 +175,10 @@ func (s *Service) loop() {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
if err = s.reportBlock(out, head.Data.(core.ChainHeadEvent).Block); err != nil {
|
if err = s.reportBlock(out, head.Data.(core.ChainHeadEvent).Block); err != nil {
|
||||||
glog.V(logger.Warn).Infof("Block stats report failed: %v", err)
|
log.Warn(fmt.Sprintf("Block stats report failed: %v", err))
|
||||||
}
|
}
|
||||||
if err = s.reportPending(out); err != nil {
|
if err = s.reportPending(out); err != nil {
|
||||||
glog.V(logger.Warn).Infof("Post-block transaction stats report failed: %v", err)
|
log.Warn(fmt.Sprintf("Post-block transaction stats report failed: %v", err))
|
||||||
}
|
}
|
||||||
case _, ok := <-txSub.Chan():
|
case _, ok := <-txSub.Chan():
|
||||||
if !ok { // node stopped
|
if !ok { // node stopped
|
||||||
@ -195,7 +194,7 @@ func (s *Service) loop() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
if err = s.reportPending(out); err != nil {
|
if err = s.reportPending(out); err != nil {
|
||||||
glog.V(logger.Warn).Infof("Transaction stats report failed: %v", err)
|
log.Warn(fmt.Sprintf("Transaction stats report failed: %v", err))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -216,16 +215,16 @@ func (s *Service) readLoop(conn *websocket.Conn, in *json.Decoder) {
|
|||||||
// Retrieve the next generic network packet and bail out on error
|
// Retrieve the next generic network packet and bail out on error
|
||||||
var msg map[string][]interface{}
|
var msg map[string][]interface{}
|
||||||
if err := in.Decode(&msg); err != nil {
|
if err := in.Decode(&msg); err != nil {
|
||||||
glog.V(logger.Warn).Infof("Failed to decode stats server message: %v", err)
|
log.Warn(fmt.Sprintf("Failed to decode stats server message: %v", err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if len(msg["emit"]) == 0 {
|
if len(msg["emit"]) == 0 {
|
||||||
glog.V(logger.Warn).Infof("Stats server sent non-broadcast: %v", msg)
|
log.Warn(fmt.Sprintf("Stats server sent non-broadcast: %v", msg))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
command, ok := msg["emit"][0].(string)
|
command, ok := msg["emit"][0].(string)
|
||||||
if !ok {
|
if !ok {
|
||||||
glog.V(logger.Warn).Infof("Invalid stats server message type: %v", msg["emit"][0])
|
log.Warn(fmt.Sprintf("Invalid stats server message type: %v", msg["emit"][0]))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// If the message is a ping reply, deliver (someone must be listening!)
|
// If the message is a ping reply, deliver (someone must be listening!)
|
||||||
@ -236,7 +235,7 @@ func (s *Service) readLoop(conn *websocket.Conn, in *json.Decoder) {
|
|||||||
continue
|
continue
|
||||||
default:
|
default:
|
||||||
// Ping routine dead, abort
|
// Ping routine dead, abort
|
||||||
glog.V(logger.Warn).Infof("Stats server pinger seems to have died")
|
log.Warn(fmt.Sprintf("Stats server pinger seems to have died"))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -245,12 +244,12 @@ func (s *Service) readLoop(conn *websocket.Conn, in *json.Decoder) {
|
|||||||
// Make sure the request is valid and doesn't crash us
|
// Make sure the request is valid and doesn't crash us
|
||||||
request, ok := msg["emit"][1].(map[string]interface{})
|
request, ok := msg["emit"][1].(map[string]interface{})
|
||||||
if !ok {
|
if !ok {
|
||||||
glog.V(logger.Warn).Infof("Invalid history request: %v", msg["emit"][1])
|
log.Warn(fmt.Sprintf("Invalid history request: %v", msg["emit"][1]))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
list, ok := request["list"].([]interface{})
|
list, ok := request["list"].([]interface{})
|
||||||
if !ok {
|
if !ok {
|
||||||
glog.V(logger.Warn).Infof("Invalid history block list: %v", request["list"])
|
log.Warn(fmt.Sprintf("Invalid history block list: %v", request["list"]))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// Convert the block number list to an integer list
|
// Convert the block number list to an integer list
|
||||||
@ -258,7 +257,7 @@ func (s *Service) readLoop(conn *websocket.Conn, in *json.Decoder) {
|
|||||||
for i, num := range list {
|
for i, num := range list {
|
||||||
n, ok := num.(float64)
|
n, ok := num.(float64)
|
||||||
if !ok {
|
if !ok {
|
||||||
glog.V(logger.Warn).Infof("Invalid history block number: %v", num)
|
log.Warn(fmt.Sprintf("Invalid history block number: %v", num))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
numbers[i] = uint64(n)
|
numbers[i] = uint64(n)
|
||||||
@ -270,7 +269,7 @@ func (s *Service) readLoop(conn *websocket.Conn, in *json.Decoder) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Report anything else and continue
|
// Report anything else and continue
|
||||||
glog.V(logger.Info).Infof("Unknown stats message: %v", msg)
|
log.Info(fmt.Sprintf("Unknown stats message: %v", msg))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -22,6 +22,7 @@ package debug
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"os/user"
|
"os/user"
|
||||||
@ -33,8 +34,7 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Handler is the global debugging handler.
|
// Handler is the global debugging handler.
|
||||||
@ -51,23 +51,22 @@ type HandlerT struct {
|
|||||||
traceFile string
|
traceFile string
|
||||||
}
|
}
|
||||||
|
|
||||||
// Verbosity sets the glog verbosity ceiling.
|
// Verbosity sets the log verbosity ceiling. The verbosity of individual packages
|
||||||
// The verbosity of individual packages and source files
|
// and source files can be raised using Vmodule.
|
||||||
// can be raised using Vmodule.
|
|
||||||
func (*HandlerT) Verbosity(level int) {
|
func (*HandlerT) Verbosity(level int) {
|
||||||
glog.SetV(level)
|
glogger.Verbosity(log.Lvl(level))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Vmodule sets the glog verbosity pattern. See package
|
// Vmodule sets the log verbosity pattern. See package log for details on the
|
||||||
// glog for details on pattern syntax.
|
// pattern syntax.
|
||||||
func (*HandlerT) Vmodule(pattern string) error {
|
func (*HandlerT) Vmodule(pattern string) error {
|
||||||
return glog.GetVModule().Set(pattern)
|
return glogger.Vmodule(pattern)
|
||||||
}
|
}
|
||||||
|
|
||||||
// BacktraceAt sets the glog backtrace location.
|
// BacktraceAt sets the log backtrace location. See package log for details on
|
||||||
// See package glog for details on pattern syntax.
|
// the pattern syntax.
|
||||||
func (*HandlerT) BacktraceAt(location string) error {
|
func (*HandlerT) BacktraceAt(location string) error {
|
||||||
return glog.GetTraceLocation().Set(location)
|
return glogger.BacktraceAt(location)
|
||||||
}
|
}
|
||||||
|
|
||||||
// MemStats returns detailed runtime memory statistics.
|
// MemStats returns detailed runtime memory statistics.
|
||||||
@ -112,7 +111,7 @@ func (h *HandlerT) StartCPUProfile(file string) error {
|
|||||||
}
|
}
|
||||||
h.cpuW = f
|
h.cpuW = f
|
||||||
h.cpuFile = file
|
h.cpuFile = file
|
||||||
glog.V(logger.Info).Infoln("CPU profiling started, writing to", h.cpuFile)
|
log.Info(fmt.Sprint("CPU profiling started, writing to", h.cpuFile))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -124,7 +123,7 @@ func (h *HandlerT) StopCPUProfile() error {
|
|||||||
if h.cpuW == nil {
|
if h.cpuW == nil {
|
||||||
return errors.New("CPU profiling not in progress")
|
return errors.New("CPU profiling not in progress")
|
||||||
}
|
}
|
||||||
glog.V(logger.Info).Infoln("done writing CPU profile to", h.cpuFile)
|
log.Info(fmt.Sprint("done writing CPU profile to", h.cpuFile))
|
||||||
h.cpuW.Close()
|
h.cpuW.Close()
|
||||||
h.cpuW = nil
|
h.cpuW = nil
|
||||||
h.cpuFile = ""
|
h.cpuFile = ""
|
||||||
@ -180,7 +179,7 @@ func (*HandlerT) Stacks() string {
|
|||||||
|
|
||||||
func writeProfile(name, file string) error {
|
func writeProfile(name, file string) error {
|
||||||
p := pprof.Lookup(name)
|
p := pprof.Lookup(name)
|
||||||
glog.V(logger.Info).Infof("writing %d %s profile records to %s", p.Count(), name, file)
|
log.Info(fmt.Sprintf("writing %d %s profile records to %s", p.Count(), name, file))
|
||||||
f, err := os.Create(expandHome(file))
|
f, err := os.Create(expandHome(file))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -20,28 +20,32 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
_ "net/http/pprof"
|
_ "net/http/pprof"
|
||||||
|
"os"
|
||||||
"runtime"
|
"runtime"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"gopkg.in/urfave/cli.v1"
|
"gopkg.in/urfave/cli.v1"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
verbosityFlag = cli.GenericFlag{
|
verbosityFlag = cli.IntFlag{
|
||||||
Name: "verbosity",
|
Name: "verbosity",
|
||||||
Usage: "Logging verbosity: 0=silent, 1=error, 2=warn, 3=info, 4=core, 5=debug, 6=detail",
|
Usage: "Logging verbosity: 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=detail",
|
||||||
Value: glog.GetVerbosity(),
|
Value: 3,
|
||||||
}
|
}
|
||||||
vmoduleFlag = cli.GenericFlag{
|
vmoduleFlag = cli.StringFlag{
|
||||||
Name: "vmodule",
|
Name: "vmodule",
|
||||||
Usage: "Per-module verbosity: comma-separated list of <pattern>=<level> (e.g. eth/*=6,p2p=5)",
|
Usage: "Per-module verbosity: comma-separated list of <pattern>=<level> (e.g. eth/*=5,p2p=4)",
|
||||||
Value: glog.GetVModule(),
|
Value: "",
|
||||||
}
|
}
|
||||||
backtraceAtFlag = cli.GenericFlag{
|
backtraceAtFlag = cli.StringFlag{
|
||||||
Name: "backtrace",
|
Name: "backtrace",
|
||||||
Usage: "Request a stack trace at a specific logging statement (e.g. \"block.go:271\")",
|
Usage: "Request a stack trace at a specific logging statement (e.g. \"block.go:271\")",
|
||||||
Value: glog.GetTraceLocation(),
|
Value: "",
|
||||||
|
}
|
||||||
|
debugFlag = cli.BoolFlag{
|
||||||
|
Name: "debug",
|
||||||
|
Usage: "Prepends log messages with call-site location (file and line number)",
|
||||||
}
|
}
|
||||||
pprofFlag = cli.BoolFlag{
|
pprofFlag = cli.BoolFlag{
|
||||||
Name: "pprof",
|
Name: "pprof",
|
||||||
@ -78,17 +82,25 @@ var (
|
|||||||
|
|
||||||
// Flags holds all command-line flags required for debugging.
|
// Flags holds all command-line flags required for debugging.
|
||||||
var Flags = []cli.Flag{
|
var Flags = []cli.Flag{
|
||||||
verbosityFlag, vmoduleFlag, backtraceAtFlag,
|
verbosityFlag, vmoduleFlag, backtraceAtFlag, debugFlag,
|
||||||
pprofFlag, pprofAddrFlag, pprofPortFlag,
|
pprofFlag, pprofAddrFlag, pprofPortFlag,
|
||||||
memprofilerateFlag, blockprofilerateFlag, cpuprofileFlag, traceFlag,
|
memprofilerateFlag, blockprofilerateFlag, cpuprofileFlag, traceFlag,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// glogger is the glog handler used by Geth, allowing the debug APIs to modify
|
||||||
|
// verbosity levels, vmodules and backtrace locations.
|
||||||
|
var glogger = log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat()))
|
||||||
|
|
||||||
// Setup initializes profiling and logging based on the CLI flags.
|
// Setup initializes profiling and logging based on the CLI flags.
|
||||||
// It should be called as early as possible in the program.
|
// It should be called as early as possible in the program.
|
||||||
func Setup(ctx *cli.Context) error {
|
func Setup(ctx *cli.Context) error {
|
||||||
// logging
|
// logging
|
||||||
glog.CopyStandardLogTo("INFO")
|
log.PrintOrigins(ctx.GlobalBool(debugFlag.Name))
|
||||||
glog.SetToStderr(true)
|
|
||||||
|
glogger.Verbosity(log.Lvl(ctx.GlobalInt(verbosityFlag.Name)))
|
||||||
|
glogger.Vmodule(ctx.GlobalString(vmoduleFlag.Name))
|
||||||
|
glogger.BacktraceAt(ctx.GlobalString(backtraceAtFlag.Name))
|
||||||
|
log.Root().SetHandler(glogger)
|
||||||
|
|
||||||
// profiling, tracing
|
// profiling, tracing
|
||||||
runtime.MemProfileRate = ctx.GlobalInt(memprofilerateFlag.Name)
|
runtime.MemProfileRate = ctx.GlobalInt(memprofilerateFlag.Name)
|
||||||
@ -108,8 +120,8 @@ func Setup(ctx *cli.Context) error {
|
|||||||
if ctx.GlobalBool(pprofFlag.Name) {
|
if ctx.GlobalBool(pprofFlag.Name) {
|
||||||
address := fmt.Sprintf("%s:%d", ctx.GlobalString(pprofAddrFlag.Name), ctx.GlobalInt(pprofPortFlag.Name))
|
address := fmt.Sprintf("%s:%d", ctx.GlobalString(pprofAddrFlag.Name), ctx.GlobalInt(pprofPortFlag.Name))
|
||||||
go func() {
|
go func() {
|
||||||
glog.V(logger.Info).Infof("starting pprof server at http://%s/debug/pprof", address)
|
log.Info(fmt.Sprintf("starting pprof server at http://%s/debug/pprof", address))
|
||||||
glog.Errorln(http.ListenAndServe(address, nil))
|
log.Error(fmt.Sprint(http.ListenAndServe(address, nil)))
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
@ -20,11 +20,11 @@ package debug
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"runtime/trace"
|
"runtime/trace"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// StartGoTrace turns on tracing, writing to the given file.
|
// StartGoTrace turns on tracing, writing to the given file.
|
||||||
@ -44,7 +44,7 @@ func (h *HandlerT) StartGoTrace(file string) error {
|
|||||||
}
|
}
|
||||||
h.traceW = f
|
h.traceW = f
|
||||||
h.traceFile = file
|
h.traceFile = file
|
||||||
glog.V(logger.Info).Infoln("trace started, writing to", h.traceFile)
|
log.Info(fmt.Sprint("trace started, writing to", h.traceFile))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -56,7 +56,7 @@ func (h *HandlerT) StopGoTrace() error {
|
|||||||
if h.traceW == nil {
|
if h.traceW == nil {
|
||||||
return errors.New("trace not in progress")
|
return errors.New("trace not in progress")
|
||||||
}
|
}
|
||||||
glog.V(logger.Info).Infoln("done writing trace to", h.traceFile)
|
log.Info(fmt.Sprint("done writing trace to", h.traceFile))
|
||||||
h.traceW.Close()
|
h.traceW.Close()
|
||||||
h.traceW = nil
|
h.traceW = nil
|
||||||
h.traceFile = ""
|
h.traceFile = ""
|
||||||
|
@ -36,8 +36,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/core/vm"
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/p2p"
|
"github.com/ethereum/go-ethereum/p2p"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"github.com/ethereum/go-ethereum/rpc"
|
"github.com/ethereum/go-ethereum/rpc"
|
||||||
@ -475,7 +474,7 @@ func (s *PublicBlockChainAPI) GetUncleByBlockNumberAndIndex(ctx context.Context,
|
|||||||
if block != nil {
|
if block != nil {
|
||||||
uncles := block.Uncles()
|
uncles := block.Uncles()
|
||||||
if index >= hexutil.Uint(len(uncles)) {
|
if index >= hexutil.Uint(len(uncles)) {
|
||||||
glog.V(logger.Debug).Infof("uncle block on index %d not found for block #%d", index, blockNr)
|
log.Debug(fmt.Sprintf("uncle block on index %d not found for block #%d", index, blockNr))
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
block = types.NewBlockWithHeader(uncles[index])
|
block = types.NewBlockWithHeader(uncles[index])
|
||||||
@ -491,7 +490,7 @@ func (s *PublicBlockChainAPI) GetUncleByBlockHashAndIndex(ctx context.Context, b
|
|||||||
if block != nil {
|
if block != nil {
|
||||||
uncles := block.Uncles()
|
uncles := block.Uncles()
|
||||||
if index >= hexutil.Uint(len(uncles)) {
|
if index >= hexutil.Uint(len(uncles)) {
|
||||||
glog.V(logger.Debug).Infof("uncle block on index %d not found for block %s", index, blockHash.Hex())
|
log.Debug(fmt.Sprintf("uncle block on index %d not found for block %s", index, blockHash.Hex()))
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
block = types.NewBlockWithHeader(uncles[index])
|
block = types.NewBlockWithHeader(uncles[index])
|
||||||
@ -577,7 +576,7 @@ type CallArgs struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber, vmCfg vm.Config) ([]byte, *big.Int, error) {
|
func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber, vmCfg vm.Config) ([]byte, *big.Int, error) {
|
||||||
defer func(start time.Time) { glog.V(logger.Debug).Infof("call took %v", time.Since(start)) }(time.Now())
|
defer func(start time.Time) { log.Debug(fmt.Sprintf("call took %v", time.Since(start))) }(time.Now())
|
||||||
|
|
||||||
state, header, err := s.b.StateAndHeaderByNumber(ctx, blockNr)
|
state, header, err := s.b.StateAndHeaderByNumber(ctx, blockNr)
|
||||||
if state == nil || err != nil {
|
if state == nil || err != nil {
|
||||||
@ -1003,7 +1002,7 @@ func (s *PublicTransactionPoolAPI) GetTransactionByHash(ctx context.Context, txH
|
|||||||
var err error
|
var err error
|
||||||
|
|
||||||
if tx, isPending, err = getTransaction(s.b.ChainDb(), s.b, txHash); err != nil {
|
if tx, isPending, err = getTransaction(s.b.ChainDb(), s.b, txHash); err != nil {
|
||||||
glog.V(logger.Debug).Infof("%v\n", err)
|
log.Debug(fmt.Sprintf("%v\n", err))
|
||||||
return nil, nil
|
return nil, nil
|
||||||
} else if tx == nil {
|
} else if tx == nil {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
@ -1015,7 +1014,7 @@ func (s *PublicTransactionPoolAPI) GetTransactionByHash(ctx context.Context, txH
|
|||||||
|
|
||||||
blockHash, _, _, err := getTransactionBlockData(s.b.ChainDb(), txHash)
|
blockHash, _, _, err := getTransactionBlockData(s.b.ChainDb(), txHash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Debug).Infof("%v\n", err)
|
log.Debug(fmt.Sprintf("%v\n", err))
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1032,7 +1031,7 @@ func (s *PublicTransactionPoolAPI) GetRawTransactionByHash(ctx context.Context,
|
|||||||
var err error
|
var err error
|
||||||
|
|
||||||
if tx, _, err = getTransaction(s.b.ChainDb(), s.b, txHash); err != nil {
|
if tx, _, err = getTransaction(s.b.ChainDb(), s.b, txHash); err != nil {
|
||||||
glog.V(logger.Debug).Infof("%v\n", err)
|
log.Debug(fmt.Sprintf("%v\n", err))
|
||||||
return nil, nil
|
return nil, nil
|
||||||
} else if tx == nil {
|
} else if tx == nil {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
@ -1045,19 +1044,19 @@ func (s *PublicTransactionPoolAPI) GetRawTransactionByHash(ctx context.Context,
|
|||||||
func (s *PublicTransactionPoolAPI) GetTransactionReceipt(txHash common.Hash) (map[string]interface{}, error) {
|
func (s *PublicTransactionPoolAPI) GetTransactionReceipt(txHash common.Hash) (map[string]interface{}, error) {
|
||||||
receipt := core.GetReceipt(s.b.ChainDb(), txHash)
|
receipt := core.GetReceipt(s.b.ChainDb(), txHash)
|
||||||
if receipt == nil {
|
if receipt == nil {
|
||||||
glog.V(logger.Debug).Infof("receipt not found for transaction %s", txHash.Hex())
|
log.Debug(fmt.Sprintf("receipt not found for transaction %s", txHash.Hex()))
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
tx, _, err := getTransaction(s.b.ChainDb(), s.b, txHash)
|
tx, _, err := getTransaction(s.b.ChainDb(), s.b, txHash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Debug).Infof("%v\n", err)
|
log.Debug(fmt.Sprintf("%v\n", err))
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
txBlock, blockIndex, index, err := getTransactionBlockData(s.b.ChainDb(), txHash)
|
txBlock, blockIndex, index, err := getTransactionBlockData(s.b.ChainDb(), txHash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Debug).Infof("%v\n", err)
|
log.Debug(fmt.Sprintf("%v\n", err))
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1160,9 +1159,9 @@ func submitTransaction(ctx context.Context, b Backend, tx *types.Transaction) (c
|
|||||||
signer := types.MakeSigner(b.ChainConfig(), b.CurrentBlock().Number())
|
signer := types.MakeSigner(b.ChainConfig(), b.CurrentBlock().Number())
|
||||||
from, _ := types.Sender(signer, tx)
|
from, _ := types.Sender(signer, tx)
|
||||||
addr := crypto.CreateAddress(from, tx.Nonce())
|
addr := crypto.CreateAddress(from, tx.Nonce())
|
||||||
glog.V(logger.Info).Infof("Tx(%s) created: %s\n", tx.Hash().Hex(), addr.Hex())
|
log.Info(fmt.Sprintf("Tx(%s) created: %s\n", tx.Hash().Hex(), addr.Hex()))
|
||||||
} else {
|
} else {
|
||||||
glog.V(logger.Info).Infof("Tx(%s) to: %s\n", tx.Hash().Hex(), tx.To().Hex())
|
log.Info(fmt.Sprintf("Tx(%s) to: %s\n", tx.Hash().Hex(), tx.To().Hex()))
|
||||||
}
|
}
|
||||||
return tx.Hash(), nil
|
return tx.Hash(), nil
|
||||||
}
|
}
|
||||||
@ -1214,9 +1213,9 @@ func (s *PublicTransactionPoolAPI) SendRawTransaction(ctx context.Context, encod
|
|||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
addr := crypto.CreateAddress(from, tx.Nonce())
|
addr := crypto.CreateAddress(from, tx.Nonce())
|
||||||
glog.V(logger.Info).Infof("Tx(%x) created: %x\n", tx.Hash(), addr)
|
log.Info(fmt.Sprintf("Tx(%x) created: %x\n", tx.Hash(), addr))
|
||||||
} else {
|
} else {
|
||||||
glog.V(logger.Info).Infof("Tx(%x) to: %x\n", tx.Hash(), tx.To())
|
log.Info(fmt.Sprintf("Tx(%x) to: %x\n", tx.Hash(), tx.To()))
|
||||||
}
|
}
|
||||||
|
|
||||||
return tx.Hash().Hex(), nil
|
return tx.Hash().Hex(), nil
|
||||||
@ -1421,10 +1420,10 @@ func (api *PrivateDebugAPI) ChaindbCompact() error {
|
|||||||
return fmt.Errorf("chaindbCompact does not work for memory databases")
|
return fmt.Errorf("chaindbCompact does not work for memory databases")
|
||||||
}
|
}
|
||||||
for b := byte(0); b < 255; b++ {
|
for b := byte(0); b < 255; b++ {
|
||||||
glog.V(logger.Info).Infof("compacting chain DB range 0x%0.2X-0x%0.2X", b, b+1)
|
log.Info(fmt.Sprintf("compacting chain DB range 0x%0.2X-0x%0.2X", b, b+1))
|
||||||
err := ldb.LDB().CompactRange(util.Range{Start: []byte{b}, Limit: []byte{b + 1}})
|
err := ldb.LDB().CompactRange(util.Range{Start: []byte{b}, Limit: []byte{b + 1}})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.Errorf("compaction error: %v", err)
|
log.Error(fmt.Sprintf("compaction error: %v", err))
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -36,8 +36,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/event"
|
"github.com/ethereum/go-ethereum/event"
|
||||||
"github.com/ethereum/go-ethereum/internal/ethapi"
|
"github.com/ethereum/go-ethereum/internal/ethapi"
|
||||||
"github.com/ethereum/go-ethereum/light"
|
"github.com/ethereum/go-ethereum/light"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/node"
|
"github.com/ethereum/go-ethereum/node"
|
||||||
"github.com/ethereum/go-ethereum/p2p"
|
"github.com/ethereum/go-ethereum/p2p"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
@ -188,7 +187,7 @@ func (s *LightEthereum) Protocols() []p2p.Protocol {
|
|||||||
// Start implements node.Service, starting all internal goroutines needed by the
|
// Start implements node.Service, starting all internal goroutines needed by the
|
||||||
// Ethereum protocol implementation.
|
// Ethereum protocol implementation.
|
||||||
func (s *LightEthereum) Start(srvr *p2p.Server) error {
|
func (s *LightEthereum) Start(srvr *p2p.Server) error {
|
||||||
glog.V(logger.Info).Infof("WARNING: light client mode is an experimental feature")
|
log.Info(fmt.Sprintf("WARNING: light client mode is an experimental feature"))
|
||||||
s.netRPCService = ethapi.NewPublicNetAPI(srvr, s.netVersionId)
|
s.netRPCService = ethapi.NewPublicNetAPI(srvr, s.netVersionId)
|
||||||
s.protocolManager.Start(srvr)
|
s.protocolManager.Start(srvr)
|
||||||
return nil
|
return nil
|
||||||
|
@ -18,6 +18,7 @@
|
|||||||
package les
|
package les
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
"math/big"
|
"math/big"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
@ -27,8 +28,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/core"
|
"github.com/ethereum/go-ethereum/core"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/light"
|
"github.com/ethereum/go-ethereum/light"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@ -174,7 +174,7 @@ func (f *lightFetcher) syncLoop() {
|
|||||||
f.reqMu.Unlock()
|
f.reqMu.Unlock()
|
||||||
if ok {
|
if ok {
|
||||||
f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), true)
|
f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), true)
|
||||||
glog.V(logger.Debug).Infof("hard timeout by peer %v", req.peer.id)
|
log.Debug(fmt.Sprintf("hard timeout by peer %v", req.peer.id))
|
||||||
go f.pm.removePeer(req.peer.id)
|
go f.pm.removePeer(req.peer.id)
|
||||||
}
|
}
|
||||||
case resp := <-f.deliverChn:
|
case resp := <-f.deliverChn:
|
||||||
@ -192,13 +192,13 @@ func (f *lightFetcher) syncLoop() {
|
|||||||
}
|
}
|
||||||
f.lock.Lock()
|
f.lock.Lock()
|
||||||
if !ok || !(f.syncing || f.processResponse(req, resp)) {
|
if !ok || !(f.syncing || f.processResponse(req, resp)) {
|
||||||
glog.V(logger.Debug).Infof("failed processing response by peer %v", resp.peer.id)
|
log.Debug(fmt.Sprintf("failed processing response by peer %v", resp.peer.id))
|
||||||
go f.pm.removePeer(resp.peer.id)
|
go f.pm.removePeer(resp.peer.id)
|
||||||
}
|
}
|
||||||
f.lock.Unlock()
|
f.lock.Unlock()
|
||||||
case p := <-f.syncDone:
|
case p := <-f.syncDone:
|
||||||
f.lock.Lock()
|
f.lock.Lock()
|
||||||
glog.V(logger.Debug).Infof("done synchronising with peer %v", p.id)
|
log.Debug(fmt.Sprintf("done synchronising with peer %v", p.id))
|
||||||
f.checkSyncedHeaders(p)
|
f.checkSyncedHeaders(p)
|
||||||
f.syncing = false
|
f.syncing = false
|
||||||
f.lock.Unlock()
|
f.lock.Unlock()
|
||||||
@ -239,17 +239,17 @@ func (f *lightFetcher) removePeer(p *peer) {
|
|||||||
func (f *lightFetcher) announce(p *peer, head *announceData) {
|
func (f *lightFetcher) announce(p *peer, head *announceData) {
|
||||||
f.lock.Lock()
|
f.lock.Lock()
|
||||||
defer f.lock.Unlock()
|
defer f.lock.Unlock()
|
||||||
glog.V(logger.Debug).Infof("received announce from peer %v #%d %016x reorg: %d", p.id, head.Number, head.Hash[:8], head.ReorgDepth)
|
log.Debug(fmt.Sprintf("received announce from peer %v #%d %016x reorg: %d", p.id, head.Number, head.Hash[:8], head.ReorgDepth))
|
||||||
|
|
||||||
fp := f.peers[p]
|
fp := f.peers[p]
|
||||||
if fp == nil {
|
if fp == nil {
|
||||||
glog.V(logger.Debug).Infof("announce: unknown peer")
|
log.Debug(fmt.Sprintf("announce: unknown peer"))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if fp.lastAnnounced != nil && head.Td.Cmp(fp.lastAnnounced.td) <= 0 {
|
if fp.lastAnnounced != nil && head.Td.Cmp(fp.lastAnnounced.td) <= 0 {
|
||||||
// announced tds should be strictly monotonic
|
// announced tds should be strictly monotonic
|
||||||
glog.V(logger.Debug).Infof("non-monotonic Td from peer %v", p.id)
|
log.Debug(fmt.Sprintf("non-monotonic Td from peer %v", p.id))
|
||||||
go f.pm.removePeer(p.id)
|
go f.pm.removePeer(p.id)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -355,14 +355,14 @@ func (f *lightFetcher) peerHasBlock(p *peer, hash common.Hash, number uint64) bo
|
|||||||
func (f *lightFetcher) request(p *peer, reqID uint64, n *fetcherTreeNode, amount uint64) (uint64, bool) {
|
func (f *lightFetcher) request(p *peer, reqID uint64, n *fetcherTreeNode, amount uint64) (uint64, bool) {
|
||||||
fp := f.peers[p]
|
fp := f.peers[p]
|
||||||
if fp == nil {
|
if fp == nil {
|
||||||
glog.V(logger.Debug).Infof("request: unknown peer")
|
log.Debug(fmt.Sprintf("request: unknown peer"))
|
||||||
p.fcServer.DeassignRequest(reqID)
|
p.fcServer.DeassignRequest(reqID)
|
||||||
return 0, false
|
return 0, false
|
||||||
}
|
}
|
||||||
if fp.bestConfirmed == nil || fp.root == nil || !f.checkKnownNode(p, fp.root) {
|
if fp.bestConfirmed == nil || fp.root == nil || !f.checkKnownNode(p, fp.root) {
|
||||||
f.syncing = true
|
f.syncing = true
|
||||||
go func() {
|
go func() {
|
||||||
glog.V(logger.Debug).Infof("synchronising with peer %v", p.id)
|
log.Debug(fmt.Sprintf("synchronising with peer %v", p.id))
|
||||||
f.pm.synchronise(p)
|
f.pm.synchronise(p)
|
||||||
f.syncDone <- p
|
f.syncDone <- p
|
||||||
}()
|
}()
|
||||||
@ -457,7 +457,7 @@ func (f *lightFetcher) deliverHeaders(peer *peer, reqID uint64, headers []*types
|
|||||||
// processResponse processes header download request responses, returns true if successful
|
// processResponse processes header download request responses, returns true if successful
|
||||||
func (f *lightFetcher) processResponse(req fetchRequest, resp fetchResponse) bool {
|
func (f *lightFetcher) processResponse(req fetchRequest, resp fetchResponse) bool {
|
||||||
if uint64(len(resp.headers)) != req.amount || resp.headers[0].Hash() != req.hash {
|
if uint64(len(resp.headers)) != req.amount || resp.headers[0].Hash() != req.hash {
|
||||||
glog.V(logger.Debug).Infof("response mismatch %v %016x != %v %016x", len(resp.headers), resp.headers[0].Hash().Bytes()[:8], req.amount, req.hash[:8])
|
log.Debug(fmt.Sprintf("response mismatch %v %016x != %v %016x", len(resp.headers), resp.headers[0].Hash().Bytes()[:8], req.amount, req.hash[:8]))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
headers := make([]*types.Header, req.amount)
|
headers := make([]*types.Header, req.amount)
|
||||||
@ -468,14 +468,14 @@ func (f *lightFetcher) processResponse(req fetchRequest, resp fetchResponse) boo
|
|||||||
if err == core.BlockFutureErr {
|
if err == core.BlockFutureErr {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
glog.V(logger.Debug).Infof("InsertHeaderChain error: %v", err)
|
log.Debug(fmt.Sprintf("InsertHeaderChain error: %v", err))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
tds := make([]*big.Int, len(headers))
|
tds := make([]*big.Int, len(headers))
|
||||||
for i, header := range headers {
|
for i, header := range headers {
|
||||||
td := f.chain.GetTd(header.Hash(), header.Number.Uint64())
|
td := f.chain.GetTd(header.Hash(), header.Number.Uint64())
|
||||||
if td == nil {
|
if td == nil {
|
||||||
glog.V(logger.Debug).Infof("TD not found for header %v of %v", i+1, len(headers))
|
log.Debug(fmt.Sprintf("TD not found for header %v of %v", i+1, len(headers)))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
tds[i] = td
|
tds[i] = td
|
||||||
@ -490,7 +490,7 @@ func (f *lightFetcher) newHeaders(headers []*types.Header, tds []*big.Int) {
|
|||||||
var maxTd *big.Int
|
var maxTd *big.Int
|
||||||
for p, fp := range f.peers {
|
for p, fp := range f.peers {
|
||||||
if !f.checkAnnouncedHeaders(fp, headers, tds) {
|
if !f.checkAnnouncedHeaders(fp, headers, tds) {
|
||||||
glog.V(logger.Debug).Infof("announce inconsistency by peer %v", p.id)
|
log.Debug(fmt.Sprintf("announce inconsistency by peer %v", p.id))
|
||||||
go f.pm.removePeer(p.id)
|
go f.pm.removePeer(p.id)
|
||||||
}
|
}
|
||||||
if fp.confirmedTd != nil && (maxTd == nil || maxTd.Cmp(fp.confirmedTd) > 0) {
|
if fp.confirmedTd != nil && (maxTd == nil || maxTd.Cmp(fp.confirmedTd) > 0) {
|
||||||
@ -576,7 +576,7 @@ func (f *lightFetcher) checkAnnouncedHeaders(fp *fetcherPeerInfo, headers []*typ
|
|||||||
func (f *lightFetcher) checkSyncedHeaders(p *peer) {
|
func (f *lightFetcher) checkSyncedHeaders(p *peer) {
|
||||||
fp := f.peers[p]
|
fp := f.peers[p]
|
||||||
if fp == nil {
|
if fp == nil {
|
||||||
glog.V(logger.Debug).Infof("checkSyncedHeaders: unknown peer")
|
log.Debug(fmt.Sprintf("checkSyncedHeaders: unknown peer"))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
n := fp.lastAnnounced
|
n := fp.lastAnnounced
|
||||||
@ -589,7 +589,7 @@ func (f *lightFetcher) checkSyncedHeaders(p *peer) {
|
|||||||
}
|
}
|
||||||
// now n is the latest downloaded header after syncing
|
// now n is the latest downloaded header after syncing
|
||||||
if n == nil {
|
if n == nil {
|
||||||
glog.V(logger.Debug).Infof("synchronisation failed with peer %v", p.id)
|
log.Debug(fmt.Sprintf("synchronisation failed with peer %v", p.id))
|
||||||
go f.pm.removePeer(p.id)
|
go f.pm.removePeer(p.id)
|
||||||
} else {
|
} else {
|
||||||
header := f.chain.GetHeader(n.hash, n.number)
|
header := f.chain.GetHeader(n.hash, n.number)
|
||||||
@ -610,12 +610,12 @@ func (f *lightFetcher) checkKnownNode(p *peer, n *fetcherTreeNode) bool {
|
|||||||
|
|
||||||
fp := f.peers[p]
|
fp := f.peers[p]
|
||||||
if fp == nil {
|
if fp == nil {
|
||||||
glog.V(logger.Debug).Infof("checkKnownNode: unknown peer")
|
log.Debug(fmt.Sprintf("checkKnownNode: unknown peer"))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
header := f.chain.GetHeader(n.hash, n.number)
|
header := f.chain.GetHeader(n.hash, n.number)
|
||||||
if !f.checkAnnouncedHeaders(fp, []*types.Header{header}, []*big.Int{td}) {
|
if !f.checkAnnouncedHeaders(fp, []*types.Header{header}, []*big.Int{td}) {
|
||||||
glog.V(logger.Debug).Infof("announce inconsistency by peer %v", p.id)
|
log.Debug(fmt.Sprintf("announce inconsistency by peer %v", p.id))
|
||||||
go f.pm.removePeer(p.id)
|
go f.pm.removePeer(p.id)
|
||||||
}
|
}
|
||||||
if fp.confirmedTd != nil {
|
if fp.confirmedTd != nil {
|
||||||
@ -700,7 +700,7 @@ func (f *lightFetcher) checkUpdateStats(p *peer, newEntry *updateStatsEntry) {
|
|||||||
now := mclock.Now()
|
now := mclock.Now()
|
||||||
fp := f.peers[p]
|
fp := f.peers[p]
|
||||||
if fp == nil {
|
if fp == nil {
|
||||||
glog.V(logger.Debug).Infof("checkUpdateStats: unknown peer")
|
log.Debug(fmt.Sprintf("checkUpdateStats: unknown peer"))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if newEntry != nil && fp.firstUpdateStats == nil {
|
if newEntry != nil && fp.firstUpdateStats == nil {
|
||||||
|
@ -34,8 +34,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/eth/downloader"
|
"github.com/ethereum/go-ethereum/eth/downloader"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/event"
|
"github.com/ethereum/go-ethereum/event"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/p2p"
|
"github.com/ethereum/go-ethereum/p2p"
|
||||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||||
"github.com/ethereum/go-ethereum/p2p/discv5"
|
"github.com/ethereum/go-ethereum/p2p/discv5"
|
||||||
@ -199,7 +198,7 @@ func NewProtocolManager(chainConfig *params.ChainConfig, lightSync bool, network
|
|||||||
}
|
}
|
||||||
|
|
||||||
if lightSync {
|
if lightSync {
|
||||||
glog.V(logger.Debug).Infof("LES: create downloader")
|
log.Debug(fmt.Sprintf("LES: create downloader"))
|
||||||
manager.downloader = downloader.New(downloader.LightSync, chainDb, manager.eventMux, blockchain.HasHeader, nil, blockchain.GetHeaderByHash,
|
manager.downloader = downloader.New(downloader.LightSync, chainDb, manager.eventMux, blockchain.HasHeader, nil, blockchain.GetHeaderByHash,
|
||||||
nil, blockchain.CurrentHeader, nil, nil, nil, blockchain.GetTdByHash,
|
nil, blockchain.CurrentHeader, nil, nil, nil, blockchain.GetTdByHash,
|
||||||
blockchain.InsertHeaderChain, nil, nil, blockchain.Rollback, removePeer)
|
blockchain.InsertHeaderChain, nil, nil, blockchain.Rollback, removePeer)
|
||||||
@ -230,12 +229,12 @@ func (pm *ProtocolManager) removePeer(id string) {
|
|||||||
if err == errNotRegistered {
|
if err == errNotRegistered {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
glog.V(logger.Error).Infoln("Removal failed:", err)
|
log.Error(fmt.Sprint("Removal failed:", err))
|
||||||
}
|
}
|
||||||
glog.V(logger.Debug).Infoln("Removing peer", id)
|
log.Debug(fmt.Sprint("Removing peer", id))
|
||||||
|
|
||||||
// Unregister the peer from the downloader and Ethereum peer set
|
// Unregister the peer from the downloader and Ethereum peer set
|
||||||
glog.V(logger.Debug).Infof("LES: unregister peer %v", id)
|
log.Debug(fmt.Sprintf("LES: unregister peer %v", id))
|
||||||
if pm.lightSync {
|
if pm.lightSync {
|
||||||
pm.downloader.UnregisterPeer(id)
|
pm.downloader.UnregisterPeer(id)
|
||||||
if pm.txrelay != nil {
|
if pm.txrelay != nil {
|
||||||
@ -268,9 +267,9 @@ func (pm *ProtocolManager) Start(srvr *p2p.Server) {
|
|||||||
} else {
|
} else {
|
||||||
if topicDisc != nil {
|
if topicDisc != nil {
|
||||||
go func() {
|
go func() {
|
||||||
glog.V(logger.Info).Infoln("Starting registering topic", string(lesTopic))
|
log.Info(fmt.Sprint("Starting registering topic", string(lesTopic)))
|
||||||
topicDisc.RegisterTopic(lesTopic, pm.quitSync)
|
topicDisc.RegisterTopic(lesTopic, pm.quitSync)
|
||||||
glog.V(logger.Info).Infoln("Stopped registering topic", string(lesTopic))
|
log.Info(fmt.Sprint("Stopped registering topic", string(lesTopic)))
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
go func() {
|
go func() {
|
||||||
@ -283,7 +282,7 @@ func (pm *ProtocolManager) Start(srvr *p2p.Server) {
|
|||||||
func (pm *ProtocolManager) Stop() {
|
func (pm *ProtocolManager) Stop() {
|
||||||
// Showing a log message. During download / process this could actually
|
// Showing a log message. During download / process this could actually
|
||||||
// take between 5 to 10 seconds and therefor feedback is required.
|
// take between 5 to 10 seconds and therefor feedback is required.
|
||||||
glog.V(logger.Info).Infoln("Stopping light ethereum protocol handler...")
|
log.Info(fmt.Sprint("Stopping light ethereum protocol handler..."))
|
||||||
|
|
||||||
// Quit the sync loop.
|
// Quit the sync loop.
|
||||||
// After this send has completed, no new peers will be accepted.
|
// After this send has completed, no new peers will be accepted.
|
||||||
@ -300,7 +299,7 @@ func (pm *ProtocolManager) Stop() {
|
|||||||
// Wait for any process action
|
// Wait for any process action
|
||||||
pm.wg.Wait()
|
pm.wg.Wait()
|
||||||
|
|
||||||
glog.V(logger.Info).Infoln("Light ethereum protocol handler stopped")
|
log.Info(fmt.Sprint("Light ethereum protocol handler stopped"))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pm *ProtocolManager) newPeer(pv, nv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
|
func (pm *ProtocolManager) newPeer(pv, nv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
|
||||||
@ -310,22 +309,22 @@ func (pm *ProtocolManager) newPeer(pv, nv int, p *p2p.Peer, rw p2p.MsgReadWriter
|
|||||||
// handle is the callback invoked to manage the life cycle of a les peer. When
|
// handle is the callback invoked to manage the life cycle of a les peer. When
|
||||||
// this function terminates, the peer is disconnected.
|
// this function terminates, the peer is disconnected.
|
||||||
func (pm *ProtocolManager) handle(p *peer) error {
|
func (pm *ProtocolManager) handle(p *peer) error {
|
||||||
glog.V(logger.Debug).Infof("%v: peer connected [%s]", p, p.Name())
|
log.Debug(fmt.Sprintf("%v: peer connected [%s]", p, p.Name()))
|
||||||
|
|
||||||
// Execute the LES handshake
|
// Execute the LES handshake
|
||||||
td, head, genesis := pm.blockchain.Status()
|
td, head, genesis := pm.blockchain.Status()
|
||||||
headNum := core.GetBlockNumber(pm.chainDb, head)
|
headNum := core.GetBlockNumber(pm.chainDb, head)
|
||||||
if err := p.Handshake(td, head, headNum, genesis, pm.server); err != nil {
|
if err := p.Handshake(td, head, headNum, genesis, pm.server); err != nil {
|
||||||
glog.V(logger.Debug).Infof("%v: handshake failed: %v", p, err)
|
log.Debug(fmt.Sprintf("%v: handshake failed: %v", p, err))
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
|
if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
|
||||||
rw.Init(p.version)
|
rw.Init(p.version)
|
||||||
}
|
}
|
||||||
// Register the peer locally
|
// Register the peer locally
|
||||||
glog.V(logger.Detail).Infof("%v: adding peer", p)
|
log.Trace(fmt.Sprintf("%v: adding peer", p))
|
||||||
if err := pm.peers.Register(p); err != nil {
|
if err := pm.peers.Register(p); err != nil {
|
||||||
glog.V(logger.Error).Infof("%v: addition failed: %v", p, err)
|
log.Error(fmt.Sprintf("%v: addition failed: %v", p, err))
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer func() {
|
defer func() {
|
||||||
@ -336,7 +335,7 @@ func (pm *ProtocolManager) handle(p *peer) error {
|
|||||||
}()
|
}()
|
||||||
|
|
||||||
// Register the peer in the downloader. If the downloader considers it banned, we disconnect
|
// Register the peer in the downloader. If the downloader considers it banned, we disconnect
|
||||||
glog.V(logger.Debug).Infof("LES: register peer %v", p.id)
|
log.Debug(fmt.Sprintf("LES: register peer %v", p.id))
|
||||||
if pm.lightSync {
|
if pm.lightSync {
|
||||||
requestHeadersByHash := func(origin common.Hash, amount int, skip int, reverse bool) error {
|
requestHeadersByHash := func(origin common.Hash, amount int, skip int, reverse bool) error {
|
||||||
reqID := getNextReqID()
|
reqID := getNextReqID()
|
||||||
@ -390,7 +389,7 @@ func (pm *ProtocolManager) handle(p *peer) error {
|
|||||||
// main loop. handle incoming messages.
|
// main loop. handle incoming messages.
|
||||||
for {
|
for {
|
||||||
if err := pm.handleMsg(p); err != nil {
|
if err := pm.handleMsg(p); err != nil {
|
||||||
glog.V(logger.Debug).Infof("%v: message handling failed: %v", p, err)
|
log.Debug(fmt.Sprintf("%v: message handling failed: %v", p, err))
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -407,7 +406,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
glog.V(logger.Debug).Infoln("msg:", msg.Code, msg.Size)
|
log.Debug(fmt.Sprint("msg:", msg.Code, msg.Size))
|
||||||
|
|
||||||
costs := p.fcCosts[msg.Code]
|
costs := p.fcCosts[msg.Code]
|
||||||
reject := func(reqCnt, maxCnt uint64) bool {
|
reject := func(reqCnt, maxCnt uint64) bool {
|
||||||
@ -420,7 +419,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
|||||||
cost = pm.server.defParams.BufLimit
|
cost = pm.server.defParams.BufLimit
|
||||||
}
|
}
|
||||||
if cost > bufValue {
|
if cost > bufValue {
|
||||||
glog.V(logger.Error).Infof("Request from %v came %v too early", p.id, time.Duration((cost-bufValue)*1000000/pm.server.defParams.MinRecharge))
|
log.Error(fmt.Sprintf("Request from %v came %v too early", p.id, time.Duration((cost-bufValue)*1000000/pm.server.defParams.MinRecharge)))
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
@ -436,25 +435,25 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
|||||||
// Handle the message depending on its contents
|
// Handle the message depending on its contents
|
||||||
switch msg.Code {
|
switch msg.Code {
|
||||||
case StatusMsg:
|
case StatusMsg:
|
||||||
glog.V(logger.Debug).Infof("<=== StatusMsg from peer %v", p.id)
|
log.Debug(fmt.Sprintf("<=== StatusMsg from peer %v", p.id))
|
||||||
// Status messages should never arrive after the handshake
|
// Status messages should never arrive after the handshake
|
||||||
return errResp(ErrExtraStatusMsg, "uncontrolled status message")
|
return errResp(ErrExtraStatusMsg, "uncontrolled status message")
|
||||||
|
|
||||||
// Block header query, collect the requested headers and reply
|
// Block header query, collect the requested headers and reply
|
||||||
case AnnounceMsg:
|
case AnnounceMsg:
|
||||||
glog.V(logger.Debug).Infoln("<=== AnnounceMsg from peer %v:", p.id)
|
log.Debug(fmt.Sprintf("<=== AnnounceMsg from peer %v:", p.id))
|
||||||
|
|
||||||
var req announceData
|
var req announceData
|
||||||
if err := msg.Decode(&req); err != nil {
|
if err := msg.Decode(&req); err != nil {
|
||||||
return errResp(ErrDecode, "%v: %v", msg, err)
|
return errResp(ErrDecode, "%v: %v", msg, err)
|
||||||
}
|
}
|
||||||
glog.V(logger.Detail).Infoln("AnnounceMsg:", req.Number, req.Hash, req.Td, req.ReorgDepth)
|
log.Trace(fmt.Sprint("AnnounceMsg:", req.Number, req.Hash, req.Td, req.ReorgDepth))
|
||||||
if pm.fetcher != nil {
|
if pm.fetcher != nil {
|
||||||
pm.fetcher.announce(p, &req)
|
pm.fetcher.announce(p, &req)
|
||||||
}
|
}
|
||||||
|
|
||||||
case GetBlockHeadersMsg:
|
case GetBlockHeadersMsg:
|
||||||
glog.V(logger.Debug).Infof("<=== GetBlockHeadersMsg from peer %v", p.id)
|
log.Debug(fmt.Sprintf("<=== GetBlockHeadersMsg from peer %v", p.id))
|
||||||
// Decode the complex header query
|
// Decode the complex header query
|
||||||
var req struct {
|
var req struct {
|
||||||
ReqID uint64
|
ReqID uint64
|
||||||
@ -539,7 +538,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
|||||||
return errResp(ErrUnexpectedResponse, "")
|
return errResp(ErrUnexpectedResponse, "")
|
||||||
}
|
}
|
||||||
|
|
||||||
glog.V(logger.Debug).Infof("<=== BlockHeadersMsg from peer %v", p.id)
|
log.Debug(fmt.Sprintf("<=== BlockHeadersMsg from peer %v", p.id))
|
||||||
// A batch of headers arrived to one of our previous requests
|
// A batch of headers arrived to one of our previous requests
|
||||||
var resp struct {
|
var resp struct {
|
||||||
ReqID, BV uint64
|
ReqID, BV uint64
|
||||||
@ -554,12 +553,12 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
|||||||
} else {
|
} else {
|
||||||
err := pm.downloader.DeliverHeaders(p.id, resp.Headers)
|
err := pm.downloader.DeliverHeaders(p.id, resp.Headers)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Debug).Infoln(err)
|
log.Debug(fmt.Sprint(err))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
case GetBlockBodiesMsg:
|
case GetBlockBodiesMsg:
|
||||||
glog.V(logger.Debug).Infof("<=== GetBlockBodiesMsg from peer %v", p.id)
|
log.Debug(fmt.Sprintf("<=== GetBlockBodiesMsg from peer %v", p.id))
|
||||||
// Decode the retrieval message
|
// Decode the retrieval message
|
||||||
var req struct {
|
var req struct {
|
||||||
ReqID uint64
|
ReqID uint64
|
||||||
@ -596,7 +595,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
|||||||
return errResp(ErrUnexpectedResponse, "")
|
return errResp(ErrUnexpectedResponse, "")
|
||||||
}
|
}
|
||||||
|
|
||||||
glog.V(logger.Debug).Infof("<=== BlockBodiesMsg from peer %v", p.id)
|
log.Debug(fmt.Sprintf("<=== BlockBodiesMsg from peer %v", p.id))
|
||||||
// A batch of block bodies arrived to one of our previous requests
|
// A batch of block bodies arrived to one of our previous requests
|
||||||
var resp struct {
|
var resp struct {
|
||||||
ReqID, BV uint64
|
ReqID, BV uint64
|
||||||
@ -613,7 +612,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
case GetCodeMsg:
|
case GetCodeMsg:
|
||||||
glog.V(logger.Debug).Infof("<=== GetCodeMsg from peer %v", p.id)
|
log.Debug(fmt.Sprintf("<=== GetCodeMsg from peer %v", p.id))
|
||||||
// Decode the retrieval message
|
// Decode the retrieval message
|
||||||
var req struct {
|
var req struct {
|
||||||
ReqID uint64
|
ReqID uint64
|
||||||
@ -657,7 +656,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
|||||||
return errResp(ErrUnexpectedResponse, "")
|
return errResp(ErrUnexpectedResponse, "")
|
||||||
}
|
}
|
||||||
|
|
||||||
glog.V(logger.Debug).Infof("<=== CodeMsg from peer %v", p.id)
|
log.Debug(fmt.Sprintf("<=== CodeMsg from peer %v", p.id))
|
||||||
// A batch of node state data arrived to one of our previous requests
|
// A batch of node state data arrived to one of our previous requests
|
||||||
var resp struct {
|
var resp struct {
|
||||||
ReqID, BV uint64
|
ReqID, BV uint64
|
||||||
@ -674,7 +673,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
case GetReceiptsMsg:
|
case GetReceiptsMsg:
|
||||||
glog.V(logger.Debug).Infof("<=== GetReceiptsMsg from peer %v", p.id)
|
log.Debug(fmt.Sprintf("<=== GetReceiptsMsg from peer %v", p.id))
|
||||||
// Decode the retrieval message
|
// Decode the retrieval message
|
||||||
var req struct {
|
var req struct {
|
||||||
ReqID uint64
|
ReqID uint64
|
||||||
@ -705,7 +704,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
|||||||
}
|
}
|
||||||
// If known, encode and queue for response packet
|
// If known, encode and queue for response packet
|
||||||
if encoded, err := rlp.EncodeToBytes(results); err != nil {
|
if encoded, err := rlp.EncodeToBytes(results); err != nil {
|
||||||
glog.V(logger.Error).Infof("failed to encode receipt: %v", err)
|
log.Error(fmt.Sprintf("failed to encode receipt: %v", err))
|
||||||
} else {
|
} else {
|
||||||
receipts = append(receipts, encoded)
|
receipts = append(receipts, encoded)
|
||||||
bytes += len(encoded)
|
bytes += len(encoded)
|
||||||
@ -720,7 +719,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
|||||||
return errResp(ErrUnexpectedResponse, "")
|
return errResp(ErrUnexpectedResponse, "")
|
||||||
}
|
}
|
||||||
|
|
||||||
glog.V(logger.Debug).Infof("<=== ReceiptsMsg from peer %v", p.id)
|
log.Debug(fmt.Sprintf("<=== ReceiptsMsg from peer %v", p.id))
|
||||||
// A batch of receipts arrived to one of our previous requests
|
// A batch of receipts arrived to one of our previous requests
|
||||||
var resp struct {
|
var resp struct {
|
||||||
ReqID, BV uint64
|
ReqID, BV uint64
|
||||||
@ -737,7 +736,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
case GetProofsMsg:
|
case GetProofsMsg:
|
||||||
glog.V(logger.Debug).Infof("<=== GetProofsMsg from peer %v", p.id)
|
log.Debug(fmt.Sprintf("<=== GetProofsMsg from peer %v", p.id))
|
||||||
// Decode the retrieval message
|
// Decode the retrieval message
|
||||||
var req struct {
|
var req struct {
|
||||||
ReqID uint64
|
ReqID uint64
|
||||||
@ -787,7 +786,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
|||||||
return errResp(ErrUnexpectedResponse, "")
|
return errResp(ErrUnexpectedResponse, "")
|
||||||
}
|
}
|
||||||
|
|
||||||
glog.V(logger.Debug).Infof("<=== ProofsMsg from peer %v", p.id)
|
log.Debug(fmt.Sprintf("<=== ProofsMsg from peer %v", p.id))
|
||||||
// A batch of merkle proofs arrived to one of our previous requests
|
// A batch of merkle proofs arrived to one of our previous requests
|
||||||
var resp struct {
|
var resp struct {
|
||||||
ReqID, BV uint64
|
ReqID, BV uint64
|
||||||
@ -804,7 +803,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
case GetHeaderProofsMsg:
|
case GetHeaderProofsMsg:
|
||||||
glog.V(logger.Debug).Infof("<=== GetHeaderProofsMsg from peer %v", p.id)
|
log.Debug(fmt.Sprintf("<=== GetHeaderProofsMsg from peer %v", p.id))
|
||||||
// Decode the retrieval message
|
// Decode the retrieval message
|
||||||
var req struct {
|
var req struct {
|
||||||
ReqID uint64
|
ReqID uint64
|
||||||
@ -848,7 +847,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
|||||||
return errResp(ErrUnexpectedResponse, "")
|
return errResp(ErrUnexpectedResponse, "")
|
||||||
}
|
}
|
||||||
|
|
||||||
glog.V(logger.Debug).Infof("<=== HeaderProofsMsg from peer %v", p.id)
|
log.Debug(fmt.Sprintf("<=== HeaderProofsMsg from peer %v", p.id))
|
||||||
var resp struct {
|
var resp struct {
|
||||||
ReqID, BV uint64
|
ReqID, BV uint64
|
||||||
Data []ChtResp
|
Data []ChtResp
|
||||||
@ -885,7 +884,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
|||||||
pm.server.fcCostStats.update(msg.Code, uint64(reqCnt), rcost)
|
pm.server.fcCostStats.update(msg.Code, uint64(reqCnt), rcost)
|
||||||
|
|
||||||
default:
|
default:
|
||||||
glog.V(logger.Debug).Infof("<=== unknown message with code %d from peer %v", msg.Code, p.id)
|
log.Debug(fmt.Sprintf("<=== unknown message with code %d from peer %v", msg.Code, p.id))
|
||||||
return errResp(ErrInvalidMsgCode, "%v", msg.Code)
|
return errResp(ErrInvalidMsgCode, "%v", msg.Code)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -19,14 +19,14 @@ package les
|
|||||||
import (
|
import (
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common/mclock"
|
"github.com/ethereum/go-ethereum/common/mclock"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/light"
|
"github.com/ethereum/go-ethereum/light"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -151,7 +151,7 @@ func (self *LesOdr) requestPeer(req *sentReq, peer *peer, delivered, timeout cha
|
|||||||
select {
|
select {
|
||||||
case <-delivered:
|
case <-delivered:
|
||||||
case <-time.After(hardRequestTimeout):
|
case <-time.After(hardRequestTimeout):
|
||||||
glog.V(logger.Debug).Infof("ODR hard request timeout from peer %v", peer.id)
|
log.Debug(fmt.Sprintf("ODR hard request timeout from peer %v", peer.id))
|
||||||
go self.removePeer(peer.id)
|
go self.removePeer(peer.id)
|
||||||
case <-self.stop:
|
case <-self.stop:
|
||||||
return
|
return
|
||||||
@ -237,7 +237,7 @@ func (self *LesOdr) Retrieve(ctx context.Context, req light.OdrRequest) (err err
|
|||||||
// retrieved from network, store in db
|
// retrieved from network, store in db
|
||||||
req.StoreResult(self.db)
|
req.StoreResult(self.db)
|
||||||
} else {
|
} else {
|
||||||
glog.V(logger.Debug).Infof("networkRequest err = %v", err)
|
log.Debug(fmt.Sprintf("networkRequest err = %v", err))
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -21,6 +21,7 @@ package les
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core"
|
"github.com/ethereum/go-ethereum/core"
|
||||||
@ -28,8 +29,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/light"
|
"github.com/ethereum/go-ethereum/light"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"github.com/ethereum/go-ethereum/trie"
|
"github.com/ethereum/go-ethereum/trie"
|
||||||
)
|
)
|
||||||
@ -74,7 +74,7 @@ func (self *BlockRequest) CanSend(peer *peer) bool {
|
|||||||
|
|
||||||
// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
|
// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
|
||||||
func (self *BlockRequest) Request(reqID uint64, peer *peer) error {
|
func (self *BlockRequest) Request(reqID uint64, peer *peer) error {
|
||||||
glog.V(logger.Debug).Infof("ODR: requesting body of block %08x from peer %v", self.Hash[:4], peer.id)
|
log.Debug(fmt.Sprintf("ODR: requesting body of block %08x from peer %v", self.Hash[:4], peer.id))
|
||||||
return peer.RequestBodies(reqID, self.GetCost(peer), []common.Hash{self.Hash})
|
return peer.RequestBodies(reqID, self.GetCost(peer), []common.Hash{self.Hash})
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -82,39 +82,39 @@ func (self *BlockRequest) Request(reqID uint64, peer *peer) error {
|
|||||||
// returns true and stores results in memory if the message was a valid reply
|
// returns true and stores results in memory if the message was a valid reply
|
||||||
// to the request (implementation of LesOdrRequest)
|
// to the request (implementation of LesOdrRequest)
|
||||||
func (self *BlockRequest) Valid(db ethdb.Database, msg *Msg) bool {
|
func (self *BlockRequest) Valid(db ethdb.Database, msg *Msg) bool {
|
||||||
glog.V(logger.Debug).Infof("ODR: validating body of block %08x", self.Hash[:4])
|
log.Debug(fmt.Sprintf("ODR: validating body of block %08x", self.Hash[:4]))
|
||||||
if msg.MsgType != MsgBlockBodies {
|
if msg.MsgType != MsgBlockBodies {
|
||||||
glog.V(logger.Debug).Infof("ODR: invalid message type")
|
log.Debug(fmt.Sprintf("ODR: invalid message type"))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
bodies := msg.Obj.([]*types.Body)
|
bodies := msg.Obj.([]*types.Body)
|
||||||
if len(bodies) != 1 {
|
if len(bodies) != 1 {
|
||||||
glog.V(logger.Debug).Infof("ODR: invalid number of entries: %d", len(bodies))
|
log.Debug(fmt.Sprintf("ODR: invalid number of entries: %d", len(bodies)))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
body := bodies[0]
|
body := bodies[0]
|
||||||
header := core.GetHeader(db, self.Hash, self.Number)
|
header := core.GetHeader(db, self.Hash, self.Number)
|
||||||
if header == nil {
|
if header == nil {
|
||||||
glog.V(logger.Debug).Infof("ODR: header not found for block %08x", self.Hash[:4])
|
log.Debug(fmt.Sprintf("ODR: header not found for block %08x", self.Hash[:4]))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
txHash := types.DeriveSha(types.Transactions(body.Transactions))
|
txHash := types.DeriveSha(types.Transactions(body.Transactions))
|
||||||
if header.TxHash != txHash {
|
if header.TxHash != txHash {
|
||||||
glog.V(logger.Debug).Infof("ODR: header.TxHash %08x does not match received txHash %08x", header.TxHash[:4], txHash[:4])
|
log.Debug(fmt.Sprintf("ODR: header.TxHash %08x does not match received txHash %08x", header.TxHash[:4], txHash[:4]))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
uncleHash := types.CalcUncleHash(body.Uncles)
|
uncleHash := types.CalcUncleHash(body.Uncles)
|
||||||
if header.UncleHash != uncleHash {
|
if header.UncleHash != uncleHash {
|
||||||
glog.V(logger.Debug).Infof("ODR: header.UncleHash %08x does not match received uncleHash %08x", header.UncleHash[:4], uncleHash[:4])
|
log.Debug(fmt.Sprintf("ODR: header.UncleHash %08x does not match received uncleHash %08x", header.UncleHash[:4], uncleHash[:4]))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
data, err := rlp.EncodeToBytes(body)
|
data, err := rlp.EncodeToBytes(body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Debug).Infof("ODR: body RLP encode error: %v", err)
|
log.Debug(fmt.Sprintf("ODR: body RLP encode error: %v", err))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
self.Rlp = data
|
self.Rlp = data
|
||||||
glog.V(logger.Debug).Infof("ODR: validation successful")
|
log.Debug(fmt.Sprintf("ODR: validation successful"))
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -134,7 +134,7 @@ func (self *ReceiptsRequest) CanSend(peer *peer) bool {
|
|||||||
|
|
||||||
// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
|
// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
|
||||||
func (self *ReceiptsRequest) Request(reqID uint64, peer *peer) error {
|
func (self *ReceiptsRequest) Request(reqID uint64, peer *peer) error {
|
||||||
glog.V(logger.Debug).Infof("ODR: requesting receipts for block %08x from peer %v", self.Hash[:4], peer.id)
|
log.Debug(fmt.Sprintf("ODR: requesting receipts for block %08x from peer %v", self.Hash[:4], peer.id))
|
||||||
return peer.RequestReceipts(reqID, self.GetCost(peer), []common.Hash{self.Hash})
|
return peer.RequestReceipts(reqID, self.GetCost(peer), []common.Hash{self.Hash})
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -142,28 +142,28 @@ func (self *ReceiptsRequest) Request(reqID uint64, peer *peer) error {
|
|||||||
// returns true and stores results in memory if the message was a valid reply
|
// returns true and stores results in memory if the message was a valid reply
|
||||||
// to the request (implementation of LesOdrRequest)
|
// to the request (implementation of LesOdrRequest)
|
||||||
func (self *ReceiptsRequest) Valid(db ethdb.Database, msg *Msg) bool {
|
func (self *ReceiptsRequest) Valid(db ethdb.Database, msg *Msg) bool {
|
||||||
glog.V(logger.Debug).Infof("ODR: validating receipts for block %08x", self.Hash[:4])
|
log.Debug(fmt.Sprintf("ODR: validating receipts for block %08x", self.Hash[:4]))
|
||||||
if msg.MsgType != MsgReceipts {
|
if msg.MsgType != MsgReceipts {
|
||||||
glog.V(logger.Debug).Infof("ODR: invalid message type")
|
log.Debug(fmt.Sprintf("ODR: invalid message type"))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
receipts := msg.Obj.([]types.Receipts)
|
receipts := msg.Obj.([]types.Receipts)
|
||||||
if len(receipts) != 1 {
|
if len(receipts) != 1 {
|
||||||
glog.V(logger.Debug).Infof("ODR: invalid number of entries: %d", len(receipts))
|
log.Debug(fmt.Sprintf("ODR: invalid number of entries: %d", len(receipts)))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
hash := types.DeriveSha(receipts[0])
|
hash := types.DeriveSha(receipts[0])
|
||||||
header := core.GetHeader(db, self.Hash, self.Number)
|
header := core.GetHeader(db, self.Hash, self.Number)
|
||||||
if header == nil {
|
if header == nil {
|
||||||
glog.V(logger.Debug).Infof("ODR: header not found for block %08x", self.Hash[:4])
|
log.Debug(fmt.Sprintf("ODR: header not found for block %08x", self.Hash[:4]))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
if !bytes.Equal(header.ReceiptHash[:], hash[:]) {
|
if !bytes.Equal(header.ReceiptHash[:], hash[:]) {
|
||||||
glog.V(logger.Debug).Infof("ODR: header receipts hash %08x does not match calculated RLP hash %08x", header.ReceiptHash[:4], hash[:4])
|
log.Debug(fmt.Sprintf("ODR: header receipts hash %08x does not match calculated RLP hash %08x", header.ReceiptHash[:4], hash[:4]))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
self.Receipts = receipts[0]
|
self.Receipts = receipts[0]
|
||||||
glog.V(logger.Debug).Infof("ODR: validation successful")
|
log.Debug(fmt.Sprintf("ODR: validation successful"))
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -189,7 +189,7 @@ func (self *TrieRequest) CanSend(peer *peer) bool {
|
|||||||
|
|
||||||
// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
|
// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
|
||||||
func (self *TrieRequest) Request(reqID uint64, peer *peer) error {
|
func (self *TrieRequest) Request(reqID uint64, peer *peer) error {
|
||||||
glog.V(logger.Debug).Infof("ODR: requesting trie root %08x key %08x from peer %v", self.Id.Root[:4], self.Key[:4], peer.id)
|
log.Debug(fmt.Sprintf("ODR: requesting trie root %08x key %08x from peer %v", self.Id.Root[:4], self.Key[:4], peer.id))
|
||||||
req := &ProofReq{
|
req := &ProofReq{
|
||||||
BHash: self.Id.BlockHash,
|
BHash: self.Id.BlockHash,
|
||||||
AccKey: self.Id.AccKey,
|
AccKey: self.Id.AccKey,
|
||||||
@ -202,24 +202,24 @@ func (self *TrieRequest) Request(reqID uint64, peer *peer) error {
|
|||||||
// returns true and stores results in memory if the message was a valid reply
|
// returns true and stores results in memory if the message was a valid reply
|
||||||
// to the request (implementation of LesOdrRequest)
|
// to the request (implementation of LesOdrRequest)
|
||||||
func (self *TrieRequest) Valid(db ethdb.Database, msg *Msg) bool {
|
func (self *TrieRequest) Valid(db ethdb.Database, msg *Msg) bool {
|
||||||
glog.V(logger.Debug).Infof("ODR: validating trie root %08x key %08x", self.Id.Root[:4], self.Key[:4])
|
log.Debug(fmt.Sprintf("ODR: validating trie root %08x key %08x", self.Id.Root[:4], self.Key[:4]))
|
||||||
|
|
||||||
if msg.MsgType != MsgProofs {
|
if msg.MsgType != MsgProofs {
|
||||||
glog.V(logger.Debug).Infof("ODR: invalid message type")
|
log.Debug(fmt.Sprintf("ODR: invalid message type"))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
proofs := msg.Obj.([][]rlp.RawValue)
|
proofs := msg.Obj.([][]rlp.RawValue)
|
||||||
if len(proofs) != 1 {
|
if len(proofs) != 1 {
|
||||||
glog.V(logger.Debug).Infof("ODR: invalid number of entries: %d", len(proofs))
|
log.Debug(fmt.Sprintf("ODR: invalid number of entries: %d", len(proofs)))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
_, err := trie.VerifyProof(self.Id.Root, self.Key, proofs[0])
|
_, err := trie.VerifyProof(self.Id.Root, self.Key, proofs[0])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Debug).Infof("ODR: merkle proof verification error: %v", err)
|
log.Debug(fmt.Sprintf("ODR: merkle proof verification error: %v", err))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
self.Proof = proofs[0]
|
self.Proof = proofs[0]
|
||||||
glog.V(logger.Debug).Infof("ODR: validation successful")
|
log.Debug(fmt.Sprintf("ODR: validation successful"))
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -244,7 +244,7 @@ func (self *CodeRequest) CanSend(peer *peer) bool {
|
|||||||
|
|
||||||
// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
|
// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
|
||||||
func (self *CodeRequest) Request(reqID uint64, peer *peer) error {
|
func (self *CodeRequest) Request(reqID uint64, peer *peer) error {
|
||||||
glog.V(logger.Debug).Infof("ODR: requesting node data for hash %08x from peer %v", self.Hash[:4], peer.id)
|
log.Debug(fmt.Sprintf("ODR: requesting node data for hash %08x from peer %v", self.Hash[:4], peer.id))
|
||||||
req := &CodeReq{
|
req := &CodeReq{
|
||||||
BHash: self.Id.BlockHash,
|
BHash: self.Id.BlockHash,
|
||||||
AccKey: self.Id.AccKey,
|
AccKey: self.Id.AccKey,
|
||||||
@ -256,23 +256,23 @@ func (self *CodeRequest) Request(reqID uint64, peer *peer) error {
|
|||||||
// returns true and stores results in memory if the message was a valid reply
|
// returns true and stores results in memory if the message was a valid reply
|
||||||
// to the request (implementation of LesOdrRequest)
|
// to the request (implementation of LesOdrRequest)
|
||||||
func (self *CodeRequest) Valid(db ethdb.Database, msg *Msg) bool {
|
func (self *CodeRequest) Valid(db ethdb.Database, msg *Msg) bool {
|
||||||
glog.V(logger.Debug).Infof("ODR: validating node data for hash %08x", self.Hash[:4])
|
log.Debug(fmt.Sprintf("ODR: validating node data for hash %08x", self.Hash[:4]))
|
||||||
if msg.MsgType != MsgCode {
|
if msg.MsgType != MsgCode {
|
||||||
glog.V(logger.Debug).Infof("ODR: invalid message type")
|
log.Debug(fmt.Sprintf("ODR: invalid message type"))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
reply := msg.Obj.([][]byte)
|
reply := msg.Obj.([][]byte)
|
||||||
if len(reply) != 1 {
|
if len(reply) != 1 {
|
||||||
glog.V(logger.Debug).Infof("ODR: invalid number of entries: %d", len(reply))
|
log.Debug(fmt.Sprintf("ODR: invalid number of entries: %d", len(reply)))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
data := reply[0]
|
data := reply[0]
|
||||||
if hash := crypto.Keccak256Hash(data); self.Hash != hash {
|
if hash := crypto.Keccak256Hash(data); self.Hash != hash {
|
||||||
glog.V(logger.Debug).Infof("ODR: requested hash %08x does not match received data hash %08x", self.Hash[:4], hash[:4])
|
log.Debug(fmt.Sprintf("ODR: requested hash %08x does not match received data hash %08x", self.Hash[:4], hash[:4]))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
self.Data = data
|
self.Data = data
|
||||||
glog.V(logger.Debug).Infof("ODR: validation successful")
|
log.Debug(fmt.Sprintf("ODR: validation successful"))
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -304,7 +304,7 @@ func (self *ChtRequest) CanSend(peer *peer) bool {
|
|||||||
|
|
||||||
// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
|
// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
|
||||||
func (self *ChtRequest) Request(reqID uint64, peer *peer) error {
|
func (self *ChtRequest) Request(reqID uint64, peer *peer) error {
|
||||||
glog.V(logger.Debug).Infof("ODR: requesting CHT #%d block #%d from peer %v", self.ChtNum, self.BlockNum, peer.id)
|
log.Debug(fmt.Sprintf("ODR: requesting CHT #%d block #%d from peer %v", self.ChtNum, self.BlockNum, peer.id))
|
||||||
req := &ChtReq{
|
req := &ChtReq{
|
||||||
ChtNum: self.ChtNum,
|
ChtNum: self.ChtNum,
|
||||||
BlockNum: self.BlockNum,
|
BlockNum: self.BlockNum,
|
||||||
@ -316,15 +316,15 @@ func (self *ChtRequest) Request(reqID uint64, peer *peer) error {
|
|||||||
// returns true and stores results in memory if the message was a valid reply
|
// returns true and stores results in memory if the message was a valid reply
|
||||||
// to the request (implementation of LesOdrRequest)
|
// to the request (implementation of LesOdrRequest)
|
||||||
func (self *ChtRequest) Valid(db ethdb.Database, msg *Msg) bool {
|
func (self *ChtRequest) Valid(db ethdb.Database, msg *Msg) bool {
|
||||||
glog.V(logger.Debug).Infof("ODR: validating CHT #%d block #%d", self.ChtNum, self.BlockNum)
|
log.Debug(fmt.Sprintf("ODR: validating CHT #%d block #%d", self.ChtNum, self.BlockNum))
|
||||||
|
|
||||||
if msg.MsgType != MsgHeaderProofs {
|
if msg.MsgType != MsgHeaderProofs {
|
||||||
glog.V(logger.Debug).Infof("ODR: invalid message type")
|
log.Debug(fmt.Sprintf("ODR: invalid message type"))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
proofs := msg.Obj.([]ChtResp)
|
proofs := msg.Obj.([]ChtResp)
|
||||||
if len(proofs) != 1 {
|
if len(proofs) != 1 {
|
||||||
glog.V(logger.Debug).Infof("ODR: invalid number of entries: %d", len(proofs))
|
log.Debug(fmt.Sprintf("ODR: invalid number of entries: %d", len(proofs)))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
proof := proofs[0]
|
proof := proofs[0]
|
||||||
@ -332,22 +332,22 @@ func (self *ChtRequest) Valid(db ethdb.Database, msg *Msg) bool {
|
|||||||
binary.BigEndian.PutUint64(encNumber[:], self.BlockNum)
|
binary.BigEndian.PutUint64(encNumber[:], self.BlockNum)
|
||||||
value, err := trie.VerifyProof(self.ChtRoot, encNumber[:], proof.Proof)
|
value, err := trie.VerifyProof(self.ChtRoot, encNumber[:], proof.Proof)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Debug).Infof("ODR: CHT merkle proof verification error: %v", err)
|
log.Debug(fmt.Sprintf("ODR: CHT merkle proof verification error: %v", err))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
var node light.ChtNode
|
var node light.ChtNode
|
||||||
if err := rlp.DecodeBytes(value, &node); err != nil {
|
if err := rlp.DecodeBytes(value, &node); err != nil {
|
||||||
glog.V(logger.Debug).Infof("ODR: error decoding CHT node: %v", err)
|
log.Debug(fmt.Sprintf("ODR: error decoding CHT node: %v", err))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
if node.Hash != proof.Header.Hash() {
|
if node.Hash != proof.Header.Hash() {
|
||||||
glog.V(logger.Debug).Infof("ODR: CHT header hash does not match")
|
log.Debug(fmt.Sprintf("ODR: CHT header hash does not match"))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
self.Proof = proof.Proof
|
self.Proof = proof.Proof
|
||||||
self.Header = proof.Header
|
self.Header = proof.Header
|
||||||
self.Td = node.Td
|
self.Td = node.Td
|
||||||
glog.V(logger.Debug).Infof("ODR: validation successful")
|
log.Debug(fmt.Sprintf("ODR: validation successful"))
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
19
les/peer.go
19
les/peer.go
@ -27,8 +27,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/eth"
|
"github.com/ethereum/go-ethereum/eth"
|
||||||
"github.com/ethereum/go-ethereum/les/flowcontrol"
|
"github.com/ethereum/go-ethereum/les/flowcontrol"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/p2p"
|
"github.com/ethereum/go-ethereum/p2p"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
)
|
)
|
||||||
@ -196,51 +195,51 @@ func (p *peer) SendHeaderProofs(reqID, bv uint64, proofs []ChtResp) error {
|
|||||||
// RequestHeadersByHash fetches a batch of blocks' headers corresponding to the
|
// RequestHeadersByHash fetches a batch of blocks' headers corresponding to the
|
||||||
// specified header query, based on the hash of an origin block.
|
// specified header query, based on the hash of an origin block.
|
||||||
func (p *peer) RequestHeadersByHash(reqID, cost uint64, origin common.Hash, amount int, skip int, reverse bool) error {
|
func (p *peer) RequestHeadersByHash(reqID, cost uint64, origin common.Hash, amount int, skip int, reverse bool) error {
|
||||||
glog.V(logger.Debug).Infof("%v fetching %d headers from %x, skipping %d (reverse = %v)", p, amount, origin[:4], skip, reverse)
|
log.Debug(fmt.Sprintf("%v fetching %d headers from %x, skipping %d (reverse = %v)", p, amount, origin[:4], skip, reverse))
|
||||||
return sendRequest(p.rw, GetBlockHeadersMsg, reqID, cost, &getBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
|
return sendRequest(p.rw, GetBlockHeadersMsg, reqID, cost, &getBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
|
||||||
}
|
}
|
||||||
|
|
||||||
// RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the
|
// RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the
|
||||||
// specified header query, based on the number of an origin block.
|
// specified header query, based on the number of an origin block.
|
||||||
func (p *peer) RequestHeadersByNumber(reqID, cost, origin uint64, amount int, skip int, reverse bool) error {
|
func (p *peer) RequestHeadersByNumber(reqID, cost, origin uint64, amount int, skip int, reverse bool) error {
|
||||||
glog.V(logger.Debug).Infof("%v fetching %d headers from #%d, skipping %d (reverse = %v)", p, amount, origin, skip, reverse)
|
log.Debug(fmt.Sprintf("%v fetching %d headers from #%d, skipping %d (reverse = %v)", p, amount, origin, skip, reverse))
|
||||||
return sendRequest(p.rw, GetBlockHeadersMsg, reqID, cost, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
|
return sendRequest(p.rw, GetBlockHeadersMsg, reqID, cost, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
|
||||||
}
|
}
|
||||||
|
|
||||||
// RequestBodies fetches a batch of blocks' bodies corresponding to the hashes
|
// RequestBodies fetches a batch of blocks' bodies corresponding to the hashes
|
||||||
// specified.
|
// specified.
|
||||||
func (p *peer) RequestBodies(reqID, cost uint64, hashes []common.Hash) error {
|
func (p *peer) RequestBodies(reqID, cost uint64, hashes []common.Hash) error {
|
||||||
glog.V(logger.Debug).Infof("%v fetching %d block bodies", p, len(hashes))
|
log.Debug(fmt.Sprintf("%v fetching %d block bodies", p, len(hashes)))
|
||||||
return sendRequest(p.rw, GetBlockBodiesMsg, reqID, cost, hashes)
|
return sendRequest(p.rw, GetBlockBodiesMsg, reqID, cost, hashes)
|
||||||
}
|
}
|
||||||
|
|
||||||
// RequestCode fetches a batch of arbitrary data from a node's known state
|
// RequestCode fetches a batch of arbitrary data from a node's known state
|
||||||
// data, corresponding to the specified hashes.
|
// data, corresponding to the specified hashes.
|
||||||
func (p *peer) RequestCode(reqID, cost uint64, reqs []*CodeReq) error {
|
func (p *peer) RequestCode(reqID, cost uint64, reqs []*CodeReq) error {
|
||||||
glog.V(logger.Debug).Infof("%v fetching %v state data", p, len(reqs))
|
log.Debug(fmt.Sprintf("%v fetching %v state data", p, len(reqs)))
|
||||||
return sendRequest(p.rw, GetCodeMsg, reqID, cost, reqs)
|
return sendRequest(p.rw, GetCodeMsg, reqID, cost, reqs)
|
||||||
}
|
}
|
||||||
|
|
||||||
// RequestReceipts fetches a batch of transaction receipts from a remote node.
|
// RequestReceipts fetches a batch of transaction receipts from a remote node.
|
||||||
func (p *peer) RequestReceipts(reqID, cost uint64, hashes []common.Hash) error {
|
func (p *peer) RequestReceipts(reqID, cost uint64, hashes []common.Hash) error {
|
||||||
glog.V(logger.Debug).Infof("%v fetching %v receipts", p, len(hashes))
|
log.Debug(fmt.Sprintf("%v fetching %v receipts", p, len(hashes)))
|
||||||
return sendRequest(p.rw, GetReceiptsMsg, reqID, cost, hashes)
|
return sendRequest(p.rw, GetReceiptsMsg, reqID, cost, hashes)
|
||||||
}
|
}
|
||||||
|
|
||||||
// RequestProofs fetches a batch of merkle proofs from a remote node.
|
// RequestProofs fetches a batch of merkle proofs from a remote node.
|
||||||
func (p *peer) RequestProofs(reqID, cost uint64, reqs []*ProofReq) error {
|
func (p *peer) RequestProofs(reqID, cost uint64, reqs []*ProofReq) error {
|
||||||
glog.V(logger.Debug).Infof("%v fetching %v proofs", p, len(reqs))
|
log.Debug(fmt.Sprintf("%v fetching %v proofs", p, len(reqs)))
|
||||||
return sendRequest(p.rw, GetProofsMsg, reqID, cost, reqs)
|
return sendRequest(p.rw, GetProofsMsg, reqID, cost, reqs)
|
||||||
}
|
}
|
||||||
|
|
||||||
// RequestHeaderProofs fetches a batch of header merkle proofs from a remote node.
|
// RequestHeaderProofs fetches a batch of header merkle proofs from a remote node.
|
||||||
func (p *peer) RequestHeaderProofs(reqID, cost uint64, reqs []*ChtReq) error {
|
func (p *peer) RequestHeaderProofs(reqID, cost uint64, reqs []*ChtReq) error {
|
||||||
glog.V(logger.Debug).Infof("%v fetching %v header proofs", p, len(reqs))
|
log.Debug(fmt.Sprintf("%v fetching %v header proofs", p, len(reqs)))
|
||||||
return sendRequest(p.rw, GetHeaderProofsMsg, reqID, cost, reqs)
|
return sendRequest(p.rw, GetHeaderProofsMsg, reqID, cost, reqs)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *peer) SendTxs(cost uint64, txs types.Transactions) error {
|
func (p *peer) SendTxs(cost uint64, txs types.Transactions) error {
|
||||||
glog.V(logger.Debug).Infof("%v relaying %v txs", p, len(txs))
|
log.Debug(fmt.Sprintf("%v relaying %v txs", p, len(txs)))
|
||||||
reqID := getNextReqID()
|
reqID := getNextReqID()
|
||||||
p.fcServer.MustAssignRequest(reqID)
|
p.fcServer.MustAssignRequest(reqID)
|
||||||
p.fcServer.SendRequest(reqID, cost)
|
p.fcServer.SendRequest(reqID, cost)
|
||||||
|
@ -19,6 +19,7 @@ package les
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
"math"
|
"math"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
@ -30,8 +31,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/les/flowcontrol"
|
"github.com/ethereum/go-ethereum/les/flowcontrol"
|
||||||
"github.com/ethereum/go-ethereum/light"
|
"github.com/ethereum/go-ethereum/light"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/p2p"
|
"github.com/ethereum/go-ethereum/p2p"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"github.com/ethereum/go-ethereum/trie"
|
"github.com/ethereum/go-ethereum/trie"
|
||||||
@ -292,7 +292,7 @@ func (pm *ProtocolManager) blockLoop() {
|
|||||||
lastHead = header
|
lastHead = header
|
||||||
lastBroadcastTd = td
|
lastBroadcastTd = td
|
||||||
|
|
||||||
glog.V(logger.Debug).Infoln("===> ", number, hash, td, reorg)
|
log.Debug(fmt.Sprint("===> ", number, hash, td, reorg))
|
||||||
|
|
||||||
announce := announceData{Hash: hash, Number: number, Td: td, ReorgDepth: reorg}
|
announce := announceData{Hash: hash, Number: number, Td: td, ReorgDepth: reorg}
|
||||||
for _, p := range peers {
|
for _, p := range peers {
|
||||||
@ -396,7 +396,7 @@ func makeCht(db ethdb.Database) bool {
|
|||||||
} else {
|
} else {
|
||||||
lastChtNum++
|
lastChtNum++
|
||||||
|
|
||||||
glog.V(logger.Detail).Infof("cht: %d %064x", lastChtNum, root)
|
log.Trace(fmt.Sprintf("cht: %d %064x", lastChtNum, root))
|
||||||
|
|
||||||
storeChtRoot(db, lastChtNum, root)
|
storeChtRoot(db, lastChtNum, root)
|
||||||
var data [8]byte
|
var data [8]byte
|
||||||
|
@ -18,6 +18,7 @@
|
|||||||
package les
|
package les
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"math"
|
"math"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
@ -28,8 +29,7 @@ import (
|
|||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common/mclock"
|
"github.com/ethereum/go-ethereum/common/mclock"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/p2p"
|
"github.com/ethereum/go-ethereum/p2p"
|
||||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||||
"github.com/ethereum/go-ethereum/p2p/discv5"
|
"github.com/ethereum/go-ethereum/p2p/discv5"
|
||||||
@ -162,7 +162,7 @@ func (pool *serverPool) connect(p *peer, ip net.IP, port uint16) *poolEntry {
|
|||||||
if entry == nil {
|
if entry == nil {
|
||||||
entry = pool.findOrNewNode(p.ID(), ip, port)
|
entry = pool.findOrNewNode(p.ID(), ip, port)
|
||||||
}
|
}
|
||||||
glog.V(logger.Debug).Infof("connecting to %v, state: %v", p.id, entry.state)
|
log.Debug(fmt.Sprintf("connecting to %v, state: %v", p.id, entry.state))
|
||||||
if entry.state == psConnected || entry.state == psRegistered {
|
if entry.state == psConnected || entry.state == psRegistered {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -184,7 +184,7 @@ func (pool *serverPool) connect(p *peer, ip net.IP, port uint16) *poolEntry {
|
|||||||
|
|
||||||
// registered should be called after a successful handshake
|
// registered should be called after a successful handshake
|
||||||
func (pool *serverPool) registered(entry *poolEntry) {
|
func (pool *serverPool) registered(entry *poolEntry) {
|
||||||
glog.V(logger.Debug).Infof("registered %v", entry.id.String())
|
log.Debug(fmt.Sprintf("registered %v", entry.id.String()))
|
||||||
pool.lock.Lock()
|
pool.lock.Lock()
|
||||||
defer pool.lock.Unlock()
|
defer pool.lock.Unlock()
|
||||||
|
|
||||||
@ -202,7 +202,7 @@ func (pool *serverPool) registered(entry *poolEntry) {
|
|||||||
// can be updated optionally (not updated if no registration happened, in this case
|
// can be updated optionally (not updated if no registration happened, in this case
|
||||||
// only connection statistics are updated, just like in case of timeout)
|
// only connection statistics are updated, just like in case of timeout)
|
||||||
func (pool *serverPool) disconnect(entry *poolEntry) {
|
func (pool *serverPool) disconnect(entry *poolEntry) {
|
||||||
glog.V(logger.Debug).Infof("disconnected %v", entry.id.String())
|
log.Debug(fmt.Sprintf("disconnected %v", entry.id.String()))
|
||||||
pool.lock.Lock()
|
pool.lock.Lock()
|
||||||
defer pool.lock.Unlock()
|
defer pool.lock.Unlock()
|
||||||
|
|
||||||
@ -418,7 +418,7 @@ func (pool *serverPool) findOrNewNode(id discover.NodeID, ip net.IP, port uint16
|
|||||||
now := mclock.Now()
|
now := mclock.Now()
|
||||||
entry := pool.entries[id]
|
entry := pool.entries[id]
|
||||||
if entry == nil {
|
if entry == nil {
|
||||||
glog.V(logger.Debug).Infof("discovered %v", id.String())
|
log.Debug(fmt.Sprintf("discovered %v", id.String()))
|
||||||
entry = &poolEntry{
|
entry = &poolEntry{
|
||||||
id: id,
|
id: id,
|
||||||
addr: make(map[string]*poolEntryAddress),
|
addr: make(map[string]*poolEntryAddress),
|
||||||
@ -459,11 +459,11 @@ func (pool *serverPool) loadNodes() {
|
|||||||
var list []*poolEntry
|
var list []*poolEntry
|
||||||
err = rlp.DecodeBytes(enc, &list)
|
err = rlp.DecodeBytes(enc, &list)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(logger.Debug).Infof("node list decode error: %v", err)
|
log.Debug(fmt.Sprintf("node list decode error: %v", err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
for _, e := range list {
|
for _, e := range list {
|
||||||
glog.V(logger.Debug).Infof("loaded server stats %016x fails: %v connStats: %v / %v delayStats: %v / %v responseStats: %v / %v timeoutStats: %v / %v", e.id[0:8], e.lastConnected.fails, e.connectStats.avg, e.connectStats.weight, time.Duration(e.delayStats.avg), e.delayStats.weight, time.Duration(e.responseStats.avg), e.responseStats.weight, e.timeoutStats.avg, e.timeoutStats.weight)
|
log.Debug(fmt.Sprintf("loaded server stats %016x fails: %v connStats: %v / %v delayStats: %v / %v responseStats: %v / %v timeoutStats: %v / %v", e.id[0:8], e.lastConnected.fails, e.connectStats.avg, e.connectStats.weight, time.Duration(e.delayStats.avg), e.delayStats.weight, time.Duration(e.responseStats.avg), e.responseStats.weight, e.timeoutStats.avg, e.timeoutStats.weight))
|
||||||
pool.entries[e.id] = e
|
pool.entries[e.id] = e
|
||||||
pool.knownQueue.setLatest(e)
|
pool.knownQueue.setLatest(e)
|
||||||
pool.knownSelect.update((*knownEntry)(e))
|
pool.knownSelect.update((*knownEntry)(e))
|
||||||
@ -568,7 +568,7 @@ func (pool *serverPool) dial(entry *poolEntry, knownSelected bool) {
|
|||||||
pool.newSelected++
|
pool.newSelected++
|
||||||
}
|
}
|
||||||
addr := entry.addrSelect.choose().(*poolEntryAddress)
|
addr := entry.addrSelect.choose().(*poolEntryAddress)
|
||||||
glog.V(logger.Debug).Infof("dialing %v out of %v, known: %v", entry.id.String()+"@"+addr.strKey(), len(entry.addr), knownSelected)
|
log.Debug(fmt.Sprintf("dialing %v out of %v, known: %v", entry.id.String()+"@"+addr.strKey(), len(entry.addr), knownSelected))
|
||||||
entry.dialed = addr
|
entry.dialed = addr
|
||||||
go func() {
|
go func() {
|
||||||
pool.server.AddPeer(discover.NewNode(entry.id, addr.ip, addr.port, addr.port))
|
pool.server.AddPeer(discover.NewNode(entry.id, addr.ip, addr.port, addr.port))
|
||||||
@ -589,7 +589,7 @@ func (pool *serverPool) checkDialTimeout(entry *poolEntry) {
|
|||||||
if entry.state != psDialed {
|
if entry.state != psDialed {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
glog.V(logger.Debug).Infof("timeout %v", entry.id.String()+"@"+entry.dialed.strKey())
|
log.Debug(fmt.Sprintf("timeout %v", entry.id.String()+"@"+entry.dialed.strKey()))
|
||||||
entry.state = psNotConnected
|
entry.state = psNotConnected
|
||||||
if entry.knownSelected {
|
if entry.knownSelected {
|
||||||
pool.knownSelected--
|
pool.knownSelected--
|
||||||
|
@ -17,6 +17,7 @@
|
|||||||
package light
|
package light
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
"math/big"
|
"math/big"
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
@ -27,8 +28,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/event"
|
"github.com/ethereum/go-ethereum/event"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/pow"
|
"github.com/ethereum/go-ethereum/pow"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
@ -101,7 +101,7 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, pow pow.PoW, mux
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
glog.V(logger.Info).Infoln("WARNING: Wrote default ethereum genesis block")
|
log.Info(fmt.Sprint("WARNING: Wrote default ethereum genesis block"))
|
||||||
}
|
}
|
||||||
|
|
||||||
if bc.genesisBlock.Hash() == (common.Hash{212, 229, 103, 64, 248, 118, 174, 248, 192, 16, 184, 106, 64, 213, 245, 103, 69, 161, 24, 208, 144, 106, 52, 230, 154, 236, 140, 13, 177, 203, 143, 163}) {
|
if bc.genesisBlock.Hash() == (common.Hash{212, 229, 103, 64, 248, 118, 174, 248, 192, 16, 184, 106, 64, 213, 245, 103, 69, 161, 24, 208, 144, 106, 52, 230, 154, 236, 140, 13, 177, 203, 143, 163}) {
|
||||||
@ -117,7 +117,7 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, pow pow.PoW, mux
|
|||||||
Root: common.HexToHash("c035076523faf514038f619715de404a65398c51899b5dccca9c05b00bc79315"),
|
Root: common.HexToHash("c035076523faf514038f619715de404a65398c51899b5dccca9c05b00bc79315"),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
glog.V(logger.Info).Infoln("Added trusted CHT for mainnet")
|
log.Info(fmt.Sprint("Added trusted CHT for mainnet"))
|
||||||
} else {
|
} else {
|
||||||
if bc.genesisBlock.Hash() == (common.Hash{12, 215, 134, 162, 66, 93, 22, 241, 82, 198, 88, 49, 108, 66, 62, 108, 225, 24, 30, 21, 195, 41, 88, 38, 215, 201, 144, 76, 186, 156, 227, 3}) {
|
if bc.genesisBlock.Hash() == (common.Hash{12, 215, 134, 162, 66, 93, 22, 241, 82, 198, 88, 49, 108, 66, 62, 108, 225, 24, 30, 21, 195, 41, 88, 38, 215, 201, 144, 76, 186, 156, 227, 3}) {
|
||||||
// add trusted CHT for testnet
|
// add trusted CHT for testnet
|
||||||
@ -125,7 +125,7 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, pow pow.PoW, mux
|
|||||||
Number: 452,
|
Number: 452,
|
||||||
Root: common.HexToHash("511da2c88e32b14cf4a4e62f7fcbb297139faebc260a4ab5eb43cce6edcba324"),
|
Root: common.HexToHash("511da2c88e32b14cf4a4e62f7fcbb297139faebc260a4ab5eb43cce6edcba324"),
|
||||||
})
|
})
|
||||||
glog.V(logger.Info).Infoln("Added trusted CHT for testnet")
|
log.Info(fmt.Sprint("Added trusted CHT for testnet"))
|
||||||
} else {
|
} else {
|
||||||
DeleteTrustedCht(bc.chainDb)
|
DeleteTrustedCht(bc.chainDb)
|
||||||
}
|
}
|
||||||
@ -137,9 +137,9 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, pow pow.PoW, mux
|
|||||||
// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
|
// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
|
||||||
for hash := range core.BadHashes {
|
for hash := range core.BadHashes {
|
||||||
if header := bc.GetHeaderByHash(hash); header != nil {
|
if header := bc.GetHeaderByHash(hash); header != nil {
|
||||||
glog.V(logger.Error).Infof("Found bad hash, rewinding chain to block #%d [%x…]", header.Number, header.ParentHash[:4])
|
log.Error(fmt.Sprintf("Found bad hash, rewinding chain to block #%d [%x…]", header.Number, header.ParentHash[:4]))
|
||||||
bc.SetHead(header.Number.Uint64() - 1)
|
bc.SetHead(header.Number.Uint64() - 1)
|
||||||
glog.V(logger.Error).Infoln("Chain rewind was successful, resuming normal operation")
|
log.Error(fmt.Sprint("Chain rewind was successful, resuming normal operation"))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return bc, nil
|
return bc, nil
|
||||||
@ -169,7 +169,7 @@ func (self *LightChain) loadLastState() error {
|
|||||||
// Issue a status log and return
|
// Issue a status log and return
|
||||||
header := self.hc.CurrentHeader()
|
header := self.hc.CurrentHeader()
|
||||||
headerTd := self.GetTd(header.Hash(), header.Number.Uint64())
|
headerTd := self.GetTd(header.Hash(), header.Number.Uint64())
|
||||||
glog.V(logger.Info).Infof("Last header: #%d [%x…] TD=%v", self.hc.CurrentHeader().Number, self.hc.CurrentHeader().Hash().Bytes()[:4], headerTd)
|
log.Info(fmt.Sprintf("Last header: #%d [%x…] TD=%v", self.hc.CurrentHeader().Number, self.hc.CurrentHeader().Hash().Bytes()[:4], headerTd))
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -246,10 +246,10 @@ func (bc *LightChain) ResetWithGenesisBlock(genesis *types.Block) {
|
|||||||
|
|
||||||
// Prepare the genesis block and reinitialise the chain
|
// Prepare the genesis block and reinitialise the chain
|
||||||
if err := core.WriteTd(bc.chainDb, genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
|
if err := core.WriteTd(bc.chainDb, genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
|
||||||
glog.Fatalf("failed to write genesis block TD: %v", err)
|
log.Crit(fmt.Sprintf("failed to write genesis block TD: %v", err))
|
||||||
}
|
}
|
||||||
if err := core.WriteBlock(bc.chainDb, genesis); err != nil {
|
if err := core.WriteBlock(bc.chainDb, genesis); err != nil {
|
||||||
glog.Fatalf("failed to write genesis block: %v", err)
|
log.Crit(fmt.Sprintf("failed to write genesis block: %v", err))
|
||||||
}
|
}
|
||||||
bc.genesisBlock = genesis
|
bc.genesisBlock = genesis
|
||||||
bc.hc.SetGenesis(bc.genesisBlock.Header())
|
bc.hc.SetGenesis(bc.genesisBlock.Header())
|
||||||
@ -346,7 +346,7 @@ func (bc *LightChain) Stop() {
|
|||||||
|
|
||||||
bc.wg.Wait()
|
bc.wg.Wait()
|
||||||
|
|
||||||
glog.V(logger.Info).Infoln("Chain manager stopped")
|
log.Info(fmt.Sprint("Chain manager stopped"))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Rollback is designed to remove a chain of links from the database that aren't
|
// Rollback is designed to remove a chain of links from the database that aren't
|
||||||
@ -406,15 +406,15 @@ func (self *LightChain) InsertHeaderChain(chain []*types.Header, checkFreq int)
|
|||||||
|
|
||||||
switch status {
|
switch status {
|
||||||
case core.CanonStatTy:
|
case core.CanonStatTy:
|
||||||
if glog.V(logger.Debug) {
|
log.Debug("", "msg", log.Lazy{Fn: func() string {
|
||||||
glog.Infof("[%v] inserted header #%d (%x...).\n", time.Now().UnixNano(), header.Number, header.Hash().Bytes()[0:4])
|
return fmt.Sprintf("[%v] inserted header #%d (%x...).\n", time.Now().UnixNano(), header.Number, header.Hash().Bytes()[0:4])
|
||||||
}
|
}})
|
||||||
events = append(events, core.ChainEvent{Block: types.NewBlockWithHeader(header), Hash: header.Hash()})
|
events = append(events, core.ChainEvent{Block: types.NewBlockWithHeader(header), Hash: header.Hash()})
|
||||||
|
|
||||||
case core.SideStatTy:
|
case core.SideStatTy:
|
||||||
if glog.V(logger.Detail) {
|
log.Trace("", "msg", log.Lazy{Fn: func() string {
|
||||||
glog.Infof("inserted forked header #%d (TD=%v) (%x...).\n", header.Number, header.Difficulty, header.Hash().Bytes()[0:4])
|
return fmt.Sprintf("inserted forked header #%d (TD=%v) (%x...).\n", header.Number, header.Difficulty, header.Hash().Bytes()[0:4])
|
||||||
}
|
}})
|
||||||
events = append(events, core.ChainSideEvent{Block: types.NewBlockWithHeader(header)})
|
events = append(events, core.ChainSideEvent{Block: types.NewBlockWithHeader(header)})
|
||||||
|
|
||||||
case core.SplitStatTy:
|
case core.SplitStatTy:
|
||||||
|
@ -19,6 +19,7 @@ package light
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"errors"
|
"errors"
|
||||||
|
"fmt"
|
||||||
"math/big"
|
"math/big"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
@ -26,8 +27,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
)
|
)
|
||||||
@ -149,7 +149,7 @@ func GetBody(ctx context.Context, odr OdrBackend, hash common.Hash, number uint6
|
|||||||
}
|
}
|
||||||
body := new(types.Body)
|
body := new(types.Body)
|
||||||
if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
|
if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
|
||||||
glog.V(logger.Error).Infof("invalid block body RLP for hash %x: %v", hash, err)
|
log.Error(fmt.Sprintf("invalid block body RLP for hash %x: %v", hash, err))
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return body, nil
|
return body, nil
|
||||||
|
@ -17,12 +17,12 @@
|
|||||||
package light
|
package light
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
"math/big"
|
"math/big"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -239,9 +239,9 @@ func (self *LightState) GetOrNewStateObject(ctx context.Context, addr common.Add
|
|||||||
|
|
||||||
// newStateObject creates a state object whether it exists in the state or not
|
// newStateObject creates a state object whether it exists in the state or not
|
||||||
func (self *LightState) newStateObject(addr common.Address) *StateObject {
|
func (self *LightState) newStateObject(addr common.Address) *StateObject {
|
||||||
if glog.V(logger.Debug) {
|
log.Debug("", "msg", log.Lazy{Fn: func() string {
|
||||||
glog.Infof("(+) %x\n", addr)
|
return fmt.Sprintf("(+) %x\n", addr)
|
||||||
}
|
}})
|
||||||
|
|
||||||
stateObject := NewStateObject(addr, self.odr)
|
stateObject := NewStateObject(addr, self.odr)
|
||||||
self.stateObjects[addr.Str()] = stateObject
|
self.stateObjects[addr.Str()] = stateObject
|
||||||
|
@ -23,8 +23,7 @@ import (
|
|||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
)
|
)
|
||||||
@ -109,9 +108,9 @@ func (self *StateObject) MarkForDeletion() {
|
|||||||
self.remove = true
|
self.remove = true
|
||||||
self.dirty = true
|
self.dirty = true
|
||||||
|
|
||||||
if glog.V(logger.Debug) {
|
log.Debug("", "msg", log.Lazy{Fn: func() string {
|
||||||
glog.Infof("%x: #%d %v X\n", self.Address(), self.nonce, self.balance)
|
return fmt.Sprintf("%x: #%d %v X\n", self.Address(), self.nonce, self.balance)
|
||||||
}
|
}})
|
||||||
}
|
}
|
||||||
|
|
||||||
// getAddr gets the storage value at the given address from the trie
|
// getAddr gets the storage value at the given address from the trie
|
||||||
@ -158,18 +157,18 @@ func (self *StateObject) SetState(k, value common.Hash) {
|
|||||||
func (c *StateObject) AddBalance(amount *big.Int) {
|
func (c *StateObject) AddBalance(amount *big.Int) {
|
||||||
c.SetBalance(new(big.Int).Add(c.balance, amount))
|
c.SetBalance(new(big.Int).Add(c.balance, amount))
|
||||||
|
|
||||||
if glog.V(logger.Debug) {
|
log.Debug("", "msg", log.Lazy{Fn: func() string {
|
||||||
glog.Infof("%x: #%d %v (+ %v)\n", c.Address(), c.nonce, c.balance, amount)
|
return fmt.Sprintf("%x: #%d %v (+ %v)\n", c.Address(), c.nonce, c.balance, amount)
|
||||||
}
|
}})
|
||||||
}
|
}
|
||||||
|
|
||||||
// SubBalance subtracts the given amount from the account balance
|
// SubBalance subtracts the given amount from the account balance
|
||||||
func (c *StateObject) SubBalance(amount *big.Int) {
|
func (c *StateObject) SubBalance(amount *big.Int) {
|
||||||
c.SetBalance(new(big.Int).Sub(c.balance, amount))
|
c.SetBalance(new(big.Int).Sub(c.balance, amount))
|
||||||
|
|
||||||
if glog.V(logger.Debug) {
|
log.Debug("", "msg", log.Lazy{Fn: func() string {
|
||||||
glog.Infof("%x: #%d %v (- %v)\n", c.Address(), c.nonce, c.balance, amount)
|
return fmt.Sprintf("%x: #%d %v (- %v)\n", c.Address(), c.nonce, c.balance, amount)
|
||||||
}
|
}})
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetBalance sets the account balance to the given amount
|
// SetBalance sets the account balance to the given amount
|
||||||
|
@ -26,8 +26,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/event"
|
"github.com/ethereum/go-ethereum/event"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
@ -321,7 +320,7 @@ func (pool *TxPool) eventLoop() {
|
|||||||
func (pool *TxPool) Stop() {
|
func (pool *TxPool) Stop() {
|
||||||
close(pool.quit)
|
close(pool.quit)
|
||||||
pool.events.Unsubscribe()
|
pool.events.Unsubscribe()
|
||||||
glog.V(logger.Info).Infoln("Transaction pool stopped")
|
log.Info(fmt.Sprint("Transaction pool stopped"))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Stats returns the number of currently pending (locally created) transactions
|
// Stats returns the number of currently pending (locally created) transactions
|
||||||
@ -417,7 +416,7 @@ func (self *TxPool) add(ctx context.Context, tx *types.Transaction) error {
|
|||||||
go self.eventMux.Post(core.TxPreEvent{Tx: tx})
|
go self.eventMux.Post(core.TxPreEvent{Tx: tx})
|
||||||
}
|
}
|
||||||
|
|
||||||
if glog.V(logger.Debug) {
|
log.Debug("", "msg", log.Lazy{Fn: func() string {
|
||||||
var toname string
|
var toname string
|
||||||
if to := tx.To(); to != nil {
|
if to := tx.To(); to != nil {
|
||||||
toname = common.Bytes2Hex(to[:4])
|
toname = common.Bytes2Hex(to[:4])
|
||||||
@ -428,8 +427,8 @@ func (self *TxPool) add(ctx context.Context, tx *types.Transaction) error {
|
|||||||
// verified in ValidateTransaction.
|
// verified in ValidateTransaction.
|
||||||
f, _ := types.Sender(self.signer, tx)
|
f, _ := types.Sender(self.signer, tx)
|
||||||
from := common.Bytes2Hex(f[:4])
|
from := common.Bytes2Hex(f[:4])
|
||||||
glog.Infof("(t) %x => %s (%v) %x\n", from, toname, tx.Value, hash)
|
return fmt.Sprintf("(t) %x => %s (%v) %x\n", from, toname, tx.Value(), hash)
|
||||||
}
|
}})
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -464,11 +463,11 @@ func (self *TxPool) AddBatch(ctx context.Context, txs []*types.Transaction) {
|
|||||||
|
|
||||||
for _, tx := range txs {
|
for _, tx := range txs {
|
||||||
if err := self.add(ctx, tx); err != nil {
|
if err := self.add(ctx, tx); err != nil {
|
||||||
glog.V(logger.Debug).Infoln("tx error:", err)
|
log.Debug(fmt.Sprint("tx error:", err))
|
||||||
} else {
|
} else {
|
||||||
sendTx = append(sendTx, tx)
|
sendTx = append(sendTx, tx)
|
||||||
h := tx.Hash()
|
h := tx.Hash()
|
||||||
glog.V(logger.Debug).Infof("tx %x\n", h[:4])
|
log.Debug(fmt.Sprintf("tx %x\n", h[:4]))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
11
log/CONTRIBUTORS
Normal file
11
log/CONTRIBUTORS
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
Contributors to log15:
|
||||||
|
|
||||||
|
- Aaron L
|
||||||
|
- Alan Shreve
|
||||||
|
- Chris Hines
|
||||||
|
- Ciaran Downey
|
||||||
|
- Dmitry Chestnykh
|
||||||
|
- Evan Shaw
|
||||||
|
- Péter Szilágyi
|
||||||
|
- Trevor Gattis
|
||||||
|
- Vincent Vanackere
|
13
log/LICENSE
Normal file
13
log/LICENSE
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
Copyright 2014 Alan Shreve
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
77
log/README.md
Normal file
77
log/README.md
Normal file
@ -0,0 +1,77 @@
|
|||||||
|

|
||||||
|
|
||||||
|
# log15 [](https://godoc.org/github.com/inconshreveable/log15) [](https://travis-ci.org/inconshreveable/log15)
|
||||||
|
|
||||||
|
Package log15 provides an opinionated, simple toolkit for best-practice logging in Go (golang) that is both human and machine readable. It is modeled after the Go standard library's [`io`](http://golang.org/pkg/io/) and [`net/http`](http://golang.org/pkg/net/http/) packages and is an alternative to the standard library's [`log`](http://golang.org/pkg/log/) package.
|
||||||
|
|
||||||
|
## Features
|
||||||
|
- A simple, easy-to-understand API
|
||||||
|
- Promotes structured logging by encouraging use of key/value pairs
|
||||||
|
- Child loggers which inherit and add their own private context
|
||||||
|
- Lazy evaluation of expensive operations
|
||||||
|
- Simple Handler interface allowing for construction of flexible, custom logging configurations with a tiny API.
|
||||||
|
- Color terminal support
|
||||||
|
- Built-in support for logging to files, streams, syslog, and the network
|
||||||
|
- Support for forking records to multiple handlers, buffering records for output, failing over from failed handler writes, + more
|
||||||
|
|
||||||
|
## Versioning
|
||||||
|
The API of the master branch of log15 should always be considered unstable. If you want to rely on a stable API,
|
||||||
|
you must vendor the library.
|
||||||
|
|
||||||
|
## Importing
|
||||||
|
|
||||||
|
```go
|
||||||
|
import log "github.com/inconshreveable/log15"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
```go
|
||||||
|
// all loggers can have key/value context
|
||||||
|
srvlog := log.New("module", "app/server")
|
||||||
|
|
||||||
|
// all log messages can have key/value context
|
||||||
|
srvlog.Warn("abnormal conn rate", "rate", curRate, "low", lowRate, "high", highRate)
|
||||||
|
|
||||||
|
// child loggers with inherited context
|
||||||
|
connlog := srvlog.New("raddr", c.RemoteAddr())
|
||||||
|
connlog.Info("connection open")
|
||||||
|
|
||||||
|
// lazy evaluation
|
||||||
|
connlog.Debug("ping remote", "latency", log.Lazy{pingRemote})
|
||||||
|
|
||||||
|
// flexible configuration
|
||||||
|
srvlog.SetHandler(log.MultiHandler(
|
||||||
|
log.StreamHandler(os.Stderr, log.LogfmtFormat()),
|
||||||
|
log.LvlFilterHandler(
|
||||||
|
log.LvlError,
|
||||||
|
log.Must.FileHandler("errors.json", log.JsonFormat()))))
|
||||||
|
```
|
||||||
|
|
||||||
|
Will result in output that looks like this:
|
||||||
|
|
||||||
|
```
|
||||||
|
WARN[06-17|21:58:10] abnormal conn rate module=app/server rate=0.500 low=0.100 high=0.800
|
||||||
|
INFO[06-17|21:58:10] connection open module=app/server raddr=10.0.0.1
|
||||||
|
```
|
||||||
|
|
||||||
|
## Breaking API Changes
|
||||||
|
The following commits broke API stability. This reference is intended to help you understand the consequences of updating to a newer version
|
||||||
|
of log15.
|
||||||
|
|
||||||
|
- 57a084d014d4150152b19e4e531399a7145d1540 - Added a `Get()` method to the `Logger` interface to retrieve the current handler
|
||||||
|
- 93404652ee366648fa622b64d1e2b67d75a3094a - `Record` field `Call` changed to `stack.Call` with switch to `github.com/go-stack/stack`
|
||||||
|
- a5e7613673c73281f58e15a87d2cf0cf111e8152 - Restored `syslog.Priority` argument to the `SyslogXxx` handler constructors
|
||||||
|
|
||||||
|
## FAQ
|
||||||
|
|
||||||
|
### The varargs style is brittle and error prone! Can I have type safety please?
|
||||||
|
Yes. Use `log.Ctx`:
|
||||||
|
|
||||||
|
```go
|
||||||
|
srvlog := log.New(log.Ctx{"module": "app/server"})
|
||||||
|
srvlog.Warn("abnormal conn rate", log.Ctx{"rate": curRate, "low": lowRate, "high": highRate})
|
||||||
|
```
|
||||||
|
|
||||||
|
## License
|
||||||
|
Apache
|
5
log/README_ETHEREUM.md
Normal file
5
log/README_ETHEREUM.md
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
This package is a fork of https://github.com/inconshreveable/log15, with some
|
||||||
|
minor modifications required by the go-ethereum codebase:
|
||||||
|
|
||||||
|
* Support for log level `trace`
|
||||||
|
* Modified behavior to exit on `critical` failure
|
333
log/doc.go
Normal file
333
log/doc.go
Normal file
@ -0,0 +1,333 @@
|
|||||||
|
/*
|
||||||
|
Package log15 provides an opinionated, simple toolkit for best-practice logging that is
|
||||||
|
both human and machine readable. It is modeled after the standard library's io and net/http
|
||||||
|
packages.
|
||||||
|
|
||||||
|
This package enforces you to only log key/value pairs. Keys must be strings. Values may be
|
||||||
|
any type that you like. The default output format is logfmt, but you may also choose to use
|
||||||
|
JSON instead if that suits you. Here's how you log:
|
||||||
|
|
||||||
|
log.Info("page accessed", "path", r.URL.Path, "user_id", user.id)
|
||||||
|
|
||||||
|
This will output a line that looks like:
|
||||||
|
|
||||||
|
lvl=info t=2014-05-02T16:07:23-0700 msg="page accessed" path=/org/71/profile user_id=9
|
||||||
|
|
||||||
|
Getting Started
|
||||||
|
|
||||||
|
To get started, you'll want to import the library:
|
||||||
|
|
||||||
|
import log "github.com/inconshreveable/log15"
|
||||||
|
|
||||||
|
|
||||||
|
Now you're ready to start logging:
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
log.Info("Program starting", "args", os.Args())
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Convention
|
||||||
|
|
||||||
|
Because recording a human-meaningful message is common and good practice, the first argument to every
|
||||||
|
logging method is the value to the *implicit* key 'msg'.
|
||||||
|
|
||||||
|
Additionally, the level you choose for a message will be automatically added with the key 'lvl', and so
|
||||||
|
will the current timestamp with key 't'.
|
||||||
|
|
||||||
|
You may supply any additional context as a set of key/value pairs to the logging function. log15 allows
|
||||||
|
you to favor terseness, ordering, and speed over safety. This is a reasonable tradeoff for
|
||||||
|
logging functions. You don't need to explicitly state keys/values, log15 understands that they alternate
|
||||||
|
in the variadic argument list:
|
||||||
|
|
||||||
|
log.Warn("size out of bounds", "low", lowBound, "high", highBound, "val", val)
|
||||||
|
|
||||||
|
If you really do favor your type-safety, you may choose to pass a log.Ctx instead:
|
||||||
|
|
||||||
|
log.Warn("size out of bounds", log.Ctx{"low": lowBound, "high": highBound, "val": val})
|
||||||
|
|
||||||
|
|
||||||
|
Context loggers
|
||||||
|
|
||||||
|
Frequently, you want to add context to a logger so that you can track actions associated with it. An http
|
||||||
|
request is a good example. You can easily create new loggers that have context that is automatically included
|
||||||
|
with each log line:
|
||||||
|
|
||||||
|
requestlogger := log.New("path", r.URL.Path)
|
||||||
|
|
||||||
|
// later
|
||||||
|
requestlogger.Debug("db txn commit", "duration", txnTimer.Finish())
|
||||||
|
|
||||||
|
This will output a log line that includes the path context that is attached to the logger:
|
||||||
|
|
||||||
|
lvl=dbug t=2014-05-02T16:07:23-0700 path=/repo/12/add_hook msg="db txn commit" duration=0.12
|
||||||
|
|
||||||
|
|
||||||
|
Handlers
|
||||||
|
|
||||||
|
The Handler interface defines where log lines are printed to and how they are formated. Handler is a
|
||||||
|
single interface that is inspired by net/http's handler interface:
|
||||||
|
|
||||||
|
type Handler interface {
|
||||||
|
Log(r *Record) error
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Handlers can filter records, format them, or dispatch to multiple other Handlers.
|
||||||
|
This package implements a number of Handlers for common logging patterns that are
|
||||||
|
easily composed to create flexible, custom logging structures.
|
||||||
|
|
||||||
|
Here's an example handler that prints logfmt output to Stdout:
|
||||||
|
|
||||||
|
handler := log.StreamHandler(os.Stdout, log.LogfmtFormat())
|
||||||
|
|
||||||
|
Here's an example handler that defers to two other handlers. One handler only prints records
|
||||||
|
from the rpc package in logfmt to standard out. The other prints records at Error level
|
||||||
|
or above in JSON formatted output to the file /var/log/service.json
|
||||||
|
|
||||||
|
handler := log.MultiHandler(
|
||||||
|
log.LvlFilterHandler(log.LvlError, log.Must.FileHandler("/var/log/service.json", log.JsonFormat())),
|
||||||
|
log.MatchFilterHandler("pkg", "app/rpc" log.StdoutHandler())
|
||||||
|
)
|
||||||
|
|
||||||
|
Logging File Names and Line Numbers
|
||||||
|
|
||||||
|
This package implements three Handlers that add debugging information to the
|
||||||
|
context, CallerFileHandler, CallerFuncHandler and CallerStackHandler. Here's
|
||||||
|
an example that adds the source file and line number of each logging call to
|
||||||
|
the context.
|
||||||
|
|
||||||
|
h := log.CallerFileHandler(log.StdoutHandler)
|
||||||
|
log.Root().SetHandler(h)
|
||||||
|
...
|
||||||
|
log.Error("open file", "err", err)
|
||||||
|
|
||||||
|
This will output a line that looks like:
|
||||||
|
|
||||||
|
lvl=eror t=2014-05-02T16:07:23-0700 msg="open file" err="file not found" caller=data.go:42
|
||||||
|
|
||||||
|
Here's an example that logs the call stack rather than just the call site.
|
||||||
|
|
||||||
|
h := log.CallerStackHandler("%+v", log.StdoutHandler)
|
||||||
|
log.Root().SetHandler(h)
|
||||||
|
...
|
||||||
|
log.Error("open file", "err", err)
|
||||||
|
|
||||||
|
This will output a line that looks like:
|
||||||
|
|
||||||
|
lvl=eror t=2014-05-02T16:07:23-0700 msg="open file" err="file not found" stack="[pkg/data.go:42 pkg/cmd/main.go]"
|
||||||
|
|
||||||
|
The "%+v" format instructs the handler to include the path of the source file
|
||||||
|
relative to the compile time GOPATH. The github.com/go-stack/stack package
|
||||||
|
documents the full list of formatting verbs and modifiers available.
|
||||||
|
|
||||||
|
Custom Handlers
|
||||||
|
|
||||||
|
The Handler interface is so simple that it's also trivial to write your own. Let's create an
|
||||||
|
example handler which tries to write to one handler, but if that fails it falls back to
|
||||||
|
writing to another handler and includes the error that it encountered when trying to write
|
||||||
|
to the primary. This might be useful when trying to log over a network socket, but if that
|
||||||
|
fails you want to log those records to a file on disk.
|
||||||
|
|
||||||
|
type BackupHandler struct {
|
||||||
|
Primary Handler
|
||||||
|
Secondary Handler
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *BackupHandler) Log (r *Record) error {
|
||||||
|
err := h.Primary.Log(r)
|
||||||
|
if err != nil {
|
||||||
|
r.Ctx = append(ctx, "primary_err", err)
|
||||||
|
return h.Secondary.Log(r)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
This pattern is so useful that a generic version that handles an arbitrary number of Handlers
|
||||||
|
is included as part of this library called FailoverHandler.
|
||||||
|
|
||||||
|
Logging Expensive Operations
|
||||||
|
|
||||||
|
Sometimes, you want to log values that are extremely expensive to compute, but you don't want to pay
|
||||||
|
the price of computing them if you haven't turned up your logging level to a high level of detail.
|
||||||
|
|
||||||
|
This package provides a simple type to annotate a logging operation that you want to be evaluated
|
||||||
|
lazily, just when it is about to be logged, so that it would not be evaluated if an upstream Handler
|
||||||
|
filters it out. Just wrap any function which takes no arguments with the log.Lazy type. For example:
|
||||||
|
|
||||||
|
func factorRSAKey() (factors []int) {
|
||||||
|
// return the factors of a very large number
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debug("factors", log.Lazy{factorRSAKey})
|
||||||
|
|
||||||
|
If this message is not logged for any reason (like logging at the Error level), then
|
||||||
|
factorRSAKey is never evaluated.
|
||||||
|
|
||||||
|
Dynamic context values
|
||||||
|
|
||||||
|
The same log.Lazy mechanism can be used to attach context to a logger which you want to be
|
||||||
|
evaluated when the message is logged, but not when the logger is created. For example, let's imagine
|
||||||
|
a game where you have Player objects:
|
||||||
|
|
||||||
|
type Player struct {
|
||||||
|
name string
|
||||||
|
alive bool
|
||||||
|
log.Logger
|
||||||
|
}
|
||||||
|
|
||||||
|
You always want to log a player's name and whether they're alive or dead, so when you create the player
|
||||||
|
object, you might do:
|
||||||
|
|
||||||
|
p := &Player{name: name, alive: true}
|
||||||
|
p.Logger = log.New("name", p.name, "alive", p.alive)
|
||||||
|
|
||||||
|
Only now, even after a player has died, the logger will still report they are alive because the logging
|
||||||
|
context is evaluated when the logger was created. By using the Lazy wrapper, we can defer the evaluation
|
||||||
|
of whether the player is alive or not to each log message, so that the log records will reflect the player's
|
||||||
|
current state no matter when the log message is written:
|
||||||
|
|
||||||
|
p := &Player{name: name, alive: true}
|
||||||
|
isAlive := func() bool { return p.alive }
|
||||||
|
player.Logger = log.New("name", p.name, "alive", log.Lazy{isAlive})
|
||||||
|
|
||||||
|
Terminal Format
|
||||||
|
|
||||||
|
If log15 detects that stdout is a terminal, it will configure the default
|
||||||
|
handler for it (which is log.StdoutHandler) to use TerminalFormat. This format
|
||||||
|
logs records nicely for your terminal, including color-coded output based
|
||||||
|
on log level.
|
||||||
|
|
||||||
|
Error Handling
|
||||||
|
|
||||||
|
Becasuse log15 allows you to step around the type system, there are a few ways you can specify
|
||||||
|
invalid arguments to the logging functions. You could, for example, wrap something that is not
|
||||||
|
a zero-argument function with log.Lazy or pass a context key that is not a string. Since logging libraries
|
||||||
|
are typically the mechanism by which errors are reported, it would be onerous for the logging functions
|
||||||
|
to return errors. Instead, log15 handles errors by making these guarantees to you:
|
||||||
|
|
||||||
|
- Any log record containing an error will still be printed with the error explained to you as part of the log record.
|
||||||
|
|
||||||
|
- Any log record containing an error will include the context key LOG15_ERROR, enabling you to easily
|
||||||
|
(and if you like, automatically) detect if any of your logging calls are passing bad values.
|
||||||
|
|
||||||
|
Understanding this, you might wonder why the Handler interface can return an error value in its Log method. Handlers
|
||||||
|
are encouraged to return errors only if they fail to write their log records out to an external source like if the
|
||||||
|
syslog daemon is not responding. This allows the construction of useful handlers which cope with those failures
|
||||||
|
like the FailoverHandler.
|
||||||
|
|
||||||
|
Library Use
|
||||||
|
|
||||||
|
log15 is intended to be useful for library authors as a way to provide configurable logging to
|
||||||
|
users of their library. Best practice for use in a library is to always disable all output for your logger
|
||||||
|
by default and to provide a public Logger instance that consumers of your library can configure. Like so:
|
||||||
|
|
||||||
|
package yourlib
|
||||||
|
|
||||||
|
import "github.com/inconshreveable/log15"
|
||||||
|
|
||||||
|
var Log = log.New()
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
Log.SetHandler(log.DiscardHandler())
|
||||||
|
}
|
||||||
|
|
||||||
|
Users of your library may then enable it if they like:
|
||||||
|
|
||||||
|
import "github.com/inconshreveable/log15"
|
||||||
|
import "example.com/yourlib"
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
handler := // custom handler setup
|
||||||
|
yourlib.Log.SetHandler(handler)
|
||||||
|
}
|
||||||
|
|
||||||
|
Best practices attaching logger context
|
||||||
|
|
||||||
|
The ability to attach context to a logger is a powerful one. Where should you do it and why?
|
||||||
|
I favor embedding a Logger directly into any persistent object in my application and adding
|
||||||
|
unique, tracing context keys to it. For instance, imagine I am writing a web browser:
|
||||||
|
|
||||||
|
type Tab struct {
|
||||||
|
url string
|
||||||
|
render *RenderingContext
|
||||||
|
// ...
|
||||||
|
|
||||||
|
Logger
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewTab(url string) *Tab {
|
||||||
|
return &Tab {
|
||||||
|
// ...
|
||||||
|
url: url,
|
||||||
|
|
||||||
|
Logger: log.New("url", url),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
When a new tab is created, I assign a logger to it with the url of
|
||||||
|
the tab as context so it can easily be traced through the logs.
|
||||||
|
Now, whenever we perform any operation with the tab, we'll log with its
|
||||||
|
embedded logger and it will include the tab title automatically:
|
||||||
|
|
||||||
|
tab.Debug("moved position", "idx", tab.idx)
|
||||||
|
|
||||||
|
There's only one problem. What if the tab url changes? We could
|
||||||
|
use log.Lazy to make sure the current url is always written, but that
|
||||||
|
would mean that we couldn't trace a tab's full lifetime through our
|
||||||
|
logs after the user navigate to a new URL.
|
||||||
|
|
||||||
|
Instead, think about what values to attach to your loggers the
|
||||||
|
same way you think about what to use as a key in a SQL database schema.
|
||||||
|
If it's possible to use a natural key that is unique for the lifetime of the
|
||||||
|
object, do so. But otherwise, log15's ext package has a handy RandId
|
||||||
|
function to let you generate what you might call "surrogate keys"
|
||||||
|
They're just random hex identifiers to use for tracing. Back to our
|
||||||
|
Tab example, we would prefer to set up our Logger like so:
|
||||||
|
|
||||||
|
import logext "github.com/inconshreveable/log15/ext"
|
||||||
|
|
||||||
|
t := &Tab {
|
||||||
|
// ...
|
||||||
|
url: url,
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Logger = log.New("id", logext.RandId(8), "url", log.Lazy{t.getUrl})
|
||||||
|
return t
|
||||||
|
|
||||||
|
Now we'll have a unique traceable identifier even across loading new urls, but
|
||||||
|
we'll still be able to see the tab's current url in the log messages.
|
||||||
|
|
||||||
|
Must
|
||||||
|
|
||||||
|
For all Handler functions which can return an error, there is a version of that
|
||||||
|
function which will return no error but panics on failure. They are all available
|
||||||
|
on the Must object. For example:
|
||||||
|
|
||||||
|
log.Must.FileHandler("/path", log.JsonFormat)
|
||||||
|
log.Must.NetHandler("tcp", ":1234", log.JsonFormat)
|
||||||
|
|
||||||
|
Inspiration and Credit
|
||||||
|
|
||||||
|
All of the following excellent projects inspired the design of this library:
|
||||||
|
|
||||||
|
code.google.com/p/log4go
|
||||||
|
|
||||||
|
github.com/op/go-logging
|
||||||
|
|
||||||
|
github.com/technoweenie/grohl
|
||||||
|
|
||||||
|
github.com/Sirupsen/logrus
|
||||||
|
|
||||||
|
github.com/kr/logfmt
|
||||||
|
|
||||||
|
github.com/spacemonkeygo/spacelog
|
||||||
|
|
||||||
|
golang's stdlib, notably io and net/http
|
||||||
|
|
||||||
|
The Name
|
||||||
|
|
||||||
|
https://xkcd.com/927/
|
||||||
|
|
||||||
|
*/
|
||||||
|
package log
|
327
log/format.go
Normal file
327
log/format.go
Normal file
@ -0,0 +1,327 @@
|
|||||||
|
package log
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
timeFormat = "2006-01-02T15:04:05-0700"
|
||||||
|
termTimeFormat = "01-02|15:04:05"
|
||||||
|
floatFormat = 'f'
|
||||||
|
termMsgJust = 40
|
||||||
|
)
|
||||||
|
|
||||||
|
// locationTrims are trimmed for display to avoid unwieldy log lines.
|
||||||
|
var locationTrims = []string{
|
||||||
|
"github.com/ethereum/go-ethereum/",
|
||||||
|
"github.com/ethereum/ethash/",
|
||||||
|
}
|
||||||
|
|
||||||
|
// PrintOrigins sets or unsets log location (file:line) printing for terminal
|
||||||
|
// format output.
|
||||||
|
func PrintOrigins(print bool) {
|
||||||
|
if print {
|
||||||
|
atomic.StoreUint32(&locationEnabled, 1)
|
||||||
|
} else {
|
||||||
|
atomic.StoreUint32(&locationEnabled, 0)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// locationEnabled is an atomic flag controlling whether the terminal formatter
// should append the log locations too when printing entries.
var locationEnabled uint32 // accessed only via sync/atomic; 0 = off, non-zero = on

// locationLength is the maximum path length encountered, which all logs are
// padded to to aid in alignment.
var locationLength uint32 // accessed only via sync/atomic; grows monotonically
|
||||||
|
|
||||||
|
// Format serializes a log record into its final byte representation.
type Format interface {
	// Format renders the given record and returns the formatted bytes.
	Format(r *Record) []byte
}

// FormatFunc returns a new Format object which uses
// the given function to perform record formatting.
func FormatFunc(f func(*Record) []byte) Format {
	return formatFunc(f)
}

// formatFunc adapts a plain formatting function to the Format interface.
type formatFunc func(*Record) []byte

func (f formatFunc) Format(r *Record) []byte {
	return f(r)
}
|
||||||
|
|
||||||
|
// TerminalFormat formats log records optimized for human readability on
// a terminal with color-coded level output and terser human friendly timestamp.
// This format should only be used for interactive programs or while developing.
//
//     [TIME] [LEVEL] MESSAGE key=value key=value ...
//
// Example:
//
//     [May 16 20:58:45] [DBUG] remove route ns=haproxy addr=127.0.0.1:50002
//
func TerminalFormat() Format {
	return FormatFunc(func(r *Record) []byte {
		// Pick an ANSI color code per severity; 0 means "no color".
		var color = 0
		switch r.Lvl {
		case LvlCrit:
			color = 35
		case LvlError:
			color = 31
		case LvlWarn:
			color = 33
		case LvlInfo:
			color = 32
		case LvlDebug:
			color = 36
		case LvlTrace:
			color = 34
		}

		b := &bytes.Buffer{}
		lvl := strings.ToUpper(r.Lvl.String())
		if atomic.LoadUint32(&locationEnabled) != 0 {
			// Log origin printing was requested, format the location path and line number
			location := fmt.Sprintf("%+v", r.Call)
			for _, prefix := range locationTrims {
				location = strings.TrimPrefix(location, prefix)
			}
			// Maintain the maximum location length for fancier alignment
			align := int(atomic.LoadUint32(&locationLength))
			if align < len(location) {
				align = len(location)
				atomic.StoreUint32(&locationLength, uint32(align))
			}
			padding := strings.Repeat(" ", align-len(location))

			// Assemble and print the log heading
			if color > 0 {
				fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s|%s]%s %s ", color, lvl, r.Time.Format(termTimeFormat), location, padding, r.Msg)
			} else {
				fmt.Fprintf(b, "[%s] [%s|%s]%s %s ", lvl, r.Time.Format(termTimeFormat), location, padding, r.Msg)
			}
		} else {
			if color > 0 {
				fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %s ", color, lvl, r.Time.Format(termTimeFormat), r.Msg)
			} else {
				fmt.Fprintf(b, "[%s] [%s] %s ", lvl, r.Time.Format(termTimeFormat), r.Msg)
			}
		}
		// try to justify the log output for short messages so the context
		// keys start at a fixed column
		if len(r.Ctx) > 0 && len(r.Msg) < termMsgJust {
			b.Write(bytes.Repeat([]byte{' '}, termMsgJust-len(r.Msg)))
		}

		// print the keys logfmt style
		logfmt(b, r.Ctx, color)
		return b.Bytes()
	})
}
|
||||||
|
|
||||||
|
// LogfmtFormat prints records in logfmt format, an easy machine-parseable but human-readable
|
||||||
|
// format for key/value pairs.
|
||||||
|
//
|
||||||
|
// For more details see: http://godoc.org/github.com/kr/logfmt
|
||||||
|
//
|
||||||
|
func LogfmtFormat() Format {
|
||||||
|
return FormatFunc(func(r *Record) []byte {
|
||||||
|
common := []interface{}{r.KeyNames.Time, r.Time, r.KeyNames.Lvl, r.Lvl, r.KeyNames.Msg, r.Msg}
|
||||||
|
buf := &bytes.Buffer{}
|
||||||
|
logfmt(buf, append(common, r.Ctx...), 0)
|
||||||
|
return buf.Bytes()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func logfmt(buf *bytes.Buffer, ctx []interface{}, color int) {
|
||||||
|
for i := 0; i < len(ctx); i += 2 {
|
||||||
|
if i != 0 {
|
||||||
|
buf.WriteByte(' ')
|
||||||
|
}
|
||||||
|
|
||||||
|
k, ok := ctx[i].(string)
|
||||||
|
v := formatLogfmtValue(ctx[i+1])
|
||||||
|
if !ok {
|
||||||
|
k, v = errorKey, formatLogfmtValue(k)
|
||||||
|
}
|
||||||
|
|
||||||
|
// XXX: we should probably check that all of your key bytes aren't invalid
|
||||||
|
if color > 0 {
|
||||||
|
fmt.Fprintf(buf, "\x1b[%dm%s\x1b[0m=%s", color, k, v)
|
||||||
|
} else {
|
||||||
|
buf.WriteString(k)
|
||||||
|
buf.WriteByte('=')
|
||||||
|
buf.WriteString(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.WriteByte('\n')
|
||||||
|
}
|
||||||
|
|
||||||
|
// JsonFormat formats log records as JSON objects separated by newlines.
// It is the equivalent of JsonFormatEx(false, true): compact encoding,
// one record per line.
func JsonFormat() Format {
	return JsonFormatEx(false, true)
}
|
||||||
|
|
||||||
|
// JsonFormatEx formats log records as JSON objects. If pretty is true,
|
||||||
|
// records will be pretty-printed. If lineSeparated is true, records
|
||||||
|
// will be logged with a new line between each record.
|
||||||
|
func JsonFormatEx(pretty, lineSeparated bool) Format {
|
||||||
|
jsonMarshal := json.Marshal
|
||||||
|
if pretty {
|
||||||
|
jsonMarshal = func(v interface{}) ([]byte, error) {
|
||||||
|
return json.MarshalIndent(v, "", " ")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return FormatFunc(func(r *Record) []byte {
|
||||||
|
props := make(map[string]interface{})
|
||||||
|
|
||||||
|
props[r.KeyNames.Time] = r.Time
|
||||||
|
props[r.KeyNames.Lvl] = r.Lvl.String()
|
||||||
|
props[r.KeyNames.Msg] = r.Msg
|
||||||
|
|
||||||
|
for i := 0; i < len(r.Ctx); i += 2 {
|
||||||
|
k, ok := r.Ctx[i].(string)
|
||||||
|
if !ok {
|
||||||
|
props[errorKey] = fmt.Sprintf("%+v is not a string key", r.Ctx[i])
|
||||||
|
}
|
||||||
|
props[k] = formatJsonValue(r.Ctx[i+1])
|
||||||
|
}
|
||||||
|
|
||||||
|
b, err := jsonMarshal(props)
|
||||||
|
if err != nil {
|
||||||
|
b, _ = jsonMarshal(map[string]string{
|
||||||
|
errorKey: err.Error(),
|
||||||
|
})
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
if lineSeparated {
|
||||||
|
b = append(b, '\n')
|
||||||
|
}
|
||||||
|
|
||||||
|
return b
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// formatShared normalizes a context value into a representation shared by all
// formats: times are rendered with timeFormat, errors and Stringers via their
// own methods, everything else passes through unchanged.
func formatShared(value interface{}) (result interface{}) {
	defer func() {
		// Calling Error()/String() on a typed-nil pointer can panic; recover
		// and render it as "nil" in that case. Any other panic is re-raised.
		if err := recover(); err != nil {
			if v := reflect.ValueOf(value); v.Kind() == reflect.Ptr && v.IsNil() {
				result = "nil"
			} else {
				panic(err)
			}
		}
	}()

	switch v := value.(type) {
	case time.Time:
		return v.Format(timeFormat)

	case error:
		return v.Error()

	case fmt.Stringer:
		return v.String()

	default:
		return v
	}
}
|
||||||
|
|
||||||
|
func formatJsonValue(value interface{}) interface{} {
|
||||||
|
value = formatShared(value)
|
||||||
|
switch value.(type) {
|
||||||
|
case int, int8, int16, int32, int64, float32, float64, uint, uint8, uint16, uint32, uint64, string:
|
||||||
|
return value
|
||||||
|
default:
|
||||||
|
return fmt.Sprintf("%+v", value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// formatLogfmtValue formats a value for logfmt serialization, quoting and
// escaping strings as necessary.
func formatLogfmtValue(value interface{}) string {
	if value == nil {
		return "nil"
	}

	if t, ok := value.(time.Time); ok {
		// Performance optimization: No need for escaping since the provided
		// timeFormat doesn't have any escape characters, and escaping is
		// expensive.
		return t.Format(timeFormat)
	}
	value = formatShared(value)
	switch v := value.(type) {
	case bool:
		return strconv.FormatBool(v)
	case float32:
		return strconv.FormatFloat(float64(v), floatFormat, 3, 64)
	case float64:
		return strconv.FormatFloat(v, floatFormat, 3, 64)
	case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
		return fmt.Sprintf("%d", value)
	case string:
		return escapeString(v)
	default:
		// Fall back to fmt's rendering, then escape the result like any string.
		return escapeString(fmt.Sprintf("%+v", value))
	}
}
|
||||||
|
|
||||||
|
// stringBufPool recycles scratch buffers used by escapeString so hot logging
// paths avoid a fresh allocation per escaped value.
var stringBufPool = sync.Pool{
	New: func() interface{} { return new(bytes.Buffer) },
}

// escapeString quotes and/or escapes s for logfmt output. Strings containing
// whitespace, '=' or '"' are wrapped in double quotes; backslashes, quotes,
// newlines, carriage returns and tabs are backslash-escaped. A string needing
// neither transformation is returned unchanged.
func escapeString(s string) string {
	needsQuotes := false
	needsEscape := false
	for _, r := range s {
		if r <= ' ' || r == '=' || r == '"' {
			needsQuotes = true
		}
		if r == '\\' || r == '"' || r == '\n' || r == '\r' || r == '\t' {
			needsEscape = true
		}
	}
	if !needsEscape && !needsQuotes {
		return s
	}
	e := stringBufPool.Get().(*bytes.Buffer)
	e.WriteByte('"')
	for _, r := range s {
		switch r {
		case '\\', '"':
			e.WriteByte('\\')
			e.WriteByte(byte(r))
		case '\n':
			e.WriteString("\\n")
		case '\r':
			e.WriteString("\\r")
		case '\t':
			e.WriteString("\\t")
		default:
			e.WriteRune(r)
		}
	}
	e.WriteByte('"')
	var ret string
	if needsQuotes {
		ret = e.String()
	} else {
		// Escaping was needed but quoting was not: strip the surrounding quotes.
		ret = string(e.Bytes()[1 : e.Len()-1])
	}
	e.Reset()
	stringBufPool.Put(e)
	return ret
}
|
356
log/handler.go
Normal file
356
log/handler.go
Normal file
@ -0,0 +1,356 @@
|
|||||||
|
package log
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
"reflect"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/go-stack/stack"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Logger prints its log records by writing to a Handler.
// The Handler interface defines where and how log records are written.
// Handlers are composable, providing you great flexibility in combining
// them to achieve the logging structure that suits your applications.
type Handler interface {
	// Log writes the given record, returning any error encountered.
	Log(r *Record) error
}
|
||||||
|
|
||||||
|
// FuncHandler returns a Handler that logs records with the given
// function.
func FuncHandler(fn func(r *Record) error) Handler {
	return funcHandler(fn)
}

// funcHandler adapts a plain function to the Handler interface.
type funcHandler func(r *Record) error

func (h funcHandler) Log(r *Record) error {
	return h(r)
}
|
||||||
|
|
||||||
|
// StreamHandler writes log records to an io.Writer
// with the given format. StreamHandler can be used
// to easily begin writing log records to other
// outputs.
//
// StreamHandler wraps itself with LazyHandler and SyncHandler
// to evaluate Lazy objects and perform safe concurrent writes.
func StreamHandler(wr io.Writer, fmtr Format) Handler {
	h := FuncHandler(func(r *Record) error {
		// A single Write per record keeps the output line-atomic.
		_, err := wr.Write(fmtr.Format(r))
		return err
	})
	return LazyHandler(SyncHandler(h))
}
|
||||||
|
|
||||||
|
// SyncHandler can be wrapped around a handler to guarantee that
|
||||||
|
// only a single Log operation can proceed at a time. It's necessary
|
||||||
|
// for thread-safe concurrent writes.
|
||||||
|
func SyncHandler(h Handler) Handler {
|
||||||
|
var mu sync.Mutex
|
||||||
|
return FuncHandler(func(r *Record) error {
|
||||||
|
defer mu.Unlock()
|
||||||
|
mu.Lock()
|
||||||
|
return h.Log(r)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileHandler returns a handler which writes log records to the give file
// using the given format. If the path
// already exists, FileHandler will append to the given file. If it does not,
// FileHandler will create the file with mode 0644.
func FileHandler(path string, fmtr Format) (Handler, error) {
	f, err := os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
	if err != nil {
		return nil, err
	}
	// Wrap in closingHandler so a future Close() can release the file.
	return closingHandler{f, StreamHandler(f, fmtr)}, nil
}
|
||||||
|
|
||||||
|
// NetHandler opens a socket to the given address and writes records
// over the connection.
func NetHandler(network, addr string, fmtr Format) (Handler, error) {
	conn, err := net.Dial(network, addr)
	if err != nil {
		return nil, err
	}

	// Wrap in closingHandler so a future Close() can release the connection.
	return closingHandler{conn, StreamHandler(conn, fmtr)}, nil
}

// XXX: closingHandler is essentially unused at the moment
// it's meant for a future time when the Handler interface supports
// a possible Close() operation
type closingHandler struct {
	io.WriteCloser
	Handler
}

func (h *closingHandler) Close() error {
	return h.WriteCloser.Close()
}
|
||||||
|
|
||||||
|
// CallerFileHandler returns a Handler that adds the line number and file of
// the calling function to the context with key "caller".
func CallerFileHandler(h Handler) Handler {
	return FuncHandler(func(r *Record) error {
		r.Ctx = append(r.Ctx, "caller", fmt.Sprint(r.Call))
		return h.Log(r)
	})
}

// CallerFuncHandler returns a Handler that adds the calling function name to
// the context with key "fn".
func CallerFuncHandler(h Handler) Handler {
	return FuncHandler(func(r *Record) error {
		// %+n is go-stack's verb for the fully qualified function name.
		r.Ctx = append(r.Ctx, "fn", fmt.Sprintf("%+n", r.Call))
		return h.Log(r)
	})
}
|
||||||
|
|
||||||
|
// CallerStackHandler returns a Handler that adds a stack trace to the context
// with key "stack". The stack trace is formated as a space separated list of
// call sites inside matching []'s. The most recent call site is listed first.
// Each call site is formatted according to format. See the documentation of
// package github.com/go-stack/stack for the list of supported formats.
func CallerStackHandler(format string, h Handler) Handler {
	return FuncHandler(func(r *Record) error {
		// Trim frames below the logging call and runtime internals so only
		// application frames remain.
		s := stack.Trace().TrimBelow(r.Call).TrimRuntime()
		if len(s) > 0 {
			r.Ctx = append(r.Ctx, "stack", fmt.Sprintf(format, s))
		}
		return h.Log(r)
	})
}
|
||||||
|
|
||||||
|
// FilterHandler returns a Handler that only writes records to the
|
||||||
|
// wrapped Handler if the given function evaluates true. For example,
|
||||||
|
// to only log records where the 'err' key is not nil:
|
||||||
|
//
|
||||||
|
// logger.SetHandler(FilterHandler(func(r *Record) bool {
|
||||||
|
// for i := 0; i < len(r.Ctx); i += 2 {
|
||||||
|
// if r.Ctx[i] == "err" {
|
||||||
|
// return r.Ctx[i+1] != nil
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
// return false
|
||||||
|
// }, h))
|
||||||
|
//
|
||||||
|
func FilterHandler(fn func(r *Record) bool, h Handler) Handler {
|
||||||
|
return FuncHandler(func(r *Record) error {
|
||||||
|
if fn(r) {
|
||||||
|
return h.Log(r)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// MatchFilterHandler returns a Handler that only writes records
// to the wrapped Handler if the given key in the logged
// context matches the value. For example, to only log records
// from your ui package:
//
//    log.MatchFilterHandler("pkg", "app/ui", log.StdoutHandler)
//
func MatchFilterHandler(key string, value interface{}, h Handler) Handler {
	return FilterHandler(func(r *Record) (pass bool) {
		// The record's own fields are matched by their configured key names.
		switch key {
		case r.KeyNames.Lvl:
			return r.Lvl == value
		case r.KeyNames.Time:
			return r.Time == value
		case r.KeyNames.Msg:
			return r.Msg == value
		}

		// Otherwise scan the key/value context pairs for a match.
		for i := 0; i < len(r.Ctx); i += 2 {
			if r.Ctx[i] == key {
				return r.Ctx[i+1] == value
			}
		}
		return false
	}, h)
}
|
||||||
|
|
||||||
|
// LvlFilterHandler returns a Handler that only writes
|
||||||
|
// records which are less than the given verbosity
|
||||||
|
// level to the wrapped Handler. For example, to only
|
||||||
|
// log Error/Crit records:
|
||||||
|
//
|
||||||
|
// log.LvlFilterHandler(log.LvlError, log.StdoutHandler)
|
||||||
|
//
|
||||||
|
func LvlFilterHandler(maxLvl Lvl, h Handler) Handler {
|
||||||
|
return FilterHandler(func(r *Record) (pass bool) {
|
||||||
|
return r.Lvl <= maxLvl
|
||||||
|
}, h)
|
||||||
|
}
|
||||||
|
|
||||||
|
// A MultiHandler dispatches any write to each of its handlers.
// This is useful for writing different types of log information
// to different locations. For example, to log to a file and
// standard error:
//
//     log.MultiHandler(
//         log.Must.FileHandler("/var/log/app.log", log.LogfmtFormat()),
//         log.StderrHandler)
//
func MultiHandler(hs ...Handler) Handler {
	return FuncHandler(func(r *Record) error {
		for _, h := range hs {
			// what to do about failures?
			// Errors from individual handlers are deliberately dropped so one
			// failing sink doesn't silence the others.
			h.Log(r)
		}
		return nil
	})
}
|
||||||
|
|
||||||
|
// A FailoverHandler writes all log records to the first handler
|
||||||
|
// specified, but will failover and write to the second handler if
|
||||||
|
// the first handler has failed, and so on for all handlers specified.
|
||||||
|
// For example you might want to log to a network socket, but failover
|
||||||
|
// to writing to a file if the network fails, and then to
|
||||||
|
// standard out if the file write fails:
|
||||||
|
//
|
||||||
|
// log.FailoverHandler(
|
||||||
|
// log.Must.NetHandler("tcp", ":9090", log.JsonFormat()),
|
||||||
|
// log.Must.FileHandler("/var/log/app.log", log.LogfmtFormat()),
|
||||||
|
// log.StdoutHandler)
|
||||||
|
//
|
||||||
|
// All writes that do not go to the first handler will add context with keys of
|
||||||
|
// the form "failover_err_{idx}" which explain the error encountered while
|
||||||
|
// trying to write to the handlers before them in the list.
|
||||||
|
func FailoverHandler(hs ...Handler) Handler {
|
||||||
|
return FuncHandler(func(r *Record) error {
|
||||||
|
var err error
|
||||||
|
for i, h := range hs {
|
||||||
|
err = h.Log(r)
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
} else {
|
||||||
|
r.Ctx = append(r.Ctx, fmt.Sprintf("failover_err_%d", i), err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelHandler writes all records to the given channel.
// It blocks if the channel is full. Useful for async processing
// of log messages, it's used by BufferedHandler.
func ChannelHandler(recs chan<- *Record) Handler {
	return FuncHandler(func(r *Record) error {
		// Blocking send: the caller stalls until a receiver drains the channel.
		recs <- r
		return nil
	})
}
|
||||||
|
|
||||||
|
// BufferedHandler writes all records to a buffered
// channel of the given size which flushes into the wrapped
// handler whenever it is available for writing. Since these
// writes happen asynchronously, all writes to a BufferedHandler
// never return an error and any errors from the wrapped handler are ignored.
func BufferedHandler(bufSize int, h Handler) Handler {
	recs := make(chan *Record, bufSize)
	// Drain the channel on a dedicated goroutine for the handler's lifetime.
	// NOTE(review): there is no shutdown mechanism; the goroutine lives until
	// process exit.
	go func() {
		for m := range recs {
			_ = h.Log(m)
		}
	}()
	return ChannelHandler(recs)
}
|
||||||
|
|
||||||
|
// LazyHandler writes all values to the wrapped handler after evaluating
// any lazy functions in the record's context. It is already wrapped
// around StreamHandler and SyslogHandler in this library, you'll only need
// it if you write your own Handler.
func LazyHandler(h Handler) Handler {
	return FuncHandler(func(r *Record) error {
		// go through the values (odd indices) and reassign
		// the values of any lazy fn to the result of its execution
		hadErr := false
		for i := 1; i < len(r.Ctx); i += 2 {
			lz, ok := r.Ctx[i].(Lazy)
			if ok {
				v, err := evaluateLazy(lz)
				if err != nil {
					// Replace the lazy value with the evaluation error so the
					// problem surfaces in the log output itself.
					hadErr = true
					r.Ctx[i] = err
				} else {
					// Lazily-produced stack traces are trimmed like the eager
					// ones in CallerStackHandler.
					if cs, ok := v.(stack.CallStack); ok {
						v = cs.TrimBelow(r.Call).TrimRuntime()
					}
					r.Ctx[i] = v
				}
			}
		}

		if hadErr {
			r.Ctx = append(r.Ctx, errorKey, "bad lazy")
		}

		return h.Log(r)
	})
}
|
||||||
|
|
||||||
|
func evaluateLazy(lz Lazy) (interface{}, error) {
|
||||||
|
t := reflect.TypeOf(lz.Fn)
|
||||||
|
|
||||||
|
if t.Kind() != reflect.Func {
|
||||||
|
return nil, fmt.Errorf("INVALID_LAZY, not func: %+v", lz.Fn)
|
||||||
|
}
|
||||||
|
|
||||||
|
if t.NumIn() > 0 {
|
||||||
|
return nil, fmt.Errorf("INVALID_LAZY, func takes args: %+v", lz.Fn)
|
||||||
|
}
|
||||||
|
|
||||||
|
if t.NumOut() == 0 {
|
||||||
|
return nil, fmt.Errorf("INVALID_LAZY, no func return val: %+v", lz.Fn)
|
||||||
|
}
|
||||||
|
|
||||||
|
value := reflect.ValueOf(lz.Fn)
|
||||||
|
results := value.Call([]reflect.Value{})
|
||||||
|
if len(results) == 1 {
|
||||||
|
return results[0].Interface(), nil
|
||||||
|
} else {
|
||||||
|
values := make([]interface{}, len(results))
|
||||||
|
for i, v := range results {
|
||||||
|
values[i] = v.Interface()
|
||||||
|
}
|
||||||
|
return values, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DiscardHandler reports success for all writes but does nothing.
|
||||||
|
// It is useful for dynamically disabling logging at runtime via
|
||||||
|
// a Logger's SetHandler method.
|
||||||
|
func DiscardHandler() Handler {
|
||||||
|
return FuncHandler(func(r *Record) error {
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// The Must object provides the following Handler creation functions
// which instead of returning an error parameter only return a Handler
// and panic on failure: FileHandler, NetHandler, SyslogHandler, SyslogNetHandler
var Must muster

// must unwraps a (Handler, error) pair, panicking on error.
func must(h Handler, err error) Handler {
	if err != nil {
		panic(err)
	}
	return h
}

// muster is the receiver type backing the Must convenience object.
type muster struct{}

func (m muster) FileHandler(path string, fmtr Format) Handler {
	return must(FileHandler(path, fmtr))
}

func (m muster) NetHandler(network, addr string, fmtr Format) Handler {
	return must(NetHandler(network, addr, fmtr))
}
|
227
log/handler_glog.go
Normal file
227
log/handler_glog.go
Normal file
@ -0,0 +1,227 @@
|
|||||||
|
// Copyright 2017 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package log
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"regexp"
|
||||||
|
"runtime"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
)
|
||||||
|
|
||||||
|
// errVmoduleSyntax is returned when a user vmodule pattern is invalid.
var errVmoduleSyntax = errors.New("expect comma-separated list of filename=N")

// errTraceSyntax is returned when a user backtrace pattern is invalid.
var errTraceSyntax = errors.New("expect file.go:234")

// GlogHandler is a log handler that mimics the filtering features of Google's
// glog logger: setting global log levels; overriding with callsite pattern
// matches; and requesting backtraces at certain positions.
type GlogHandler struct {
	origin Handler // The origin handler this wraps

	level     uint32 // Current log level, atomically accessible
	override  uint32 // Flag whether overrides are used, atomically accessible
	backtrace uint32 // Flag whether backtrace location is set

	patterns  []pattern       // Current list of patterns to override with
	siteCache map[uintptr]Lvl // Cache of callsite pattern evaluations
	location  string          // file:line location where to do a stackdump at
	lock      sync.RWMutex    // Lock protecting the override pattern list
}
|
||||||
|
|
||||||
|
// NewGlogHandler creates a new log handler with filtering functionality similar
|
||||||
|
// to Google's glog logger. The returned handler implements Handler.
|
||||||
|
func NewGlogHandler(h Handler) *GlogHandler {
|
||||||
|
return &GlogHandler{
|
||||||
|
origin: h,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// pattern contains a filter for the Vmodule option, holding a verbosity level
// and a file pattern to match.
type pattern struct {
	pattern *regexp.Regexp // compiled file-path matcher
	level   Lvl            // verbosity ceiling for matching call sites
}

// Verbosity sets the glog verbosity ceiling. The verbosity of individual packages
// and source files can be raised using Vmodule.
func (h *GlogHandler) Verbosity(level Lvl) {
	atomic.StoreUint32(&h.level, uint32(level))
}
|
||||||
|
|
||||||
|
// Vmodule sets the glog verbosity pattern.
//
// The syntax of the argument is a comma-separated list of pattern=N, where the
// pattern is a literal file name or "glob" pattern matching and N is a V level.
//
// For instance:
//
//  pattern="gopher.go=3"
//   sets the V level to 3 in all Go files named "gopher.go"
//
//  pattern="foo=3"
//   sets V to 3 in all files of any packages whose import path ends in "foo"
//
//  pattern="foo/*=3"
//   sets V to 3 in all files of any packages whose import path contains "foo"
func (h *GlogHandler) Vmodule(ruleset string) error {
	var filter []pattern
	for _, rule := range strings.Split(ruleset, ",") {
		// Empty strings such as from a trailing comma can be ignored
		if len(rule) == 0 {
			continue
		}
		// Ensure we have a pattern = level filter rule
		parts := strings.Split(rule, "=")
		if len(parts) != 2 {
			return errVmoduleSyntax
		}
		parts[0] = strings.TrimSpace(parts[0])
		parts[1] = strings.TrimSpace(parts[1])
		if len(parts[0]) == 0 || len(parts[1]) == 0 {
			return errVmoduleSyntax
		}
		// Parse the level and if correct, assemble the filter rule
		level, err := strconv.Atoi(parts[1])
		if err != nil {
			return errVmoduleSyntax
		}
		if level <= 0 {
			continue // Ignore. It's harmless but no point in paying the overhead.
		}
		// Compile the rule pattern into a regular expression
		matcher := ".*"
		for _, comp := range strings.Split(parts[0], "/") {
			if comp == "*" {
				matcher += "(/.*)?"
			} else if comp != "" {
				matcher += "/" + regexp.QuoteMeta(comp)
			}
		}
		if !strings.HasSuffix(parts[0], ".go") {
			matcher += "/[^/]+\\.go"
		}
		matcher = matcher + "$"

		// The matcher is built from quoted components, so compilation should
		// not fail; the error is deliberately ignored.
		// NOTE(review): a nil regexp here would panic later in Log — confirm
		// the constructed pattern can never be invalid.
		re, _ := regexp.Compile(matcher)
		filter = append(filter, pattern{re, Lvl(level)})
	}
	// Swap out the vmodule pattern for the new filter system
	h.lock.Lock()
	defer h.lock.Unlock()

	h.patterns = filter
	h.siteCache = make(map[uintptr]Lvl)
	atomic.StoreUint32(&h.override, uint32(len(filter)))

	return nil
}
|
||||||
|
|
||||||
|
// BacktraceAt sets the glog backtrace location. When set to a file and line
// number holding a logging statement, a stack trace will be written to the Info
// log whenever execution hits that statement.
//
// Unlike with Vmodule, the ".go" must be present.
func (h *GlogHandler) BacktraceAt(location string) error {
	// Ensure the backtrace location contains two non-empty elements
	parts := strings.Split(location, ":")
	if len(parts) != 2 {
		return errTraceSyntax
	}
	parts[0] = strings.TrimSpace(parts[0])
	parts[1] = strings.TrimSpace(parts[1])
	if len(parts[0]) == 0 || len(parts[1]) == 0 {
		return errTraceSyntax
	}
	// Ensure the .go prefix is present and the line is valid
	if !strings.HasSuffix(parts[0], ".go") {
		return errTraceSyntax
	}
	if _, err := strconv.Atoi(parts[1]); err != nil {
		return errTraceSyntax
	}
	// All seems valid, store the location and flag backtracing as enabled.
	h.lock.Lock()
	defer h.lock.Unlock()

	h.location = location
	atomic.StoreUint32(&h.backtrace, uint32(len(location)))

	return nil
}
|
||||||
|
|
||||||
|
// Log implements Handler.Log, filtering a log record through the global, local
// and backtrace filters, finally emitting it if either allow it through.
func (h *GlogHandler) Log(r *Record) error {
	// If backtracing is requested, check whether this is the callsite
	if atomic.LoadUint32(&h.backtrace) > 0 {
		// Everything below here is slow. Although we could cache the call sites the
		// same way as for vmodule, backtracing is so rare it's not worth the extra
		// complexity.
		h.lock.RLock()
		match := h.location == r.Call.String()
		h.lock.RUnlock()

		if match {
			// Callsite matched, raise the log level to info and gather the stacks
			r.Lvl = LvlInfo

			buf := make([]byte, 1024*1024)
			buf = buf[:runtime.Stack(buf, true)]
			r.Msg += "\n\n" + string(buf)
		}
	}
	// If the global log level allows, fast track logging
	if atomic.LoadUint32(&h.level) >= uint32(r.Lvl) {
		return h.origin.Log(r)
	}
	// If no local overrides are present, fast track skipping
	if atomic.LoadUint32(&h.override) == 0 {
		return nil
	}
	// Check callsite cache for previously calculated log levels
	h.lock.RLock()
	lvl, ok := h.siteCache[r.Call.PC()]
	h.lock.RUnlock()

	// If we didn't cache the callsite yet, calculate it
	if !ok {
		h.lock.Lock()
		for _, rule := range h.patterns {
			if rule.pattern.MatchString(fmt.Sprintf("%+s", r.Call)) {
				// First matching pattern wins; remember its level.
				h.siteCache[r.Call.PC()], lvl, ok = rule.level, rule.level, true
				break
			}
		}
		// If no rule matched, remember to drop log the next time
		if !ok {
			h.siteCache[r.Call.PC()] = 0
		}
		h.lock.Unlock()
	}
	// Emit if the callsite's override level admits this record.
	if lvl >= r.Lvl {
		return h.origin.Log(r)
	}
	return nil
}
|
26
log/handler_go13.go
Normal file
26
log/handler_go13.go
Normal file
@ -0,0 +1,26 @@
|
|||||||
|
// +build !go1.4
|
||||||
|
|
||||||
|
package log
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync/atomic"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
// swapHandler wraps another handler that may be swapped out
|
||||||
|
// dynamically at runtime in a thread-safe fashion.
|
||||||
|
type swapHandler struct {
|
||||||
|
handler unsafe.Pointer
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *swapHandler) Log(r *Record) error {
|
||||||
|
return h.Get().Log(r)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *swapHandler) Get() Handler {
|
||||||
|
return *(*Handler)(atomic.LoadPointer(&h.handler))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *swapHandler) Swap(newHandler Handler) {
|
||||||
|
atomic.StorePointer(&h.handler, unsafe.Pointer(&newHandler))
|
||||||
|
}
|
23
log/handler_go14.go
Normal file
23
log/handler_go14.go
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
// +build go1.4
|
||||||
|
|
||||||
|
package log
|
||||||
|
|
||||||
|
import "sync/atomic"
|
||||||
|
|
||||||
|
// swapHandler wraps another handler that may be swapped out
|
||||||
|
// dynamically at runtime in a thread-safe fashion.
|
||||||
|
type swapHandler struct {
|
||||||
|
handler atomic.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *swapHandler) Log(r *Record) error {
|
||||||
|
return (*h.handler.Load().(*Handler)).Log(r)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *swapHandler) Swap(newHandler Handler) {
|
||||||
|
h.handler.Store(&newHandler)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *swapHandler) Get() Handler {
|
||||||
|
return *h.handler.Load().(*Handler)
|
||||||
|
}
|
220
log/logger.go
Normal file
220
log/logger.go
Normal file
@ -0,0 +1,220 @@
|
|||||||
|
package log
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/go-stack/stack"
|
||||||
|
)
|
||||||
|
|
||||||
|
const timeKey = "t"
|
||||||
|
const lvlKey = "lvl"
|
||||||
|
const msgKey = "msg"
|
||||||
|
const errorKey = "LOG15_ERROR"
|
||||||
|
|
||||||
|
type Lvl int
|
||||||
|
|
||||||
|
const (
|
||||||
|
LvlCrit Lvl = iota
|
||||||
|
LvlError
|
||||||
|
LvlWarn
|
||||||
|
LvlInfo
|
||||||
|
LvlDebug
|
||||||
|
LvlTrace
|
||||||
|
)
|
||||||
|
|
||||||
|
// Returns the name of a Lvl
|
||||||
|
func (l Lvl) String() string {
|
||||||
|
switch l {
|
||||||
|
case LvlTrace:
|
||||||
|
return "trce"
|
||||||
|
case LvlDebug:
|
||||||
|
return "dbug"
|
||||||
|
case LvlInfo:
|
||||||
|
return "info"
|
||||||
|
case LvlWarn:
|
||||||
|
return "warn"
|
||||||
|
case LvlError:
|
||||||
|
return "eror"
|
||||||
|
case LvlCrit:
|
||||||
|
return "crit"
|
||||||
|
default:
|
||||||
|
panic("bad level")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns the appropriate Lvl from a string name.
|
||||||
|
// Useful for parsing command line args and configuration files.
|
||||||
|
func LvlFromString(lvlString string) (Lvl, error) {
|
||||||
|
switch lvlString {
|
||||||
|
case "trace", "trce":
|
||||||
|
return LvlTrace, nil
|
||||||
|
case "debug", "dbug":
|
||||||
|
return LvlDebug, nil
|
||||||
|
case "info":
|
||||||
|
return LvlInfo, nil
|
||||||
|
case "warn":
|
||||||
|
return LvlWarn, nil
|
||||||
|
case "error", "eror":
|
||||||
|
return LvlError, nil
|
||||||
|
case "crit":
|
||||||
|
return LvlCrit, nil
|
||||||
|
default:
|
||||||
|
return LvlDebug, fmt.Errorf("Unknown level: %v", lvlString)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Record is what a Logger asks its handler to write
|
||||||
|
type Record struct {
|
||||||
|
Time time.Time
|
||||||
|
Lvl Lvl
|
||||||
|
Msg string
|
||||||
|
Ctx []interface{}
|
||||||
|
Call stack.Call
|
||||||
|
KeyNames RecordKeyNames
|
||||||
|
}
|
||||||
|
|
||||||
|
type RecordKeyNames struct {
|
||||||
|
Time string
|
||||||
|
Msg string
|
||||||
|
Lvl string
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Logger writes key/value pairs to a Handler
|
||||||
|
type Logger interface {
|
||||||
|
// New returns a new Logger that has this logger's context plus the given context
|
||||||
|
New(ctx ...interface{}) Logger
|
||||||
|
|
||||||
|
// GetHandler gets the handler associated with the logger.
|
||||||
|
GetHandler() Handler
|
||||||
|
|
||||||
|
// SetHandler updates the logger to write records to the specified handler.
|
||||||
|
SetHandler(h Handler)
|
||||||
|
|
||||||
|
// Log a message at the given level with context key/value pairs
|
||||||
|
Trace(msg string, ctx ...interface{})
|
||||||
|
Debug(msg string, ctx ...interface{})
|
||||||
|
Info(msg string, ctx ...interface{})
|
||||||
|
Warn(msg string, ctx ...interface{})
|
||||||
|
Error(msg string, ctx ...interface{})
|
||||||
|
Crit(msg string, ctx ...interface{})
|
||||||
|
}
|
||||||
|
|
||||||
|
type logger struct {
|
||||||
|
ctx []interface{}
|
||||||
|
h *swapHandler
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *logger) write(msg string, lvl Lvl, ctx []interface{}) {
|
||||||
|
l.h.Log(&Record{
|
||||||
|
Time: time.Now(),
|
||||||
|
Lvl: lvl,
|
||||||
|
Msg: msg,
|
||||||
|
Ctx: newContext(l.ctx, ctx),
|
||||||
|
Call: stack.Caller(2),
|
||||||
|
KeyNames: RecordKeyNames{
|
||||||
|
Time: timeKey,
|
||||||
|
Msg: msgKey,
|
||||||
|
Lvl: lvlKey,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *logger) New(ctx ...interface{}) Logger {
|
||||||
|
child := &logger{newContext(l.ctx, ctx), new(swapHandler)}
|
||||||
|
child.SetHandler(l.h)
|
||||||
|
return child
|
||||||
|
}
|
||||||
|
|
||||||
|
func newContext(prefix []interface{}, suffix []interface{}) []interface{} {
|
||||||
|
normalizedSuffix := normalize(suffix)
|
||||||
|
newCtx := make([]interface{}, len(prefix)+len(normalizedSuffix))
|
||||||
|
n := copy(newCtx, prefix)
|
||||||
|
copy(newCtx[n:], normalizedSuffix)
|
||||||
|
return newCtx
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *logger) Trace(msg string, ctx ...interface{}) {
|
||||||
|
l.write(msg, LvlTrace, ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *logger) Debug(msg string, ctx ...interface{}) {
|
||||||
|
l.write(msg, LvlDebug, ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *logger) Info(msg string, ctx ...interface{}) {
|
||||||
|
l.write(msg, LvlInfo, ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *logger) Warn(msg string, ctx ...interface{}) {
|
||||||
|
l.write(msg, LvlWarn, ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *logger) Error(msg string, ctx ...interface{}) {
|
||||||
|
l.write(msg, LvlError, ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *logger) Crit(msg string, ctx ...interface{}) {
|
||||||
|
l.write(msg, LvlCrit, ctx)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *logger) GetHandler() Handler {
|
||||||
|
return l.h.Get()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *logger) SetHandler(h Handler) {
|
||||||
|
l.h.Swap(h)
|
||||||
|
}
|
||||||
|
|
||||||
|
func normalize(ctx []interface{}) []interface{} {
|
||||||
|
// if the caller passed a Ctx object, then expand it
|
||||||
|
if len(ctx) == 1 {
|
||||||
|
if ctxMap, ok := ctx[0].(Ctx); ok {
|
||||||
|
ctx = ctxMap.toArray()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ctx needs to be even because it's a series of key/value pairs
|
||||||
|
// no one wants to check for errors on logging functions,
|
||||||
|
// so instead of erroring on bad input, we'll just make sure
|
||||||
|
// that things are the right length and users can fix bugs
|
||||||
|
// when they see the output looks wrong
|
||||||
|
if len(ctx)%2 != 0 {
|
||||||
|
ctx = append(ctx, nil, errorKey, "Normalized odd number of arguments by adding nil")
|
||||||
|
}
|
||||||
|
|
||||||
|
return ctx
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lazy allows you to defer calculation of a logged value that is expensive
|
||||||
|
// to compute until it is certain that it must be evaluated with the given filters.
|
||||||
|
//
|
||||||
|
// Lazy may also be used in conjunction with a Logger's New() function
|
||||||
|
// to generate a child logger which always reports the current value of changing
|
||||||
|
// state.
|
||||||
|
//
|
||||||
|
// You may wrap any function which takes no arguments to Lazy. It may return any
|
||||||
|
// number of values of any type.
|
||||||
|
type Lazy struct {
|
||||||
|
Fn interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ctx is a map of key/value pairs to pass as context to a log function
|
||||||
|
// Use this only if you really need greater safety around the arguments you pass
|
||||||
|
// to the logging functions.
|
||||||
|
type Ctx map[string]interface{}
|
||||||
|
|
||||||
|
func (c Ctx) toArray() []interface{} {
|
||||||
|
arr := make([]interface{}, len(c)*2)
|
||||||
|
|
||||||
|
i := 0
|
||||||
|
for k, v := range c {
|
||||||
|
arr[i] = k
|
||||||
|
arr[i+1] = v
|
||||||
|
i += 2
|
||||||
|
}
|
||||||
|
|
||||||
|
return arr
|
||||||
|
}
|
73
log/root.go
Normal file
73
log/root.go
Normal file
@ -0,0 +1,73 @@
|
|||||||
|
package log
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/log/term"
|
||||||
|
"github.com/mattn/go-colorable"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
root *logger
|
||||||
|
StdoutHandler = StreamHandler(os.Stdout, LogfmtFormat())
|
||||||
|
StderrHandler = StreamHandler(os.Stderr, LogfmtFormat())
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
if term.IsTty(os.Stdout.Fd()) {
|
||||||
|
StdoutHandler = StreamHandler(colorable.NewColorableStdout(), TerminalFormat())
|
||||||
|
}
|
||||||
|
|
||||||
|
if term.IsTty(os.Stderr.Fd()) {
|
||||||
|
StderrHandler = StreamHandler(colorable.NewColorableStderr(), TerminalFormat())
|
||||||
|
}
|
||||||
|
|
||||||
|
root = &logger{[]interface{}{}, new(swapHandler)}
|
||||||
|
root.SetHandler(LvlFilterHandler(LvlInfo, StdoutHandler))
|
||||||
|
}
|
||||||
|
|
||||||
|
// New returns a new logger with the given context.
|
||||||
|
// New is a convenient alias for Root().New
|
||||||
|
func New(ctx ...interface{}) Logger {
|
||||||
|
return root.New(ctx...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Root returns the root logger
|
||||||
|
func Root() Logger {
|
||||||
|
return root
|
||||||
|
}
|
||||||
|
|
||||||
|
// The following functions bypass the exported logger methods (logger.Debug,
|
||||||
|
// etc.) to keep the call depth the same for all paths to logger.write so
|
||||||
|
// runtime.Caller(2) always refers to the call site in client code.
|
||||||
|
|
||||||
|
// Trace is a convenient alias for Root().Trace
|
||||||
|
func Trace(msg string, ctx ...interface{}) {
|
||||||
|
root.write(msg, LvlTrace, ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Debug is a convenient alias for Root().Debug
|
||||||
|
func Debug(msg string, ctx ...interface{}) {
|
||||||
|
root.write(msg, LvlDebug, ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Info is a convenient alias for Root().Info
|
||||||
|
func Info(msg string, ctx ...interface{}) {
|
||||||
|
root.write(msg, LvlInfo, ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Warn is a convenient alias for Root().Warn
|
||||||
|
func Warn(msg string, ctx ...interface{}) {
|
||||||
|
root.write(msg, LvlWarn, ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error is a convenient alias for Root().Error
|
||||||
|
func Error(msg string, ctx ...interface{}) {
|
||||||
|
root.write(msg, LvlError, ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Crit is a convenient alias for Root().Crit
|
||||||
|
func Crit(msg string, ctx ...interface{}) {
|
||||||
|
root.write(msg, LvlCrit, ctx)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
57
log/syslog.go
Normal file
57
log/syslog.go
Normal file
@ -0,0 +1,57 @@
|
|||||||
|
// +build !windows,!plan9
|
||||||
|
|
||||||
|
package log
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log/syslog"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SyslogHandler opens a connection to the system syslog daemon by calling
|
||||||
|
// syslog.New and writes all records to it.
|
||||||
|
func SyslogHandler(priority syslog.Priority, tag string, fmtr Format) (Handler, error) {
|
||||||
|
wr, err := syslog.New(priority, tag)
|
||||||
|
return sharedSyslog(fmtr, wr, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SyslogNetHandler opens a connection to a log daemon over the network and writes
|
||||||
|
// all log records to it.
|
||||||
|
func SyslogNetHandler(net, addr string, priority syslog.Priority, tag string, fmtr Format) (Handler, error) {
|
||||||
|
wr, err := syslog.Dial(net, addr, priority, tag)
|
||||||
|
return sharedSyslog(fmtr, wr, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func sharedSyslog(fmtr Format, sysWr *syslog.Writer, err error) (Handler, error) {
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
h := FuncHandler(func(r *Record) error {
|
||||||
|
var syslogFn = sysWr.Info
|
||||||
|
switch r.Lvl {
|
||||||
|
case LvlCrit:
|
||||||
|
syslogFn = sysWr.Crit
|
||||||
|
case LvlError:
|
||||||
|
syslogFn = sysWr.Err
|
||||||
|
case LvlWarn:
|
||||||
|
syslogFn = sysWr.Warning
|
||||||
|
case LvlInfo:
|
||||||
|
syslogFn = sysWr.Info
|
||||||
|
case LvlDebug:
|
||||||
|
syslogFn = sysWr.Debug
|
||||||
|
case LvlTrace:
|
||||||
|
syslogFn = func(m string) error { return nil } // There's no syslog level for trace
|
||||||
|
}
|
||||||
|
|
||||||
|
s := strings.TrimSpace(string(fmtr.Format(r)))
|
||||||
|
return syslogFn(s)
|
||||||
|
})
|
||||||
|
return LazyHandler(&closingHandler{sysWr, h}), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m muster) SyslogHandler(priority syslog.Priority, tag string, fmtr Format) Handler {
|
||||||
|
return must(SyslogHandler(priority, tag, fmtr))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m muster) SyslogNetHandler(net, addr string, priority syslog.Priority, tag string, fmtr Format) Handler {
|
||||||
|
return must(SyslogNetHandler(net, addr, priority, tag, fmtr))
|
||||||
|
}
|
21
log/term/LICENSE
Normal file
21
log/term/LICENSE
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
The MIT License (MIT)
|
||||||
|
|
||||||
|
Copyright (c) 2014 Simon Eskildsen
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in
|
||||||
|
all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
THE SOFTWARE.
|
13
log/term/terminal_appengine.go
Normal file
13
log/term/terminal_appengine.go
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
// Based on ssh/terminal:
|
||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build appengine
|
||||||
|
|
||||||
|
package term
|
||||||
|
|
||||||
|
// IsTty always returns false on AppEngine.
|
||||||
|
func IsTty(fd uintptr) bool {
|
||||||
|
return false
|
||||||
|
}
|
13
log/term/terminal_darwin.go
Normal file
13
log/term/terminal_darwin.go
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
// Based on ssh/terminal:
|
||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
// +build !appengine
|
||||||
|
|
||||||
|
package term
|
||||||
|
|
||||||
|
import "syscall"
|
||||||
|
|
||||||
|
const ioctlReadTermios = syscall.TIOCGETA
|
||||||
|
|
||||||
|
type Termios syscall.Termios
|
18
log/term/terminal_freebsd.go
Normal file
18
log/term/terminal_freebsd.go
Normal file
@ -0,0 +1,18 @@
|
|||||||
|
package term
|
||||||
|
|
||||||
|
import (
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
const ioctlReadTermios = syscall.TIOCGETA
|
||||||
|
|
||||||
|
// Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin.
|
||||||
|
type Termios struct {
|
||||||
|
Iflag uint32
|
||||||
|
Oflag uint32
|
||||||
|
Cflag uint32
|
||||||
|
Lflag uint32
|
||||||
|
Cc [20]uint8
|
||||||
|
Ispeed uint32
|
||||||
|
Ospeed uint32
|
||||||
|
}
|
14
log/term/terminal_linux.go
Normal file
14
log/term/terminal_linux.go
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
// Based on ssh/terminal:
|
||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build !appengine
|
||||||
|
|
||||||
|
package term
|
||||||
|
|
||||||
|
import "syscall"
|
||||||
|
|
||||||
|
const ioctlReadTermios = syscall.TCGETS
|
||||||
|
|
||||||
|
type Termios syscall.Termios
|
7
log/term/terminal_netbsd.go
Normal file
7
log/term/terminal_netbsd.go
Normal file
@ -0,0 +1,7 @@
|
|||||||
|
package term
|
||||||
|
|
||||||
|
import "syscall"
|
||||||
|
|
||||||
|
const ioctlReadTermios = syscall.TIOCGETA
|
||||||
|
|
||||||
|
type Termios syscall.Termios
|
20
log/term/terminal_notwindows.go
Normal file
20
log/term/terminal_notwindows.go
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
// Based on ssh/terminal:
|
||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build linux,!appengine darwin freebsd openbsd netbsd
|
||||||
|
|
||||||
|
package term
|
||||||
|
|
||||||
|
import (
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
// IsTty returns true if the given file descriptor is a terminal.
|
||||||
|
func IsTty(fd uintptr) bool {
|
||||||
|
var termios Termios
|
||||||
|
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
|
||||||
|
return err == 0
|
||||||
|
}
|
7
log/term/terminal_openbsd.go
Normal file
7
log/term/terminal_openbsd.go
Normal file
@ -0,0 +1,7 @@
|
|||||||
|
package term
|
||||||
|
|
||||||
|
import "syscall"
|
||||||
|
|
||||||
|
const ioctlReadTermios = syscall.TIOCGETA
|
||||||
|
|
||||||
|
type Termios syscall.Termios
|
9
log/term/terminal_solaris.go
Normal file
9
log/term/terminal_solaris.go
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
package term
|
||||||
|
|
||||||
|
import "golang.org/x/sys/unix"
|
||||||
|
|
||||||
|
// IsTty returns true if the given file descriptor is a terminal.
|
||||||
|
func IsTty(fd uintptr) bool {
|
||||||
|
_, err := unix.IoctlGetTermios(int(fd), unix.TCGETA)
|
||||||
|
return err == nil
|
||||||
|
}
|
26
log/term/terminal_windows.go
Normal file
26
log/term/terminal_windows.go
Normal file
@ -0,0 +1,26 @@
|
|||||||
|
// Based on ssh/terminal:
|
||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build windows
|
||||||
|
|
||||||
|
package term
|
||||||
|
|
||||||
|
import (
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
var kernel32 = syscall.NewLazyDLL("kernel32.dll")
|
||||||
|
|
||||||
|
var (
|
||||||
|
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
|
||||||
|
)
|
||||||
|
|
||||||
|
// IsTty returns true if the given file descriptor is a terminal.
|
||||||
|
func IsTty(fd uintptr) bool {
|
||||||
|
var st uint32
|
||||||
|
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
|
||||||
|
return r != 0 && e == 0
|
||||||
|
}
|
@ -1,191 +0,0 @@
|
|||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction, and
|
|
||||||
distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by the copyright
|
|
||||||
owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all other entities
|
|
||||||
that control, are controlled by, or are under common control with that entity.
|
|
||||||
For the purposes of this definition, "control" means (i) the power, direct or
|
|
||||||
indirect, to cause the direction or management of such entity, whether by
|
|
||||||
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity exercising
|
|
||||||
permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications, including
|
|
||||||
but not limited to software source code, documentation source, and configuration
|
|
||||||
files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical transformation or
|
|
||||||
translation of a Source form, including but not limited to compiled object code,
|
|
||||||
generated documentation, and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or Object form, made
|
|
||||||
available under the License, as indicated by a copyright notice that is included
|
|
||||||
in or attached to the work (an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object form, that
|
|
||||||
is based on (or derived from) the Work and for which the editorial revisions,
|
|
||||||
annotations, elaborations, or other modifications represent, as a whole, an
|
|
||||||
original work of authorship. For the purposes of this License, Derivative Works
|
|
||||||
shall not include works that remain separable from, or merely link (or bind by
|
|
||||||
name) to the interfaces of, the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including the original version
|
|
||||||
of the Work and any modifications or additions to that Work or Derivative Works
|
|
||||||
thereof, that is intentionally submitted to Licensor for inclusion in the Work
|
|
||||||
by the copyright owner or by an individual or Legal Entity authorized to submit
|
|
||||||
on behalf of the copyright owner. For the purposes of this definition,
|
|
||||||
"submitted" means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems, and
|
|
||||||
issue tracking systems that are managed by, or on behalf of, the Licensor for
|
|
||||||
the purpose of discussing and improving the Work, but excluding communication
|
|
||||||
that is conspicuously marked or otherwise designated in writing by the copyright
|
|
||||||
owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
|
|
||||||
of whom a Contribution has been received by Licensor and subsequently
|
|
||||||
incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License.
|
|
||||||
|
|
||||||
Subject to the terms and conditions of this License, each Contributor hereby
|
|
||||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
|
||||||
irrevocable copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the Work and such
|
|
||||||
Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License.
|
|
||||||
|
|
||||||
Subject to the terms and conditions of this License, each Contributor hereby
|
|
||||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
|
||||||
irrevocable (except as stated in this section) patent license to make, have
|
|
||||||
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
|
|
||||||
such license applies only to those patent claims licensable by such Contributor
|
|
||||||
that are necessarily infringed by their Contribution(s) alone or by combination
|
|
||||||
of their Contribution(s) with the Work to which such Contribution(s) was
|
|
||||||
submitted. If You institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
|
|
||||||
Contribution incorporated within the Work constitutes direct or contributory
|
|
||||||
patent infringement, then any patent licenses granted to You under this License
|
|
||||||
for that Work shall terminate as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution.
|
|
||||||
|
|
||||||
You may reproduce and distribute copies of the Work or Derivative Works thereof
|
|
||||||
in any medium, with or without modifications, and in Source or Object form,
|
|
||||||
provided that You meet the following conditions:
|
|
||||||
|
|
||||||
You must give any other recipients of the Work or Derivative Works a copy of
|
|
||||||
this License; and
|
|
||||||
You must cause any modified files to carry prominent notices stating that You
|
|
||||||
changed the files; and
|
|
||||||
You must retain, in the Source form of any Derivative Works that You distribute,
|
|
||||||
all copyright, patent, trademark, and attribution notices from the Source form
|
|
||||||
of the Work, excluding those notices that do not pertain to any part of the
|
|
||||||
Derivative Works; and
|
|
||||||
If the Work includes a "NOTICE" text file as part of its distribution, then any
|
|
||||||
Derivative Works that You distribute must include a readable copy of the
|
|
||||||
attribution notices contained within such NOTICE file, excluding those notices
|
|
||||||
that do not pertain to any part of the Derivative Works, in at least one of the
|
|
||||||
following places: within a NOTICE text file distributed as part of the
|
|
||||||
Derivative Works; within the Source form or documentation, if provided along
|
|
||||||
with the Derivative Works; or, within a display generated by the Derivative
|
|
||||||
Works, if and wherever such third-party notices normally appear. The contents of
|
|
||||||
the NOTICE file are for informational purposes only and do not modify the
|
|
||||||
License. You may add Your own attribution notices within Derivative Works that
|
|
||||||
You distribute, alongside or as an addendum to the NOTICE text from the Work,
|
|
||||||
provided that such additional attribution notices cannot be construed as
|
|
||||||
modifying the License.
|
|
||||||
You may add Your own copyright statement to Your modifications and may provide
|
|
||||||
additional or different license terms and conditions for use, reproduction, or
|
|
||||||
distribution of Your modifications, or for any such Derivative Works as a whole,
|
|
||||||
provided Your use, reproduction, and distribution of the Work otherwise complies
|
|
||||||
with the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions.
|
|
||||||
|
|
||||||
Unless You explicitly state otherwise, any Contribution intentionally submitted
|
|
||||||
for inclusion in the Work by You to the Licensor shall be under the terms and
|
|
||||||
conditions of this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify the terms of
|
|
||||||
any separate license agreement you may have executed with Licensor regarding
|
|
||||||
such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks.
|
|
||||||
|
|
||||||
This License does not grant permission to use the trade names, trademarks,
|
|
||||||
service marks, or product names of the Licensor, except as required for
|
|
||||||
reasonable and customary use in describing the origin of the Work and
|
|
||||||
reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty.
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, Licensor provides the
|
|
||||||
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
|
|
||||||
including, without limitation, any warranties or conditions of TITLE,
|
|
||||||
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
|
|
||||||
solely responsible for determining the appropriateness of using or
|
|
||||||
redistributing the Work and assume any risks associated with Your exercise of
|
|
||||||
permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability.
|
|
||||||
|
|
||||||
In no event and under no legal theory, whether in tort (including negligence),
|
|
||||||
contract, or otherwise, unless required by applicable law (such as deliberate
|
|
||||||
and grossly negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special, incidental,
|
|
||||||
or consequential damages of any character arising as a result of this License or
|
|
||||||
out of the use or inability to use the Work (including but not limited to
|
|
||||||
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
|
|
||||||
any and all other commercial damages or losses), even if such Contributor has
|
|
||||||
been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability.
|
|
||||||
|
|
||||||
While redistributing the Work or Derivative Works thereof, You may choose to
|
|
||||||
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
|
|
||||||
other liability obligations and/or rights consistent with this License. However,
|
|
||||||
in accepting such obligations, You may act only on Your own behalf and on Your
|
|
||||||
sole responsibility, not on behalf of any other Contributor, and only if You
|
|
||||||
agree to indemnify, defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason of your
|
|
||||||
accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
APPENDIX: How to apply the Apache License to your work
|
|
||||||
|
|
||||||
To apply the Apache License to your work, attach the following boilerplate
|
|
||||||
notice, with the fields enclosed by brackets "[]" replaced with your own
|
|
||||||
identifying information. (Don't include the brackets!) The text should be
|
|
||||||
enclosed in the appropriate comment syntax for the file format. We also
|
|
||||||
recommend that a file or class name and description of purpose be included on
|
|
||||||
the same "printed page" as the copyright notice for easier identification within
|
|
||||||
third-party archives.
|
|
||||||
|
|
||||||
Copyright [yyyy] [name of copyright owner]
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
@ -1,44 +0,0 @@
|
|||||||
glog
|
|
||||||
====
|
|
||||||
|
|
||||||
Leveled execution logs for Go.
|
|
||||||
|
|
||||||
This is an efficient pure Go implementation of leveled logs in the
|
|
||||||
manner of the open source C++ package
|
|
||||||
http://code.google.com/p/google-glog
|
|
||||||
|
|
||||||
By binding methods to booleans it is possible to use the log package
|
|
||||||
without paying the expense of evaluating the arguments to the log.
|
|
||||||
Through the -vmodule flag, the package also provides fine-grained
|
|
||||||
control over logging at the file level.
|
|
||||||
|
|
||||||
The comment from glog.go introduces the ideas:
|
|
||||||
|
|
||||||
Package glog implements logging analogous to the Google-internal
|
|
||||||
C++ INFO/ERROR/V setup. It provides functions Info, Warning,
|
|
||||||
Error, Fatal, plus formatting variants such as Infof. It
|
|
||||||
also provides V-style logging controlled by the -v and
|
|
||||||
-vmodule=file=2 flags.
|
|
||||||
|
|
||||||
Basic examples:
|
|
||||||
|
|
||||||
glog.Info("Prepare to repel boarders")
|
|
||||||
|
|
||||||
glog.Fatalf("Initialization failed: %s", err)
|
|
||||||
|
|
||||||
See the documentation for the V function for an explanation
|
|
||||||
of these examples:
|
|
||||||
|
|
||||||
if glog.V(2) {
|
|
||||||
glog.Info("Starting transaction...")
|
|
||||||
}
|
|
||||||
|
|
||||||
glog.V(2).Infoln("Processed", nItems, "elements")
|
|
||||||
|
|
||||||
|
|
||||||
The repository contains an open source version of the log package
|
|
||||||
used inside Google. The master copy of the source lives inside
|
|
||||||
Google, not here. The code in this repo is for export only and is not itself
|
|
||||||
under development. Feature requests will be ignored.
|
|
||||||
|
|
||||||
Send bug reports to golang-nuts@googlegroups.com.
|
|
1223
logger/glog/glog.go
1223
logger/glog/glog.go
File diff suppressed because it is too large
Load Diff
@ -1,128 +0,0 @@
|
|||||||
// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
|
|
||||||
//
|
|
||||||
// Copyright 2013 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// File I/O for logs.
|
|
||||||
|
|
||||||
package glog
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"os/user"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// MaxSize is the maximum size of a log file in bytes.
|
|
||||||
var MaxSize uint64 = 1024 * 1024 * 1800
|
|
||||||
|
|
||||||
// logDirs lists the candidate directories for new log files.
|
|
||||||
var logDirs []string
|
|
||||||
|
|
||||||
// If non-empty, overrides the choice of directory in which to write logs.
|
|
||||||
// See createLogDirs for the full list of possible destinations.
|
|
||||||
//var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory")
|
|
||||||
var logDir *string = new(string)
|
|
||||||
|
|
||||||
func SetLogDir(str string) {
|
|
||||||
*logDir = str
|
|
||||||
}
|
|
||||||
|
|
||||||
func createLogDirs() {
|
|
||||||
if *logDir != "" {
|
|
||||||
logDirs = append(logDirs, *logDir)
|
|
||||||
}
|
|
||||||
logDirs = append(logDirs, os.TempDir())
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
pid = os.Getpid()
|
|
||||||
program = filepath.Base(os.Args[0])
|
|
||||||
host = "unknownhost"
|
|
||||||
userName = "unknownuser"
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
h, err := os.Hostname()
|
|
||||||
if err == nil {
|
|
||||||
host = shortHostname(h)
|
|
||||||
}
|
|
||||||
|
|
||||||
current, err := user.Current()
|
|
||||||
if err == nil {
|
|
||||||
userName = current.Username
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sanitize userName since it may contain filepath separators on Windows.
|
|
||||||
userName = strings.Replace(userName, `\`, "_", -1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// shortHostname returns its argument, truncating at the first period.
|
|
||||||
// For instance, given "www.google.com" it returns "www".
|
|
||||||
func shortHostname(hostname string) string {
|
|
||||||
if i := strings.Index(hostname, "."); i >= 0 {
|
|
||||||
return hostname[:i]
|
|
||||||
}
|
|
||||||
return hostname
|
|
||||||
}
|
|
||||||
|
|
||||||
// logName returns a new log file name containing tag, with start time t, and
|
|
||||||
// the name for the symlink for tag.
|
|
||||||
func logName(tag string, t time.Time) (name, link string) {
|
|
||||||
name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
|
|
||||||
program,
|
|
||||||
host,
|
|
||||||
userName,
|
|
||||||
tag,
|
|
||||||
t.Year(),
|
|
||||||
t.Month(),
|
|
||||||
t.Day(),
|
|
||||||
t.Hour(),
|
|
||||||
t.Minute(),
|
|
||||||
t.Second(),
|
|
||||||
pid)
|
|
||||||
return name, program + "." + tag
|
|
||||||
}
|
|
||||||
|
|
||||||
var onceLogDirs sync.Once
|
|
||||||
|
|
||||||
// create creates a new log file and returns the file and its filename, which
|
|
||||||
// contains tag ("INFO", "FATAL", etc.) and t. If the file is created
|
|
||||||
// successfully, create also attempts to update the symlink for that tag, ignoring
|
|
||||||
// errors.
|
|
||||||
func create(tag string, t time.Time) (f *os.File, filename string, err error) {
|
|
||||||
onceLogDirs.Do(createLogDirs)
|
|
||||||
if len(logDirs) == 0 {
|
|
||||||
return nil, "", errors.New("log: no log dirs")
|
|
||||||
}
|
|
||||||
name, link := logName(tag, t)
|
|
||||||
var lastErr error
|
|
||||||
for _, dir := range logDirs {
|
|
||||||
fname := filepath.Join(dir, name)
|
|
||||||
f, err := os.Create(fname)
|
|
||||||
if err == nil {
|
|
||||||
symlink := filepath.Join(dir, link)
|
|
||||||
os.Remove(symlink) // ignore err
|
|
||||||
os.Symlink(name, symlink) // ignore err
|
|
||||||
return f, fname, nil
|
|
||||||
}
|
|
||||||
lastErr = err
|
|
||||||
}
|
|
||||||
return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
|
|
||||||
}
|
|
@ -1,436 +0,0 @@
|
|||||||
// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
|
|
||||||
//
|
|
||||||
// Copyright 2013 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package glog
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
stdLog "log"
|
|
||||||
"path/filepath"
|
|
||||||
"runtime"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Test that shortHostname works as advertised.
|
|
||||||
func TestShortHostname(t *testing.T) {
|
|
||||||
for hostname, expect := range map[string]string{
|
|
||||||
"": "",
|
|
||||||
"host": "host",
|
|
||||||
"host.google.com": "host",
|
|
||||||
} {
|
|
||||||
if got := shortHostname(hostname); expect != got {
|
|
||||||
t.Errorf("shortHostname(%q): expected %q, got %q", hostname, expect, got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// flushBuffer wraps a bytes.Buffer to satisfy flushSyncWriter.
|
|
||||||
type flushBuffer struct {
|
|
||||||
bytes.Buffer
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *flushBuffer) Flush() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *flushBuffer) Sync() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// swap sets the log writers and returns the old array.
|
|
||||||
func (l *loggingT) swap(writers [numSeverity]flushSyncWriter) (old [numSeverity]flushSyncWriter) {
|
|
||||||
l.mu.Lock()
|
|
||||||
defer l.mu.Unlock()
|
|
||||||
old = l.file
|
|
||||||
for i, w := range writers {
|
|
||||||
logging.file[i] = w
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// newBuffers sets the log writers to all new byte buffers and returns the old array.
|
|
||||||
func (l *loggingT) newBuffers() [numSeverity]flushSyncWriter {
|
|
||||||
return l.swap([numSeverity]flushSyncWriter{new(flushBuffer), new(flushBuffer), new(flushBuffer), new(flushBuffer)})
|
|
||||||
}
|
|
||||||
|
|
||||||
// contents returns the specified log value as a string.
|
|
||||||
func contents(s severity) string {
|
|
||||||
return logging.file[s].(*flushBuffer).String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// contains reports whether the string is contained in the log.
|
|
||||||
func contains(s severity, str string, t *testing.T) bool {
|
|
||||||
return strings.Contains(contents(s), str)
|
|
||||||
}
|
|
||||||
|
|
||||||
// setFlags configures the logging flags how the test expects them.
|
|
||||||
func setFlags() {
|
|
||||||
logging.toStderr = false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test that Info works as advertised.
|
|
||||||
func TestInfo(t *testing.T) {
|
|
||||||
setFlags()
|
|
||||||
defer logging.swap(logging.newBuffers())
|
|
||||||
Info("test")
|
|
||||||
if !contains(infoLog, "I", t) {
|
|
||||||
t.Errorf("Info has wrong character: %q", contents(infoLog))
|
|
||||||
}
|
|
||||||
if !contains(infoLog, "test", t) {
|
|
||||||
t.Error("Info failed")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestInfoDepth(t *testing.T) {
|
|
||||||
setFlags()
|
|
||||||
defer logging.swap(logging.newBuffers())
|
|
||||||
|
|
||||||
f := func() { InfoDepth(1, "depth-test1") }
|
|
||||||
|
|
||||||
// The next three lines must stay together
|
|
||||||
_, _, wantLine, _ := runtime.Caller(0)
|
|
||||||
InfoDepth(0, "depth-test0")
|
|
||||||
f()
|
|
||||||
|
|
||||||
msgs := strings.Split(strings.TrimSuffix(contents(infoLog), "\n"), "\n")
|
|
||||||
if len(msgs) != 2 {
|
|
||||||
t.Fatalf("Got %d lines, expected 2", len(msgs))
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, m := range msgs {
|
|
||||||
if !strings.HasPrefix(m, "I") {
|
|
||||||
t.Errorf("InfoDepth[%d] has wrong character: %q", i, m)
|
|
||||||
}
|
|
||||||
w := fmt.Sprintf("depth-test%d", i)
|
|
||||||
if !strings.Contains(m, w) {
|
|
||||||
t.Errorf("InfoDepth[%d] missing %q: %q", i, w, m)
|
|
||||||
}
|
|
||||||
|
|
||||||
// pull out the line number (between : and ])
|
|
||||||
msg := m[strings.LastIndex(m, ":")+1:]
|
|
||||||
x := strings.Index(msg, "]")
|
|
||||||
if x < 0 {
|
|
||||||
t.Errorf("InfoDepth[%d]: missing ']': %q", i, m)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
line, err := strconv.Atoi(msg[:x])
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("InfoDepth[%d]: bad line number: %q", i, m)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
wantLine++
|
|
||||||
if wantLine != line {
|
|
||||||
t.Errorf("InfoDepth[%d]: got line %d, want %d", i, line, wantLine)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
CopyStandardLogTo("INFO")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test that CopyStandardLogTo panics on bad input.
|
|
||||||
func TestCopyStandardLogToPanic(t *testing.T) {
|
|
||||||
defer func() {
|
|
||||||
if s, ok := recover().(string); !ok || !strings.Contains(s, "LOG") {
|
|
||||||
t.Errorf(`CopyStandardLogTo("LOG") should have panicked: %v`, s)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
CopyStandardLogTo("LOG")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test that using the standard log package logs to INFO.
|
|
||||||
func TestStandardLog(t *testing.T) {
|
|
||||||
setFlags()
|
|
||||||
defer logging.swap(logging.newBuffers())
|
|
||||||
stdLog.Print("test")
|
|
||||||
if !contains(infoLog, "I", t) {
|
|
||||||
t.Errorf("Info has wrong character: %q", contents(infoLog))
|
|
||||||
}
|
|
||||||
if !contains(infoLog, "test", t) {
|
|
||||||
t.Error("Info failed")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test that the header has the correct format.
|
|
||||||
func TestHeader(t *testing.T) {
|
|
||||||
setFlags()
|
|
||||||
defer logging.swap(logging.newBuffers())
|
|
||||||
defer func(previous func() time.Time) { timeNow = previous }(timeNow)
|
|
||||||
timeNow = func() time.Time {
|
|
||||||
return time.Date(2006, 1, 2, 15, 4, 5, .067890e9, time.Local)
|
|
||||||
}
|
|
||||||
pid = 1234
|
|
||||||
Info("test")
|
|
||||||
var line int
|
|
||||||
format := "I0102 15:04:05.067890 logger/glog/glog_test.go:%d] test\n"
|
|
||||||
n, err := fmt.Sscanf(contents(infoLog), format, &line)
|
|
||||||
if n != 1 || err != nil {
|
|
||||||
t.Errorf("log format error: %d elements, error %s:\n%s", n, err, contents(infoLog))
|
|
||||||
}
|
|
||||||
// Scanf treats multiple spaces as equivalent to a single space,
|
|
||||||
// so check for correct space-padding also.
|
|
||||||
want := fmt.Sprintf(format, line)
|
|
||||||
if contents(infoLog) != want {
|
|
||||||
t.Errorf("log format error: got:\n\t%q\nwant:\t%q", contents(infoLog), want)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test that an Error log goes to Warning and Info.
|
|
||||||
// Even in the Info log, the source character will be E, so the data should
|
|
||||||
// all be identical.
|
|
||||||
func TestError(t *testing.T) {
|
|
||||||
setFlags()
|
|
||||||
defer logging.swap(logging.newBuffers())
|
|
||||||
Error("test")
|
|
||||||
if !contains(errorLog, "E", t) {
|
|
||||||
t.Errorf("Error has wrong character: %q", contents(errorLog))
|
|
||||||
}
|
|
||||||
if !contains(errorLog, "test", t) {
|
|
||||||
t.Error("Error failed")
|
|
||||||
}
|
|
||||||
str := contents(errorLog)
|
|
||||||
if !contains(warningLog, str, t) {
|
|
||||||
t.Error("Warning failed")
|
|
||||||
}
|
|
||||||
if !contains(infoLog, str, t) {
|
|
||||||
t.Error("Info failed")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test that a Warning log goes to Info.
|
|
||||||
// Even in the Info log, the source character will be W, so the data should
|
|
||||||
// all be identical.
|
|
||||||
func TestWarning(t *testing.T) {
|
|
||||||
setFlags()
|
|
||||||
defer logging.swap(logging.newBuffers())
|
|
||||||
Warning("test")
|
|
||||||
if !contains(warningLog, "W", t) {
|
|
||||||
t.Errorf("Warning has wrong character: %q", contents(warningLog))
|
|
||||||
}
|
|
||||||
if !contains(warningLog, "test", t) {
|
|
||||||
t.Error("Warning failed")
|
|
||||||
}
|
|
||||||
str := contents(warningLog)
|
|
||||||
if !contains(infoLog, str, t) {
|
|
||||||
t.Error("Info failed")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test that a V log goes to Info.
|
|
||||||
func TestV(t *testing.T) {
|
|
||||||
setFlags()
|
|
||||||
defer logging.swap(logging.newBuffers())
|
|
||||||
logging.verbosity.Set("2")
|
|
||||||
defer logging.verbosity.Set("0")
|
|
||||||
V(2).Info("test")
|
|
||||||
if !contains(infoLog, "I", t) {
|
|
||||||
t.Errorf("Info has wrong character: %q", contents(infoLog))
|
|
||||||
}
|
|
||||||
if !contains(infoLog, "test", t) {
|
|
||||||
t.Error("Info failed")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test that a vmodule enables a log in this file.
|
|
||||||
func TestVmoduleOn(t *testing.T) {
|
|
||||||
setFlags()
|
|
||||||
defer logging.swap(logging.newBuffers())
|
|
||||||
logging.vmodule.Set("glog_test.go=2")
|
|
||||||
defer logging.vmodule.Set("")
|
|
||||||
if !V(1) {
|
|
||||||
t.Error("V not enabled for 1")
|
|
||||||
}
|
|
||||||
if !V(2) {
|
|
||||||
t.Error("V not enabled for 2")
|
|
||||||
}
|
|
||||||
if V(3) {
|
|
||||||
t.Error("V enabled for 3")
|
|
||||||
}
|
|
||||||
V(2).Info("test")
|
|
||||||
if !contains(infoLog, "I", t) {
|
|
||||||
t.Errorf("Info has wrong character: %q", contents(infoLog))
|
|
||||||
}
|
|
||||||
if !contains(infoLog, "test", t) {
|
|
||||||
t.Error("Info failed")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test that a vmodule of another file does not enable a log in this file.
|
|
||||||
func TestVmoduleOff(t *testing.T) {
|
|
||||||
setFlags()
|
|
||||||
defer logging.swap(logging.newBuffers())
|
|
||||||
logging.vmodule.Set("notthisfile=2")
|
|
||||||
defer logging.vmodule.Set("")
|
|
||||||
for i := 1; i <= 3; i++ {
|
|
||||||
if V(Level(i)) {
|
|
||||||
t.Errorf("V enabled for %d", i)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
V(2).Info("test")
|
|
||||||
if contents(infoLog) != "" {
|
|
||||||
t.Error("V logged incorrectly")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var patternTests = []struct{ input, want string }{
|
|
||||||
{"foo/bar/x.go", ".*/foo/bar/x\\.go$"},
|
|
||||||
{"foo/*/x.go", ".*/foo(/.*)?/x\\.go$"},
|
|
||||||
{"foo/*", ".*/foo(/.*)?/[^/]+\\.go$"},
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCompileModulePattern(t *testing.T) {
|
|
||||||
for _, test := range patternTests {
|
|
||||||
re, err := compileModulePattern(test.input)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("%s: %v", test.input, err)
|
|
||||||
}
|
|
||||||
if re.String() != test.want {
|
|
||||||
t.Errorf("mismatch for %q: got %q, want %q", test.input, re.String(), test.want)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// vGlobs are patterns that match/don't match this file at V=2.
|
|
||||||
var vGlobs = map[string]bool{
|
|
||||||
// Easy to test the numeric match here.
|
|
||||||
"glog_test.go=1": false, // If -vmodule sets V to 1, V(2) will fail.
|
|
||||||
"glog_test.go=2": true,
|
|
||||||
"glog_test.go=3": true, // If -vmodule sets V to 1, V(3) will succeed.
|
|
||||||
|
|
||||||
// Import path prefix matching
|
|
||||||
"logger/glog=1": false,
|
|
||||||
"logger/glog=2": true,
|
|
||||||
"logger/glog=3": true,
|
|
||||||
|
|
||||||
// Import path glob matching
|
|
||||||
"logger/*=1": false,
|
|
||||||
"logger/*=2": true,
|
|
||||||
"logger/*=3": true,
|
|
||||||
|
|
||||||
// These all use 2 and check the patterns.
|
|
||||||
"*=2": true,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test that vmodule globbing works as advertised.
|
|
||||||
func testVmoduleGlob(pat string, match bool, t *testing.T) {
|
|
||||||
setFlags()
|
|
||||||
defer logging.swap(logging.newBuffers())
|
|
||||||
defer logging.vmodule.Set("")
|
|
||||||
logging.vmodule.Set(pat)
|
|
||||||
if V(2) != Verbose(match) {
|
|
||||||
t.Errorf("incorrect match for %q: got %t expected %t", pat, V(2), match)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test that a vmodule globbing works as advertised.
|
|
||||||
func TestVmoduleGlob(t *testing.T) {
|
|
||||||
for glob, match := range vGlobs {
|
|
||||||
testVmoduleGlob(glob, match, t)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRollover(t *testing.T) {
|
|
||||||
setFlags()
|
|
||||||
var err error
|
|
||||||
defer func(previous func(error)) { logExitFunc = previous }(logExitFunc)
|
|
||||||
logExitFunc = func(e error) {
|
|
||||||
err = e
|
|
||||||
}
|
|
||||||
defer func(previous uint64) { MaxSize = previous }(MaxSize)
|
|
||||||
MaxSize = 512
|
|
||||||
|
|
||||||
Info("x") // Be sure we have a file.
|
|
||||||
info, ok := logging.file[infoLog].(*syncBuffer)
|
|
||||||
if !ok {
|
|
||||||
t.Fatal("info wasn't created")
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("info has initial error: %v", err)
|
|
||||||
}
|
|
||||||
fname0 := info.file.Name()
|
|
||||||
Info(strings.Repeat("x", int(MaxSize))) // force a rollover
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("info has error after big write: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make sure the next log file gets a file name with a different
|
|
||||||
// time stamp.
|
|
||||||
//
|
|
||||||
// TODO: determine whether we need to support subsecond log
|
|
||||||
// rotation. C++ does not appear to handle this case (nor does it
|
|
||||||
// handle Daylight Savings Time properly).
|
|
||||||
time.Sleep(1 * time.Second)
|
|
||||||
|
|
||||||
Info("x") // create a new file
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("error after rotation: %v", err)
|
|
||||||
}
|
|
||||||
fname1 := info.file.Name()
|
|
||||||
if fname0 == fname1 {
|
|
||||||
t.Errorf("info.f.Name did not change: %v", fname0)
|
|
||||||
}
|
|
||||||
if info.nbytes >= MaxSize {
|
|
||||||
t.Errorf("file size was not reset: %d", info.nbytes)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLogBacktraceAt(t *testing.T) {
|
|
||||||
setFlags()
|
|
||||||
defer logging.swap(logging.newBuffers())
|
|
||||||
// The peculiar style of this code simplifies line counting and maintenance of the
|
|
||||||
// tracing block below.
|
|
||||||
var infoLine string
|
|
||||||
setTraceLocation := func(file string, line int, ok bool, delta int) {
|
|
||||||
if !ok {
|
|
||||||
t.Fatal("could not get file:line")
|
|
||||||
}
|
|
||||||
_, file = filepath.Split(file)
|
|
||||||
infoLine = fmt.Sprintf("%s:%d", file, line+delta)
|
|
||||||
err := logging.traceLocation.Set(infoLine)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal("error setting log_backtrace_at: ", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
{
|
|
||||||
// Start of tracing block. These lines know about each other's relative position.
|
|
||||||
_, file, line, ok := runtime.Caller(0)
|
|
||||||
setTraceLocation(file, line, ok, +2) // Two lines between Caller and Info calls.
|
|
||||||
Info("we want a stack trace here")
|
|
||||||
}
|
|
||||||
numAppearances := strings.Count(contents(infoLog), infoLine)
|
|
||||||
if numAppearances < 2 {
|
|
||||||
// Need 2 appearances, one in the log header and one in the trace:
|
|
||||||
// log_test.go:281: I0511 16:36:06.952398 02238 log_test.go:280] we want a stack trace here
|
|
||||||
// ...
|
|
||||||
// github.com/glog/glog_test.go:280 (0x41ba91)
|
|
||||||
// ...
|
|
||||||
// We could be more precise but that would require knowing the details
|
|
||||||
// of the traceback format, which may not be dependable.
|
|
||||||
t.Fatal("got no trace back; log is ", contents(infoLog))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkHeader(b *testing.B) {
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
buf, _, _ := logging.header(infoLog, 0)
|
|
||||||
logging.putBuffer(buf)
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,27 +0,0 @@
|
|||||||
// Copyright 2015 The go-ethereum Authors
|
|
||||||
// This file is part of the go-ethereum library.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Lesser General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Lesser General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Lesser General Public License
|
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package logger
|
|
||||||
|
|
||||||
const (
|
|
||||||
Error = iota + 1
|
|
||||||
Warn
|
|
||||||
Info
|
|
||||||
Debug
|
|
||||||
Detail
|
|
||||||
|
|
||||||
Ridiculousness = 100
|
|
||||||
)
|
|
@ -18,13 +18,13 @@
|
|||||||
package metrics
|
package metrics
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/rcrowley/go-metrics"
|
"github.com/rcrowley/go-metrics"
|
||||||
"github.com/rcrowley/go-metrics/exp"
|
"github.com/rcrowley/go-metrics/exp"
|
||||||
)
|
)
|
||||||
@ -41,7 +41,7 @@ var Enabled = false
|
|||||||
func init() {
|
func init() {
|
||||||
for _, arg := range os.Args {
|
for _, arg := range os.Args {
|
||||||
if strings.TrimLeft(arg, "-") == MetricsEnabledFlag {
|
if strings.TrimLeft(arg, "-") == MetricsEnabledFlag {
|
||||||
glog.V(logger.Info).Infof("Enabling metrics collection")
|
log.Info(fmt.Sprintf("Enabling metrics collection"))
|
||||||
Enabled = true
|
Enabled = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -102,7 +102,7 @@ func CollectProcessMetrics(refresh time.Duration) {
|
|||||||
diskWrites = metrics.GetOrRegisterMeter("system/disk/writecount", metrics.DefaultRegistry)
|
diskWrites = metrics.GetOrRegisterMeter("system/disk/writecount", metrics.DefaultRegistry)
|
||||||
diskWriteBytes = metrics.GetOrRegisterMeter("system/disk/writedata", metrics.DefaultRegistry)
|
diskWriteBytes = metrics.GetOrRegisterMeter("system/disk/writedata", metrics.DefaultRegistry)
|
||||||
} else {
|
} else {
|
||||||
glog.V(logger.Debug).Infof("failed to read disk metrics: %v", err)
|
log.Debug(fmt.Sprintf("failed to read disk metrics: %v", err))
|
||||||
}
|
}
|
||||||
// Iterate loading the different stats and updating the meters
|
// Iterate loading the different stats and updating the meters
|
||||||
for i := 1; ; i++ {
|
for i := 1; ; i++ {
|
||||||
|
@ -17,14 +17,14 @@
|
|||||||
package miner
|
package miner
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/pow"
|
"github.com/ethereum/go-ethereum/pow"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -108,7 +108,7 @@ done:
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (self *CpuAgent) mine(work *Work, stop <-chan struct{}) {
|
func (self *CpuAgent) mine(work *Work, stop <-chan struct{}) {
|
||||||
glog.V(logger.Debug).Infof("(re)started agent[%d]. mining...\n", self.index)
|
log.Debug(fmt.Sprintf("(re)started agent[%d]. mining...\n", self.index))
|
||||||
|
|
||||||
// Mine
|
// Mine
|
||||||
nonce, mixDigest := self.pow.Search(work.Block, stop, self.index)
|
nonce, mixDigest := self.pow.Search(work.Block, stop, self.index)
|
||||||
|
@ -30,8 +30,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/eth/downloader"
|
"github.com/ethereum/go-ethereum/eth/downloader"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/event"
|
"github.com/ethereum/go-ethereum/event"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/pow"
|
"github.com/ethereum/go-ethereum/pow"
|
||||||
)
|
)
|
||||||
@ -87,7 +86,7 @@ out:
|
|||||||
if self.Mining() {
|
if self.Mining() {
|
||||||
self.Stop()
|
self.Stop()
|
||||||
atomic.StoreInt32(&self.shouldStart, 1)
|
atomic.StoreInt32(&self.shouldStart, 1)
|
||||||
glog.V(logger.Info).Infoln("Mining operation aborted due to sync operation")
|
log.Info(fmt.Sprint("Mining operation aborted due to sync operation"))
|
||||||
}
|
}
|
||||||
case downloader.DoneEvent, downloader.FailedEvent:
|
case downloader.DoneEvent, downloader.FailedEvent:
|
||||||
shouldStart := atomic.LoadInt32(&self.shouldStart) == 1
|
shouldStart := atomic.LoadInt32(&self.shouldStart) == 1
|
||||||
@ -124,7 +123,7 @@ func (self *Miner) Start(coinbase common.Address, threads int) {
|
|||||||
self.threads = threads
|
self.threads = threads
|
||||||
|
|
||||||
if atomic.LoadInt32(&self.canStart) == 0 {
|
if atomic.LoadInt32(&self.canStart) == 0 {
|
||||||
glog.V(logger.Info).Infoln("Can not start mining operation due to network sync (starts when finished)")
|
log.Info(fmt.Sprint("Can not start mining operation due to network sync (starts when finished)"))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
atomic.StoreInt32(&self.mining, 1)
|
atomic.StoreInt32(&self.mining, 1)
|
||||||
@ -133,7 +132,7 @@ func (self *Miner) Start(coinbase common.Address, threads int) {
|
|||||||
self.worker.register(NewCpuAgent(i, self.pow))
|
self.worker.register(NewCpuAgent(i, self.pow))
|
||||||
}
|
}
|
||||||
|
|
||||||
glog.V(logger.Info).Infof("Starting mining operation (CPU=%d TOT=%d)\n", threads, len(self.worker.agents))
|
log.Info(fmt.Sprintf("Starting mining operation (CPU=%d TOT=%d)\n", threads, len(self.worker.agents)))
|
||||||
self.worker.start()
|
self.worker.start()
|
||||||
self.worker.commitNewWork()
|
self.worker.commitNewWork()
|
||||||
}
|
}
|
||||||
|
@ -18,6 +18,7 @@ package miner
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
|
"fmt"
|
||||||
"math/big"
|
"math/big"
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
@ -26,8 +27,7 @@ import (
|
|||||||
"github.com/ethereum/ethash"
|
"github.com/ethereum/ethash"
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/pow"
|
"github.com/ethereum/go-ethereum/pow"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -140,13 +140,13 @@ func (a *RemoteAgent) SubmitWork(nonce types.BlockNonce, mixDigest, hash common.
|
|||||||
// Make sure the work submitted is present
|
// Make sure the work submitted is present
|
||||||
work := a.work[hash]
|
work := a.work[hash]
|
||||||
if work == nil {
|
if work == nil {
|
||||||
glog.V(logger.Info).Infof("Work was submitted for %x but no pending work found", hash)
|
log.Info(fmt.Sprintf("Work was submitted for %x but no pending work found", hash))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
// Make sure the PoW solutions is indeed valid
|
// Make sure the PoW solutions is indeed valid
|
||||||
block := work.Block.WithMiningResult(nonce, mixDigest)
|
block := work.Block.WithMiningResult(nonce, mixDigest)
|
||||||
if !a.pow.Verify(block) {
|
if !a.pow.Verify(block) {
|
||||||
glog.V(logger.Warn).Infof("Invalid PoW submitted for %x", hash)
|
log.Warn(fmt.Sprintf("Invalid PoW submitted for %x", hash))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
// Solutions seems to be valid, return to the miner and notify acceptance
|
// Solutions seems to be valid, return to the miner and notify acceptance
|
||||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user