Merge commit '3f40e65c4' into merge/geth-v1.13.1

philip-morlier 2023-09-18 16:13:24 -07:00
commit 5d773a540d
80 changed files with 1494 additions and 2322 deletions

View File

@ -4,20 +4,20 @@
24bac679f3a2d8240d8e08e7f6a70b70c2dabf673317d924cf1d1887b9fe1f81 fixtures.tar.gz
# https://go.dev/dl/
818d46ede85682dd551ad378ef37a4d247006f12ec59b5b755601d2ce114369a go1.21.0.src.tar.gz
b314de9f704ab122c077d2ec8e67e3670affe8865479d1f01991e7ac55d65e70 go1.21.0.darwin-amd64.tar.gz
3aca44de55c5e098de2f406e98aba328898b05d509a2e2a356416faacf2c4566 go1.21.0.darwin-arm64.tar.gz
312a0065714a50862af714e7a5b3fbbd70fe68f905ffb9bcc56d42eadf6bffab go1.21.0.freebsd-386.tar.gz
b8eaa36654625df799654f77f4af0ea7bd9e5e760ebe86e68fe7c484748ae995 go1.21.0.freebsd-amd64.tar.gz
0e6f378d9b072fab0a3d9ff4d5e990d98487d47252dba8160015a61e6bd0bcba go1.21.0.linux-386.tar.gz
d0398903a16ba2232b389fb31032ddf57cac34efda306a0eebac34f0965a0742 go1.21.0.linux-amd64.tar.gz
f3d4548edf9b22f26bbd49720350bbfe59d75b7090a1a2bff1afad8214febaf3 go1.21.0.linux-arm64.tar.gz
e377a0004957c8c560a3ff99601bce612330a3d95ba3b0a2ae144165fc87deb1 go1.21.0.linux-armv6l.tar.gz
e938ffc81d8ebe5efc179240960ba22da6a841ff05d5cab7ce2547112b14a47f go1.21.0.linux-ppc64le.tar.gz
be7338df8e5d5472dfa307b0df2b446d85d001b0a2a3cdb1a14048d751b70481 go1.21.0.linux-s390x.tar.gz
af920fbb74fc3d173118dc3cc35f02a709c1de642700e92a91a7d16981df3fec go1.21.0.windows-386.zip
732121e64e0ecb07c77fdf6cc1bc5ce7b242c2d40d4ac29021ad4c64a08731f6 go1.21.0.windows-amd64.zip
41342f5a0f8c083b14c68bde738ddcd313a4f53a5854bfdfab47f0e88247de12 go1.21.0.windows-arm64.zip
bfa36bf75e9a1e9cbbdb9abcf9d1707e479bd3a07880a8ae3564caee5711cb99 go1.21.1.src.tar.gz
809f5b0ef4f7dcdd5f51e9630a5b2e5a1006f22a047126d61560cdc365678a19 go1.21.1.darwin-amd64.tar.gz
ffd40391a1e995855488b008ad9326ff8c2e81803a6e80894401003bae47fcf1 go1.21.1.darwin-arm64.tar.gz
9919a9a4dc82371aba3da5b7c830bcb6249fc1502cd26d959eb340a60e41ee01 go1.21.1.freebsd-386.tar.gz
2571f10f6047e04d87c1f5986a05e5e8f7b511faf98803ef12b66d563845d2a1 go1.21.1.freebsd-amd64.tar.gz
b93850666cdadbd696a986cf7b03111fe99db8c34a9aaa113d7c96d0081e1901 go1.21.1.linux-386.tar.gz
b3075ae1ce5dab85f89bc7905d1632de23ca196bd8336afd93fa97434cfa55ae go1.21.1.linux-amd64.tar.gz
7da1a3936a928fd0b2602ed4f3ef535b8cd1990f1503b8d3e1acc0fa0759c967 go1.21.1.linux-arm64.tar.gz
f3716a43f59ae69999841d6007b42c9e286e8d8ce470656fb3e70d7be2d7ca85 go1.21.1.linux-armv6l.tar.gz
eddf018206f8a5589bda75252b72716d26611efebabdca5d0083ec15e9e41ab7 go1.21.1.linux-ppc64le.tar.gz
a83b3e8eb4dbf76294e773055eb51397510ff4d612a247bad9903560267bba6d go1.21.1.linux-s390x.tar.gz
170256c820f466f29d64876f25f4dfa4029ed9902a0a9095d8bd603aecf4d83b go1.21.1.windows-386.zip
10a4f5b63215d11d1770453733dbcbf024f3f74872f84e28d7ea59f0250316c6 go1.21.1.windows-amd64.zip
41135ce6e0ced4bc1e459cb96bd4090c9dc2062e24179c3f337d855af9b560ef go1.21.1.windows-arm64.zip
# https://github.com/golangci/golangci-lint/releases
fba08acc4027f69f07cef48fbff70b8a7ecdfaa1c2aba9ad3fb31d60d9f5d4bc golangci-lint-1.51.1-darwin-amd64.tar.gz

View File

@ -139,7 +139,7 @@ var (
// This is the version of Go that will be downloaded by
//
// go run ci.go install -dlgo
dlgoVersion = "1.21.0"
dlgoVersion = "1.21.1"
// This is the version of Go that will be used to bootstrap the PPA builder.
//

View File

@ -185,10 +185,10 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
protos = append(protos, fmt.Sprintf("%v/%d", p.Name, p.Version))
}
metrics.NewRegisteredGaugeInfo("geth/info", nil).Update(metrics.GaugeInfoValue{
"arch": runtime.GOARCH,
"os": runtime.GOOS,
"version": cfg.Node.Version,
"eth_protocols": strings.Join(protos, ","),
"arch": runtime.GOARCH,
"os": runtime.GOOS,
"version": cfg.Node.Version,
"protocols": strings.Join(protos, ","),
})
}

View File

@ -251,11 +251,16 @@ func init() {
debug.Flags,
metricsFlags,
)
flags.AutoEnvVars(app.Flags, "GETH")
app.Before = func(ctx *cli.Context) error {
maxprocs.Set() // Automatically set GOMAXPROCS to match Linux container CPU quota.
flags.MigrateGlobalFlags(ctx)
return debug.Setup(ctx)
if err := debug.Setup(ctx); err != nil {
return err
}
flags.CheckEnvVars(ctx, app.Flags, "GETH")
return nil
}
app.After = func(ctx *cli.Context) error {
debug.Exit()

View File

@ -340,28 +340,38 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
// Make sure the state associated with the block is available
head := bc.CurrentBlock()
if !bc.HasState(head.Root) {
// Head state is missing, before the state recovery, find out the
// disk layer point of snapshot(if it's enabled). Make sure the
// rewound point is lower than disk layer.
var diskRoot common.Hash
if bc.cacheConfig.SnapshotLimit > 0 {
diskRoot = rawdb.ReadSnapshotRoot(bc.db)
}
if diskRoot != (common.Hash{}) {
log.Warn("Head state missing, repairing", "number", head.Number, "hash", head.Hash(), "snaproot", diskRoot)
snapDisk, err := bc.setHeadBeyondRoot(head.Number.Uint64(), 0, diskRoot, true)
if err != nil {
return nil, err
}
// Chain rewound, persist old snapshot number to indicate recovery procedure
if snapDisk != 0 {
rawdb.WriteSnapshotRecoveryNumber(bc.db, snapDisk)
}
if head.Number.Uint64() == 0 {
// The genesis state is missing, which is only possible in the path-based
// scheme. This situation occurs when the state syncer overwrites it.
//
// The solution is to reset the state to the genesis state. Although it may not
// match the sync target, the state healer will later address and correct any
// inconsistencies.
bc.resetState()
} else {
log.Warn("Head state missing, repairing", "number", head.Number, "hash", head.Hash())
if _, err := bc.setHeadBeyondRoot(head.Number.Uint64(), 0, common.Hash{}, true); err != nil {
return nil, err
// Head state is missing, before the state recovery, find out the
// disk layer point of snapshot(if it's enabled). Make sure the
// rewound point is lower than disk layer.
var diskRoot common.Hash
if bc.cacheConfig.SnapshotLimit > 0 {
diskRoot = rawdb.ReadSnapshotRoot(bc.db)
}
if diskRoot != (common.Hash{}) {
log.Warn("Head state missing, repairing", "number", head.Number, "hash", head.Hash(), "snaproot", diskRoot)
snapDisk, err := bc.setHeadBeyondRoot(head.Number.Uint64(), 0, diskRoot, true)
if err != nil {
return nil, err
}
// Chain rewound, persist old snapshot number to indicate recovery procedure
if snapDisk != 0 {
rawdb.WriteSnapshotRecoveryNumber(bc.db, snapDisk)
}
} else {
log.Warn("Head state missing, repairing", "number", head.Number, "hash", head.Hash())
if _, err := bc.setHeadBeyondRoot(head.Number.Uint64(), 0, common.Hash{}, true); err != nil {
return nil, err
}
}
}
}
@ -620,6 +630,28 @@ func (bc *BlockChain) SetSafe(header *types.Header) {
}
}
// resetState resets the persistent state to genesis state if it's not present.
func (bc *BlockChain) resetState() {
// Short circuit if the genesis state is already present.
root := bc.genesisBlock.Root()
if bc.HasState(root) {
return
}
// Reset the state database to empty for committing genesis state.
// Note, it should only happen in path-based scheme and Reset function
// is also only call-able in this mode.
if bc.triedb.Scheme() == rawdb.PathScheme {
if err := bc.triedb.Reset(types.EmptyRootHash); err != nil {
log.Crit("Failed to clean state", "err", err) // Shouldn't happen
}
}
// Write genesis state into database.
if err := CommitGenesisState(bc.db, bc.triedb, bc.genesisBlock.Hash()); err != nil {
log.Crit("Failed to commit genesis state", "err", err)
}
log.Info("Reset state to genesis", "root", root)
}
// setHeadBeyondRoot rewinds the local chain to a new head with the extra condition
// that the rewind must pass the specified state root. This method is meant to be
// used when rewinding with snapshots enabled to ensure that we go back further than
@ -646,25 +678,6 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha
pivot := rawdb.ReadLastPivotNumber(bc.db)
frozen, _ := bc.db.Ancients()
// resetState resets the persistent state to genesis if it's not available.
resetState := func() {
// Short circuit if the genesis state is already present.
if bc.HasState(bc.genesisBlock.Root()) {
return
}
// Reset the state database to empty for committing genesis state.
// Note, it should only happen in path-based scheme and Reset function
// is also only call-able in this mode.
if bc.triedb.Scheme() == rawdb.PathScheme {
if err := bc.triedb.Reset(types.EmptyRootHash); err != nil {
log.Crit("Failed to clean state", "err", err) // Shouldn't happen
}
}
// Write genesis state into database.
if err := CommitGenesisState(bc.db, bc.triedb, bc.genesisBlock.Hash()); err != nil {
log.Crit("Failed to commit genesis state", "err", err)
}
}
updateFn := func(db ethdb.KeyValueWriter, header *types.Header) (*types.Header, bool) {
// Rewind the blockchain, ensuring we don't end up with a stateless head
// block. Note, depth equality is permitted to allow using SetHead as a
@ -674,7 +687,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha
if newHeadBlock == nil {
log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash())
newHeadBlock = bc.genesisBlock
resetState()
bc.resetState()
} else {
// Block exists, keep rewinding until we find one with state,
// keeping rewinding until we exceed the optional threshold
@ -703,7 +716,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha
}
if beyondRoot || newHeadBlock.NumberU64() == 0 {
if newHeadBlock.NumberU64() == 0 {
resetState()
bc.resetState()
} else if !bc.HasState(newHeadBlock.Root()) {
// Rewind to a block with recoverable state. If the state is
// missing, run the state recovery here.

View File

@ -88,6 +88,9 @@ func inspectFreezers(db ethdb.Database) ([]freezerInfo, error) {
infos = append(infos, info)
case stateFreezerName:
if ReadStateScheme(db) != PathScheme {
continue
}
datadir, err := db.AncientDatadir()
if err != nil {
return nil, err

View File

@ -987,7 +987,7 @@ func (s *StateDB) fastDeleteStorage(addrHash common.Hash, root common.Hash) (boo
return true, size, nil, nil, nil
}
slot := common.CopyBytes(iter.Slot())
if iter.Error() != nil { // error might occur after Slot function
if err := iter.Error(); err != nil { // error might occur after Slot function
return false, 0, nil, nil, err
}
size += common.StorageSize(common.HashLength + len(slot))
@ -997,7 +997,7 @@ func (s *StateDB) fastDeleteStorage(addrHash common.Hash, root common.Hash) (boo
return false, 0, nil, nil, err
}
}
if iter.Error() != nil { // error might occur during iteration
if err := iter.Error(); err != nil { // error might occur during iteration
return false, 0, nil, nil, err
}
if stack.Hash() != root {
@ -1075,12 +1075,10 @@ func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root
slotDeletionSkip.Inc(1)
}
n := int64(len(slots))
if n > slotDeletionMaxCount.Value() {
slotDeletionMaxCount.Update(n)
}
if int64(size) > slotDeletionMaxSize.Value() {
slotDeletionMaxSize.Update(int64(size))
}
slotDeletionMaxCount.UpdateIfGt(int64(len(slots)))
slotDeletionMaxSize.UpdateIfGt(int64(size))
slotDeletionTimer.UpdateSince(start)
slotDeletionCount.Mark(n)
slotDeletionSize.Mark(int64(size))
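This hunk swaps the manual compare-then-update of the two gauges for UpdateIfGt, which only stores a new value when it exceeds the current one, i.e. the gauge tracks a running maximum. A minimal standalone sketch of that semantics (not the metrics package's own implementation, and without the concurrency safety the real gauge provides):

package main

import "fmt"

// maxGauge keeps a running maximum, mirroring the UpdateIfGt behaviour used above.
// Unlike the real metrics gauge, this sketch is not safe for concurrent use.
type maxGauge struct{ value int64 }

// UpdateIfGt stores v only if it is greater than the current value.
func (g *maxGauge) UpdateIfGt(v int64) {
	if v > g.value {
		g.value = v
	}
}

func main() {
	var g maxGauge
	for _, n := range []int64{3, 12, 7} {
		g.UpdateIfGt(n)
	}
	fmt.Println(g.value) // 12
}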

View File

@ -1,8 +1,5 @@
// Code generated by rlpgen. DO NOT EDIT.
//go:build !norlpgen
// +build !norlpgen
package types
import "github.com/ethereum/go-ethereum/rlp"

View File

@ -1,8 +1,5 @@
// Code generated by rlpgen. DO NOT EDIT.
//go:build !norlpgen
// +build !norlpgen
package types
import "github.com/ethereum/go-ethereum/rlp"

View File

@ -1,8 +1,5 @@
// Code generated by rlpgen. DO NOT EDIT.
//go:build !norlpgen
// +build !norlpgen
package types
import "github.com/ethereum/go-ethereum/rlp"

View File

@ -1,8 +1,5 @@
// Code generated by rlpgen. DO NOT EDIT.
//go:build !norlpgen
// +build !norlpgen
package types
import "github.com/ethereum/go-ethereum/rlp"

View File

@ -362,6 +362,9 @@ func (api *DebugAPI) getModifiedAccounts(startBlock, endBlock *types.Block) ([]c
// The (from, to) parameters are the sequence of blocks to search, which can go
// either forwards or backwards
func (api *DebugAPI) GetAccessibleState(from, to rpc.BlockNumber) (uint64, error) {
if api.eth.blockchain.TrieDB().Scheme() == rawdb.PathScheme {
return 0, errors.New("state history is not yet available in path-based scheme")
}
db := api.eth.ChainDb()
var pivot uint64
if p := rawdb.ReadLastPivotNumber(db); p != nil {
@ -422,6 +425,9 @@ func (api *DebugAPI) GetAccessibleState(from, to rpc.BlockNumber) (uint64, error
// If the value is shorter than the block generation time, or even 0 or negative,
// the node will flush trie after processing each block (effectively archive mode).
func (api *DebugAPI) SetTrieFlushInterval(interval string) error {
if api.eth.blockchain.TrieDB().Scheme() == rawdb.PathScheme {
return errors.New("trie flush interval is undefined for path-based scheme")
}
t, err := time.ParseDuration(interval)
if err != nil {
return err
@ -431,6 +437,9 @@ func (api *DebugAPI) SetTrieFlushInterval(interval string) error {
}
// GetTrieFlushInterval gets the current value of in-memory trie flush interval
func (api *DebugAPI) GetTrieFlushInterval() string {
return api.eth.blockchain.GetTrieFlushInterval().String()
func (api *DebugAPI) GetTrieFlushInterval() (string, error) {
if api.eth.blockchain.TrieDB().Scheme() == rawdb.PathScheme {
return "", errors.New("trie flush interval is undefined for path-based scheme")
}
return api.eth.blockchain.GetTrieFlushInterval().String(), nil
}

View File

@ -207,21 +207,21 @@ func (api *ConsensusAPI) verifyPayloadAttributes(attr *engine.PayloadAttributes)
c := api.eth.BlockChain().Config()
// Verify withdrawals attribute for Shanghai.
if err := checkAttribute(c.IsShanghai, attr.Withdrawals != nil, attr.Timestamp); err != nil {
if err := checkAttribute(c.IsShanghai, attr.Withdrawals != nil, c.LondonBlock, attr.Timestamp); err != nil {
return fmt.Errorf("invalid withdrawals: %w", err)
}
// Verify beacon root attribute for Cancun.
if err := checkAttribute(c.IsCancun, attr.BeaconRoot != nil, attr.Timestamp); err != nil {
if err := checkAttribute(c.IsCancun, attr.BeaconRoot != nil, c.LondonBlock, attr.Timestamp); err != nil {
return fmt.Errorf("invalid parent beacon block root: %w", err)
}
return nil
}
func checkAttribute(active func(*big.Int, uint64) bool, exists bool, time uint64) error {
if active(common.Big0, time) && !exists {
func checkAttribute(active func(*big.Int, uint64) bool, exists bool, block *big.Int, time uint64) error {
if active(block, time) && !exists {
return errors.New("fork active, missing expected attribute")
}
if !active(common.Big0, time) && exists {
if !active(block, time) && exists {
return errors.New("fork inactive, unexpected attribute set")
}
return nil
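The fix above threads the chain's configured London block into checkAttribute instead of the hard-coded common.Big0, presumably because predicates like IsShanghai also take the block number into account, so evaluating them at block zero could misreport a fork as inactive on chains where London activates later (this reasoning is inferred from the signature change, not stated in the diff). A small self-contained sketch of the attribute check, using a hypothetical activation rule:

package main

import (
	"errors"
	"fmt"
	"math/big"
)

// checkAttribute mirrors the helper above: an attribute must be present
// exactly when the fork is active at the given block number and timestamp.
func checkAttribute(active func(*big.Int, uint64) bool, exists bool, block *big.Int, time uint64) error {
	if active(block, time) && !exists {
		return errors.New("fork active, missing expected attribute")
	}
	if !active(block, time) && exists {
		return errors.New("fork inactive, unexpected attribute set")
	}
	return nil
}

func main() {
	// Hypothetical fork: active once the block number reaches 100 and the timestamp reaches 1000.
	active := func(block *big.Int, time uint64) bool {
		return block.Cmp(big.NewInt(100)) >= 0 && time >= 1000
	}
	// With the real activation block passed through, the missing attribute is caught...
	fmt.Println(checkAttribute(active, false, big.NewInt(200), 1200)) // fork active, missing expected attribute
	// ...whereas evaluating at block zero (the old behaviour) lets it slip through.
	fmt.Println(checkAttribute(active, false, big.NewInt(0), 1200)) // <nil>
}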

View File

@ -398,7 +398,14 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int,
log.Info("Block synchronisation started")
}
if mode == SnapSync {
// Snap sync uses the snapshot namespace to store potentially flakey data until
// Snap sync will directly modify the persistent state, making the entire
// trie database unusable until the state is fully synced. To prevent any
// subsequent state reads, explicitly disable the trie database; the state
// syncer is responsible for addressing and correcting any missing state.
if d.blockchain.TrieDB().Scheme() == rawdb.PathScheme {
d.blockchain.TrieDB().Reset(types.EmptyRootHash)
}
// Snap sync uses the snapshot namespace to store potentially flaky data until
// sync completely heals and finishes. Pause snapshot maintenance in the mean-
// time to prevent access.
if snapshots := d.blockchain.Snapshots(); snapshots != nil { // Only nil in tests
@ -1606,17 +1613,30 @@ func (d *Downloader) processSnapSyncContent() error {
// To cater for moving pivot points, track the pivot block and subsequently
// accumulated download results separately.
//
// These will be nil up to the point where we reach the pivot, and will only
// be set temporarily if the synced blocks are piling up, but the pivot is
// still busy downloading. In that case, we need to occasionally check for
pivot moves, so we need to unblock the loop. These fields will accumulate
// the results in the meantime.
//
// Note, there's no issue with memory piling up since after 64 blocks the
// pivot will forcefully move so these accumulators will be dropped.
var (
oldPivot *fetchResult // Locked in pivot block, might change eventually
oldTail []*fetchResult // Downloaded content after the pivot
)
for {
// Wait for the next batch of downloaded data to be available, and if the pivot
// block became stale, move the goalpost
results := d.queue.Results(oldPivot == nil) // Block if we're not monitoring pivot staleness
// Wait for the next batch of downloaded data to be available. If we have
// not yet reached the pivot point, wait blockingly as there's no need to
// spin-loop check for pivot moves. If we reached the pivot but have not
// yet processed it, check for results async, so we might notice pivot
// moves while state syncing. If the pivot was passed fully, block again
// as there's no more reason to check for pivot moves at all.
results := d.queue.Results(oldPivot == nil)
if len(results) == 0 {
// If pivot sync is done, stop
if oldPivot == nil {
if d.committed.Load() {
d.reportSnapSyncProgress(true)
return sync.Cancel()
}
@ -1639,21 +1659,23 @@ func (d *Downloader) processSnapSyncContent() error {
pivot := d.pivotHeader
d.pivotLock.RUnlock()
if oldPivot == nil {
if pivot.Root != sync.root {
sync.Cancel()
sync = d.syncState(pivot.Root)
if oldPivot == nil { // no results piling up, we can move the pivot
if !d.committed.Load() { // not yet passed the pivot, we can move the pivot
if pivot.Root != sync.root { // pivot position changed, we can move the pivot
sync.Cancel()
sync = d.syncState(pivot.Root)
go closeOnErr(sync)
go closeOnErr(sync)
}
}
} else {
} else { // results already piled up, consume before handling pivot move
results = append(append([]*fetchResult{oldPivot}, oldTail...), results...)
}
// Split around the pivot block and process the two sides via snap/full sync
if !d.committed.Load() {
latest := results[len(results)-1].Header
// If the height is above the pivot block by 2 sets, it means the pivot
// become stale in the network and it was garbage collected, move to a
// become stale in the network, and it was garbage collected, move to a
// new pivot.
//
// Note, we have `reorgProtHeaderDelay` number of blocks withheld, Those

go.mod
View File

@ -62,7 +62,7 @@ require (
github.com/supranational/blst v0.3.11
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
github.com/tyler-smith/go-bip39 v1.1.0
github.com/urfave/cli/v2 v2.24.1
github.com/urfave/cli/v2 v2.25.7
go.uber.org/automaxprocs v1.5.2
golang.org/x/crypto v0.12.0
golang.org/x/exp v0.0.0-20230810033253-352e893a4cad

go.sum
View File

@ -39,7 +39,7 @@ github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3/go.mod h1:KLF4gFr6DcKFZwSu
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0 h1:Px2UA+2RvSSvv+RvJNuUB6n7rs5Wsel4dXLe90Um2n4=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0/go.mod h1:tPaiy8S5bQ+S5sOiDlINkp7+Ef339+Nz5L5XO+cnOHo=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/CloudyKit/fastprinter v0.0.0-20170127035650-74b38d55f37a/go.mod h1:EFZQ978U7x8IRnstaskI3IysnWY5Ao3QgZUKOXlsAdw=
github.com/CloudyKit/jet v2.1.3-0.20180809161101-62edd43e4f88+incompatible/go.mod h1:HPYO+50pSWkPoj9Q/eq0aRGByCL6ScRlUmiEX5Zgm+w=
@ -553,8 +553,8 @@ github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3C
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
github.com/urfave/cli/v2 v2.24.1 h1:/QYYr7g0EhwXEML8jO+8OYt5trPnLHS0p3mrgExJ5NU=
github.com/urfave/cli/v2 v2.24.1/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc=
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w=

View File

@ -272,8 +272,6 @@ func (t *Transaction) GasPrice(ctx context.Context) hexutil.Big {
return hexutil.Big{}
}
switch tx.Type() {
case types.AccessListTxType:
return hexutil.Big(*tx.GasPrice())
case types.DynamicFeeTxType:
if block != nil {
if baseFee, _ := block.BaseFeePerGas(ctx); baseFee != nil {
@ -312,9 +310,7 @@ func (t *Transaction) MaxFeePerGas(ctx context.Context) *hexutil.Big {
return nil
}
switch tx.Type() {
case types.AccessListTxType:
return nil
case types.DynamicFeeTxType:
case types.DynamicFeeTxType, types.BlobTxType:
return (*hexutil.Big)(tx.GasFeeCap())
default:
return nil
@ -327,15 +323,33 @@ func (t *Transaction) MaxPriorityFeePerGas(ctx context.Context) *hexutil.Big {
return nil
}
switch tx.Type() {
case types.AccessListTxType:
return nil
case types.DynamicFeeTxType:
case types.DynamicFeeTxType, types.BlobTxType:
return (*hexutil.Big)(tx.GasTipCap())
default:
return nil
}
}
func (t *Transaction) MaxFeePerBlobGas(ctx context.Context) *hexutil.Big {
tx, _ := t.resolve(ctx)
if tx == nil {
return nil
}
return (*hexutil.Big)(tx.BlobGasFeeCap())
}
func (t *Transaction) BlobVersionedHashes(ctx context.Context) *[]common.Hash {
tx, _ := t.resolve(ctx)
if tx == nil {
return nil
}
if tx.Type() != types.BlobTxType {
return nil
}
blobHashes := tx.BlobHashes()
return &blobHashes
}
func (t *Transaction) EffectiveTip(ctx context.Context) (*hexutil.Big, error) {
tx, block := t.resolve(ctx)
if tx == nil {
@ -468,6 +482,40 @@ func (t *Transaction) CumulativeGasUsed(ctx context.Context) (*hexutil.Uint64, e
return &ret, nil
}
func (t *Transaction) BlobGasUsed(ctx context.Context) (*hexutil.Uint64, error) {
tx, _ := t.resolve(ctx)
if tx == nil {
return nil, nil
}
if tx.Type() != types.BlobTxType {
return nil, nil
}
receipt, err := t.getReceipt(ctx)
if err != nil || receipt == nil {
return nil, err
}
ret := hexutil.Uint64(receipt.BlobGasUsed)
return &ret, nil
}
func (t *Transaction) BlobGasPrice(ctx context.Context) (*hexutil.Big, error) {
tx, _ := t.resolve(ctx)
if tx == nil {
return nil, nil
}
if tx.Type() != types.BlobTxType {
return nil, nil
}
receipt, err := t.getReceipt(ctx)
if err != nil || receipt == nil {
return nil, err
}
ret := (*hexutil.Big)(receipt.BlobGasPrice)
return ret, nil
}
func (t *Transaction) CreatedContract(ctx context.Context, args BlockNumberArgs) (*Account, error) {
receipt, err := t.getReceipt(ctx)
if err != nil || receipt == nil || receipt.ContractAddress == (common.Address{}) {
@ -1019,6 +1067,30 @@ func (b *Block) Withdrawals(ctx context.Context) (*[]*Withdrawal, error) {
return &ret, nil
}
func (b *Block) BlobGasUsed(ctx context.Context) (*hexutil.Uint64, error) {
header, err := b.resolveHeader(ctx)
if err != nil {
return nil, err
}
if header.BlobGasUsed == nil {
return nil, nil
}
ret := hexutil.Uint64(*header.BlobGasUsed)
return &ret, nil
}
func (b *Block) ExcessBlobGas(ctx context.Context) (*hexutil.Uint64, error) {
header, err := b.resolveHeader(ctx)
if err != nil {
return nil, err
}
if header.ExcessBlobGas == nil {
return nil, nil
}
ret := hexutil.Uint64(*header.ExcessBlobGas)
return &ret, nil
}
// BlockFilterCriteria encapsulates criteria passed to a `logs` accessor inside
// a block.
type BlockFilterCriteria struct {

View File

@ -71,8 +71,8 @@ const schema string = `
transaction: Transaction!
}
#EIP-2718
type AccessTuple{
# EIP-2718
type AccessTuple {
address: Address!
storageKeys : [Bytes32!]!
}
@ -112,6 +112,8 @@ const schema string = `
maxFeePerGas: BigInt
# MaxPriorityFeePerGas is the maximum miner tip per gas offered to include a transaction, in wei.
maxPriorityFeePerGas: BigInt
# MaxFeePerBlobGas is the maximum blob gas fee cap per blob the sender is willing to pay for blob transaction, in wei.
maxFeePerBlobGas: BigInt
# EffectiveTip is the actual amount of reward going to miner after considering the max fee cap.
effectiveTip: BigInt
# Gas is the maximum amount of gas this transaction can consume.
@ -141,6 +143,10 @@ const schema string = `
# coerced into the EIP-1559 format by setting both maxFeePerGas and
# maxPriorityFeePerGas as the transaction's gas price.
effectiveGasPrice: BigInt
# BlobGasUsed is the amount of blob gas used by this transaction.
blobGasUsed: Long
# blobGasPrice is the actual value per blob gas deducted from the senders account.
blobGasPrice: BigInt
# CreatedContract is the account that was created by a contract creation
# transaction. If the transaction was not a contract creation transaction,
# or it has not yet been mined, this field will be null.
@ -162,6 +168,8 @@ const schema string = `
# RawReceipt is the canonical encoding of the receipt. For post EIP-2718 typed transactions
# this is equivalent to TxType || ReceiptEncoding.
rawReceipt: Bytes!
# BlobVersionedHashes is a set of hash outputs from the blobs in the transaction.
blobVersionedHashes: [Bytes32!]
}
# BlockFilterCriteria encapsulates log filter criteria for a filter applied
@ -171,16 +179,16 @@ const schema string = `
# empty, results will not be filtered by address.
addresses: [Address!]
# Topics list restricts matches to particular event topics. Each event has a list
# of topics. Topics matches a prefix of that list. An empty element array matches any
# topic. Non-empty elements represent an alternative that matches any of the
# contained topics.
#
# Examples:
# - [] or nil matches any topic list
# - [[A]] matches topic A in first position
# - [[], [B]] matches any topic in first position, B in second position
# - [[A], [B]] matches topic A in first position, B in second position
# - [[A, B]], [C, D]] matches topic (A OR B) in first position, (C OR D) in second position
# of topics. Topics matches a prefix of that list. An empty element array matches any
# topic. Non-empty elements represent an alternative that matches any of the
# contained topics.
#
# Examples:
# - [] or nil matches any topic list
# - [[A]] matches topic A in first position
# - [[], [B]] matches any topic in first position, B in second position
# - [[A], [B]] matches topic A in first position, B in second position
# - [[A, B]], [C, D]] matches topic (A OR B) in first position, (C OR D) in second position
topics: [[Bytes32!]!]
}
@ -267,6 +275,10 @@ const schema string = `
# Withdrawals is a list of withdrawals associated with this block. If
# withdrawals are unavailable for this block, this field will be null.
withdrawals: [Withdrawal!]
# BlobGasUsed is the total amount of gas used by the transactions.
blobGasUsed: Long
# ExcessBlobGas is a running total of blob gas consumed in excess of the target, prior to the block.
excessBlobGas: Long
}
# CallData represents the data associated with a local contract call.
@ -312,21 +324,21 @@ const schema string = `
# empty, results will not be filtered by address.
addresses: [Address!]
# Topics list restricts matches to particular event topics. Each event has a list
# of topics. Topics matches a prefix of that list. An empty element array matches any
# topic. Non-empty elements represent an alternative that matches any of the
# contained topics.
#
# Examples:
# - [] or nil matches any topic list
# - [[A]] matches topic A in first position
# - [[], [B]] matches any topic in first position, B in second position
# - [[A], [B]] matches topic A in first position, B in second position
# - [[A, B]], [C, D]] matches topic (A OR B) in first position, (C OR D) in second position
# of topics. Topics matches a prefix of that list. An empty element array matches any
# topic. Non-empty elements represent an alternative that matches any of the
# contained topics.
#
# Examples:
# - [] or nil matches any topic list
# - [[A]] matches topic A in first position
# - [[], [B]] matches any topic in first position, B in second position
# - [[A], [B]] matches topic A in first position, B in second position
# - [[A, B]], [C, D]] matches topic (A OR B) in first position, (C OR D) in second position
topics: [[Bytes32!]!]
}
# SyncState contains the current synchronisation state of the client.
type SyncState{
type SyncState {
# StartingBlock is the block number at which synchronisation started.
startingBlock: Long!
# CurrentBlock is the point at which synchronisation has presently reached.
@ -337,17 +349,17 @@ const schema string = `
# Pending represents the current pending state.
type Pending {
# TransactionCount is the number of transactions in the pending state.
transactionCount: Long!
# Transactions is a list of transactions in the current pending state.
transactions: [Transaction!]
# Account fetches an Ethereum account for the pending state.
account(address: Address!): Account!
# Call executes a local call operation for the pending state.
call(data: CallData!): CallResult
# EstimateGas estimates the amount of gas that will be required for
# successful execution of a transaction for the pending state.
estimateGas(data: CallData!): Long!
# TransactionCount is the number of transactions in the pending state.
transactionCount: Long!
# Transactions is a list of transactions in the current pending state.
transactions: [Transaction!]
# Account fetches an Ethereum account for the pending state.
account(address: Address!): Account!
# Call executes a local call operation for the pending state.
call(data: CallData!): CallResult
# EstimateGas estimates the amount of gas that will be required for
# successful execution of a transaction for the pending state.
estimateGas(data: CallData!): Long!
}
type Query {

View File

@ -14,9 +14,6 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build go1.6
// +build go1.6
package debug
import "runtime/debug"

View File

@ -1,25 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build !go1.6
// +build !go1.6
package debug
// LoudPanic panics in a way that gets all goroutine stacks printed on stderr.
func LoudPanic(x interface{}) {
panic(x)
}

View File

@ -14,9 +14,6 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build go1.5
// +build go1.5
package debug
import (

View File

@ -1,32 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build !go1.5
// +build !go1.5
// no-op implementation of tracing methods for Go < 1.5.
package debug
import "errors"
func (*HandlerT) StartGoTrace(string) error {
return errors.New("tracing is not supported on Go < 1.5")
}
func (*HandlerT) StopGoTrace() error {
return errors.New("tracing is not supported on Go < 1.5")
}

View File

@ -1520,8 +1520,8 @@ func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber
func effectiveGasPrice(tx *types.Transaction, baseFee *big.Int) *big.Int {
fee := tx.GasTipCap()
fee = fee.Add(fee, baseFee)
if tx.GasTipCapIntCmp(fee) < 0 {
return tx.GasTipCap()
if tx.GasFeeCapIntCmp(fee) < 0 {
return tx.GasFeeCap()
}
return fee
}
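This fixes the cap applied when deriving the effective gas price of a pending dynamic-fee transaction: the tip-plus-base-fee sum must be clamped to the fee cap (GasFeeCap), not compared against the tip cap. A short worked sketch of the corrected formula, with illustrative numbers:

package main

import (
	"fmt"
	"math/big"
)

// effectiveGasPrice mirrors the corrected helper: min(baseFee + tipCap, feeCap).
func effectiveGasPrice(tipCap, feeCap, baseFee *big.Int) *big.Int {
	fee := new(big.Int).Add(tipCap, baseFee)
	if feeCap.Cmp(fee) < 0 {
		return new(big.Int).Set(feeCap)
	}
	return fee
}

func main() {
	gwei := big.NewInt(1_000_000_000)
	tipCap := new(big.Int).Mul(big.NewInt(2), gwei)  // maxPriorityFeePerGas = 2 gwei
	feeCap := new(big.Int).Mul(big.NewInt(10), gwei) // maxFeePerGas = 10 gwei
	baseFee := new(big.Int).Mul(big.NewInt(9), gwei) // current base fee = 9 gwei
	// tipCap + baseFee = 11 gwei exceeds the 10 gwei fee cap, so the cap wins.
	fmt.Println(effectiveGasPrice(tipCap, feeCap, baseFee)) // 10000000000
}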

View File

@ -20,11 +20,13 @@ import (
"encoding"
"errors"
"flag"
"fmt"
"math/big"
"os"
"os/user"
"path/filepath"
"strings"
"syscall"
"github.com/ethereum/go-ethereum/common/math"
"github.com/urfave/cli/v2"
@ -68,6 +70,7 @@ type DirectoryFlag struct {
Value DirectoryString
Aliases []string
EnvVars []string
}
// For cli.Flag:
@ -79,6 +82,14 @@ func (f *DirectoryFlag) String() string { return cli.FlagStringer(f) }
// Apply called by cli library, grabs variable from environment (if in env)
// and adds variable to flag set for parsing.
func (f *DirectoryFlag) Apply(set *flag.FlagSet) error {
for _, envVar := range f.EnvVars {
envVar = strings.TrimSpace(envVar)
if value, found := syscall.Getenv(envVar); found {
f.Value.Set(value)
f.HasBeenSet = true
break
}
}
eachName(f, func(name string) {
set.Var(&f.Value, f.Name, f.Usage)
})
@ -102,7 +113,7 @@ func (f *DirectoryFlag) GetCategory() string { return f.Category }
func (f *DirectoryFlag) TakesValue() bool { return true }
func (f *DirectoryFlag) GetUsage() string { return f.Usage }
func (f *DirectoryFlag) GetValue() string { return f.Value.String() }
func (f *DirectoryFlag) GetEnvVars() []string { return nil } // env not supported
func (f *DirectoryFlag) GetEnvVars() []string { return f.EnvVars }
func (f *DirectoryFlag) GetDefaultText() string {
if f.DefaultText != "" {
@ -156,6 +167,7 @@ type TextMarshalerFlag struct {
Value TextMarshaler
Aliases []string
EnvVars []string
}
// For cli.Flag:
@ -165,6 +177,16 @@ func (f *TextMarshalerFlag) IsSet() bool { return f.HasBeenSet }
func (f *TextMarshalerFlag) String() string { return cli.FlagStringer(f) }
func (f *TextMarshalerFlag) Apply(set *flag.FlagSet) error {
for _, envVar := range f.EnvVars {
envVar = strings.TrimSpace(envVar)
if value, found := syscall.Getenv(envVar); found {
if err := f.Value.UnmarshalText([]byte(value)); err != nil {
return fmt.Errorf("could not parse %q from environment variable %q for flag %s: %s", value, envVar, f.Name, err)
}
f.HasBeenSet = true
break
}
}
eachName(f, func(name string) {
set.Var(textMarshalerVal{f.Value}, f.Name, f.Usage)
})
@ -187,7 +209,7 @@ func (f *TextMarshalerFlag) GetCategory() string { return f.Category }
func (f *TextMarshalerFlag) TakesValue() bool { return true }
func (f *TextMarshalerFlag) GetUsage() string { return f.Usage }
func (f *TextMarshalerFlag) GetEnvVars() []string { return nil } // env not supported
func (f *TextMarshalerFlag) GetEnvVars() []string { return f.EnvVars }
func (f *TextMarshalerFlag) GetValue() string {
t, err := f.Value.MarshalText()
@ -237,6 +259,7 @@ type BigFlag struct {
Value *big.Int
Aliases []string
EnvVars []string
}
// For cli.Flag:
@ -246,6 +269,16 @@ func (f *BigFlag) IsSet() bool { return f.HasBeenSet }
func (f *BigFlag) String() string { return cli.FlagStringer(f) }
func (f *BigFlag) Apply(set *flag.FlagSet) error {
for _, envVar := range f.EnvVars {
envVar = strings.TrimSpace(envVar)
if value, found := syscall.Getenv(envVar); found {
if _, ok := f.Value.SetString(value, 10); !ok {
return fmt.Errorf("could not parse %q from environment variable %q for flag %s", value, envVar, f.Name)
}
f.HasBeenSet = true
break
}
}
eachName(f, func(name string) {
f.Value = new(big.Int)
set.Var((*bigValue)(f.Value), f.Name, f.Usage)
@ -271,7 +304,7 @@ func (f *BigFlag) GetCategory() string { return f.Category }
func (f *BigFlag) TakesValue() bool { return true }
func (f *BigFlag) GetUsage() string { return f.Usage }
func (f *BigFlag) GetValue() string { return f.Value.String() }
func (f *BigFlag) GetEnvVars() []string { return nil } // env not supported
func (f *BigFlag) GetEnvVars() []string { return f.EnvVars }
func (f *BigFlag) GetDefaultText() string {
if f.DefaultText != "" {

View File

@ -18,13 +18,22 @@ package flags
import (
"fmt"
"os"
"regexp"
"sort"
"strings"
"github.com/ethereum/go-ethereum/internal/version"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/mattn/go-isatty"
"github.com/urfave/cli/v2"
)
// usecolor defines whether the CLI help should use colored output or normal dumb
// colorless terminal formatting.
var usecolor = (isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd())) && os.Getenv("TERM") != "dumb"
// NewApp creates an app with sane defaults.
func NewApp(usage string) *cli.App {
git, _ := version.VCS()
@ -129,6 +138,14 @@ func doMigrateFlags(ctx *cli.Context) {
}
func init() {
if usecolor {
// Annotate all help categories with colors
cli.AppHelpTemplate = regexp.MustCompile("[A-Z ]+:").ReplaceAllString(cli.AppHelpTemplate, "\u001B[33m$0\u001B[0m")
// Annotate flag categories with colors (private template, so need to
// copy-paste the entire thing here...)
cli.AppHelpTemplate = strings.ReplaceAll(cli.AppHelpTemplate, "{{template \"visibleFlagCategoryTemplate\" .}}", "{{range .VisibleFlagCategories}}\n {{if .Name}}\u001B[33m{{.Name}}\u001B[0m\n\n {{end}}{{$flglen := len .Flags}}{{range $i, $e := .Flags}}{{if eq (subtract $flglen $i) 1}}{{$e}}\n{{else}}{{$e}}\n {{end}}{{end}}{{end}}")
}
cli.FlagStringer = FlagString
}
@ -138,37 +155,31 @@ func FlagString(f cli.Flag) string {
if !ok {
return ""
}
needsPlaceholder := df.TakesValue()
placeholder := ""
if needsPlaceholder {
placeholder = "value"
}
namesText := pad(cli.FlagNamePrefixer(df.Names(), placeholder), 30)
namesText := cli.FlagNamePrefixer(df.Names(), placeholder)
defaultValueString := ""
if s := df.GetDefaultText(); s != "" {
defaultValueString = " (default: " + s + ")"
}
usage := strings.TrimSpace(df.GetUsage())
envHint := strings.TrimSpace(cli.FlagEnvHinter(df.GetEnvVars(), ""))
if len(envHint) > 0 {
usage += " " + envHint
if envHint != "" {
envHint = " (" + envHint[1:len(envHint)-1] + ")"
}
usage := strings.TrimSpace(df.GetUsage())
usage = wordWrap(usage, 80)
usage = indent(usage, 10)
return fmt.Sprintf("\n %s%s\n%s", namesText, defaultValueString, usage)
}
func pad(s string, length int) string {
if len(s) < length {
s += strings.Repeat(" ", length-len(s))
if usecolor {
return fmt.Sprintf("\n \u001B[32m%-35s%-35s\u001B[0m%s\n%s", namesText, defaultValueString, envHint, usage)
} else {
return fmt.Sprintf("\n %-35s%-35s%s\n%s", namesText, defaultValueString, envHint, usage)
}
return s
}
func indent(s string, nspace int) string {
@ -213,3 +224,78 @@ func wordWrap(s string, width int) string {
return output.String()
}
// AutoEnvVars extends all the specific CLI flags with automatically generated
// env vars by capitalizing the flag, replacing . with _ and prefixing it with
// the specified string.
//
// Note, the prefix should *not* contain the separator underscore, that will be
// added automatically.
func AutoEnvVars(flags []cli.Flag, prefix string) {
for _, flag := range flags {
envvar := strings.ToUpper(prefix + "_" + strings.ReplaceAll(strings.ReplaceAll(flag.Names()[0], ".", "_"), "-", "_"))
switch flag := flag.(type) {
case *cli.StringFlag:
flag.EnvVars = append(flag.EnvVars, envvar)
case *cli.BoolFlag:
flag.EnvVars = append(flag.EnvVars, envvar)
case *cli.IntFlag:
flag.EnvVars = append(flag.EnvVars, envvar)
case *cli.Uint64Flag:
flag.EnvVars = append(flag.EnvVars, envvar)
case *cli.DurationFlag:
flag.EnvVars = append(flag.EnvVars, envvar)
case *cli.PathFlag:
flag.EnvVars = append(flag.EnvVars, envvar)
case *BigFlag:
flag.EnvVars = append(flag.EnvVars, envvar)
case *TextMarshalerFlag:
flag.EnvVars = append(flag.EnvVars, envvar)
case *DirectoryFlag:
flag.EnvVars = append(flag.EnvVars, envvar)
}
}
}
// CheckEnvVars iterates over all the environment variables and checks if any of
// them look like a CLI flag but is not consumed. This can be used to detect old
// or mistyped names.
func CheckEnvVars(ctx *cli.Context, flags []cli.Flag, prefix string) {
known := make(map[string]string)
for _, flag := range flags {
docflag, ok := flag.(cli.DocGenerationFlag)
if !ok {
continue
}
for _, envvar := range docflag.GetEnvVars() {
known[envvar] = flag.Names()[0]
}
}
keyvals := os.Environ()
sort.Strings(keyvals)
for _, keyval := range keyvals {
key := strings.Split(keyval, "=")[0]
if !strings.HasPrefix(key, prefix) {
continue
}
if flag, ok := known[key]; ok {
if ctx.Count(flag) > 0 {
log.Info("Config environment variable found", "envvar", key, "shadowedby", "--"+flag)
} else {
log.Info("Config environment variable found", "envvar", key)
}
} else {
log.Warn("Unknown config environment variable", "envvar", key)
}
}
}
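A brief usage sketch of the naming rule behind the two helpers added here, as they are wired up in cmd/geth above. The flag name and value are illustrative only; geth itself applies the rule to every registered flag via flags.AutoEnvVars(app.Flags, "GETH"), and app.Before then calls flags.CheckEnvVars to warn about GETH_* variables that match no known flag.

package main

import (
	"fmt"
	"strings"

	"github.com/urfave/cli/v2"
)

// autoEnvVar reproduces the naming rule used by AutoEnvVars above:
// replace "." and "-" in the flag name with "_", prefix it, and uppercase.
func autoEnvVar(name, prefix string) string {
	name = strings.ReplaceAll(strings.ReplaceAll(name, ".", "_"), "-", "_")
	return strings.ToUpper(prefix + "_" + name)
}

func main() {
	f := &cli.StringFlag{Name: "http.port"} // illustrative flag name
	f.EnvVars = append(f.EnvVars, autoEnvVar(f.Name, "GETH"))
	fmt.Println(f.EnvVars) // [GETH_HTTP_PORT]
}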

View File

@ -7,6 +7,7 @@ import (
"os"
"reflect"
"sync"
"sync/atomic"
"github.com/go-stack/stack"
)
@ -354,3 +355,21 @@ func (m muster) FileHandler(path string, fmtr Format) Handler {
func (m muster) NetHandler(network, addr string, fmtr Format) Handler {
return must(NetHandler(network, addr, fmtr))
}
// swapHandler wraps another handler that may be swapped out
// dynamically at runtime in a thread-safe fashion.
type swapHandler struct {
handler atomic.Value
}
func (h *swapHandler) Log(r *Record) error {
return (*h.handler.Load().(*Handler)).Log(r)
}
func (h *swapHandler) Swap(newHandler Handler) {
h.handler.Store(&newHandler)
}
func (h *swapHandler) Get() Handler {
return *h.handler.Load().(*Handler)
}

View File

@ -1,27 +0,0 @@
//go:build !go1.4
// +build !go1.4
package log
import (
"sync/atomic"
"unsafe"
)
// swapHandler wraps another handler that may be swapped out
// dynamically at runtime in a thread-safe fashion.
type swapHandler struct {
handler unsafe.Pointer
}
func (h *swapHandler) Log(r *Record) error {
return h.Get().Log(r)
}
func (h *swapHandler) Get() Handler {
return *(*Handler)(atomic.LoadPointer(&h.handler))
}
func (h *swapHandler) Swap(newHandler Handler) {
atomic.StorePointer(&h.handler, unsafe.Pointer(&newHandler))
}

View File

@ -1,24 +0,0 @@
//go:build go1.4
// +build go1.4
package log
import "sync/atomic"
// swapHandler wraps another handler that may be swapped out
// dynamically at runtime in a thread-safe fashion.
type swapHandler struct {
handler atomic.Value
}
func (h *swapHandler) Log(r *Record) error {
return (*h.handler.Load().(*Handler)).Log(r)
}
func (h *swapHandler) Swap(newHandler Handler) {
h.handler.Store(&newHandler)
}
func (h *swapHandler) Get() Handler {
return *h.handler.Load().(*Handler)
}

View File

@ -4,13 +4,16 @@ import (
"sync/atomic"
)
type CounterSnapshot interface {
Count() int64
}
// Counters hold an int64 value that can be incremented and decremented.
type Counter interface {
Clear()
Count() int64
Dec(int64)
Inc(int64)
Snapshot() Counter
Snapshot() CounterSnapshot
}
// GetOrRegisterCounter returns an existing Counter or constructs and registers
@ -38,13 +41,13 @@ func NewCounter() Counter {
if !Enabled {
return NilCounter{}
}
return &StandardCounter{}
return new(StandardCounter)
}
// NewCounterForced constructs a new StandardCounter and returns it no matter if
// the global switch is enabled or not.
func NewCounterForced() Counter {
return &StandardCounter{}
return new(StandardCounter)
}
// NewRegisteredCounter constructs and registers a new StandardCounter.
@ -70,75 +73,40 @@ func NewRegisteredCounterForced(name string, r Registry) Counter {
return c
}
// CounterSnapshot is a read-only copy of another Counter.
type CounterSnapshot int64
// Clear panics.
func (CounterSnapshot) Clear() {
panic("Clear called on a CounterSnapshot")
}
// counterSnapshot is a read-only copy of another Counter.
type counterSnapshot int64
// Count returns the count at the time the snapshot was taken.
func (c CounterSnapshot) Count() int64 { return int64(c) }
// Dec panics.
func (CounterSnapshot) Dec(int64) {
panic("Dec called on a CounterSnapshot")
}
// Inc panics.
func (CounterSnapshot) Inc(int64) {
panic("Inc called on a CounterSnapshot")
}
// Snapshot returns the snapshot.
func (c CounterSnapshot) Snapshot() Counter { return c }
func (c counterSnapshot) Count() int64 { return int64(c) }
// NilCounter is a no-op Counter.
type NilCounter struct{}
// Clear is a no-op.
func (NilCounter) Clear() {}
// Count is a no-op.
func (NilCounter) Count() int64 { return 0 }
// Dec is a no-op.
func (NilCounter) Dec(i int64) {}
// Inc is a no-op.
func (NilCounter) Inc(i int64) {}
// Snapshot is a no-op.
func (NilCounter) Snapshot() Counter { return NilCounter{} }
func (NilCounter) Clear() {}
func (NilCounter) Dec(i int64) {}
func (NilCounter) Inc(i int64) {}
func (NilCounter) Snapshot() CounterSnapshot { return (*emptySnapshot)(nil) }
// StandardCounter is the standard implementation of a Counter and uses the
// sync/atomic package to manage a single int64 value.
type StandardCounter struct {
count atomic.Int64
}
type StandardCounter atomic.Int64
// Clear sets the counter to zero.
func (c *StandardCounter) Clear() {
c.count.Store(0)
}
// Count returns the current count.
func (c *StandardCounter) Count() int64 {
return c.count.Load()
(*atomic.Int64)(c).Store(0)
}
// Dec decrements the counter by the given amount.
func (c *StandardCounter) Dec(i int64) {
c.count.Add(-i)
(*atomic.Int64)(c).Add(-i)
}
// Inc increments the counter by the given amount.
func (c *StandardCounter) Inc(i int64) {
c.count.Add(i)
(*atomic.Int64)(c).Add(i)
}
// Snapshot returns a read-only copy of the counter.
func (c *StandardCounter) Snapshot() Counter {
return CounterSnapshot(c.Count())
func (c *StandardCounter) Snapshot() CounterSnapshot {
return counterSnapshot((*atomic.Int64)(c).Load())
}
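The counter refactor above makes the live counter write-oriented and moves reads behind a read-only snapshot; the tests further down are updated to the same pattern. A short sketch of the new read pattern, assuming the refactored metrics package from this commit (NewCounterForced is used so the example works regardless of the global metrics switch):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	c := metrics.NewCounterForced()
	c.Inc(40)
	c.Inc(7)

	// Reads go through a read-only CounterSnapshot; the snapshot is an
	// immutable copy, so later Inc/Dec calls on c do not affect it.
	snap := c.Snapshot()
	c.Dec(7)
	fmt.Println(snap.Count())         // 47
	fmt.Println(c.Snapshot().Count()) // 40
}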

View File

@ -5,13 +5,16 @@ import (
"sync/atomic"
)
type CounterFloat64Snapshot interface {
Count() float64
}
// CounterFloat64 holds a float64 value that can be incremented and decremented.
type CounterFloat64 interface {
Clear()
Count() float64
Dec(float64)
Inc(float64)
Snapshot() CounterFloat64
Snapshot() CounterFloat64Snapshot
}
// GetOrRegisterCounterFloat64 returns an existing CounterFloat64 or constructs and registers
@ -71,47 +74,19 @@ func NewRegisteredCounterFloat64Forced(name string, r Registry) CounterFloat64 {
return c
}
// CounterFloat64Snapshot is a read-only copy of another CounterFloat64.
type CounterFloat64Snapshot float64
// Clear panics.
func (CounterFloat64Snapshot) Clear() {
panic("Clear called on a CounterFloat64Snapshot")
}
// counterFloat64Snapshot is a read-only copy of another CounterFloat64.
type counterFloat64Snapshot float64
// Count returns the value at the time the snapshot was taken.
func (c CounterFloat64Snapshot) Count() float64 { return float64(c) }
func (c counterFloat64Snapshot) Count() float64 { return float64(c) }
// Dec panics.
func (CounterFloat64Snapshot) Dec(float64) {
panic("Dec called on a CounterFloat64Snapshot")
}
// Inc panics.
func (CounterFloat64Snapshot) Inc(float64) {
panic("Inc called on a CounterFloat64Snapshot")
}
// Snapshot returns the snapshot.
func (c CounterFloat64Snapshot) Snapshot() CounterFloat64 { return c }
// NilCounterFloat64 is a no-op CounterFloat64.
type NilCounterFloat64 struct{}
// Clear is a no-op.
func (NilCounterFloat64) Clear() {}
// Count is a no-op.
func (NilCounterFloat64) Count() float64 { return 0.0 }
// Dec is a no-op.
func (NilCounterFloat64) Dec(i float64) {}
// Inc is a no-op.
func (NilCounterFloat64) Inc(i float64) {}
// Snapshot is a no-op.
func (NilCounterFloat64) Snapshot() CounterFloat64 { return NilCounterFloat64{} }
func (NilCounterFloat64) Clear() {}
func (NilCounterFloat64) Count() float64 { return 0.0 }
func (NilCounterFloat64) Dec(i float64) {}
func (NilCounterFloat64) Inc(i float64) {}
func (NilCounterFloat64) Snapshot() CounterFloat64Snapshot { return NilCounterFloat64{} }
// StandardCounterFloat64 is the standard implementation of a CounterFloat64 and uses the
// atomic to manage a single float64 value.
@ -124,11 +99,6 @@ func (c *StandardCounterFloat64) Clear() {
c.floatBits.Store(0)
}
// Count returns the current value.
func (c *StandardCounterFloat64) Count() float64 {
return math.Float64frombits(c.floatBits.Load())
}
// Dec decrements the counter by the given amount.
func (c *StandardCounterFloat64) Dec(v float64) {
atomicAddFloat(&c.floatBits, -v)
@ -140,8 +110,9 @@ func (c *StandardCounterFloat64) Inc(v float64) {
}
// Snapshot returns a read-only copy of the counter.
func (c *StandardCounterFloat64) Snapshot() CounterFloat64 {
return CounterFloat64Snapshot(c.Count())
func (c *StandardCounterFloat64) Snapshot() CounterFloat64Snapshot {
v := math.Float64frombits(c.floatBits.Load())
return counterFloat64Snapshot(v)
}
func atomicAddFloat(fbits *atomic.Uint64, v float64) {

View File

@ -27,7 +27,7 @@ func BenchmarkCounterFloat64Parallel(b *testing.B) {
}()
}
wg.Wait()
if have, want := c.Count(), 10.0*float64(b.N); have != want {
if have, want := c.Snapshot().Count(), 10.0*float64(b.N); have != want {
b.Fatalf("have %f want %f", have, want)
}
}
@ -36,7 +36,7 @@ func TestCounterFloat64Clear(t *testing.T) {
c := NewCounterFloat64()
c.Inc(1.0)
c.Clear()
if count := c.Count(); count != 0 {
if count := c.Snapshot().Count(); count != 0 {
t.Errorf("c.Count(): 0 != %v\n", count)
}
}
@ -44,7 +44,7 @@ func TestCounterFloat64Clear(t *testing.T) {
func TestCounterFloat64Dec1(t *testing.T) {
c := NewCounterFloat64()
c.Dec(1.0)
if count := c.Count(); count != -1.0 {
if count := c.Snapshot().Count(); count != -1.0 {
t.Errorf("c.Count(): -1.0 != %v\n", count)
}
}
@ -52,7 +52,7 @@ func TestCounterFloat64Dec1(t *testing.T) {
func TestCounterFloat64Dec2(t *testing.T) {
c := NewCounterFloat64()
c.Dec(2.0)
if count := c.Count(); count != -2.0 {
if count := c.Snapshot().Count(); count != -2.0 {
t.Errorf("c.Count(): -2.0 != %v\n", count)
}
}
@ -60,7 +60,7 @@ func TestCounterFloat64Dec2(t *testing.T) {
func TestCounterFloat64Inc1(t *testing.T) {
c := NewCounterFloat64()
c.Inc(1.0)
if count := c.Count(); count != 1.0 {
if count := c.Snapshot().Count(); count != 1.0 {
t.Errorf("c.Count(): 1.0 != %v\n", count)
}
}
@ -68,7 +68,7 @@ func TestCounterFloat64Inc1(t *testing.T) {
func TestCounterFloat64Inc2(t *testing.T) {
c := NewCounterFloat64()
c.Inc(2.0)
if count := c.Count(); count != 2.0 {
if count := c.Snapshot().Count(); count != 2.0 {
t.Errorf("c.Count(): 2.0 != %v\n", count)
}
}
@ -85,7 +85,7 @@ func TestCounterFloat64Snapshot(t *testing.T) {
func TestCounterFloat64Zero(t *testing.T) {
c := NewCounterFloat64()
if count := c.Count(); count != 0 {
if count := c.Snapshot().Count(); count != 0 {
t.Errorf("c.Count(): 0 != %v\n", count)
}
}
@ -93,7 +93,7 @@ func TestCounterFloat64Zero(t *testing.T) {
func TestGetOrRegisterCounterFloat64(t *testing.T) {
r := NewRegistry()
NewRegisteredCounterFloat64("foo", r).Inc(47.0)
if c := GetOrRegisterCounterFloat64("foo", r); c.Count() != 47.0 {
if c := GetOrRegisterCounterFloat64("foo", r).Snapshot(); c.Count() != 47.0 {
t.Fatal(c)
}
}

View File

@ -14,7 +14,7 @@ func TestCounterClear(t *testing.T) {
c := NewCounter()
c.Inc(1)
c.Clear()
if count := c.Count(); count != 0 {
if count := c.Snapshot().Count(); count != 0 {
t.Errorf("c.Count(): 0 != %v\n", count)
}
}
@ -22,7 +22,7 @@ func TestCounterClear(t *testing.T) {
func TestCounterDec1(t *testing.T) {
c := NewCounter()
c.Dec(1)
if count := c.Count(); count != -1 {
if count := c.Snapshot().Count(); count != -1 {
t.Errorf("c.Count(): -1 != %v\n", count)
}
}
@ -30,7 +30,7 @@ func TestCounterDec1(t *testing.T) {
func TestCounterDec2(t *testing.T) {
c := NewCounter()
c.Dec(2)
if count := c.Count(); count != -2 {
if count := c.Snapshot().Count(); count != -2 {
t.Errorf("c.Count(): -2 != %v\n", count)
}
}
@ -38,7 +38,7 @@ func TestCounterDec2(t *testing.T) {
func TestCounterInc1(t *testing.T) {
c := NewCounter()
c.Inc(1)
if count := c.Count(); count != 1 {
if count := c.Snapshot().Count(); count != 1 {
t.Errorf("c.Count(): 1 != %v\n", count)
}
}
@ -46,7 +46,7 @@ func TestCounterInc1(t *testing.T) {
func TestCounterInc2(t *testing.T) {
c := NewCounter()
c.Inc(2)
if count := c.Count(); count != 2 {
if count := c.Snapshot().Count(); count != 2 {
t.Errorf("c.Count(): 2 != %v\n", count)
}
}
@ -63,7 +63,7 @@ func TestCounterSnapshot(t *testing.T) {
func TestCounterZero(t *testing.T) {
c := NewCounter()
if count := c.Count(); count != 0 {
if count := c.Snapshot().Count(); count != 0 {
t.Errorf("c.Count(): 0 != %v\n", count)
}
}
@ -71,7 +71,7 @@ func TestCounterZero(t *testing.T) {
func TestGetOrRegisterCounter(t *testing.T) {
r := NewRegistry()
NewRegisteredCounter("foo", r).Inc(47)
if c := GetOrRegisterCounter("foo", r); c.Count() != 47 {
if c := GetOrRegisterCounter("foo", r).Snapshot(); c.Count() != 47 {
t.Fatal(c)
}
}

View File

@ -1,4 +0,0 @@
package metrics
const epsilon = 0.0000000000000001
const epsilonPercentile = .00000000001

View File

@ -7,11 +7,14 @@ import (
"time"
)
type EWMASnapshot interface {
Rate() float64
}
// EWMAs continuously calculate an exponentially-weighted moving average
// based on an outside source of clock ticks.
type EWMA interface {
Rate() float64
Snapshot() EWMA
Snapshot() EWMASnapshot
Tick()
Update(int64)
}
@ -36,40 +39,19 @@ func NewEWMA15() EWMA {
return NewEWMA(1 - math.Exp(-5.0/60.0/15))
}
// EWMASnapshot is a read-only copy of another EWMA.
type EWMASnapshot float64
// ewmaSnapshot is a read-only copy of another EWMA.
type ewmaSnapshot float64
// Rate returns the rate of events per second at the time the snapshot was
// taken.
func (a EWMASnapshot) Rate() float64 { return float64(a) }
// Snapshot returns the snapshot.
func (a EWMASnapshot) Snapshot() EWMA { return a }
// Tick panics.
func (EWMASnapshot) Tick() {
panic("Tick called on an EWMASnapshot")
}
// Update panics.
func (EWMASnapshot) Update(int64) {
panic("Update called on an EWMASnapshot")
}
func (a ewmaSnapshot) Rate() float64 { return float64(a) }
// NilEWMA is a no-op EWMA.
type NilEWMA struct{}
// Rate is a no-op.
func (NilEWMA) Rate() float64 { return 0.0 }
// Snapshot is a no-op.
func (NilEWMA) Snapshot() EWMA { return NilEWMA{} }
// Tick is a no-op.
func (NilEWMA) Tick() {}
// Update is a no-op.
func (NilEWMA) Update(n int64) {}
func (NilEWMA) Snapshot() EWMASnapshot { return (*emptySnapshot)(nil) }
func (NilEWMA) Tick() {}
func (NilEWMA) Update(n int64) {}
// StandardEWMA is the standard implementation of an EWMA and tracks the number
// of uncounted events and processes them on each tick. It uses the
@ -77,37 +59,50 @@ func (NilEWMA) Update(n int64) {}
type StandardEWMA struct {
uncounted atomic.Int64
alpha float64
rate float64
init bool
rate atomic.Uint64
init atomic.Bool
mutex sync.Mutex
}
// Rate returns the moving average rate of events per second.
func (a *StandardEWMA) Rate() float64 {
a.mutex.Lock()
defer a.mutex.Unlock()
return a.rate * float64(time.Second)
}
// Snapshot returns a read-only copy of the EWMA.
func (a *StandardEWMA) Snapshot() EWMA {
return EWMASnapshot(a.Rate())
func (a *StandardEWMA) Snapshot() EWMASnapshot {
r := math.Float64frombits(a.rate.Load()) * float64(time.Second)
return ewmaSnapshot(r)
}
// Tick ticks the clock to update the moving average. It assumes it is called
// every five seconds.
func (a *StandardEWMA) Tick() {
count := a.uncounted.Load()
a.uncounted.Add(-count)
instantRate := float64(count) / float64(5*time.Second)
a.mutex.Lock()
defer a.mutex.Unlock()
if a.init {
a.rate += a.alpha * (instantRate - a.rate)
} else {
a.init = true
a.rate = instantRate
// Optimization to avoid mutex locking in the hot-path.
if a.init.Load() {
a.updateRate(a.fetchInstantRate())
return
}
// Slow-path: this is only needed on the first Tick() and preserves transactional updating
// of init and rate in the else block. The first conditional is needed below because
// a different thread could have set a.init = 1 between the time of the first atomic load and when
// the lock was acquired.
a.mutex.Lock()
if a.init.Load() {
// The fetchInstantRate() uses atomic loading, which is unnecessary in this critical section
// but again, this section is only invoked on the first successful Tick() operation.
a.updateRate(a.fetchInstantRate())
} else {
a.init.Store(true)
a.rate.Store(math.Float64bits(a.fetchInstantRate()))
}
a.mutex.Unlock()
}
func (a *StandardEWMA) fetchInstantRate() float64 {
count := a.uncounted.Swap(0)
return float64(count) / float64(5*time.Second)
}
func (a *StandardEWMA) updateRate(instantRate float64) {
currentRate := math.Float64frombits(a.rate.Load())
currentRate += a.alpha * (instantRate - currentRate)
a.rate.Store(math.Float64bits(currentRate))
}
// Update adds n uncounted events.
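
With the rewrite above, Rate() lives on the read-only EWMASnapshot and the rate is kept as atomic float bits, so Tick() stays off the mutex after initialization. A minimal sketch of driving an EWMA by hand (Tick() is normally driven by a 5-second ticker):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	a := metrics.NewEWMA1() // 1-minute moving average
	a.Update(3)             // three uncounted events
	a.Tick()                // normally invoked every five seconds

	// Reads go through the immutable snapshot; 3 events / 5s = 0.6 events/sec,
	// matching the first expected value in TestEWMA1.
	fmt.Println(a.Snapshot().Rate())
}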

View File

@ -5,6 +5,8 @@ import (
"testing"
)
const epsilon = 0.0000000000000001
func BenchmarkEWMA(b *testing.B) {
a := NewEWMA1()
b.ResetTimer()
@ -14,72 +16,33 @@ func BenchmarkEWMA(b *testing.B) {
}
}
func BenchmarkEWMAParallel(b *testing.B) {
a := NewEWMA1()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
a.Update(1)
a.Tick()
}
})
}
func TestEWMA1(t *testing.T) {
a := NewEWMA1()
a.Update(3)
a.Tick()
if rate := a.Rate(); math.Abs(0.6-rate) > epsilon {
t.Errorf("initial a.Rate(): 0.6 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.22072766470286553-rate) > epsilon {
t.Errorf("1 minute a.Rate(): 0.22072766470286553 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.08120116994196772-rate) > epsilon {
t.Errorf("2 minute a.Rate(): 0.08120116994196772 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.029872241020718428-rate) > epsilon {
t.Errorf("3 minute a.Rate(): 0.029872241020718428 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.01098938333324054-rate) > epsilon {
t.Errorf("4 minute a.Rate(): 0.01098938333324054 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.004042768199451294-rate) > epsilon {
t.Errorf("5 minute a.Rate(): 0.004042768199451294 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.0014872513059998212-rate) > epsilon {
t.Errorf("6 minute a.Rate(): 0.0014872513059998212 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.0005471291793327122-rate) > epsilon {
t.Errorf("7 minute a.Rate(): 0.0005471291793327122 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.00020127757674150815-rate) > epsilon {
t.Errorf("8 minute a.Rate(): 0.00020127757674150815 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(7.404588245200814e-05-rate) > epsilon {
t.Errorf("9 minute a.Rate(): 7.404588245200814e-05 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(2.7239957857491083e-05-rate) > epsilon {
t.Errorf("10 minute a.Rate(): 2.7239957857491083e-05 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(1.0021020474147462e-05-rate) > epsilon {
t.Errorf("11 minute a.Rate(): 1.0021020474147462e-05 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(3.6865274119969525e-06-rate) > epsilon {
t.Errorf("12 minute a.Rate(): 3.6865274119969525e-06 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(1.3561976441886433e-06-rate) > epsilon {
t.Errorf("13 minute a.Rate(): 1.3561976441886433e-06 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(4.989172314621449e-07-rate) > epsilon {
t.Errorf("14 minute a.Rate(): 4.989172314621449e-07 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(1.8354139230109722e-07-rate) > epsilon {
t.Errorf("15 minute a.Rate(): 1.8354139230109722e-07 != %v\n", rate)
for i, want := range []float64{0.6,
0.22072766470286553, 0.08120116994196772, 0.029872241020718428,
0.01098938333324054, 0.004042768199451294, 0.0014872513059998212,
0.0005471291793327122, 0.00020127757674150815, 7.404588245200814e-05,
2.7239957857491083e-05, 1.0021020474147462e-05, 3.6865274119969525e-06,
1.3561976441886433e-06, 4.989172314621449e-07, 1.8354139230109722e-07,
} {
if rate := a.Snapshot().Rate(); math.Abs(want-rate) > epsilon {
t.Errorf("%d minute a.Snapshot().Rate(): %f != %v\n", i, want, rate)
}
elapseMinute(a)
}
}
@ -87,68 +50,17 @@ func TestEWMA5(t *testing.T) {
a := NewEWMA5()
a.Update(3)
a.Tick()
if rate := a.Rate(); math.Abs(0.6-rate) > epsilon {
t.Errorf("initial a.Rate(): 0.6 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.49123845184678905-rate) > epsilon {
t.Errorf("1 minute a.Rate(): 0.49123845184678905 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.4021920276213837-rate) > epsilon {
t.Errorf("2 minute a.Rate(): 0.4021920276213837 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.32928698165641596-rate) > epsilon {
t.Errorf("3 minute a.Rate(): 0.32928698165641596 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.269597378470333-rate) > epsilon {
t.Errorf("4 minute a.Rate(): 0.269597378470333 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.2207276647028654-rate) > epsilon {
t.Errorf("5 minute a.Rate(): 0.2207276647028654 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.18071652714732128-rate) > epsilon {
t.Errorf("6 minute a.Rate(): 0.18071652714732128 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.14795817836496392-rate) > epsilon {
t.Errorf("7 minute a.Rate(): 0.14795817836496392 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.12113791079679326-rate) > epsilon {
t.Errorf("8 minute a.Rate(): 0.12113791079679326 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.09917933293295193-rate) > epsilon {
t.Errorf("9 minute a.Rate(): 0.09917933293295193 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.08120116994196763-rate) > epsilon {
t.Errorf("10 minute a.Rate(): 0.08120116994196763 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.06648189501740036-rate) > epsilon {
t.Errorf("11 minute a.Rate(): 0.06648189501740036 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.05443077197364752-rate) > epsilon {
t.Errorf("12 minute a.Rate(): 0.05443077197364752 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.04456414692860035-rate) > epsilon {
t.Errorf("13 minute a.Rate(): 0.04456414692860035 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.03648603757513079-rate) > epsilon {
t.Errorf("14 minute a.Rate(): 0.03648603757513079 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.0298722410207183831020718428-rate) > epsilon {
t.Errorf("15 minute a.Rate(): 0.0298722410207183831020718428 != %v\n", rate)
for i, want := range []float64{
0.6, 0.49123845184678905, 0.4021920276213837, 0.32928698165641596,
0.269597378470333, 0.2207276647028654, 0.18071652714732128,
0.14795817836496392, 0.12113791079679326, 0.09917933293295193,
0.08120116994196763, 0.06648189501740036, 0.05443077197364752,
0.04456414692860035, 0.03648603757513079, 0.0298722410207183831020718428,
} {
if rate := a.Snapshot().Rate(); math.Abs(want-rate) > epsilon {
t.Errorf("%d minute a.Snapshot().Rate(): %f != %v\n", i, want, rate)
}
elapseMinute(a)
}
}
@ -156,68 +68,17 @@ func TestEWMA15(t *testing.T) {
a := NewEWMA15()
a.Update(3)
a.Tick()
if rate := a.Rate(); math.Abs(0.6-rate) > epsilon {
t.Errorf("initial a.Rate(): 0.6 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.5613041910189706-rate) > epsilon {
t.Errorf("1 minute a.Rate(): 0.5613041910189706 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.5251039914257684-rate) > epsilon {
t.Errorf("2 minute a.Rate(): 0.5251039914257684 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.4912384518467888184678905-rate) > epsilon {
t.Errorf("3 minute a.Rate(): 0.4912384518467888184678905 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.459557003018789-rate) > epsilon {
t.Errorf("4 minute a.Rate(): 0.459557003018789 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.4299187863442732-rate) > epsilon {
t.Errorf("5 minute a.Rate(): 0.4299187863442732 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.4021920276213831-rate) > epsilon {
t.Errorf("6 minute a.Rate(): 0.4021920276213831 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.37625345116383313-rate) > epsilon {
t.Errorf("7 minute a.Rate(): 0.37625345116383313 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.3519877317060185-rate) > epsilon {
t.Errorf("8 minute a.Rate(): 0.3519877317060185 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.3292869816564153165641596-rate) > epsilon {
t.Errorf("9 minute a.Rate(): 0.3292869816564153165641596 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.3080502714195546-rate) > epsilon {
t.Errorf("10 minute a.Rate(): 0.3080502714195546 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.2881831806538789-rate) > epsilon {
t.Errorf("11 minute a.Rate(): 0.2881831806538789 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.26959737847033216-rate) > epsilon {
t.Errorf("12 minute a.Rate(): 0.26959737847033216 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.2522102307052083-rate) > epsilon {
t.Errorf("13 minute a.Rate(): 0.2522102307052083 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.23594443252115815-rate) > epsilon {
t.Errorf("14 minute a.Rate(): 0.23594443252115815 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); math.Abs(0.2207276647028646247028654470286553-rate) > epsilon {
t.Errorf("15 minute a.Rate(): 0.2207276647028646247028654470286553 != %v\n", rate)
for i, want := range []float64{
0.6, 0.5613041910189706, 0.5251039914257684, 0.4912384518467888184678905,
0.459557003018789, 0.4299187863442732, 0.4021920276213831,
0.37625345116383313, 0.3519877317060185, 0.3292869816564153165641596,
0.3080502714195546, 0.2881831806538789, 0.26959737847033216,
0.2522102307052083, 0.23594443252115815, 0.2207276647028646247028654470286553,
} {
if rate := a.Snapshot().Rate(); math.Abs(want-rate) > epsilon {
t.Errorf("%d minute a.Snapshot().Rate(): %f != %v\n", i, want, rate)
}
elapseMinute(a)
}
}

View File

@ -109,25 +109,25 @@ func (exp *exp) getInfo(name string) *expvar.String {
return v
}
func (exp *exp) publishCounter(name string, metric metrics.Counter) {
func (exp *exp) publishCounter(name string, metric metrics.CounterSnapshot) {
v := exp.getInt(name)
v.Set(metric.Count())
}
func (exp *exp) publishCounterFloat64(name string, metric metrics.CounterFloat64) {
func (exp *exp) publishCounterFloat64(name string, metric metrics.CounterFloat64Snapshot) {
v := exp.getFloat(name)
v.Set(metric.Count())
}
func (exp *exp) publishGauge(name string, metric metrics.Gauge) {
func (exp *exp) publishGauge(name string, metric metrics.GaugeSnapshot) {
v := exp.getInt(name)
v.Set(metric.Value())
}
func (exp *exp) publishGaugeFloat64(name string, metric metrics.GaugeFloat64) {
func (exp *exp) publishGaugeFloat64(name string, metric metrics.GaugeFloat64Snapshot) {
exp.getFloat(name).Set(metric.Value())
}
func (exp *exp) publishGaugeInfo(name string, metric metrics.GaugeInfo) {
func (exp *exp) publishGaugeInfo(name string, metric metrics.GaugeInfoSnapshot) {
exp.getInfo(name).Set(metric.Value().String())
}
@ -176,28 +176,28 @@ func (exp *exp) publishTimer(name string, metric metrics.Timer) {
func (exp *exp) publishResettingTimer(name string, metric metrics.ResettingTimer) {
t := metric.Snapshot()
ps := t.Percentiles([]float64{50, 75, 95, 99})
exp.getInt(name + ".count").Set(int64(len(t.Values())))
ps := t.Percentiles([]float64{0.50, 0.75, 0.95, 0.99})
exp.getInt(name + ".count").Set(int64(t.Count()))
exp.getFloat(name + ".mean").Set(t.Mean())
exp.getInt(name + ".50-percentile").Set(ps[0])
exp.getInt(name + ".75-percentile").Set(ps[1])
exp.getInt(name + ".95-percentile").Set(ps[2])
exp.getInt(name + ".99-percentile").Set(ps[3])
exp.getFloat(name + ".50-percentile").Set(ps[0])
exp.getFloat(name + ".75-percentile").Set(ps[1])
exp.getFloat(name + ".95-percentile").Set(ps[2])
exp.getFloat(name + ".99-percentile").Set(ps[3])
}
func (exp *exp) syncToExpvar() {
exp.registry.Each(func(name string, i interface{}) {
switch i := i.(type) {
case metrics.Counter:
exp.publishCounter(name, i)
exp.publishCounter(name, i.Snapshot())
case metrics.CounterFloat64:
exp.publishCounterFloat64(name, i)
exp.publishCounterFloat64(name, i.Snapshot())
case metrics.Gauge:
exp.publishGauge(name, i)
exp.publishGauge(name, i.Snapshot())
case metrics.GaugeFloat64:
exp.publishGaugeFloat64(name, i)
exp.publishGaugeFloat64(name, i.Snapshot())
case metrics.GaugeInfo:
exp.publishGaugeInfo(name, i)
exp.publishGaugeInfo(name, i.Snapshot())
case metrics.Histogram:
exp.publishHistogram(name, i)
case metrics.Meter:

View File

@ -2,13 +2,18 @@ package metrics
import "sync/atomic"
// GaugeSnapshot contains a readonly int64.
type GaugeSnapshot interface {
Value() int64
}
// Gauges hold an int64 value that can be set arbitrarily.
type Gauge interface {
Snapshot() Gauge
Snapshot() GaugeSnapshot
Update(int64)
UpdateIfGt(int64)
Dec(int64)
Inc(int64)
Value() int64
}
// GetOrRegisterGauge returns an existing Gauge or constructs and registers a
@ -38,65 +43,20 @@ func NewRegisteredGauge(name string, r Registry) Gauge {
return c
}
// NewFunctionalGauge constructs a new FunctionalGauge.
func NewFunctionalGauge(f func() int64) Gauge {
if !Enabled {
return NilGauge{}
}
return &FunctionalGauge{value: f}
}
// NewRegisteredFunctionalGauge constructs and registers a new StandardGauge.
func NewRegisteredFunctionalGauge(name string, r Registry, f func() int64) Gauge {
c := NewFunctionalGauge(f)
if nil == r {
r = DefaultRegistry
}
r.Register(name, c)
return c
}
// GaugeSnapshot is a read-only copy of another Gauge.
type GaugeSnapshot int64
// Snapshot returns the snapshot.
func (g GaugeSnapshot) Snapshot() Gauge { return g }
// Update panics.
func (GaugeSnapshot) Update(int64) {
panic("Update called on a GaugeSnapshot")
}
// Dec panics.
func (GaugeSnapshot) Dec(int64) {
panic("Dec called on a GaugeSnapshot")
}
// Inc panics.
func (GaugeSnapshot) Inc(int64) {
panic("Inc called on a GaugeSnapshot")
}
// gaugeSnapshot is a read-only copy of another Gauge.
type gaugeSnapshot int64
// Value returns the value at the time the snapshot was taken.
func (g GaugeSnapshot) Value() int64 { return int64(g) }
func (g gaugeSnapshot) Value() int64 { return int64(g) }
// NilGauge is a no-op Gauge.
type NilGauge struct{}
// Snapshot is a no-op.
func (NilGauge) Snapshot() Gauge { return NilGauge{} }
// Update is a no-op.
func (NilGauge) Update(v int64) {}
// Dec is a no-op.
func (NilGauge) Dec(i int64) {}
// Inc is a no-op.
func (NilGauge) Inc(i int64) {}
// Value is a no-op.
func (NilGauge) Value() int64 { return 0 }
func (NilGauge) Snapshot() GaugeSnapshot { return (*emptySnapshot)(nil) }
func (NilGauge) Update(v int64) {}
func (NilGauge) UpdateIfGt(v int64) {}
func (NilGauge) Dec(i int64) {}
func (NilGauge) Inc(i int64) {}
// StandardGauge is the standard implementation of a Gauge and uses the
// sync/atomic package to manage a single int64 value.
@ -105,8 +65,8 @@ type StandardGauge struct {
}
// Snapshot returns a read-only copy of the gauge.
func (g *StandardGauge) Snapshot() Gauge {
return GaugeSnapshot(g.Value())
func (g *StandardGauge) Snapshot() GaugeSnapshot {
return gaugeSnapshot(g.value.Load())
}
// Update updates the gauge's value.
@ -114,9 +74,17 @@ func (g *StandardGauge) Update(v int64) {
g.value.Store(v)
}
// Value returns the gauge's current value.
func (g *StandardGauge) Value() int64 {
return g.value.Load()
// UpdateIfGt updates the gauge's value if v is larger than the current value.
func (g *StandardGauge) UpdateIfGt(v int64) {
for {
exist := g.value.Load()
if exist >= v {
break
}
if g.value.CompareAndSwap(exist, v) {
break
}
}
}
// Dec decrements the gauge's current value by the given amount.
@ -128,31 +96,3 @@ func (g *StandardGauge) Dec(i int64) {
func (g *StandardGauge) Inc(i int64) {
g.value.Add(i)
}
// FunctionalGauge returns value from given function
type FunctionalGauge struct {
value func() int64
}
// Value returns the gauge's current value.
func (g FunctionalGauge) Value() int64 {
return g.value()
}
// Snapshot returns the snapshot.
func (g FunctionalGauge) Snapshot() Gauge { return GaugeSnapshot(g.Value()) }
// Update panics.
func (FunctionalGauge) Update(int64) {
panic("Update called on a FunctionalGauge")
}
// Dec panics.
func (FunctionalGauge) Dec(int64) {
panic("Dec called on a FunctionalGauge")
}
// Inc panics.
func (FunctionalGauge) Inc(int64) {
panic("Inc called on a FunctionalGauge")
}
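
Two things change for gauges above: reads move onto GaugeSnapshot, and UpdateIfGt is added as a compare-and-swap loop that only ever raises the stored value. A minimal sketch, assuming metrics.Enabled is set:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	metrics.Enabled = true // assumed package-level switch

	g := metrics.NewGauge()
	g.Update(47)
	g.UpdateIfGt(40) // no effect, 40 < 47
	g.UpdateIfGt(50) // raises the value to 50

	fmt.Println(g.Snapshot().Value()) // 50
}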

View File

@ -5,13 +5,16 @@ import (
"sync/atomic"
)
// GaugeFloat64s hold a float64 value that can be set arbitrarily.
type GaugeFloat64 interface {
Snapshot() GaugeFloat64
Update(float64)
type GaugeFloat64Snapshot interface {
Value() float64
}
// GaugeFloat64 holds a float64 value that can be set arbitrarily.
type GaugeFloat64 interface {
Snapshot() GaugeFloat64Snapshot
Update(float64)
}
// GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a
// new StandardGaugeFloat64.
func GetOrRegisterGaugeFloat64(name string, r Registry) GaugeFloat64 {
@ -39,49 +42,18 @@ func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 {
return c
}
// NewFunctionalGauge constructs a new FunctionalGauge.
func NewFunctionalGaugeFloat64(f func() float64) GaugeFloat64 {
if !Enabled {
return NilGaugeFloat64{}
}
return &FunctionalGaugeFloat64{value: f}
}
// NewRegisteredFunctionalGauge constructs and registers a new StandardGauge.
func NewRegisteredFunctionalGaugeFloat64(name string, r Registry, f func() float64) GaugeFloat64 {
c := NewFunctionalGaugeFloat64(f)
if nil == r {
r = DefaultRegistry
}
r.Register(name, c)
return c
}
// GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64.
type GaugeFloat64Snapshot float64
// Snapshot returns the snapshot.
func (g GaugeFloat64Snapshot) Snapshot() GaugeFloat64 { return g }
// Update panics.
func (GaugeFloat64Snapshot) Update(float64) {
panic("Update called on a GaugeFloat64Snapshot")
}
// gaugeFloat64Snapshot is a read-only copy of another GaugeFloat64.
type gaugeFloat64Snapshot float64
// Value returns the value at the time the snapshot was taken.
func (g GaugeFloat64Snapshot) Value() float64 { return float64(g) }
func (g gaugeFloat64Snapshot) Value() float64 { return float64(g) }
// NilGaugeFloat64 is a no-op GaugeFloat64.
type NilGaugeFloat64 struct{}
// Snapshot is a no-op.
func (NilGaugeFloat64) Snapshot() GaugeFloat64 { return NilGaugeFloat64{} }
// Update is a no-op.
func (NilGaugeFloat64) Update(v float64) {}
// Value is a no-op.
func (NilGaugeFloat64) Value() float64 { return 0.0 }
func (NilGaugeFloat64) Snapshot() GaugeFloat64Snapshot { return NilGaugeFloat64{} }
func (NilGaugeFloat64) Update(v float64) {}
func (NilGaugeFloat64) Value() float64 { return 0.0 }
// StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses
// atomic to manage a single float64 value.
@ -90,34 +62,12 @@ type StandardGaugeFloat64 struct {
}
// Snapshot returns a read-only copy of the gauge.
func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 {
return GaugeFloat64Snapshot(g.Value())
func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64Snapshot {
v := math.Float64frombits(g.floatBits.Load())
return gaugeFloat64Snapshot(v)
}
// Update updates the gauge's value.
func (g *StandardGaugeFloat64) Update(v float64) {
g.floatBits.Store(math.Float64bits(v))
}
// Value returns the gauge's current value.
func (g *StandardGaugeFloat64) Value() float64 {
return math.Float64frombits(g.floatBits.Load())
}
// FunctionalGaugeFloat64 returns value from given function
type FunctionalGaugeFloat64 struct {
value func() float64
}
// Value returns the gauge's current value.
func (g FunctionalGaugeFloat64) Value() float64 {
return g.value()
}
// Snapshot returns the snapshot.
func (g FunctionalGaugeFloat64) Snapshot() GaugeFloat64 { return GaugeFloat64Snapshot(g.Value()) }
// Update panics.
func (FunctionalGaugeFloat64) Update(float64) {
panic("Update called on a FunctionalGaugeFloat64")
}
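
StandardGaugeFloat64 keeps its value as raw IEEE-754 bits in an atomic integer (math.Float64bits / math.Float64frombits), the same trick the EWMA rewrite uses for its rate. A standalone sketch of that pattern, with atomicFloat64 as a made-up illustration type rather than anything in the package:

package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

// atomicFloat64 stores a float64 as its bit pattern inside an atomic.Uint64,
// mirroring how StandardGaugeFloat64 and StandardEWMA hold their values.
type atomicFloat64 struct {
	bits atomic.Uint64
}

func (f *atomicFloat64) Store(v float64) { f.bits.Store(math.Float64bits(v)) }
func (f *atomicFloat64) Load() float64   { return math.Float64frombits(f.bits.Load()) }

func main() {
	var f atomicFloat64
	f.Store(34567.89)
	fmt.Println(f.Load()) // 34567.89
}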

View File

@ -26,19 +26,11 @@ func BenchmarkGaugeFloat64Parallel(b *testing.B) {
}()
}
wg.Wait()
if have, want := c.Value(), float64(b.N-1); have != want {
if have, want := c.Snapshot().Value(), float64(b.N-1); have != want {
b.Fatalf("have %f want %f", have, want)
}
}
func TestGaugeFloat64(t *testing.T) {
g := NewGaugeFloat64()
g.Update(47.0)
if v := g.Value(); 47.0 != v {
t.Errorf("g.Value(): 47.0 != %v\n", v)
}
}
func TestGaugeFloat64Snapshot(t *testing.T) {
g := NewGaugeFloat64()
g.Update(47.0)
@ -53,28 +45,7 @@ func TestGetOrRegisterGaugeFloat64(t *testing.T) {
r := NewRegistry()
NewRegisteredGaugeFloat64("foo", r).Update(47.0)
t.Logf("registry: %v", r)
if g := GetOrRegisterGaugeFloat64("foo", r); 47.0 != g.Value() {
t.Fatal(g)
}
}
func TestFunctionalGaugeFloat64(t *testing.T) {
var counter float64
fg := NewFunctionalGaugeFloat64(func() float64 {
counter++
return counter
})
fg.Value()
fg.Value()
if counter != 2 {
t.Error("counter != 2")
}
}
func TestGetOrRegisterFunctionalGaugeFloat64(t *testing.T) {
r := NewRegistry()
NewRegisteredFunctionalGaugeFloat64("foo", r, func() float64 { return 47 })
if g := GetOrRegisterGaugeFloat64("foo", r); g.Value() != 47 {
if g := GetOrRegisterGaugeFloat64("foo", r).Snapshot(); 47.0 != g.Value() {
t.Fatal(g)
}
}

View File

@ -5,14 +5,17 @@ import (
"sync"
)
// GaugeInfos hold a GaugeInfoValue value that can be set arbitrarily.
type GaugeInfo interface {
Snapshot() GaugeInfo
Update(GaugeInfoValue)
type GaugeInfoSnapshot interface {
Value() GaugeInfoValue
}
// GaugeInfoValue is a mappng of (string) keys to (string) values
// GaugeInfos hold a GaugeInfoValue value that can be set arbitrarily.
type GaugeInfo interface {
Update(GaugeInfoValue)
Snapshot() GaugeInfoSnapshot
}
// GaugeInfoValue is a mapping of keys to values
type GaugeInfoValue map[string]string
func (val GaugeInfoValue) String() string {
@ -49,49 +52,17 @@ func NewRegisteredGaugeInfo(name string, r Registry) GaugeInfo {
return c
}
// NewFunctionalGauge constructs a new FunctionalGauge.
func NewFunctionalGaugeInfo(f func() GaugeInfoValue) GaugeInfo {
if !Enabled {
return NilGaugeInfo{}
}
return &FunctionalGaugeInfo{value: f}
}
// NewRegisteredFunctionalGauge constructs and registers a new StandardGauge.
func NewRegisteredFunctionalGaugeInfo(name string, r Registry, f func() GaugeInfoValue) GaugeInfo {
c := NewFunctionalGaugeInfo(f)
if nil == r {
r = DefaultRegistry
}
r.Register(name, c)
return c
}
// GaugeInfoSnapshot is a read-only copy of another GaugeInfo.
type GaugeInfoSnapshot GaugeInfoValue
// Snapshot returns the snapshot.
func (g GaugeInfoSnapshot) Snapshot() GaugeInfo { return g }
// Update panics.
func (GaugeInfoSnapshot) Update(GaugeInfoValue) {
panic("Update called on a GaugeInfoSnapshot")
}
// gaugeInfoSnapshot is a read-only copy of another GaugeInfo.
type gaugeInfoSnapshot GaugeInfoValue
// Value returns the value at the time the snapshot was taken.
func (g GaugeInfoSnapshot) Value() GaugeInfoValue { return GaugeInfoValue(g) }
func (g gaugeInfoSnapshot) Value() GaugeInfoValue { return GaugeInfoValue(g) }
// NilGaugeInfo is a no-op GaugeInfo.
type NilGaugeInfo struct{}
// Snapshot is a no-op.
func (NilGaugeInfo) Snapshot() GaugeInfo { return NilGaugeInfo{} }
// Update is a no-op.
func (NilGaugeInfo) Update(v GaugeInfoValue) {}
// Value is a no-op.
func (NilGaugeInfo) Value() GaugeInfoValue { return GaugeInfoValue{} }
func (NilGaugeInfo) Snapshot() GaugeInfoSnapshot { return NilGaugeInfo{} }
func (NilGaugeInfo) Update(v GaugeInfoValue) {}
func (NilGaugeInfo) Value() GaugeInfoValue { return GaugeInfoValue{} }
// StandardGaugeInfo is the standard implementation of a GaugeInfo and uses
// sync.Mutex to manage a single string value.
@ -101,8 +72,8 @@ type StandardGaugeInfo struct {
}
// Snapshot returns a read-only copy of the gauge.
func (g *StandardGaugeInfo) Snapshot() GaugeInfo {
return GaugeInfoSnapshot(g.Value())
func (g *StandardGaugeInfo) Snapshot() GaugeInfoSnapshot {
return gaugeInfoSnapshot(g.value)
}
// Update updates the gauge's value.
@ -111,34 +82,3 @@ func (g *StandardGaugeInfo) Update(v GaugeInfoValue) {
defer g.mutex.Unlock()
g.value = v
}
// Value returns the gauge's current value.
func (g *StandardGaugeInfo) Value() GaugeInfoValue {
g.mutex.Lock()
defer g.mutex.Unlock()
return g.value
}
// FunctionalGaugeInfo returns value from given function
type FunctionalGaugeInfo struct {
value func() GaugeInfoValue
}
// Value returns the gauge's current value.
func (g FunctionalGaugeInfo) Value() GaugeInfoValue {
return g.value()
}
// Value returns the gauge's current value in JSON string format
func (g FunctionalGaugeInfo) ValueJsonString() string {
data, _ := json.Marshal(g.value())
return string(data)
}
// Snapshot returns the snapshot.
func (g FunctionalGaugeInfo) Snapshot() GaugeInfo { return GaugeInfoSnapshot(g.Value()) }
// Update panics.
func (FunctionalGaugeInfo) Update(GaugeInfoValue) {
panic("Update called on a FunctionalGaugeInfo")
}
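
GaugeInfo is now written with Update and read through GaugeInfoSnapshot; the value is a GaugeInfoValue (a string-to-string map) whose String() method renders JSON. A minimal sketch, assuming metrics.Enabled is set:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	metrics.Enabled = true // assumed package-level switch

	g := metrics.NewGaugeInfo()
	g.Update(metrics.GaugeInfoValue{"chain_id": "5", "version": "1.13.1"})

	// Value() is the map held at snapshot time; String() renders it as JSON.
	fmt.Println(g.Snapshot().Value().String()) // {"chain_id":"5","version":"1.13.1"}
}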

View File

@ -1,7 +1,6 @@
package metrics
import (
"strconv"
"testing"
)
@ -14,22 +13,14 @@ func TestGaugeInfoJsonString(t *testing.T) {
},
)
want := `{"anotherKey":"any_string_value","chain_id":"5","third_key":"anything"}`
if have := g.Value().String(); have != want {
t.Errorf("\nhave: %v\nwant: %v\n", have, want)
}
}
func TestGaugeInfoSnapshot(t *testing.T) {
g := NewGaugeInfo()
g.Update(GaugeInfoValue{"value": "original"})
snapshot := g.Snapshot() // Snapshot @chainid 5
original := g.Snapshot()
g.Update(GaugeInfoValue{"value": "updated"})
// The 'g' should be updated
if have, want := g.Value().String(), `{"value":"updated"}`; have != want {
if have := original.Value().String(); have != want {
t.Errorf("\nhave: %v\nwant: %v\n", have, want)
}
// Snapshot should be unupdated
if have, want := snapshot.Value().String(), `{"value":"original"}`; have != want {
if have, want := g.Snapshot().Value().String(), `{"value":"updated"}`; have != want {
t.Errorf("\nhave: %v\nwant: %v\n", have, want)
}
}
@ -38,38 +29,8 @@ func TestGetOrRegisterGaugeInfo(t *testing.T) {
r := NewRegistry()
NewRegisteredGaugeInfo("foo", r).Update(
GaugeInfoValue{"chain_id": "5"})
g := GetOrRegisterGaugeInfo("foo", r)
g := GetOrRegisterGaugeInfo("foo", r).Snapshot()
if have, want := g.Value().String(), `{"chain_id":"5"}`; have != want {
t.Errorf("have\n%v\nwant\n%v\n", have, want)
}
}
func TestFunctionalGaugeInfo(t *testing.T) {
info := GaugeInfoValue{"chain_id": "0"}
counter := 1
// A "functional" gauge invokes the method to obtain the value
fg := NewFunctionalGaugeInfo(func() GaugeInfoValue {
info["chain_id"] = strconv.Itoa(counter)
counter++
return info
})
fg.Value()
fg.Value()
if have, want := info["chain_id"], "2"; have != want {
t.Errorf("have %v want %v", have, want)
}
}
func TestGetOrRegisterFunctionalGaugeInfo(t *testing.T) {
r := NewRegistry()
NewRegisteredFunctionalGaugeInfo("foo", r, func() GaugeInfoValue {
return GaugeInfoValue{
"chain_id": "5",
}
})
want := `{"chain_id":"5"}`
have := GetOrRegisterGaugeInfo("foo", r).Value().String()
if have != want {
t.Errorf("have\n%v\nwant\n%v\n", have, want)
}
}

View File

@ -1,7 +1,6 @@
package metrics
import (
"fmt"
"testing"
)
@ -13,14 +12,6 @@ func BenchmarkGauge(b *testing.B) {
}
}
func TestGauge(t *testing.T) {
g := NewGauge()
g.Update(int64(47))
if v := g.Value(); v != 47 {
t.Errorf("g.Value(): 47 != %v\n", v)
}
}
func TestGaugeSnapshot(t *testing.T) {
g := NewGauge()
g.Update(int64(47))
@ -34,35 +25,7 @@ func TestGaugeSnapshot(t *testing.T) {
func TestGetOrRegisterGauge(t *testing.T) {
r := NewRegistry()
NewRegisteredGauge("foo", r).Update(47)
if g := GetOrRegisterGauge("foo", r); g.Value() != 47 {
if g := GetOrRegisterGauge("foo", r); g.Snapshot().Value() != 47 {
t.Fatal(g)
}
}
func TestFunctionalGauge(t *testing.T) {
var counter int64
fg := NewFunctionalGauge(func() int64 {
counter++
return counter
})
fg.Value()
fg.Value()
if counter != 2 {
t.Error("counter != 2")
}
}
func TestGetOrRegisterFunctionalGauge(t *testing.T) {
r := NewRegistry()
NewRegisteredFunctionalGauge("foo", r, func() int64 { return 47 })
if g := GetOrRegisterGauge("foo", r); g.Value() != 47 {
t.Fatal(g)
}
}
func ExampleGetOrRegisterGauge() {
m := "server.bytes_sent"
g := GetOrRegisterGauge(m, nil)
g.Update(47)
fmt.Println(g.Value()) // Output: 47
}

View File

@ -66,15 +66,15 @@ func graphite(c *GraphiteConfig) error {
c.Registry.Each(func(name string, i interface{}) {
switch metric := i.(type) {
case Counter:
fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now)
fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Snapshot().Count(), now)
case CounterFloat64:
fmt.Fprintf(w, "%s.%s.count %f %d\n", c.Prefix, name, metric.Count(), now)
fmt.Fprintf(w, "%s.%s.count %f %d\n", c.Prefix, name, metric.Snapshot().Count(), now)
case Gauge:
fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now)
fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Snapshot().Value(), now)
case GaugeFloat64:
fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now)
fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Snapshot().Value(), now)
case GaugeInfo:
fmt.Fprintf(w, "%s.%s.value %s %d\n", c.Prefix, name, metric.Value().String(), now)
fmt.Fprintf(w, "%s.%s.value %s %d\n", c.Prefix, name, metric.Snapshot().Value().String(), now)
case Histogram:
h := metric.Snapshot()
ps := h.Percentiles(c.Percentiles)

View File

@ -1,20 +1,14 @@
package metrics
type HistogramSnapshot interface {
SampleSnapshot
}
// Histograms calculate distribution statistics from a series of int64 values.
type Histogram interface {
Clear()
Count() int64
Max() int64
Mean() float64
Min() int64
Percentile(float64) float64
Percentiles([]float64) []float64
Sample() Sample
Snapshot() Histogram
StdDev() float64
Sum() int64
Update(int64)
Variance() float64
Snapshot() HistogramSnapshot
}
// GetOrRegisterHistogram returns an existing Histogram or constructs and
@ -54,108 +48,12 @@ func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram {
return c
}
// HistogramSnapshot is a read-only copy of another Histogram.
type HistogramSnapshot struct {
sample *SampleSnapshot
}
// Clear panics.
func (*HistogramSnapshot) Clear() {
panic("Clear called on a HistogramSnapshot")
}
// Count returns the number of samples recorded at the time the snapshot was
// taken.
func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() }
// Max returns the maximum value in the sample at the time the snapshot was
// taken.
func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() }
// Mean returns the mean of the values in the sample at the time the snapshot
// was taken.
func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() }
// Min returns the minimum value in the sample at the time the snapshot was
// taken.
func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() }
// Percentile returns an arbitrary percentile of values in the sample at the
// time the snapshot was taken.
func (h *HistogramSnapshot) Percentile(p float64) float64 {
return h.sample.Percentile(p)
}
// Percentiles returns a slice of arbitrary percentiles of values in the sample
// at the time the snapshot was taken.
func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 {
return h.sample.Percentiles(ps)
}
// Sample returns the Sample underlying the histogram.
func (h *HistogramSnapshot) Sample() Sample { return h.sample }
// Snapshot returns the snapshot.
func (h *HistogramSnapshot) Snapshot() Histogram { return h }
// StdDev returns the standard deviation of the values in the sample at the
// time the snapshot was taken.
func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() }
// Sum returns the sum in the sample at the time the snapshot was taken.
func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() }
// Update panics.
func (*HistogramSnapshot) Update(int64) {
panic("Update called on a HistogramSnapshot")
}
// Variance returns the variance of inputs at the time the snapshot was taken.
func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() }
// NilHistogram is a no-op Histogram.
type NilHistogram struct{}
// Clear is a no-op.
func (NilHistogram) Clear() {}
// Count is a no-op.
func (NilHistogram) Count() int64 { return 0 }
// Max is a no-op.
func (NilHistogram) Max() int64 { return 0 }
// Mean is a no-op.
func (NilHistogram) Mean() float64 { return 0.0 }
// Min is a no-op.
func (NilHistogram) Min() int64 { return 0 }
// Percentile is a no-op.
func (NilHistogram) Percentile(p float64) float64 { return 0.0 }
// Percentiles is a no-op.
func (NilHistogram) Percentiles(ps []float64) []float64 {
return make([]float64, len(ps))
}
// Sample is a no-op.
func (NilHistogram) Sample() Sample { return NilSample{} }
// Snapshot is a no-op.
func (NilHistogram) Snapshot() Histogram { return NilHistogram{} }
// StdDev is a no-op.
func (NilHistogram) StdDev() float64 { return 0.0 }
// Sum is a no-op.
func (NilHistogram) Sum() int64 { return 0 }
// Update is a no-op.
func (NilHistogram) Update(v int64) {}
// Variance is a no-op.
func (NilHistogram) Variance() float64 { return 0.0 }
func (NilHistogram) Clear() {}
func (NilHistogram) Snapshot() HistogramSnapshot { return (*emptySnapshot)(nil) }
func (NilHistogram) Update(v int64) {}
// StandardHistogram is the standard implementation of a Histogram and uses a
// Sample to bound its memory use.
@ -166,46 +64,10 @@ type StandardHistogram struct {
// Clear clears the histogram and its sample.
func (h *StandardHistogram) Clear() { h.sample.Clear() }
// Count returns the number of samples recorded since the histogram was last
// cleared.
func (h *StandardHistogram) Count() int64 { return h.sample.Count() }
// Max returns the maximum value in the sample.
func (h *StandardHistogram) Max() int64 { return h.sample.Max() }
// Mean returns the mean of the values in the sample.
func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() }
// Min returns the minimum value in the sample.
func (h *StandardHistogram) Min() int64 { return h.sample.Min() }
// Percentile returns an arbitrary percentile of the values in the sample.
func (h *StandardHistogram) Percentile(p float64) float64 {
return h.sample.Percentile(p)
}
// Percentiles returns a slice of arbitrary percentiles of the values in the
// sample.
func (h *StandardHistogram) Percentiles(ps []float64) []float64 {
return h.sample.Percentiles(ps)
}
// Sample returns the Sample underlying the histogram.
func (h *StandardHistogram) Sample() Sample { return h.sample }
// Snapshot returns a read-only copy of the histogram.
func (h *StandardHistogram) Snapshot() Histogram {
return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)}
func (h *StandardHistogram) Snapshot() HistogramSnapshot {
return h.sample.Snapshot()
}
// StdDev returns the standard deviation of the values in the sample.
func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() }
// Sum returns the sum in the sample.
func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() }
// Update samples a new value.
func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) }
// Variance returns the variance of the values in the sample.
func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() }
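
The Histogram interface above shrinks to Clear/Update/Snapshot, and every statistic is read from the HistogramSnapshot (which is just the SampleSnapshot of the backing sample). A minimal sketch, assuming metrics.Enabled is set; note that percentile arguments are fractions in [0,1]:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	metrics.Enabled = true // assumed package-level switch

	h := metrics.NewHistogram(metrics.NewUniformSample(100))
	for i := int64(1); i <= 10; i++ {
		h.Update(i)
	}

	s := h.Snapshot() // read-only view of the backing sample
	fmt.Println(s.Count(), s.Min(), s.Max(), s.Mean())
	fmt.Println(s.Percentiles([]float64{0.5, 0.95}))
}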

View File

@ -14,7 +14,7 @@ func TestGetOrRegisterHistogram(t *testing.T) {
r := NewRegistry()
s := NewUniformSample(100)
NewRegisteredHistogram("foo", r, s).Update(47)
if h := GetOrRegisterHistogram("foo", r, s); h.Count() != 1 {
if h := GetOrRegisterHistogram("foo", r, s).Snapshot(); h.Count() != 1 {
t.Fatal(h)
}
}
@ -24,11 +24,11 @@ func TestHistogram10000(t *testing.T) {
for i := 1; i <= 10000; i++ {
h.Update(int64(i))
}
testHistogram10000(t, h)
testHistogram10000(t, h.Snapshot())
}
func TestHistogramEmpty(t *testing.T) {
h := NewHistogram(NewUniformSample(100))
h := NewHistogram(NewUniformSample(100)).Snapshot()
if count := h.Count(); count != 0 {
t.Errorf("h.Count(): 0 != %v\n", count)
}
@ -66,7 +66,7 @@ func TestHistogramSnapshot(t *testing.T) {
testHistogram10000(t, snapshot)
}
func testHistogram10000(t *testing.T, h Histogram) {
func testHistogram10000(t *testing.T, h HistogramSnapshot) {
if count := h.Count(); count != 10000 {
t.Errorf("h.Count(): 10000 != %v\n", count)
}

48
metrics/inactive.go Normal file
View File

@ -0,0 +1,48 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package metrics
// compile-time checks that interfaces are implemented.
var (
_ SampleSnapshot = (*emptySnapshot)(nil)
_ HistogramSnapshot = (*emptySnapshot)(nil)
_ CounterSnapshot = (*emptySnapshot)(nil)
_ GaugeSnapshot = (*emptySnapshot)(nil)
_ MeterSnapshot = (*emptySnapshot)(nil)
_ EWMASnapshot = (*emptySnapshot)(nil)
_ TimerSnapshot = (*emptySnapshot)(nil)
)
type emptySnapshot struct{}
func (*emptySnapshot) Count() int64 { return 0 }
func (*emptySnapshot) Max() int64 { return 0 }
func (*emptySnapshot) Mean() float64 { return 0.0 }
func (*emptySnapshot) Min() int64 { return 0 }
func (*emptySnapshot) Percentile(p float64) float64 { return 0.0 }
func (*emptySnapshot) Percentiles(ps []float64) []float64 { return make([]float64, len(ps)) }
func (*emptySnapshot) Size() int { return 0 }
func (*emptySnapshot) StdDev() float64 { return 0.0 }
func (*emptySnapshot) Sum() int64 { return 0 }
func (*emptySnapshot) Values() []int64 { return []int64{} }
func (*emptySnapshot) Variance() float64 { return 0.0 }
func (*emptySnapshot) Value() int64 { return 0 }
func (*emptySnapshot) Rate() float64 { return 0.0 }
func (*emptySnapshot) Rate1() float64 { return 0.0 }
func (*emptySnapshot) Rate5() float64 { return 0.0 }
func (*emptySnapshot) Rate15() float64 { return 0.0 }
func (*emptySnapshot) RateMean() float64 { return 0.0 }
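
inactive.go backs every Nil metric with a single (*emptySnapshot)(nil). Because none of the methods touch the receiver, calling them on a nil pointer is safe, and one type satisfies all of the snapshot interfaces at once. A standalone sketch of the same idea, with Reader and zero as made-up illustration names:

package main

import "fmt"

// Reader stands in for one of the snapshot interfaces.
type Reader interface {
	Count() int64
	Rate() float64
}

// zero plays the role of emptySnapshot: its methods never dereference the
// receiver, so a nil *zero is a perfectly usable Reader.
type zero struct{}

func (*zero) Count() int64  { return 0 }
func (*zero) Rate() float64 { return 0.0 }

// Compile-time interface check, mirroring the var block in inactive.go.
var _ Reader = (*zero)(nil)

func main() {
	var r Reader = (*zero)(nil)
	fmt.Println(r.Count(), r.Rate()) // 0 0
}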

View File

@ -11,13 +11,13 @@ func readMeter(namespace, name string, i interface{}) (string, map[string]interf
case metrics.Counter:
measurement := fmt.Sprintf("%s%s.count", namespace, name)
fields := map[string]interface{}{
"value": metric.Count(),
"value": metric.Snapshot().Count(),
}
return measurement, fields
case metrics.CounterFloat64:
measurement := fmt.Sprintf("%s%s.count", namespace, name)
fields := map[string]interface{}{
"value": metric.Count(),
"value": metric.Snapshot().Count(),
}
return measurement, fields
case metrics.Gauge:
@ -99,20 +99,19 @@ func readMeter(namespace, name string, i interface{}) (string, map[string]interf
return measurement, fields
case metrics.ResettingTimer:
t := metric.Snapshot()
if len(t.Values()) == 0 {
if t.Count() == 0 {
break
}
ps := t.Percentiles([]float64{50, 95, 99})
val := t.Values()
ps := t.Percentiles([]float64{0.50, 0.95, 0.99})
measurement := fmt.Sprintf("%s%s.span", namespace, name)
fields := map[string]interface{}{
"count": len(val),
"max": val[len(val)-1],
"count": t.Count(),
"max": t.Max(),
"mean": t.Mean(),
"min": val[0],
"p50": ps[0],
"p95": ps[1],
"p99": ps[2],
"min": t.Min(),
"p50": int(ps[0]),
"p95": int(ps[1]),
"p99": int(ps[2]),
}
return measurement, fields
}
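
The reporter above now reads count, min, max and mean straight from the ResettingTimer snapshot and passes percentiles as fractions in [0,1] instead of 0-100. A minimal reading sketch, assuming metrics.Enabled is set and that Update(time.Duration) is the timer's recording call, as in the geth metrics package:

package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	metrics.Enabled = true // assumed package-level switch

	t := metrics.NewResettingTimer()
	for _, d := range []time.Duration{
		10 * time.Millisecond, 11 * time.Millisecond,
		12 * time.Millisecond, 120 * time.Millisecond,
	} {
		t.Update(d)
	}

	s := t.Snapshot()
	ps := s.Percentiles([]float64{0.50, 0.95, 0.99}) // fractions, not 0-100
	fmt.Println(s.Count(), s.Min(), s.Max(), s.Mean())
	fmt.Println(ps)
}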

View File

@ -96,7 +96,7 @@ func TestExampleV2(t *testing.T) {
}
if have != want {
t.Errorf("\nhave:\n%v\nwant:\n%v\n", have, want)
t.Logf("have vs want:\n %v", findFirstDiffPos(have, want))
t.Logf("have vs want:\n%v", findFirstDiffPos(have, want))
}
}

View File

@ -1,3 +1,5 @@
goth.system/cpu/schedlatency.histogram count=5645i,max=41943040i,mean=1819544.0410983171,min=0i,p25=0,p50=0,p75=7168,p95=16777216,p99=29360128,p999=33554432,p9999=33554432,stddev=6393570.217198883,variance=40877740122252.57 978307200000000000
goth.system/memory/pauses.histogram count=14i,max=229376i,mean=50066.28571428572,min=5120i,p25=10240,p50=32768,p75=57344,p95=196608,p99=196608,p999=196608,p9999=196608,stddev=54726.062410783874,variance=2994941906.9890113 978307200000000000
goth.test/counter.count value=12345 978307200000000000
goth.test/counter_float64.count value=54321.98 978307200000000000
goth.test/gauge.gauge value=23456i 978307200000000000
@ -5,5 +7,5 @@ goth.test/gauge_float64.gauge value=34567.89 978307200000000000
goth.test/gauge_info.gauge value="{\"arch\":\"amd64\",\"commit\":\"7caa2d8163ae3132c1c2d6978c76610caee2d949\",\"os\":\"linux\",\"protocol_versions\":\"64 65 66\",\"version\":\"1.10.18-unstable\"}" 978307200000000000
goth.test/histogram.histogram count=3i,max=3i,mean=2,min=1i,p25=1,p50=2,p75=3,p95=3,p99=3,p999=3,p9999=3,stddev=0.816496580927726,variance=0.6666666666666666 978307200000000000
goth.test/meter.meter count=0i,m1=0,m15=0,m5=0,mean=0 978307200000000000
goth.test/resetting_timer.span count=6i,max=120000000i,mean=30000000,min=10000000i,p50=12000000i,p95=120000000i,p99=120000000i 978307200000000000
goth.test/resetting_timer.span count=6i,max=120000000i,mean=30000000,min=10000000i,p50=12500000i,p95=120000000i,p99=120000000i 978307200000000000
goth.test/timer.timer count=6i,m1=0,m15=0,m5=0,max=120000000i,mean=38333333.333333336,meanrate=0,min=20000000i,p50=22500000,p75=48000000,p95=120000000,p99=120000000,p999=120000000,p9999=120000000,stddev=36545253.529775314,variance=1335555555555555.2 978307200000000000

View File

@ -1,3 +1,5 @@
goth.system/cpu/schedlatency.histogram count=5645i,max=41943040i,mean=1819544.0410983171,min=0i,p25=0,p50=0,p75=7168,p95=16777216,p99=29360128,p999=33554432,p9999=33554432,stddev=6393570.217198883,variance=40877740122252.57 978307200000000000
goth.system/memory/pauses.histogram count=14i,max=229376i,mean=50066.28571428572,min=5120i,p25=10240,p50=32768,p75=57344,p95=196608,p99=196608,p999=196608,p9999=196608,stddev=54726.062410783874,variance=2994941906.9890113 978307200000000000
goth.test/counter.count value=12345 978307200000000000
goth.test/counter_float64.count value=54321.98 978307200000000000
goth.test/gauge.gauge value=23456i 978307200000000000
@ -5,5 +7,5 @@ goth.test/gauge_float64.gauge value=34567.89 978307200000000000
goth.test/gauge_info.gauge value="{\"arch\":\"amd64\",\"commit\":\"7caa2d8163ae3132c1c2d6978c76610caee2d949\",\"os\":\"linux\",\"protocol_versions\":\"64 65 66\",\"version\":\"1.10.18-unstable\"}" 978307200000000000
goth.test/histogram.histogram count=3i,max=3i,mean=2,min=1i,p25=1,p50=2,p75=3,p95=3,p99=3,p999=3,p9999=3,stddev=0.816496580927726,variance=0.6666666666666666 978307200000000000
goth.test/meter.meter count=0i,m1=0,m15=0,m5=0,mean=0 978307200000000000
goth.test/resetting_timer.span count=6i,max=120000000i,mean=30000000,min=10000000i,p50=12000000i,p95=120000000i,p99=120000000i 978307200000000000
goth.test/resetting_timer.span count=6i,max=120000000i,mean=30000000,min=10000000i,p50=12500000i,p95=120000000i,p99=120000000i 978307200000000000
goth.test/timer.timer count=6i,m1=0,m15=0,m5=0,max=120000000i,mean=38333333.333333336,meanrate=0,min=20000000i,p50=22500000,p75=48000000,p95=120000000,p99=120000000,p999=120000000,p9999=120000000,stddev=36545253.529775314,variance=1335555555555555.2 978307200000000000

View File

@ -17,6 +17,9 @@
package internal
import (
"bytes"
"encoding/gob"
metrics2 "runtime/metrics"
"time"
"github.com/ethereum/go-ethereum/metrics"
@ -38,7 +41,15 @@ func ExampleMetrics() metrics.Registry {
"commit": "7caa2d8163ae3132c1c2d6978c76610caee2d949",
"protocol_versions": "64 65 66",
})
metrics.NewRegisteredHistogram("test/histogram", registry, metrics.NewSampleSnapshot(3, []int64{1, 2, 3}))
{
s := metrics.NewUniformSample(3)
s.Update(1)
s.Update(2)
s.Update(3)
//metrics.NewRegisteredHistogram("test/histogram", registry, metrics.NewSampleSnapshot(3, []int64{1, 2, 3}))
metrics.NewRegisteredHistogram("test/histogram", registry, s)
}
registry.Register("test/meter", metrics.NewInactiveMeter())
{
timer := metrics.NewRegisteredResettingTimer("test/resetting_timer", registry)
@ -60,5 +71,25 @@ func ExampleMetrics() metrics.Registry {
timer.Stop()
}
registry.Register("test/empty_resetting_timer", metrics.NewResettingTimer().Snapshot())
{ // go runtime metrics
var sLatency = "7\xff\x81\x03\x01\x01\x10Float64Histogram\x01\xff\x82\x00\x01\x02\x01\x06Counts\x01\xff\x84\x00\x01\aBuckets\x01\xff\x86\x00\x00\x00\x16\xff\x83\x02\x01\x01\b[]uint64\x01\xff\x84\x00\x01\x06\x00\x00\x17\xff\x85\x02\x01\x01\t[]float64\x01\xff\x86\x00\x01\b\x00\x00\xfe\x06T\xff\x82\x01\xff\xa2\x00\xfe\r\xef\x00\x01\x02\x02\x04\x05\x04\b\x15\x17 B?6.L;$!2) \x1a? \x190aH7FY6#\x190\x1d\x14\x10\x1b\r\t\x04\x03\x01\x01\x00\x03\x02\x00\x03\x05\x05\x02\x02\x06\x04\v\x06\n\x15\x18\x13'&.\x12=H/L&\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xff\xa3\xfe\xf0\xff\x00\xf8\x95\xd6&\xe8\v.q>\xf8\x95\xd6&\xe8\v.\x81>\xf8\xdfA:\xdc\x11ʼn>\xf8\x95\xd6&\xe8\v.\x91>\xf8:\x8c0\xe2\x8ey\x95>\xf8\xdfA:\xdc\x11ř>\xf8\x84\xf7C֔\x10\x9e>\xf8\x95\xd6&\xe8\v.\xa1>\xf8:\x8c0\xe2\x8ey\xa5>\xf8\xdfA:\xdc\x11ũ>\xf8\x84\xf7C֔\x10\xae>\xf8\x95\xd6&\xe8\v.\xb1>\xf8:\x8c0\xe2\x8ey\xb5>\xf8\xdfA:\xdc\x11Ź>\xf8\x84\xf7C֔\x10\xbe>\xf8\x95\xd6&\xe8\v.\xc1>\xf8:\x8c0\xe2\x8ey\xc5>\xf8\xdfA:\xdc\x11\xc5\xc9>\xf8\x84\xf7C֔\x10\xce>\xf8\x95\xd6&\xe8\v.\xd1>\xf8:\x8c0\xe2\x8ey\xd5>\xf8\xdfA:\xdc\x11\xc5\xd9>\xf8\x84\xf7C֔\x10\xde>\xf8\x95\xd6&\xe8\v.\xe1>\xf8:\x8c0\xe2\x8ey\xe5>\xf8\xdfA:\xdc\x11\xc5\xe9>\xf8\x84\xf7C֔\x10\xee>\xf8\x95\xd6&\xe8\v.\xf1>\xf8:\x8c0\xe2\x8ey\xf5>\xf8\xdfA:\xdc\x11\xc5\xf9>\xf8\x84\xf7C֔\x10\xfe>\xf8\x95\xd6&\xe8\v.\x01?\xf8:\x8c0\xe2\x8ey\x05?\xf8\xdfA:\xdc\x11\xc5\t?\xf8\x84\xf7C֔\x10\x0e?\xf8\x95\xd6&\xe8\v.\x11?\xf8:\x8c0\xe2\x8ey\x15?\xf8\xdfA:\xdc\x11\xc5\x19?\xf8\x84\xf7C֔\x10\x1e?\xf8\x95\xd6&\xe8\v.!?\xf8:\x8c0\xe2\x8ey%?\xf8\xdfA:\xdc\x11\xc5)?\xf8\x84\xf7C֔\x10.?\xf8\x95\xd6&\xe8\v.1?\xf8:\x8c0\xe2\x8ey5?\xf8\xdfA:\xdc\x11\xc59?\xf8\x84\xf7C֔\x10>?\xf8\x95\xd6&\xe8\v.A?\xf8:\x8c0\xe2\x8eyE?\xf8\xdfA:\xdc\x11\xc5I?\xf8\x84\xf7C֔\x10N?\xf8\x95\xd6&\xe8\v.Q?\xf8:\x8c0\xe2\x8eyU?\xf8\xdfA:\xdc\x11\xc5Y?\xf8\x84\xf7C֔\x10^?\xf8\x95\xd6&\xe8\v.a?\xf8:\x8c0\xe2\x8eye?\xf8\xdfA:\xdc\x11\xc5i?\xf8\x84\xf7C֔\x10n?\xf8\x95\xd6&\xe8\v.q?\xf8:\x8c0\xe2\x8eyu?\xf8\xdfA:\xdc\x11\xc5y?\xf8\x84\xf7C֔\x10~?\xf8\x95\xd6&\xe8\v.\x81?\xf8:\x8c0\xe2\x8ey\x85?\xf8\xdfA:\xdc\x11ʼn?\xf8\x84\xf7C֔\x10\x8e?\xf8\x95\xd6&\xe8\v.\x91?\xf8:\x8c0\xe2\x8ey\x95?\xf8\xdfA:\xdc\x11ř?\xf8\x84\xf7C֔\x10\x9e?\xf8\x95\xd6&\xe8\v.\xa1?\xf8:\x8c0\xe2\x8ey\xa5?\xf8\xdfA:\xdc\x11ũ?\xf8\x84\xf7C֔\x10\xae?\xf8\x95\xd6&\xe8\v.\xb1?\xf8:\x8c0\xe2\x8ey\xb5?\xf8\xdfA:\xdc\x11Ź?\xf8\x84\xf7C֔\x10\xbe?\xf8\x95\xd6&\xe8\v.\xc1?\xf8:\x8c0\xe2\x8ey\xc5?\xf8\xdfA:\xdc\x11\xc5\xc9?\xf8\x84\xf7C֔\x10\xce?\xf8\x95\xd6&\xe8\v.\xd1?\xf8:\x8c0\xe2\x8ey\xd5?\xf8\xdfA:\xdc\x11\xc5\xd9?\xf8\x84\xf7C֔\x10\xde?\xf8\x95\xd6&\xe8\v.\xe1?\xf8:\x8c0\xe2\x8ey\xe5?\xf8\xdfA:\xdc\x11\xc5\xe9?\xf8\x84\xf7C֔\x10\xee?\xf8\x95\xd6&\xe8\v.\xf1?\xf8:\x8c0\xe2\x8ey\xf5?\xf8\xdfA:\xdc\x11\xc5\xf9?\xf8\x84\xf7C֔\x10\xfe?\xf8\x95\xd6&\xe8\v.\x01@\xf8:\x8c0\xe2\x8ey\x05@\xf8\xdfA:\xdc\x11\xc5\t@\xf8\x84\xf7C֔\x10\x0e@\xf8\x95\xd6&\xe8\v.\x11@\xf8:\x8c0\xe2\x8ey\x15@\xf8\xdfA:\xdc\x11\xc5\x19@\xf8\x84\xf7C֔\x10\x1e@\xf8\x95\xd6&\xe8\v.!@\xf8:\x8c0\xe2\x8ey%@\xf8\xdfA:\xdc\x11\xc5)@\xf8\x84\xf7C֔\x10.@\xf8\x95\xd6&\xe8\v.1@\xf8:\x8c0\xe2\x8ey5@\xf8\xdfA:\xdc\x11\xc59@\xf8\x84\xf7C֔\x10>@\xf8\x95\xd6&\xe8\v.A@\xf8:\x8c0\xe2\x8eyE@\xf8\xdfA:\xdc\x11\xc5I@\xf8\x84
\xf7C֔\x10N@\xf8\x95\xd6&\xe8\v.Q@\xf8:\x8c0\xe2\x8eyU@\xf8\xdfA:\xdc\x11\xc5Y@\xf8\x84\xf7C֔\x10^@\xf8\x95\xd6&\xe8\v.a@\xf8:\x8c0\xe2\x8eye@\xf8\xdfA:\xdc\x11\xc5i@\xf8\x84\xf7C֔\x10n@\xf8\x95\xd6&\xe8\v.q@\xf8:\x8c0\xe2\x8eyu@\xf8\xdfA:\xdc\x11\xc5y@\xf8\x84\xf7C֔\x10~@\xf8\x95\xd6&\xe8\v.\x81@\xf8:\x8c0\xe2\x8ey\x85@\xf8\xdfA:\xdc\x11ʼn@\xf8\x84\xf7C֔\x10\x8e@\xf8\x95\xd6&\xe8\v.\x91@\xf8:\x8c0\xe2\x8ey\x95@\xf8\xdfA:\xdc\x11ř@\xf8\x84\xf7C֔\x10\x9e@\xf8\x95\xd6&\xe8\v.\xa1@\xf8:\x8c0\xe2\x8ey\xa5@\xf8\xdfA:\xdc\x11ũ@\xf8\x84\xf7C֔\x10\xae@\xf8\x95\xd6&\xe8\v.\xb1@\xf8:\x8c0\xe2\x8ey\xb5@\xf8\xdfA:\xdc\x11Ź@\xf8\x84\xf7C֔\x10\xbe@\xf8\x95\xd6&\xe8\v.\xc1@\xf8:\x8c0\xe2\x8ey\xc5@\xf8\xdfA:\xdc\x11\xc5\xc9@\xf8\x84\xf7C֔\x10\xce@\xf8\x95\xd6&\xe8\v.\xd1@\xf8:\x8c0\xe2\x8ey\xd5@\xf8\xdfA:\xdc\x11\xc5\xd9@\xf8\x84\xf7C֔\x10\xde@\xf8\x95\xd6&\xe8\v.\xe1@\xf8:\x8c0\xe2\x8ey\xe5@\xf8\xdfA:\xdc\x11\xc5\xe9@\xf8\x84\xf7C֔\x10\xee@\xf8\x95\xd6&\xe8\v.\xf1@\xf8:\x8c0\xe2\x8ey\xf5@\xf8\xdfA:\xdc\x11\xc5\xf9@\xf8\x84\xf7C֔\x10\xfe@\xf8\x95\xd6&\xe8\v.\x01A\xfe\xf0\x7f\x00"
var gcPauses = "7\xff\x81\x03\x01\x01\x10Float64Histogram\x01\xff\x82\x00\x01\x02\x01\x06Counts\x01\xff\x84\x00\x01\aBuckets\x01\xff\x86\x00\x00\x00\x16\xff\x83\x02\x01\x01\b[]uint64\x01\xff\x84\x00\x01\x06\x00\x00\x17\xff\x85\x02\x01\x01\t[]float64\x01\xff\x86\x00\x01\b\x00\x00\xfe\x06R\xff\x82\x01\xff\xa2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x01\x01\x00\x01\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x00\x02\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xff\xa3\xfe\xf0\xff\x00\xf8\x95\xd6&\xe8\v.q>\xf8\x95\xd6&\xe8\v.\x81>\xf8\xdfA:\xdc\x11ʼn>\xf8\x95\xd6&\xe8\v.\x91>\xf8:\x8c0\xe2\x8ey\x95>\xf8\xdfA:\xdc\x11ř>\xf8\x84\xf7C֔\x10\x9e>\xf8\x95\xd6&\xe8\v.\xa1>\xf8:\x8c0\xe2\x8ey\xa5>\xf8\xdfA:\xdc\x11ũ>\xf8\x84\xf7C֔\x10\xae>\xf8\x95\xd6&\xe8\v.\xb1>\xf8:\x8c0\xe2\x8ey\xb5>\xf8\xdfA:\xdc\x11Ź>\xf8\x84\xf7C֔\x10\xbe>\xf8\x95\xd6&\xe8\v.\xc1>\xf8:\x8c0\xe2\x8ey\xc5>\xf8\xdfA:\xdc\x11\xc5\xc9>\xf8\x84\xf7C֔\x10\xce>\xf8\x95\xd6&\xe8\v.\xd1>\xf8:\x8c0\xe2\x8ey\xd5>\xf8\xdfA:\xdc\x11\xc5\xd9>\xf8\x84\xf7C֔\x10\xde>\xf8\x95\xd6&\xe8\v.\xe1>\xf8:\x8c0\xe2\x8ey\xe5>\xf8\xdfA:\xdc\x11\xc5\xe9>\xf8\x84\xf7C֔\x10\xee>\xf8\x95\xd6&\xe8\v.\xf1>\xf8:\x8c0\xe2\x8ey\xf5>\xf8\xdfA:\xdc\x11\xc5\xf9>\xf8\x84\xf7C֔\x10\xfe>\xf8\x95\xd6&\xe8\v.\x01?\xf8:\x8c0\xe2\x8ey\x05?\xf8\xdfA:\xdc\x11\xc5\t?\xf8\x84\xf7C֔\x10\x0e?\xf8\x95\xd6&\xe8\v.\x11?\xf8:\x8c0\xe2\x8ey\x15?\xf8\xdfA:\xdc\x11\xc5\x19?\xf8\x84\xf7C֔\x10\x1e?\xf8\x95\xd6&\xe8\v.!?\xf8:\x8c0\xe2\x8ey%?\xf8\xdfA:\xdc\x11\xc5)?\xf8\x84\xf7C֔\x10.?\xf8\x95\xd6&\xe8\v.1?\xf8:\x8c0\xe2\x8ey5?\xf8\xdfA:\xdc\x11\xc59?\xf8\x84\xf7C֔\x10>?\xf8\x95\xd6&\xe8\v.A?\xf8:\x8c0\xe2\x8eyE?\xf8\xdfA:\xdc\x11\xc5I?\xf8\x84\xf7C֔\x10N?\xf8\x95\xd6&\xe8\v.Q?\xf8:\x8c0\xe2\x8eyU?\xf8\xdfA:\xdc\x11\xc5Y?\xf8\x84\xf7C֔\x10^?\xf8\x95\xd6&\xe8\v.a?\xf8:\x8c0\xe2\x8eye?\xf8\xdfA:\xdc\x11\xc5i?\xf8\x84\xf7C֔\x10n?\xf8\x95\xd6&\xe8\v.q?\xf8:\x8c0\xe2\x8eyu?\xf8\xdfA:\xdc\x11\xc5y?\xf8\x84\xf7C֔\x10~?\xf8\x95\xd6&\xe8\v.\x81?\xf8:\x8c0\xe2\x8ey\x85?\xf8\xdfA:\xdc\x11ʼn?\xf8\x84\xf7C֔\x10\x8e?\xf8\x95\xd6&\xe8\v.\x91?\xf8:\x8c0\xe2\x8ey\x95?\xf8\xdfA:\xdc\x11ř?\xf8\x84\xf7C֔\x10\x9e?\xf8\x95\xd6&\xe8\v.\xa1?\xf8:\x8c0\xe2\x8ey\xa5?\xf8\xdfA:\xdc\x11ũ?\xf8\x84\xf7C֔\x10\xae?\xf8\x95\xd6&\xe8\v.\xb1?\xf8:\x8c0\xe2\x8ey\xb5?\xf8\xdfA:\xdc\x11Ź?\xf8\x84\xf7C֔\x10\xbe?\xf8\x95\xd6&\xe8\v.\xc1?\xf8:\x8c0\xe2\x8ey\xc5?\xf8\xdfA:\xdc\x11\xc5\xc9?\xf8\x84\xf7C֔\x10\xce?\xf8\x95\xd6&\xe8\v.\xd1?\xf8:\x8c0\xe2\x8ey\xd5?\xf8\xdfA:\xdc\x11\xc5\xd9?\xf8\x84\xf7C֔\x10\xde?\xf8\x95\xd6&\xe8\v.\xe1?\xf8:\x8c0\xe2\x8ey\xe5?\xf8\xdfA:\xdc\x11\xc5\xe9?\xf8\x84\xf7C֔\x10\xee?\xf8\x95\xd6&\xe8\v.\xf1?\xf8:\x8c0\xe2\x8ey\xf5?\xf8\xdfA:\xdc\x11\xc5\xf9?\xf8\x84\xf7C֔\x10\xfe?\xf8\x95\xd6&\xe8\v.\x01@\xf8:\x8c0\xe2\x8ey\x05@\xf8\xdfA:\xdc\x11\xc5\t@\xf8\x84\xf7C֔\x10\x0e@\xf8\x95\xd6&\xe8\v.\x11@\xf8:\x8c0\xe2\x8ey\x15@\xf8\xdfA:\xdc\x11\xc5\x19@\xf8\x84\xf7C֔\x10\x1e@\xf8\x95\xd6&\xe8\v.!@\xf8:\x8c0\xe2\x8ey%@\xf8\xdfA:\xdc\x11\xc5)@\xf8\x84\xf7C֔\x10.@\xf8\x95\xd6&\xe8\v.1@\xf8:\x8c0\xe2\x8ey5@\xf8\xdfA:\xdc\x11\xc5
9@\xf8\x84\xf7C֔\x10>@\xf8\x95\xd6&\xe8\v.A@\xf8:\x8c0\xe2\x8eyE@\xf8\xdfA:\xdc\x11\xc5I@\xf8\x84\xf7C֔\x10N@\xf8\x95\xd6&\xe8\v.Q@\xf8:\x8c0\xe2\x8eyU@\xf8\xdfA:\xdc\x11\xc5Y@\xf8\x84\xf7C֔\x10^@\xf8\x95\xd6&\xe8\v.a@\xf8:\x8c0\xe2\x8eye@\xf8\xdfA:\xdc\x11\xc5i@\xf8\x84\xf7C֔\x10n@\xf8\x95\xd6&\xe8\v.q@\xf8:\x8c0\xe2\x8eyu@\xf8\xdfA:\xdc\x11\xc5y@\xf8\x84\xf7C֔\x10~@\xf8\x95\xd6&\xe8\v.\x81@\xf8:\x8c0\xe2\x8ey\x85@\xf8\xdfA:\xdc\x11ʼn@\xf8\x84\xf7C֔\x10\x8e@\xf8\x95\xd6&\xe8\v.\x91@\xf8:\x8c0\xe2\x8ey\x95@\xf8\xdfA:\xdc\x11ř@\xf8\x84\xf7C֔\x10\x9e@\xf8\x95\xd6&\xe8\v.\xa1@\xf8:\x8c0\xe2\x8ey\xa5@\xf8\xdfA:\xdc\x11ũ@\xf8\x84\xf7C֔\x10\xae@\xf8\x95\xd6&\xe8\v.\xb1@\xf8:\x8c0\xe2\x8ey\xb5@\xf8\xdfA:\xdc\x11Ź@\xf8\x84\xf7C֔\x10\xbe@\xf8\x95\xd6&\xe8\v.\xc1@\xf8:\x8c0\xe2\x8ey\xc5@\xf8\xdfA:\xdc\x11\xc5\xc9@\xf8\x84\xf7C֔\x10\xce@\xf8\x95\xd6&\xe8\v.\xd1@\xf8:\x8c0\xe2\x8ey\xd5@\xf8\xdfA:\xdc\x11\xc5\xd9@\xf8\x84\xf7C֔\x10\xde@\xf8\x95\xd6&\xe8\v.\xe1@\xf8:\x8c0\xe2\x8ey\xe5@\xf8\xdfA:\xdc\x11\xc5\xe9@\xf8\x84\xf7C֔\x10\xee@\xf8\x95\xd6&\xe8\v.\xf1@\xf8:\x8c0\xe2\x8ey\xf5@\xf8\xdfA:\xdc\x11\xc5\xf9@\xf8\x84\xf7C֔\x10\xfe@\xf8\x95\xd6&\xe8\v.\x01A\xfe\xf0\x7f\x00"
var secondsToNs = float64(time.Second)
dserialize := func(data string) *metrics2.Float64Histogram {
var res metrics2.Float64Histogram
if err := gob.NewDecoder(bytes.NewReader([]byte(data))).Decode(&res); err != nil {
panic(err)
}
return &res
}
cpuSchedLatency := metrics.RuntimeHistogramFromData(secondsToNs, dserialize(sLatency))
registry.Register("system/cpu/schedlatency", cpuSchedLatency)
memPauses := metrics.RuntimeHistogramFromData(secondsToNs, dserialize(gcPauses))
registry.Register("system/memory/pauses", memPauses)
}
return registry
}

View File

@ -0,0 +1,27 @@
package internal
import (
"bytes"
"encoding/gob"
"fmt"
metrics2 "runtime/metrics"
"testing"
"time"
"github.com/ethereum/go-ethereum/metrics"
)
func TestCollectRuntimeMetrics(t *testing.T) {
t.Skip("Only used for generating testdata")
serialize := func(path string, histogram *metrics2.Float64Histogram) {
var f = new(bytes.Buffer)
if err := gob.NewEncoder(f).Encode(histogram); err != nil {
panic(err)
}
fmt.Printf("var %v = %q\n", path, f.Bytes())
}
time.Sleep(2 * time.Second)
stats := metrics.ReadRuntimeStats()
serialize("schedlatency", stats.SchedLatency)
serialize("gcpauses", stats.GCPauses)
}

View File

@ -61,16 +61,16 @@ func (rep *Reporter) Run() {
// calculate sum of squares from data provided by metrics.Histogram
// see http://en.wikipedia.org/wiki/Standard_deviation#Rapid_calculation_methods
func sumSquares(s metrics.Sample) float64 {
count := float64(s.Count())
sumSquared := math.Pow(count*s.Mean(), 2)
sumSquares := math.Pow(count*s.StdDev(), 2) + sumSquared/count
func sumSquares(icount int64, mean, stDev float64) float64 {
count := float64(icount)
sumSquared := math.Pow(count*mean, 2)
sumSquares := math.Pow(count*stDev, 2) + sumSquared/count
if math.IsNaN(sumSquares) {
return 0.0
}
return sumSquares
}
func sumSquaresTimer(t metrics.Timer) float64 {
func sumSquaresTimer(t metrics.TimerSnapshot) float64 {
count := float64(t.Count())
sumSquared := math.Pow(count*t.Mean(), 2)
sumSquares := math.Pow(count*t.StdDev(), 2) + sumSquared/count
@ -97,9 +97,10 @@ func (rep *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot B
measurement[Period] = rep.Interval.Seconds()
switch m := metric.(type) {
case metrics.Counter:
if m.Count() > 0 {
ms := m.Snapshot()
if ms.Count() > 0 {
measurement[Name] = fmt.Sprintf("%s.%s", name, "count")
measurement[Value] = float64(m.Count())
measurement[Value] = float64(ms.Count())
measurement[Attributes] = map[string]interface{}{
DisplayUnitsLong: Operations,
DisplayUnitsShort: OperationsShort,
@ -108,9 +109,9 @@ func (rep *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot B
snapshot.Counters = append(snapshot.Counters, measurement)
}
case metrics.CounterFloat64:
if m.Count() > 0 {
if count := m.Snapshot().Count(); count > 0 {
measurement[Name] = fmt.Sprintf("%s.%s", name, "count")
measurement[Value] = m.Count()
measurement[Value] = count
measurement[Attributes] = map[string]interface{}{
DisplayUnitsLong: Operations,
DisplayUnitsShort: OperationsShort,
@ -120,44 +121,45 @@ func (rep *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot B
}
case metrics.Gauge:
measurement[Name] = name
measurement[Value] = float64(m.Value())
measurement[Value] = float64(m.Snapshot().Value())
snapshot.Gauges = append(snapshot.Gauges, measurement)
case metrics.GaugeFloat64:
measurement[Name] = name
measurement[Value] = m.Value()
measurement[Value] = m.Snapshot().Value()
snapshot.Gauges = append(snapshot.Gauges, measurement)
case metrics.GaugeInfo:
measurement[Name] = name
measurement[Value] = m.Value()
measurement[Value] = m.Snapshot().Value()
snapshot.Gauges = append(snapshot.Gauges, measurement)
case metrics.Histogram:
if m.Count() > 0 {
ms := m.Snapshot()
if ms.Count() > 0 {
gauges := make([]Measurement, histogramGaugeCount)
s := m.Sample()
measurement[Name] = fmt.Sprintf("%s.%s", name, "hist")
measurement[Count] = uint64(s.Count())
measurement[Max] = float64(s.Max())
measurement[Min] = float64(s.Min())
measurement[Sum] = float64(s.Sum())
measurement[SumSquares] = sumSquares(s)
measurement[Count] = uint64(ms.Count())
measurement[Max] = float64(ms.Max())
measurement[Min] = float64(ms.Min())
measurement[Sum] = float64(ms.Sum())
measurement[SumSquares] = sumSquares(ms.Count(), ms.Mean(), ms.StdDev())
gauges[0] = measurement
for i, p := range rep.Percentiles {
gauges[i+1] = Measurement{
Name: fmt.Sprintf("%s.%.2f", measurement[Name], p),
Value: s.Percentile(p),
Value: ms.Percentile(p),
Period: measurement[Period],
}
}
snapshot.Gauges = append(snapshot.Gauges, gauges...)
}
case metrics.Meter:
ms := m.Snapshot()
measurement[Name] = name
measurement[Value] = float64(m.Count())
measurement[Value] = float64(ms.Count())
snapshot.Counters = append(snapshot.Counters, measurement)
snapshot.Gauges = append(snapshot.Gauges,
Measurement{
Name: fmt.Sprintf("%s.%s", name, "1min"),
Value: m.Rate1(),
Value: ms.Rate1(),
Period: int64(rep.Interval.Seconds()),
Attributes: map[string]interface{}{
DisplayUnitsLong: Operations,
@ -167,7 +169,7 @@ func (rep *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot B
},
Measurement{
Name: fmt.Sprintf("%s.%s", name, "5min"),
Value: m.Rate5(),
Value: ms.Rate5(),
Period: int64(rep.Interval.Seconds()),
Attributes: map[string]interface{}{
DisplayUnitsLong: Operations,
@ -177,7 +179,7 @@ func (rep *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot B
},
Measurement{
Name: fmt.Sprintf("%s.%s", name, "15min"),
Value: m.Rate15(),
Value: ms.Rate15(),
Period: int64(rep.Interval.Seconds()),
Attributes: map[string]interface{}{
DisplayUnitsLong: Operations,
@ -187,26 +189,27 @@ func (rep *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot B
},
)
case metrics.Timer:
ms := m.Snapshot()
measurement[Name] = name
measurement[Value] = float64(m.Count())
measurement[Value] = float64(ms.Count())
snapshot.Counters = append(snapshot.Counters, measurement)
if m.Count() > 0 {
if ms.Count() > 0 {
libratoName := fmt.Sprintf("%s.%s", name, "timer.mean")
gauges := make([]Measurement, histogramGaugeCount)
gauges[0] = Measurement{
Name: libratoName,
Count: uint64(m.Count()),
Sum: m.Mean() * float64(m.Count()),
Max: float64(m.Max()),
Min: float64(m.Min()),
SumSquares: sumSquaresTimer(m),
Count: uint64(ms.Count()),
Sum: ms.Mean() * float64(ms.Count()),
Max: float64(ms.Max()),
Min: float64(ms.Min()),
SumSquares: sumSquaresTimer(ms),
Period: int64(rep.Interval.Seconds()),
Attributes: rep.TimerAttributes,
}
for i, p := range rep.Percentiles {
gauges[i+1] = Measurement{
Name: fmt.Sprintf("%s.timer.%2.0f", name, p*100),
Value: m.Percentile(p),
Value: ms.Percentile(p),
Period: int64(rep.Interval.Seconds()),
Attributes: rep.TimerAttributes,
}
@ -215,7 +218,7 @@ func (rep *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot B
snapshot.Gauges = append(snapshot.Gauges,
Measurement{
Name: fmt.Sprintf("%s.%s", name, "rate.1min"),
Value: m.Rate1(),
Value: ms.Rate1(),
Period: int64(rep.Interval.Seconds()),
Attributes: map[string]interface{}{
DisplayUnitsLong: Operations,
@ -225,7 +228,7 @@ func (rep *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot B
},
Measurement{
Name: fmt.Sprintf("%s.%s", name, "rate.5min"),
Value: m.Rate5(),
Value: ms.Rate5(),
Period: int64(rep.Interval.Seconds()),
Attributes: map[string]interface{}{
DisplayUnitsLong: Operations,
@ -235,7 +238,7 @@ func (rep *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot B
},
Measurement{
Name: fmt.Sprintf("%s.%s", name, "rate.15min"),
Value: m.Rate15(),
Value: ms.Rate15(),
Period: int64(rep.Interval.Seconds()),
Attributes: map[string]interface{}{
DisplayUnitsLong: Operations,
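The Librato reporter changes above all follow the snapshot-first pattern used throughout this merge: take one snapshot per metric and read every field from it, rather than querying the live metric repeatedly. A minimal sketch of that pattern, assuming the metrics global switch is flipped on and using an illustrative metric name:

package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	metrics.Enabled = true // metric constructors return no-ops otherwise
	timer := metrics.NewRegisteredTimer("api/request", metrics.DefaultRegistry)
	timer.Update(10 * time.Millisecond)

	// One snapshot, many reads: the values are consistent with each other
	// and the live timer is only touched once.
	ts := timer.Snapshot()
	fmt.Println(ts.Count(), ts.Mean(), ts.Max(), ts.Rate1())
}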

View File

@ -23,19 +23,19 @@ func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) {
switch metric := i.(type) {
case Counter:
l.Printf("counter %s\n", name)
l.Printf(" count: %9d\n", metric.Count())
l.Printf(" count: %9d\n", metric.Snapshot().Count())
case CounterFloat64:
l.Printf("counter %s\n", name)
l.Printf(" count: %f\n", metric.Count())
l.Printf(" count: %f\n", metric.Snapshot().Count())
case Gauge:
l.Printf("gauge %s\n", name)
l.Printf(" value: %9d\n", metric.Value())
l.Printf(" value: %9d\n", metric.Snapshot().Value())
case GaugeFloat64:
l.Printf("gauge %s\n", name)
l.Printf(" value: %f\n", metric.Value())
l.Printf(" value: %f\n", metric.Snapshot().Value())
case GaugeInfo:
l.Printf("gauge %s\n", name)
l.Printf(" value: %s\n", metric.Value())
l.Printf(" value: %s\n", metric.Snapshot().Value())
case Healthcheck:
metric.Check()
l.Printf("healthcheck %s\n", name)

View File

@ -1,21 +1,25 @@
package metrics
import (
"math"
"sync"
"sync/atomic"
"time"
)
// Meters count events to produce exponentially-weighted moving average rates
// at one-, five-, and fifteen-minutes and a mean rate.
type Meter interface {
type MeterSnapshot interface {
Count() int64
Mark(int64)
Rate1() float64
Rate5() float64
Rate15() float64
RateMean() float64
Snapshot() Meter
}
// Meters count events to produce exponentially-weighted moving average rates
// at one-, five-, and fifteen-minutes and a mean rate.
type Meter interface {
Mark(int64)
Snapshot() MeterSnapshot
Stop()
}
@ -30,17 +34,6 @@ func GetOrRegisterMeter(name string, r Registry) Meter {
return r.GetOrRegister(name, NewMeter).(Meter)
}
// GetOrRegisterMeterForced returns an existing Meter or constructs and registers a
// new StandardMeter no matter the global switch is enabled or not.
// Be sure to unregister the meter from the registry once it is of no use to
// allow for garbage collection.
func GetOrRegisterMeterForced(name string, r Registry) Meter {
if nil == r {
r = DefaultRegistry
}
return r.GetOrRegister(name, NewMeterForced).(Meter)
}
// NewMeter constructs a new StandardMeter and launches a goroutine.
// Be sure to call Stop() once the meter is of no use to allow for garbage collection.
func NewMeter() Meter {
@ -68,115 +61,53 @@ func NewInactiveMeter() Meter {
return m
}
// NewMeterForced constructs a new StandardMeter and launches a goroutine no matter
// the global switch is enabled or not.
// Be sure to call Stop() once the meter is of no use to allow for garbage collection.
func NewMeterForced() Meter {
m := newStandardMeter()
arbiter.Lock()
defer arbiter.Unlock()
arbiter.meters[m] = struct{}{}
if !arbiter.started {
arbiter.started = true
go arbiter.tick()
}
return m
}
// NewRegisteredMeter constructs and registers a new StandardMeter
// and launches a goroutine.
// Be sure to unregister the meter from the registry once it is of no use to
// allow for garbage collection.
func NewRegisteredMeter(name string, r Registry) Meter {
c := NewMeter()
if nil == r {
r = DefaultRegistry
}
r.Register(name, c)
return c
return GetOrRegisterMeter(name, r)
}
// NewRegisteredMeterForced constructs and registers a new StandardMeter
// and launches a goroutine no matter the global switch is enabled or not.
// Be sure to unregister the meter from the registry once it is of no use to
// allow for garbage collection.
func NewRegisteredMeterForced(name string, r Registry) Meter {
c := NewMeterForced()
if nil == r {
r = DefaultRegistry
}
r.Register(name, c)
return c
}
// MeterSnapshot is a read-only copy of another Meter.
type MeterSnapshot struct {
temp atomic.Int64
// meterSnapshot is a read-only copy of the meter's internal values.
type meterSnapshot struct {
count int64
rate1, rate5, rate15, rateMean float64
}
// Count returns the count of events at the time the snapshot was taken.
func (m *MeterSnapshot) Count() int64 { return m.count }
// Mark panics.
func (*MeterSnapshot) Mark(n int64) {
panic("Mark called on a MeterSnapshot")
}
func (m *meterSnapshot) Count() int64 { return m.count }
// Rate1 returns the one-minute moving average rate of events per second at the
// time the snapshot was taken.
func (m *MeterSnapshot) Rate1() float64 { return m.rate1 }
func (m *meterSnapshot) Rate1() float64 { return m.rate1 }
// Rate5 returns the five-minute moving average rate of events per second at
// the time the snapshot was taken.
func (m *MeterSnapshot) Rate5() float64 { return m.rate5 }
func (m *meterSnapshot) Rate5() float64 { return m.rate5 }
// Rate15 returns the fifteen-minute moving average rate of events per second
// at the time the snapshot was taken.
func (m *MeterSnapshot) Rate15() float64 { return m.rate15 }
func (m *meterSnapshot) Rate15() float64 { return m.rate15 }
// RateMean returns the meter's mean rate of events per second at the time the
// snapshot was taken.
func (m *MeterSnapshot) RateMean() float64 { return m.rateMean }
// Snapshot returns the snapshot.
func (m *MeterSnapshot) Snapshot() Meter { return m }
// Stop is a no-op.
func (m *MeterSnapshot) Stop() {}
func (m *meterSnapshot) RateMean() float64 { return m.rateMean }
// NilMeter is a no-op Meter.
type NilMeter struct{}
// Count is a no-op.
func (NilMeter) Count() int64 { return 0 }
// Mark is a no-op.
func (NilMeter) Mark(n int64) {}
// Rate1 is a no-op.
func (NilMeter) Rate1() float64 { return 0.0 }
// Rate5 is a no-op.
func (NilMeter) Rate5() float64 { return 0.0 }
// Rate15 is a no-op.
func (NilMeter) Rate15() float64 { return 0.0 }
// RateMean is a no-op.
func (NilMeter) RateMean() float64 { return 0.0 }
// Snapshot is a no-op.
func (NilMeter) Snapshot() Meter { return NilMeter{} }
// Stop is a no-op.
func (NilMeter) Stop() {}
func (NilMeter) Count() int64 { return 0 }
func (NilMeter) Mark(n int64) {}
func (NilMeter) Snapshot() MeterSnapshot { return (*emptySnapshot)(nil) }
func (NilMeter) Stop() {}
// StandardMeter is the standard implementation of a Meter.
type StandardMeter struct {
lock sync.RWMutex
snapshot *MeterSnapshot
count atomic.Int64
uncounted atomic.Int64 // not yet added to the EWMAs
rateMean atomic.Uint64
a1, a5, a15 EWMA
startTime time.Time
stopped atomic.Bool
@ -184,7 +115,6 @@ type StandardMeter struct {
func newStandardMeter() *StandardMeter {
return &StandardMeter{
snapshot: &MeterSnapshot{},
a1: NewEWMA1(),
a5: NewEWMA5(),
a15: NewEWMA15(),
@ -194,97 +124,42 @@ func newStandardMeter() *StandardMeter {
// Stop stops the meter; Mark() becomes a no-op after Stop() has been called.
func (m *StandardMeter) Stop() {
stopped := m.stopped.Swap(true)
if !stopped {
if stopped := m.stopped.Swap(true); !stopped {
arbiter.Lock()
delete(arbiter.meters, m)
arbiter.Unlock()
}
}
// Count returns the number of events recorded.
// It updates the meter to be as accurate as possible
func (m *StandardMeter) Count() int64 {
m.lock.Lock()
defer m.lock.Unlock()
m.updateMeter()
return m.snapshot.count
}
// Mark records the occurrence of n events.
func (m *StandardMeter) Mark(n int64) {
m.snapshot.temp.Add(n)
}
// Rate1 returns the one-minute moving average rate of events per second.
func (m *StandardMeter) Rate1() float64 {
m.lock.RLock()
defer m.lock.RUnlock()
return m.snapshot.rate1
}
// Rate5 returns the five-minute moving average rate of events per second.
func (m *StandardMeter) Rate5() float64 {
m.lock.RLock()
defer m.lock.RUnlock()
return m.snapshot.rate5
}
// Rate15 returns the fifteen-minute moving average rate of events per second.
func (m *StandardMeter) Rate15() float64 {
m.lock.RLock()
defer m.lock.RUnlock()
return m.snapshot.rate15
}
// RateMean returns the meter's mean rate of events per second.
func (m *StandardMeter) RateMean() float64 {
m.lock.RLock()
defer m.lock.RUnlock()
return m.snapshot.rateMean
m.uncounted.Add(n)
}
// Snapshot returns a read-only copy of the meter.
func (m *StandardMeter) Snapshot() Meter {
m.lock.RLock()
snapshot := MeterSnapshot{
count: m.snapshot.count,
rate1: m.snapshot.rate1,
rate5: m.snapshot.rate5,
rate15: m.snapshot.rate15,
rateMean: m.snapshot.rateMean,
func (m *StandardMeter) Snapshot() MeterSnapshot {
return &meterSnapshot{
count: m.count.Load() + m.uncounted.Load(),
rate1: m.a1.Snapshot().Rate(),
rate5: m.a5.Snapshot().Rate(),
rate15: m.a15.Snapshot().Rate(),
rateMean: math.Float64frombits(m.rateMean.Load()),
}
snapshot.temp.Store(m.snapshot.temp.Load())
m.lock.RUnlock()
return &snapshot
}
func (m *StandardMeter) updateSnapshot() {
// should run with write lock held on m.lock
snapshot := m.snapshot
snapshot.rate1 = m.a1.Rate()
snapshot.rate5 = m.a5.Rate()
snapshot.rate15 = m.a15.Rate()
snapshot.rateMean = float64(snapshot.count) / time.Since(m.startTime).Seconds()
}
func (m *StandardMeter) updateMeter() {
// should only run with write lock held on m.lock
n := m.snapshot.temp.Swap(0)
m.snapshot.count += n
m.a1.Update(n)
m.a5.Update(n)
m.a15.Update(n)
}
func (m *StandardMeter) tick() {
m.lock.Lock()
defer m.lock.Unlock()
m.updateMeter()
// Take the uncounted values, add to count
n := m.uncounted.Swap(0)
count := m.count.Add(n)
m.rateMean.Store(math.Float64bits(float64(count) / time.Since(m.startTime).Seconds()))
// Update the EWMA's internal state
m.a1.Update(n)
m.a5.Update(n)
m.a15.Update(n)
// And trigger them to calculate the rates
m.a1.Tick()
m.a5.Tick()
m.a15.Tick()
m.updateSnapshot()
}
// meterArbiter ticks meters every 5s from a single goroutine.
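With the Meter interface trimmed down to Mark/Snapshot/Stop above, every rate read now goes through a MeterSnapshot. A minimal sketch (metric name illustrative):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	metrics.Enabled = true
	m := metrics.NewRegisteredMeter("p2p/ingress", metrics.DefaultRegistry)
	defer m.Stop() // meters register with the 5s arbiter; Stop releases that slot

	m.Mark(1024)

	// The live meter no longer exposes Count/Rate1/... directly.
	ms := m.Snapshot()
	fmt.Println(ms.Count(), ms.Rate1(), ms.RateMean())
}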

View File

@ -12,11 +12,17 @@ func BenchmarkMeter(b *testing.B) {
m.Mark(1)
}
}
func TestMeter(t *testing.T) {
m := NewMeter()
m.Mark(47)
if v := m.Snapshot().Count(); v != 47 {
t.Fatalf("have %d want %d", v, 47)
}
}
func TestGetOrRegisterMeter(t *testing.T) {
r := NewRegistry()
NewRegisteredMeter("foo", r).Mark(47)
if m := GetOrRegisterMeter("foo", r); m.Count() != 47 {
if m := GetOrRegisterMeter("foo", r).Snapshot(); m.Count() != 47 {
t.Fatal(m.Count())
}
}
@ -31,10 +37,10 @@ func TestMeterDecay(t *testing.T) {
ma.meters[m] = struct{}{}
m.Mark(1)
ma.tickMeters()
rateMean := m.RateMean()
rateMean := m.Snapshot().RateMean()
time.Sleep(100 * time.Millisecond)
ma.tickMeters()
if m.RateMean() >= rateMean {
if m.Snapshot().RateMean() >= rateMean {
t.Error("m.RateMean() didn't decrease")
}
}
@ -42,7 +48,7 @@ func TestMeterDecay(t *testing.T) {
func TestMeterNonzero(t *testing.T) {
m := NewMeter()
m.Mark(3)
if count := m.Count(); count != 3 {
if count := m.Snapshot().Count(); count != 3 {
t.Errorf("m.Count(): 3 != %v\n", count)
}
}
@ -59,16 +65,8 @@ func TestMeterStop(t *testing.T) {
}
}
func TestMeterSnapshot(t *testing.T) {
m := NewMeter()
m.Mark(1)
if snapshot := m.Snapshot(); m.RateMean() != snapshot.RateMean() {
t.Fatal(snapshot)
}
}
func TestMeterZero(t *testing.T) {
m := NewMeter()
m := NewMeter().Snapshot()
if count := m.Count(); count != 0 {
t.Errorf("m.Count(): 0 != %v\n", count)
}
@ -79,13 +77,13 @@ func TestMeterRepeat(t *testing.T) {
for i := 0; i < 101; i++ {
m.Mark(int64(i))
}
if count := m.Count(); count != 5050 {
if count := m.Snapshot().Count(); count != 5050 {
t.Errorf("m.Count(): 5050 != %v\n", count)
}
for i := 0; i < 101; i++ {
m.Mark(int64(i))
}
if count := m.Count(); count != 10100 {
if count := m.Snapshot().Count(); count != 10100 {
t.Errorf("m.Count(): 10100 != %v\n", count)
}
}

View File

@ -9,7 +9,9 @@ import (
"os"
"runtime/metrics"
"runtime/pprof"
"strconv"
"strings"
"syscall"
"time"
"github.com/ethereum/go-ethereum/log"
@ -30,13 +32,35 @@ var EnabledExpensive = false
// enablerFlags are the CLI flag names used to enable metrics collection.
var enablerFlags = []string{"metrics"}
// enablerEnvVars are the environment variable names used to enable metrics collection.
var enablerEnvVars = []string{"GETH_METRICS"}
// expensiveEnablerFlags are the CLI flag names used to enable expensive metrics collection.
var expensiveEnablerFlags = []string{"metrics.expensive"}
// expensiveEnablerEnvVars are the environment variable names used to enable expensive metrics collection.
var expensiveEnablerEnvVars = []string{"GETH_METRICS_EXPENSIVE"}
// Init enables or disables the metrics system. Since we need this to run before
// any other code gets to create meters and timers, we'll actually do an ugly hack
// and peek into the command line args for the metrics flag.
func init() {
for _, enabler := range enablerEnvVars {
if val, found := syscall.Getenv(enabler); found && !Enabled {
if enable, _ := strconv.ParseBool(val); enable { // ignore error, flag parser will choke on it later
log.Info("Enabling metrics collection")
Enabled = true
}
}
}
for _, enabler := range expensiveEnablerEnvVars {
if val, found := syscall.Getenv(enabler); found && !EnabledExpensive {
if enable, _ := strconv.ParseBool(val); enable { // ignore error, flag parser will choke on it later
log.Info("Enabling expensive metrics collection")
EnabledExpensive = true
}
}
}
for _, arg := range os.Args {
flag := strings.TrimLeft(arg, "-")
@ -85,6 +109,12 @@ var runtimeSamples = []metrics.Sample{
{Name: "/sched/latencies:seconds"}, // histogram
}
func ReadRuntimeStats() *runtimeStats {
r := new(runtimeStats)
readRuntimeStats(r)
return r
}
func readRuntimeStats(v *runtimeStats) {
metrics.Read(runtimeSamples)
for _, s := range runtimeSamples {
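The env-var gate above leans on strconv.ParseBool and deliberately ignores parse errors, leaving bad values for the flag parser to reject later. A small sketch of what ParseBool actually accepts (the sample values are chosen for illustration):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// ParseBool accepts 1, t, T, TRUE, true, True, 0, f, F, FALSE, false, False;
	// anything else returns an error.
	for _, v := range []string{"1", "true", "False", "0", "yes"} {
		enable, err := strconv.ParseBool(v)
		fmt.Printf("%q -> enable=%v err=%v\n", v, enable, err)
	}
}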

View File

@ -98,8 +98,8 @@ func Example() {
t.Time(func() { time.Sleep(10 * time.Millisecond) })
t.Update(1)
fmt.Println(c.Count())
fmt.Println(t.Min())
fmt.Println(c.Snapshot().Count())
fmt.Println(t.Snapshot().Min())
// Output: 17
// 1
}

View File

@ -65,15 +65,15 @@ func (c *OpenTSDBConfig) writeRegistry(w io.Writer, now int64, shortHostname str
c.Registry.Each(func(name string, i interface{}) {
switch metric := i.(type) {
case Counter:
fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname)
fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Snapshot().Count(), shortHostname)
case CounterFloat64:
fmt.Fprintf(w, "put %s.%s.count %d %f host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname)
fmt.Fprintf(w, "put %s.%s.count %d %f host=%s\n", c.Prefix, name, now, metric.Snapshot().Count(), shortHostname)
case Gauge:
fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Snapshot().Value(), shortHostname)
case GaugeFloat64:
fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Snapshot().Value(), shortHostname)
case GaugeInfo:
fmt.Fprintf(w, "put %s.%s.value %d %s host=%s\n", c.Prefix, name, now, metric.Value().String(), shortHostname)
fmt.Fprintf(w, "put %s.%s.value %d %s host=%s\n", c.Prefix, name, now, metric.Snapshot().Value().String(), shortHostname)
case Histogram:
h := metric.Snapshot()
ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
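The OpenTSDB writer above emits the plain telnet-style put protocol, one line per data point, now sourcing values from metric snapshots. A hedged sketch of producing one such line for a counter (prefix, metric name and host are illustrative):

package main

import (
	"fmt"
	"os"
	"time"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	metrics.Enabled = true
	c := metrics.NewRegisteredCounter("txpool/pending", metrics.DefaultRegistry)
	c.Inc(42)

	// Format: put <prefix>.<name>.count <unix-ts> <value> host=<host>
	fmt.Fprintf(os.Stdout, "put %s.%s.count %d %d host=%s\n",
		"geth", "txpool/pending", time.Now().Unix(), c.Snapshot().Count(), "node-1")
}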

View File

@ -1,6 +1,7 @@
package metrics
import (
"fmt"
"net"
"os"
"strings"
@ -47,5 +48,19 @@ func TestExampleOpenTSB(t *testing.T) {
}
if have, want := w.String(), string(wantB); have != want {
t.Errorf("\nhave:\n%v\nwant:\n%v\n", have, want)
t.Logf("have vs want:\n%v", findFirstDiffPos(have, want))
}
}
func findFirstDiffPos(a, b string) string {
yy := strings.Split(b, "\n")
for i, x := range strings.Split(a, "\n") {
if i >= len(yy) {
return fmt.Sprintf("have:%d: %s\nwant:%d: <EOF>", i, x, i)
}
if y := yy[i]; x != y {
return fmt.Sprintf("have:%d: %s\nwant:%d: %s", i, x, i, y)
}
}
return ""
}

View File

@ -75,27 +75,27 @@ func (c *collector) Add(name string, i any) error {
return nil
}
func (c *collector) addCounter(name string, m metrics.Counter) {
func (c *collector) addCounter(name string, m metrics.CounterSnapshot) {
c.writeGaugeCounter(name, m.Count())
}
func (c *collector) addCounterFloat64(name string, m metrics.CounterFloat64) {
func (c *collector) addCounterFloat64(name string, m metrics.CounterFloat64Snapshot) {
c.writeGaugeCounter(name, m.Count())
}
func (c *collector) addGauge(name string, m metrics.Gauge) {
func (c *collector) addGauge(name string, m metrics.GaugeSnapshot) {
c.writeGaugeCounter(name, m.Value())
}
func (c *collector) addGaugeFloat64(name string, m metrics.GaugeFloat64) {
func (c *collector) addGaugeFloat64(name string, m metrics.GaugeFloat64Snapshot) {
c.writeGaugeCounter(name, m.Value())
}
func (c *collector) addGaugeInfo(name string, m metrics.GaugeInfo) {
func (c *collector) addGaugeInfo(name string, m metrics.GaugeInfoSnapshot) {
c.writeGaugeInfo(name, m.Value())
}
func (c *collector) addHistogram(name string, m metrics.Histogram) {
func (c *collector) addHistogram(name string, m metrics.HistogramSnapshot) {
pv := []float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999}
ps := m.Percentiles(pv)
c.writeSummaryCounter(name, m.Count())
@ -106,11 +106,11 @@ func (c *collector) addHistogram(name string, m metrics.Histogram) {
c.buff.WriteRune('\n')
}
func (c *collector) addMeter(name string, m metrics.Meter) {
func (c *collector) addMeter(name string, m metrics.MeterSnapshot) {
c.writeGaugeCounter(name, m.Count())
}
func (c *collector) addTimer(name string, m metrics.Timer) {
func (c *collector) addTimer(name string, m metrics.TimerSnapshot) {
pv := []float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999}
ps := m.Percentiles(pv)
c.writeSummaryCounter(name, m.Count())
@ -121,13 +121,12 @@ func (c *collector) addTimer(name string, m metrics.Timer) {
c.buff.WriteRune('\n')
}
func (c *collector) addResettingTimer(name string, m metrics.ResettingTimer) {
if len(m.Values()) <= 0 {
func (c *collector) addResettingTimer(name string, m metrics.ResettingTimerSnapshot) {
if m.Count() <= 0 {
return
}
ps := m.Percentiles([]float64{50, 95, 99})
val := m.Values()
c.writeSummaryCounter(name, len(val))
ps := m.Percentiles([]float64{0.50, 0.95, 0.99})
c.writeSummaryCounter(name, m.Count())
c.buff.WriteString(fmt.Sprintf(typeSummaryTpl, mutateKey(name)))
c.writeSummaryPercentile(name, "0.50", ps[0])
c.writeSummaryPercentile(name, "0.95", ps[1])

View File

@ -55,10 +55,10 @@ func findFirstDiffPos(a, b string) string {
yy := strings.Split(b, "\n")
for i, x := range strings.Split(a, "\n") {
if i >= len(yy) {
return fmt.Sprintf("a:%d: %s\nb:%d: <EOF>", i, x, i)
return fmt.Sprintf("have:%d: %s\nwant:%d: <EOF>", i, x, i)
}
if y := yy[i]; x != y {
return fmt.Sprintf("a:%d: %s\nb:%d: %s", i, x, i, y)
return fmt.Sprintf("have:%d: %s\nwant:%d: %s", i, x, i, y)
}
}
return ""

View File

@ -1,3 +1,25 @@
# TYPE system_cpu_schedlatency_count counter
system_cpu_schedlatency_count 5645
# TYPE system_cpu_schedlatency summary
system_cpu_schedlatency {quantile="0.5"} 0
system_cpu_schedlatency {quantile="0.75"} 7168
system_cpu_schedlatency {quantile="0.95"} 1.6777216e+07
system_cpu_schedlatency {quantile="0.99"} 2.9360128e+07
system_cpu_schedlatency {quantile="0.999"} 3.3554432e+07
system_cpu_schedlatency {quantile="0.9999"} 3.3554432e+07
# TYPE system_memory_pauses_count counter
system_memory_pauses_count 14
# TYPE system_memory_pauses summary
system_memory_pauses {quantile="0.5"} 32768
system_memory_pauses {quantile="0.75"} 57344
system_memory_pauses {quantile="0.95"} 196608
system_memory_pauses {quantile="0.99"} 196608
system_memory_pauses {quantile="0.999"} 196608
system_memory_pauses {quantile="0.9999"} 196608
# TYPE test_counter gauge
test_counter 12345
@ -31,9 +53,9 @@ test_meter 0
test_resetting_timer_count 6
# TYPE test_resetting_timer summary
test_resetting_timer {quantile="0.50"} 12000000
test_resetting_timer {quantile="0.95"} 120000000
test_resetting_timer {quantile="0.99"} 120000000
test_resetting_timer {quantile="0.50"} 1.25e+07
test_resetting_timer {quantile="0.95"} 1.2e+08
test_resetting_timer {quantile="0.99"} 1.2e+08
# TYPE test_timer_count counter
test_timer_count 6

View File

@ -150,13 +150,13 @@ func (r *StandardRegistry) GetAll() map[string]map[string]interface{} {
values := make(map[string]interface{})
switch metric := i.(type) {
case Counter:
values["count"] = metric.Count()
values["count"] = metric.Snapshot().Count()
case CounterFloat64:
values["count"] = metric.Count()
values["count"] = metric.Snapshot().Count()
case Gauge:
values["value"] = metric.Value()
values["value"] = metric.Snapshot().Value()
case GaugeFloat64:
values["value"] = metric.Value()
values["value"] = metric.Snapshot().Value()
case Healthcheck:
values["error"] = nil
metric.Check()
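GetAll above now snapshots each metric before exporting its fields. A small usage sketch of the resulting nested map (registry contents illustrative):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	metrics.Enabled = true
	r := metrics.NewRegistry()
	metrics.NewRegisteredCounter("requests", r).Inc(5)
	metrics.NewRegisteredGauge("peers", r).Update(25)

	// GetAll returns map[name]map[field]value, e.g. "requests" -> {"count": 5}.
	for name, fields := range r.GetAll() {
		fmt.Println(name, fields)
	}
}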

View File

@ -85,11 +85,11 @@ func TestRegistryDuplicate(t *testing.T) {
func TestRegistryGet(t *testing.T) {
r := NewRegistry()
r.Register("foo", NewCounter())
if count := r.Get("foo").(Counter).Count(); count != 0 {
if count := r.Get("foo").(Counter).Snapshot().Count(); count != 0 {
t.Fatal(count)
}
r.Get("foo").(Counter).Inc(1)
if count := r.Get("foo").(Counter).Count(); count != 1 {
if count := r.Get("foo").(Counter).Snapshot().Count(); count != 1 {
t.Fatal(count)
}
}

View File

@ -17,7 +17,7 @@ type resettingSample struct {
}
// Snapshot returns a read-only copy of the sample with the original reset.
func (rs *resettingSample) Snapshot() Sample {
func (rs *resettingSample) Snapshot() SampleSnapshot {
s := rs.Sample.Snapshot()
rs.Sample.Clear()
return s
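Taking a snapshot of a resetting sample clears the underlying sample as a side effect. A hedged sketch of that behavior, assuming the package's ResettingSample wrapper and NewUniformSample constructor:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	metrics.Enabled = true
	s := metrics.ResettingSample(metrics.NewUniformSample(1024))

	s.Update(1)
	s.Update(2)
	s.Update(3)

	first := s.Snapshot()  // read-only copy; the wrapped sample is cleared here
	second := s.Snapshot() // taken after the reset, so it is empty
	fmt.Println(first.Count(), second.Count()) // expected: 3 0
}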

View File

@ -1,22 +1,24 @@
package metrics
import (
"math"
"sync"
"time"
"golang.org/x/exp/slices"
)
// Initial slice capacity for the values stored in a ResettingTimer
const InitialResettingTimerSliceCap = 10
type ResettingTimerSnapshot interface {
Count() int
Mean() float64
Max() int64
Min() int64
Percentiles([]float64) []float64
}
// ResettingTimer is used for storing aggregated values for timers, which are reset on every flush interval.
type ResettingTimer interface {
Values() []int64
Snapshot() ResettingTimer
Percentiles([]float64) []int64
Mean() float64
Snapshot() ResettingTimerSnapshot
Time(func())
Update(time.Duration)
UpdateSince(time.Time)
@ -52,70 +54,40 @@ func NewResettingTimer() ResettingTimer {
}
// NilResettingTimer is a no-op ResettingTimer.
type NilResettingTimer struct {
}
type NilResettingTimer struct{}
// Values is a no-op.
func (NilResettingTimer) Values() []int64 { return nil }
// Snapshot is a no-op.
func (NilResettingTimer) Snapshot() ResettingTimer {
return &ResettingTimerSnapshot{
values: []int64{},
}
}
// Time is a no-op.
func (NilResettingTimer) Time(f func()) { f() }
// Update is a no-op.
func (NilResettingTimer) Update(time.Duration) {}
// Percentiles panics.
func (NilResettingTimer) Percentiles([]float64) []int64 {
panic("Percentiles called on a NilResettingTimer")
}
// Mean panics.
func (NilResettingTimer) Mean() float64 {
panic("Mean called on a NilResettingTimer")
}
// UpdateSince is a no-op.
func (NilResettingTimer) UpdateSince(time.Time) {}
func (NilResettingTimer) Values() []int64 { return nil }
func (n NilResettingTimer) Snapshot() ResettingTimerSnapshot { return n }
func (NilResettingTimer) Time(f func()) { f() }
func (NilResettingTimer) Update(time.Duration) {}
func (NilResettingTimer) Percentiles([]float64) []float64 { return nil }
func (NilResettingTimer) Mean() float64 { return 0.0 }
func (NilResettingTimer) Max() int64 { return 0 }
func (NilResettingTimer) Min() int64 { return 0 }
func (NilResettingTimer) UpdateSince(time.Time) {}
func (NilResettingTimer) Count() int { return 0 }
// StandardResettingTimer is the standard implementation of a ResettingTimer and Meter.
type StandardResettingTimer struct {
values []int64
mutex sync.Mutex
}
sum int64 // sum is a running total of all recorded values, used later to calculate the mean
// Values returns a slice with all measurements.
func (t *StandardResettingTimer) Values() []int64 {
return t.values
mutex sync.Mutex
}
// Snapshot resets the timer and returns a read-only copy of its contents.
func (t *StandardResettingTimer) Snapshot() ResettingTimer {
func (t *StandardResettingTimer) Snapshot() ResettingTimerSnapshot {
t.mutex.Lock()
defer t.mutex.Unlock()
currentValues := t.values
t.values = make([]int64, 0, InitialResettingTimerSliceCap)
return &ResettingTimerSnapshot{
values: currentValues,
snapshot := &resettingTimerSnapshot{}
if len(t.values) > 0 {
snapshot.mean = float64(t.sum) / float64(len(t.values))
snapshot.values = t.values
t.values = make([]int64, 0, InitialResettingTimerSliceCap)
}
}
// Percentiles panics.
func (t *StandardResettingTimer) Percentiles([]float64) []int64 {
panic("Percentiles called on a StandardResettingTimer")
}
// Mean panics.
func (t *StandardResettingTimer) Mean() float64 {
panic("Mean called on a StandardResettingTimer")
t.sum = 0
return snapshot
}
// Record the duration of the execution of the given function.
@ -130,106 +102,70 @@ func (t *StandardResettingTimer) Update(d time.Duration) {
t.mutex.Lock()
defer t.mutex.Unlock()
t.values = append(t.values, int64(d))
t.sum += int64(d)
}
// Record the duration of an event that started at a time and ends now.
func (t *StandardResettingTimer) UpdateSince(ts time.Time) {
t.mutex.Lock()
defer t.mutex.Unlock()
t.values = append(t.values, int64(time.Since(ts)))
t.Update(time.Since(ts))
}
// ResettingTimerSnapshot is a point-in-time copy of another ResettingTimer.
type ResettingTimerSnapshot struct {
// resettingTimerSnapshot is a point-in-time copy of another ResettingTimer.
type resettingTimerSnapshot struct {
values []int64
mean float64
thresholdBoundaries []int64
max int64
min int64
thresholdBoundaries []float64
calculated bool
}
// Snapshot returns the snapshot.
func (t *ResettingTimerSnapshot) Snapshot() ResettingTimer { return t }
// Time panics.
func (*ResettingTimerSnapshot) Time(func()) {
panic("Time called on a ResettingTimerSnapshot")
}
// Update panics.
func (*ResettingTimerSnapshot) Update(time.Duration) {
panic("Update called on a ResettingTimerSnapshot")
}
// UpdateSince panics.
func (*ResettingTimerSnapshot) UpdateSince(time.Time) {
panic("UpdateSince called on a ResettingTimerSnapshot")
}
// Values returns all values from snapshot.
func (t *ResettingTimerSnapshot) Values() []int64 {
return t.values
// Count returns the number of values in the snapshot.
func (t *resettingTimerSnapshot) Count() int {
return len(t.values)
}
// Percentiles returns the boundaries for the input percentiles.
func (t *ResettingTimerSnapshot) Percentiles(percentiles []float64) []int64 {
// Note: this method is not thread-safe.
func (t *resettingTimerSnapshot) Percentiles(percentiles []float64) []float64 {
t.calc(percentiles)
return t.thresholdBoundaries
}
// Mean returns the mean of the snapshotted values.
func (t *ResettingTimerSnapshot) Mean() float64 {
// Note: this method is not thread-safe.
func (t *resettingTimerSnapshot) Mean() float64 {
if !t.calculated {
t.calc([]float64{})
t.calc(nil)
}
return t.mean
}
func (t *ResettingTimerSnapshot) calc(percentiles []float64) {
slices.Sort(t.values)
count := len(t.values)
if count > 0 {
min := t.values[0]
max := t.values[count-1]
cumulativeValues := make([]int64, count)
cumulativeValues[0] = min
for i := 1; i < count; i++ {
cumulativeValues[i] = t.values[i] + cumulativeValues[i-1]
}
t.thresholdBoundaries = make([]int64, len(percentiles))
thresholdBoundary := max
for i, pct := range percentiles {
if count > 1 {
var abs float64
if pct >= 0 {
abs = pct
} else {
abs = 100 + pct
}
// poor man's math.Round(x):
// math.Floor(x + 0.5)
indexOfPerc := int(math.Floor(((abs / 100.0) * float64(count)) + 0.5))
if pct >= 0 && indexOfPerc > 0 {
indexOfPerc -= 1 // index offset=0
}
thresholdBoundary = t.values[indexOfPerc]
}
t.thresholdBoundaries[i] = thresholdBoundary
}
sum := cumulativeValues[count-1]
t.mean = float64(sum) / float64(count)
} else {
t.thresholdBoundaries = make([]int64, len(percentiles))
t.mean = 0
// Max returns the max of the snapshotted values.
// Note: this method is not thread-safe.
func (t *resettingTimerSnapshot) Max() int64 {
if !t.calculated {
t.calc(nil)
}
t.calculated = true
return t.max
}
// Min returns the min of the snapshotted values.
// Note: this method is not thread-safe.
func (t *resettingTimerSnapshot) Min() int64 {
if !t.calculated {
t.calc(nil)
}
return t.min
}
func (t *resettingTimerSnapshot) calc(percentiles []float64) {
scores := CalculatePercentiles(t.values, percentiles)
t.thresholdBoundaries = scores
if len(t.values) == 0 {
return
}
t.min = t.values[0]
t.max = t.values[len(t.values)-1]
}
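The rewritten resetting timer above keeps only the raw values and a running sum on the hot path; min, max and mean are derived lazily in the snapshot, and percentile inputs are now fractions in [0, 1]. A minimal sketch:

package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	metrics.Enabled = true
	t := metrics.NewResettingTimer()
	for i := 1; i <= 10; i++ {
		t.Update(time.Duration(i) * time.Millisecond)
	}

	snap := t.Snapshot() // also resets the live timer
	fmt.Println(snap.Count(), snap.Min(), snap.Max(), snap.Mean())
	fmt.Println(snap.Percentiles([]float64{0.5, 0.95})) // 0.5 == 50th percentile
}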

View File

@ -10,9 +10,9 @@ func TestResettingTimer(t *testing.T) {
values []int64
start int
end int
wantP50 int64
wantP95 int64
wantP99 int64
wantP50 float64
wantP95 float64
wantP99 float64
wantMean float64
wantMin int64
wantMax int64
@ -21,14 +21,14 @@ func TestResettingTimer(t *testing.T) {
values: []int64{},
start: 1,
end: 11,
wantP50: 5, wantP95: 10, wantP99: 10,
wantP50: 5.5, wantP95: 10, wantP99: 10,
wantMin: 1, wantMax: 10, wantMean: 5.5,
},
{
values: []int64{},
start: 1,
end: 101,
wantP50: 50, wantP95: 95, wantP99: 99,
wantP50: 50.5, wantP95: 95.94999999999999, wantP99: 99.99,
wantMin: 1, wantMax: 100, wantMean: 50.5,
},
{
@ -56,11 +56,11 @@ func TestResettingTimer(t *testing.T) {
values: []int64{1, 10},
start: 0,
end: 0,
wantP50: 1, wantP95: 10, wantP99: 10,
wantP50: 5.5, wantP95: 10, wantP99: 10,
wantMin: 1, wantMax: 10, wantMean: 5.5,
},
}
for ind, tt := range tests {
for i, tt := range tests {
timer := NewResettingTimer()
for i := tt.start; i < tt.end; i++ {
@ -70,37 +70,27 @@ func TestResettingTimer(t *testing.T) {
for _, v := range tt.values {
timer.Update(time.Duration(v))
}
snap := timer.Snapshot()
ps := snap.Percentiles([]float64{50, 95, 99})
ps := snap.Percentiles([]float64{0.50, 0.95, 0.99})
val := snap.Values()
if len(val) > 0 {
if tt.wantMin != val[0] {
t.Fatalf("%d: min: got %d, want %d", ind, val[0], tt.wantMin)
}
if tt.wantMax != val[len(val)-1] {
t.Fatalf("%d: max: got %d, want %d", ind, val[len(val)-1], tt.wantMax)
}
if have, want := snap.Min(), tt.wantMin; have != want {
t.Fatalf("%d: min: have %d, want %d", i, have, want)
}
if tt.wantMean != snap.Mean() {
t.Fatalf("%d: mean: got %.2f, want %.2f", ind, snap.Mean(), tt.wantMean)
if have, want := snap.Max(), tt.wantMax; have != want {
t.Fatalf("%d: max: have %d, want %d", i, have, want)
}
if tt.wantP50 != ps[0] {
t.Fatalf("%d: p50: got %d, want %d", ind, ps[0], tt.wantP50)
if have, want := snap.Mean(), tt.wantMean; have != want {
t.Fatalf("%d: mean: have %v, want %v", i, have, want)
}
if tt.wantP95 != ps[1] {
t.Fatalf("%d: p95: got %d, want %d", ind, ps[1], tt.wantP95)
if have, want := ps[0], tt.wantP50; have != want {
t.Errorf("%d: p50: have %v, want %v", i, have, want)
}
if tt.wantP99 != ps[2] {
t.Fatalf("%d: p99: got %d, want %d", ind, ps[2], tt.wantP99)
if have, want := ps[1], tt.wantP95; have != want {
t.Errorf("%d: p95: have %v, want %v", i, have, want)
}
if have, want := ps[2], tt.wantP99; have != want {
t.Errorf("%d: p99: have %v, want %v", i, have, want)
}
}
}
@ -110,11 +100,11 @@ func TestResettingTimerWithFivePercentiles(t *testing.T) {
values []int64
start int
end int
wantP05 int64
wantP20 int64
wantP50 int64
wantP95 int64
wantP99 int64
wantP05 float64
wantP20 float64
wantP50 float64
wantP95 float64
wantP99 float64
wantMean float64
wantMin int64
wantMax int64
@ -123,14 +113,14 @@ func TestResettingTimerWithFivePercentiles(t *testing.T) {
values: []int64{},
start: 1,
end: 11,
wantP05: 1, wantP20: 2, wantP50: 5, wantP95: 10, wantP99: 10,
wantP05: 1, wantP20: 2.2, wantP50: 5.5, wantP95: 10, wantP99: 10,
wantMin: 1, wantMax: 10, wantMean: 5.5,
},
{
values: []int64{},
start: 1,
end: 101,
wantP05: 5, wantP20: 20, wantP50: 50, wantP95: 95, wantP99: 99,
wantP05: 5.050000000000001, wantP20: 20.200000000000003, wantP50: 50.5, wantP95: 95.94999999999999, wantP99: 99.99,
wantMin: 1, wantMax: 100, wantMean: 50.5,
},
{
@ -158,7 +148,7 @@ func TestResettingTimerWithFivePercentiles(t *testing.T) {
values: []int64{1, 10},
start: 0,
end: 0,
wantP05: 1, wantP20: 1, wantP50: 1, wantP95: 10, wantP99: 10,
wantP05: 1, wantP20: 1, wantP50: 5.5, wantP95: 10, wantP99: 10,
wantMin: 1, wantMax: 10, wantMean: 5.5,
},
}
@ -175,42 +165,33 @@ func TestResettingTimerWithFivePercentiles(t *testing.T) {
snap := timer.Snapshot()
ps := snap.Percentiles([]float64{5, 20, 50, 95, 99})
ps := snap.Percentiles([]float64{0.05, 0.20, 0.50, 0.95, 0.99})
val := snap.Values()
if tt.wantMin != snap.Min() {
t.Errorf("%d: min: got %d, want %d", ind, snap.Min(), tt.wantMin)
}
if len(val) > 0 {
if tt.wantMin != val[0] {
t.Fatalf("%d: min: got %d, want %d", ind, val[0], tt.wantMin)
}
if tt.wantMax != val[len(val)-1] {
t.Fatalf("%d: max: got %d, want %d", ind, val[len(val)-1], tt.wantMax)
}
if tt.wantMax != snap.Max() {
t.Errorf("%d: max: got %d, want %d", ind, snap.Max(), tt.wantMax)
}
if tt.wantMean != snap.Mean() {
t.Fatalf("%d: mean: got %.2f, want %.2f", ind, snap.Mean(), tt.wantMean)
t.Errorf("%d: mean: got %.2f, want %.2f", ind, snap.Mean(), tt.wantMean)
}
if tt.wantP05 != ps[0] {
t.Fatalf("%d: p05: got %d, want %d", ind, ps[0], tt.wantP05)
t.Errorf("%d: p05: got %v, want %v", ind, ps[0], tt.wantP05)
}
if tt.wantP20 != ps[1] {
t.Fatalf("%d: p20: got %d, want %d", ind, ps[1], tt.wantP20)
t.Errorf("%d: p20: got %v, want %v", ind, ps[1], tt.wantP20)
}
if tt.wantP50 != ps[2] {
t.Fatalf("%d: p50: got %d, want %d", ind, ps[2], tt.wantP50)
t.Errorf("%d: p50: got %v, want %v", ind, ps[2], tt.wantP50)
}
if tt.wantP95 != ps[3] {
t.Fatalf("%d: p95: got %d, want %d", ind, ps[3], tt.wantP95)
t.Errorf("%d: p95: got %v, want %v", ind, ps[3], tt.wantP95)
}
if tt.wantP99 != ps[4] {
t.Fatalf("%d: p99: got %d, want %d", ind, ps[4], tt.wantP99)
t.Errorf("%d: p99: got %v, want %v", ind, ps[4], tt.wantP99)
}
}
}

View File

@ -17,13 +17,19 @@ func getOrRegisterRuntimeHistogram(name string, scale float64, r Registry) *runt
// runtimeHistogram wraps a runtime/metrics histogram.
type runtimeHistogram struct {
v atomic.Value
v atomic.Value // v is a pointer to a metrics.Float64Histogram
scaleFactor float64
}
func newRuntimeHistogram(scale float64) *runtimeHistogram {
h := &runtimeHistogram{scaleFactor: scale}
h.update(&metrics.Float64Histogram{})
h.update(new(metrics.Float64Histogram))
return h
}
func RuntimeHistogramFromData(scale float64, hist *metrics.Float64Histogram) *runtimeHistogram {
h := &runtimeHistogram{scaleFactor: scale}
h.update(hist)
return h
}
@ -35,130 +41,107 @@ func (h *runtimeHistogram) update(mh *metrics.Float64Histogram) {
return
}
s := runtimeHistogramSnapshot{
s := metrics.Float64Histogram{
Counts: make([]uint64, len(mh.Counts)),
Buckets: make([]float64, len(mh.Buckets)),
}
copy(s.Counts, mh.Counts)
copy(s.Buckets, mh.Buckets)
for i, b := range s.Buckets {
for i, b := range mh.Buckets {
s.Buckets[i] = b * h.scaleFactor
}
h.v.Store(&s)
}
func (h *runtimeHistogram) load() *runtimeHistogramSnapshot {
return h.v.Load().(*runtimeHistogramSnapshot)
}
func (h *runtimeHistogram) Clear() {
panic("runtimeHistogram does not support Clear")
}
func (h *runtimeHistogram) Update(int64) {
panic("runtimeHistogram does not support Update")
}
func (h *runtimeHistogram) Sample() Sample {
return NilSample{}
// Snapshot returns a non-changing copy of the histogram.
func (h *runtimeHistogram) Snapshot() HistogramSnapshot {
hist := h.v.Load().(*metrics.Float64Histogram)
return newRuntimeHistogramSnapshot(hist)
}
// Snapshot returns a non-changing copy of the histogram.
func (h *runtimeHistogram) Snapshot() Histogram {
return h.load()
type runtimeHistogramSnapshot struct {
internal *metrics.Float64Histogram
calculated bool
// The following fields are (lazily) calculated based on 'internal'
mean float64
count int64
min int64 // min is the lowest sample value.
max int64 // max is the highest sample value.
variance float64
}
// Count returns the sample count.
func (h *runtimeHistogram) Count() int64 {
return h.load().Count()
func newRuntimeHistogramSnapshot(h *metrics.Float64Histogram) *runtimeHistogramSnapshot {
return &runtimeHistogramSnapshot{
internal: h,
}
}
// Mean returns an approximation of the mean.
func (h *runtimeHistogram) Mean() float64 {
return h.load().Mean()
}
// StdDev approximates the standard deviation of the histogram.
func (h *runtimeHistogram) StdDev() float64 {
return h.load().StdDev()
}
// Variance approximates the variance of the histogram.
func (h *runtimeHistogram) Variance() float64 {
return h.load().Variance()
}
// Percentile computes the p'th percentile value.
func (h *runtimeHistogram) Percentile(p float64) float64 {
return h.load().Percentile(p)
}
// Percentiles computes all requested percentile values.
func (h *runtimeHistogram) Percentiles(ps []float64) []float64 {
return h.load().Percentiles(ps)
}
// Max returns the highest sample value.
func (h *runtimeHistogram) Max() int64 {
return h.load().Max()
}
// Min returns the lowest sample value.
func (h *runtimeHistogram) Min() int64 {
return h.load().Min()
}
// Sum returns the sum of all sample values.
func (h *runtimeHistogram) Sum() int64 {
return h.load().Sum()
}
type runtimeHistogramSnapshot metrics.Float64Histogram
func (h *runtimeHistogramSnapshot) Clear() {
panic("runtimeHistogram does not support Clear")
}
func (h *runtimeHistogramSnapshot) Update(int64) {
panic("runtimeHistogram does not support Update")
}
func (h *runtimeHistogramSnapshot) Sample() Sample {
return NilSample{}
}
func (h *runtimeHistogramSnapshot) Snapshot() Histogram {
return h
// calc calculates the values for the snapshot. This method is not thread-safe.
func (h *runtimeHistogramSnapshot) calc() {
h.calculated = true
var (
count int64 // number of samples
sum float64 // approx sum of all sample values
min int64
max float64
)
if len(h.internal.Counts) == 0 {
return
}
for i, c := range h.internal.Counts {
if c == 0 {
continue
}
if count == 0 { // Set min only on the first loop iteration
min = int64(math.Floor(h.internal.Buckets[i]))
}
count += int64(c)
sum += h.midpoint(i) * float64(c)
// Set max on every iteration
edge := h.internal.Buckets[i+1]
if math.IsInf(edge, 1) {
edge = h.internal.Buckets[i]
}
if edge > max {
max = edge
}
}
h.min = min
h.max = int64(max)
h.mean = sum / float64(count)
h.count = count
}
// Count returns the sample count.
func (h *runtimeHistogramSnapshot) Count() int64 {
var count int64
for _, c := range h.Counts {
count += int64(c)
if !h.calculated {
h.calc()
}
return count
return h.count
}
// Size returns the size of the sample at the time the snapshot was taken.
func (h *runtimeHistogramSnapshot) Size() int {
return len(h.internal.Counts)
}
// Mean returns an approximation of the mean.
func (h *runtimeHistogramSnapshot) Mean() float64 {
if len(h.Counts) == 0 {
return 0
if !h.calculated {
h.calc()
}
mean, _ := h.mean()
return mean
}
// mean computes the mean and also the total sample count.
func (h *runtimeHistogramSnapshot) mean() (mean, totalCount float64) {
var sum float64
for i, c := range h.Counts {
midpoint := h.midpoint(i)
sum += midpoint * float64(c)
totalCount += float64(c)
}
return sum / totalCount, totalCount
return h.mean
}
func (h *runtimeHistogramSnapshot) midpoint(bucket int) float64 {
high := h.Buckets[bucket+1]
low := h.Buckets[bucket]
high := h.internal.Buckets[bucket+1]
low := h.internal.Buckets[bucket]
if math.IsInf(high, 1) {
// The edge of the highest bucket can be +Inf, and it's supposed to mean that this
// bucket contains all remaining samples > low. We can't get the middle of an
@ -180,23 +163,31 @@ func (h *runtimeHistogramSnapshot) StdDev() float64 {
// Variance approximates the variance of the histogram.
func (h *runtimeHistogramSnapshot) Variance() float64 {
if len(h.Counts) == 0 {
if len(h.internal.Counts) == 0 {
return 0
}
mean, totalCount := h.mean()
if totalCount <= 1 {
if !h.calculated {
h.calc()
}
if h.count <= 1 {
// There is no variance when there are zero or one items.
return 0
}
// Variance is not calculated in 'calc', because it requires a second iteration.
// Therefore we calculate it lazily in this method, triggered either by
// a direct call to Variance or via StdDev.
if h.variance != 0.0 {
return h.variance
}
var sum float64
for i, c := range h.Counts {
for i, c := range h.internal.Counts {
midpoint := h.midpoint(i)
d := midpoint - mean
d := midpoint - h.mean
sum += float64(c) * (d * d)
}
return sum / (totalCount - 1)
h.variance = sum / float64(h.count-1)
return h.variance
}
// Percentile computes the p'th percentile value.
@ -231,11 +222,11 @@ func (h *runtimeHistogramSnapshot) Percentiles(ps []float64) []float64 {
func (h *runtimeHistogramSnapshot) computePercentiles(thresh []float64) {
var totalCount float64
for i, count := range h.Counts {
for i, count := range h.internal.Counts {
totalCount += float64(count)
for len(thresh) > 0 && thresh[0] < totalCount {
thresh[0] = h.Buckets[i]
thresh[0] = h.internal.Buckets[i]
thresh = thresh[1:]
}
if len(thresh) == 0 {
@ -250,34 +241,25 @@ func (h *runtimeHistogramSnapshot) computePercentiles(thresh []float64) {
// Max returns the highest sample value.
func (h *runtimeHistogramSnapshot) Max() int64 {
for i := len(h.Counts) - 1; i >= 0; i-- {
count := h.Counts[i]
if count > 0 {
edge := h.Buckets[i+1]
if math.IsInf(edge, 1) {
edge = h.Buckets[i]
}
return int64(math.Ceil(edge))
}
if !h.calculated {
h.calc()
}
return 0
return h.max
}
// Min returns the lowest sample value.
func (h *runtimeHistogramSnapshot) Min() int64 {
for i, count := range h.Counts {
if count > 0 {
return int64(math.Floor(h.Buckets[i]))
}
if !h.calculated {
h.calc()
}
return 0
return h.min
}
// Sum returns the sum of all sample values.
func (h *runtimeHistogramSnapshot) Sum() int64 {
var sum float64
for i := range h.Counts {
sum += h.Buckets[i] * float64(h.Counts[i])
for i := range h.internal.Counts {
sum += h.internal.Buckets[i] * float64(h.internal.Counts[i])
}
return int64(math.Ceil(sum))
}
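RuntimeHistogramFromData above wraps an already-collected runtime histogram so it can be registered and read like any other metric. A hedged sketch with a tiny hand-built histogram (the bucket layout is made up for illustration):

package main

import (
	"fmt"
	metrics2 "runtime/metrics"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	// Three buckets [0,1), [1,2), [2,3) holding 1, 2 and 3 samples respectively.
	hist := &metrics2.Float64Histogram{
		Counts:  []uint64{1, 2, 3},
		Buckets: []float64{0, 1, 2, 3},
	}
	h := metrics.RuntimeHistogramFromData(1.0, hist) // scale factor 1.0: buckets used as-is

	snap := h.Snapshot()
	fmt.Println(snap.Count(), snap.Min(), snap.Max(), snap.Mean())
	fmt.Println(snap.Percentiles([]float64{0.5, 0.99}))
}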

View File

@ -1,11 +1,14 @@
package metrics
import (
"bytes"
"encoding/gob"
"fmt"
"math"
"reflect"
"runtime/metrics"
"testing"
"time"
)
var _ Histogram = (*runtimeHistogram)(nil)
@ -74,7 +77,7 @@ func TestRuntimeHistogramStats(t *testing.T) {
for i, test := range tests {
t.Run(fmt.Sprint(i), func(t *testing.T) {
s := runtimeHistogramSnapshot(test.h)
s := RuntimeHistogramFromData(1.0, &test.h).Snapshot()
if v := s.Count(); v != test.Count {
t.Errorf("Count() = %v, want %v", v, test.Count)
@ -121,13 +124,39 @@ func approxEqual(x, y, ε float64) bool {
// This test verifies that requesting Percentiles in unsorted order
// returns them in the requested order.
func TestRuntimeHistogramStatsPercentileOrder(t *testing.T) {
p := runtimeHistogramSnapshot{
s := RuntimeHistogramFromData(1.0, &metrics.Float64Histogram{
Counts: []uint64{1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
Buckets: []float64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
}
result := p.Percentiles([]float64{1, 0.2, 0.5, 0.1, 0.2})
}).Snapshot()
result := s.Percentiles([]float64{1, 0.2, 0.5, 0.1, 0.2})
expected := []float64{10, 2, 5, 1, 2}
if !reflect.DeepEqual(result, expected) {
t.Fatal("wrong result:", result)
}
}
func BenchmarkRuntimeHistogramSnapshotRead(b *testing.B) {
var sLatency = "7\xff\x81\x03\x01\x01\x10Float64Histogram\x01\xff\x82\x00\x01\x02\x01\x06Counts\x01\xff\x84\x00\x01\aBuckets\x01\xff\x86\x00\x00\x00\x16\xff\x83\x02\x01\x01\b[]uint64\x01\xff\x84\x00\x01\x06\x00\x00\x17\xff\x85\x02\x01\x01\t[]float64\x01\xff\x86\x00\x01\b\x00\x00\xfe\x06T\xff\x82\x01\xff\xa2\x00\xfe\r\xef\x00\x01\x02\x02\x04\x05\x04\b\x15\x17 B?6.L;$!2) \x1a? \x190aH7FY6#\x190\x1d\x14\x10\x1b\r\t\x04\x03\x01\x01\x00\x03\x02\x00\x03\x05\x05\x02\x02\x06\x04\v\x06\n\x15\x18\x13'&.\x12=H/L&\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xff\xa3\xfe\xf0\xff\x00\xf8\x95\xd6&\xe8\v.q>\xf8\x95\xd6&\xe8\v.\x81>\xf8\xdfA:\xdc\x11ʼn>\xf8\x95\xd6&\xe8\v.\x91>\xf8:\x8c0\xe2\x8ey\x95>\xf8\xdfA:\xdc\x11ř>\xf8\x84\xf7C֔\x10\x9e>\xf8\x95\xd6&\xe8\v.\xa1>\xf8:\x8c0\xe2\x8ey\xa5>\xf8\xdfA:\xdc\x11ũ>\xf8\x84\xf7C֔\x10\xae>\xf8\x95\xd6&\xe8\v.\xb1>\xf8:\x8c0\xe2\x8ey\xb5>\xf8\xdfA:\xdc\x11Ź>\xf8\x84\xf7C֔\x10\xbe>\xf8\x95\xd6&\xe8\v.\xc1>\xf8:\x8c0\xe2\x8ey\xc5>\xf8\xdfA:\xdc\x11\xc5\xc9>\xf8\x84\xf7C֔\x10\xce>\xf8\x95\xd6&\xe8\v.\xd1>\xf8:\x8c0\xe2\x8ey\xd5>\xf8\xdfA:\xdc\x11\xc5\xd9>\xf8\x84\xf7C֔\x10\xde>\xf8\x95\xd6&\xe8\v.\xe1>\xf8:\x8c0\xe2\x8ey\xe5>\xf8\xdfA:\xdc\x11\xc5\xe9>\xf8\x84\xf7C֔\x10\xee>\xf8\x95\xd6&\xe8\v.\xf1>\xf8:\x8c0\xe2\x8ey\xf5>\xf8\xdfA:\xdc\x11\xc5\xf9>\xf8\x84\xf7C֔\x10\xfe>\xf8\x95\xd6&\xe8\v.\x01?\xf8:\x8c0\xe2\x8ey\x05?\xf8\xdfA:\xdc\x11\xc5\t?\xf8\x84\xf7C֔\x10\x0e?\xf8\x95\xd6&\xe8\v.\x11?\xf8:\x8c0\xe2\x8ey\x15?\xf8\xdfA:\xdc\x11\xc5\x19?\xf8\x84\xf7C֔\x10\x1e?\xf8\x95\xd6&\xe8\v.!?\xf8:\x8c0\xe2\x8ey%?\xf8\xdfA:\xdc\x11\xc5)?\xf8\x84\xf7C֔\x10.?\xf8\x95\xd6&\xe8\v.1?\xf8:\x8c0\xe2\x8ey5?\xf8\xdfA:\xdc\x11\xc59?\xf8\x84\xf7C֔\x10>?\xf8\x95\xd6&\xe8\v.A?\xf8:\x8c0\xe2\x8eyE?\xf8\xdfA:\xdc\x11\xc5I?\xf8\x84\xf7C֔\x10N?\xf8\x95\xd6&\xe8\v.Q?\xf8:\x8c0\xe2\x8eyU?\xf8\xdfA:\xdc\x11\xc5Y?\xf8\x84\xf7C֔\x10^?\xf8\x95\xd6&\xe8\v.a?\xf8:\x8c0\xe2\x8eye?\xf8\xdfA:\xdc\x11\xc5i?\xf8\x84\xf7C֔\x10n?\xf8\x95\xd6&\xe8\v.q?\xf8:\x8c0\xe2\x8eyu?\xf8\xdfA:\xdc\x11\xc5y?\xf8\x84\xf7C֔\x10~?\xf8\x95\xd6&\xe8\v.\x81?\xf8:\x8c0\xe2\x8ey\x85?\xf8\xdfA:\xdc\x11ʼn?\xf8\x84\xf7C֔\x10\x8e?\xf8\x95\xd6&\xe8\v.\x91?\xf8:\x8c0\xe2\x8ey\x95?\xf8\xdfA:\xdc\x11ř?\xf8\x84\xf7C֔\x10\x9e?\xf8\x95\xd6&\xe8\v.\xa1?\xf8:\x8c0\xe2\x8ey\xa5?\xf8\xdfA:\xdc\x11ũ?\xf8\x84\xf7C֔\x10\xae?\xf8\x95\xd6&\xe8\v.\xb1?\xf8:\x8c0\xe2\x8ey\xb5?\xf8\xdfA:\xdc\x11Ź?\xf8\x84\xf7C֔\x10\xbe?\xf8\x95\xd6&\xe8\v.\xc1?\xf8:\x8c0\xe2\x8ey\xc5?\xf8\xdfA:\xdc\x11\xc5\xc9?\xf8\x84\xf7C֔\x10\xce?\xf8\x95\xd6&\xe8\v.\xd1?\xf8:\x8c0\xe2\x8ey\xd5?\xf8\xdfA:\xdc\x11\xc5\xd9?\xf8\x84\xf7C֔\x10\xde?\xf8\x95\xd6&\xe8\v.\xe1?\xf8:\x8c0\xe2\x8ey\xe5?\xf8\xdfA:\xdc\x11\xc5\xe9?\xf8\x84\xf7C֔\x10\xee?\xf8\x95\xd6&\xe8\v.\xf1?\xf8:\x8c0\xe2\x8ey\xf5?\xf8\xdfA:\xdc\x11\xc5\xf9?\xf8\x84\xf7C֔\x10\xfe?\xf8\x95\xd6&\xe8\v.\x01@\xf8:\x8c0\xe2\x8ey\x05@\xf8\xdfA:\xdc\x11\xc5\t@\xf8\x84\xf7C֔\x10\x0e@\xf8\x95\xd6&\xe8\v.\x11@\xf8:\x8c0\xe2\x8ey\x15@\xf8\xdfA:\xdc\x11\xc5\x19@\xf8\x84\xf7C֔\x10\x1e@\xf8\x95\xd6&\xe8\v.!@\xf8:\x8c0\xe2\x8ey%@\xf8\xdfA:\xdc\x11\xc5)@\xf8\x84\xf7C֔\x10.@\xf8\x95\xd6&\xe8\v.1@\xf8:\x8c0\xe2\x8ey5@\xf8\xdfA:\xdc\x11\xc59@\xf8\x84\xf7C֔\x10>@\xf8\x95\xd6&\xe8\v.A@\xf8:\x8c0\xe2\x8eyE@\xf8\xdfA:\xdc\x11\xc5I@\xf8\x84
\xf7C֔\x10N@\xf8\x95\xd6&\xe8\v.Q@\xf8:\x8c0\xe2\x8eyU@\xf8\xdfA:\xdc\x11\xc5Y@\xf8\x84\xf7C֔\x10^@\xf8\x95\xd6&\xe8\v.a@\xf8:\x8c0\xe2\x8eye@\xf8\xdfA:\xdc\x11\xc5i@\xf8\x84\xf7C֔\x10n@\xf8\x95\xd6&\xe8\v.q@\xf8:\x8c0\xe2\x8eyu@\xf8\xdfA:\xdc\x11\xc5y@\xf8\x84\xf7C֔\x10~@\xf8\x95\xd6&\xe8\v.\x81@\xf8:\x8c0\xe2\x8ey\x85@\xf8\xdfA:\xdc\x11ʼn@\xf8\x84\xf7C֔\x10\x8e@\xf8\x95\xd6&\xe8\v.\x91@\xf8:\x8c0\xe2\x8ey\x95@\xf8\xdfA:\xdc\x11ř@\xf8\x84\xf7C֔\x10\x9e@\xf8\x95\xd6&\xe8\v.\xa1@\xf8:\x8c0\xe2\x8ey\xa5@\xf8\xdfA:\xdc\x11ũ@\xf8\x84\xf7C֔\x10\xae@\xf8\x95\xd6&\xe8\v.\xb1@\xf8:\x8c0\xe2\x8ey\xb5@\xf8\xdfA:\xdc\x11Ź@\xf8\x84\xf7C֔\x10\xbe@\xf8\x95\xd6&\xe8\v.\xc1@\xf8:\x8c0\xe2\x8ey\xc5@\xf8\xdfA:\xdc\x11\xc5\xc9@\xf8\x84\xf7C֔\x10\xce@\xf8\x95\xd6&\xe8\v.\xd1@\xf8:\x8c0\xe2\x8ey\xd5@\xf8\xdfA:\xdc\x11\xc5\xd9@\xf8\x84\xf7C֔\x10\xde@\xf8\x95\xd6&\xe8\v.\xe1@\xf8:\x8c0\xe2\x8ey\xe5@\xf8\xdfA:\xdc\x11\xc5\xe9@\xf8\x84\xf7C֔\x10\xee@\xf8\x95\xd6&\xe8\v.\xf1@\xf8:\x8c0\xe2\x8ey\xf5@\xf8\xdfA:\xdc\x11\xc5\xf9@\xf8\x84\xf7C֔\x10\xfe@\xf8\x95\xd6&\xe8\v.\x01A\xfe\xf0\x7f\x00"
dserialize := func(data string) *metrics.Float64Histogram {
var res metrics.Float64Histogram
if err := gob.NewDecoder(bytes.NewReader([]byte(data))).Decode(&res); err != nil {
panic(err)
}
return &res
}
latency := RuntimeHistogramFromData(float64(time.Second), dserialize(sLatency))
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
snap := latency.Snapshot()
// These are the fields that influxdb accesses
_ = snap.Count()
_ = snap.Max()
_ = snap.Mean()
_ = snap.Min()
_ = snap.StdDev()
_ = snap.Variance()
_ = snap.Percentiles([]float64{0.25, 0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
}
}

View File

@ -11,10 +11,7 @@ import (
const rescaleThreshold = time.Hour
// Samples maintain a statistically-significant selection of values from
// a stream.
type Sample interface {
Clear()
type SampleSnapshot interface {
Count() int64
Max() int64
Mean() float64
@ -22,14 +19,19 @@ type Sample interface {
Percentile(float64) float64
Percentiles([]float64) []float64
Size() int
Snapshot() Sample
StdDev() float64
Sum() int64
Update(int64)
Values() []int64
Variance() float64
}
// Samples maintain a statistically-significant selection of values from
// a stream.
type Sample interface {
Snapshot() SampleSnapshot
Clear()
Update(int64)
}
// ExpDecaySample is an exponentially-decaying sample using a forward-decaying
// priority reservoir. See Cormode et al's "Forward Decay: A Practical Time
// Decay Model for Streaming Systems".
@ -77,72 +79,29 @@ func (s *ExpDecaySample) Clear() {
s.values.Clear()
}
// Count returns the number of samples recorded, which may exceed the
// reservoir size.
func (s *ExpDecaySample) Count() int64 {
s.mutex.Lock()
defer s.mutex.Unlock()
return s.count
}
// Max returns the maximum value in the sample, which may not be the maximum
// value ever to be part of the sample.
func (s *ExpDecaySample) Max() int64 {
return SampleMax(s.Values())
}
// Mean returns the mean of the values in the sample.
func (s *ExpDecaySample) Mean() float64 {
return SampleMean(s.Values())
}
// Min returns the minimum value in the sample, which may not be the minimum
// value ever to be part of the sample.
func (s *ExpDecaySample) Min() int64 {
return SampleMin(s.Values())
}
// Percentile returns an arbitrary percentile of values in the sample.
func (s *ExpDecaySample) Percentile(p float64) float64 {
return SamplePercentile(s.Values(), p)
}
// Percentiles returns a slice of arbitrary percentiles of values in the
// sample.
func (s *ExpDecaySample) Percentiles(ps []float64) []float64 {
return SamplePercentiles(s.Values(), ps)
}
// Size returns the size of the sample, which is at most the reservoir size.
func (s *ExpDecaySample) Size() int {
s.mutex.Lock()
defer s.mutex.Unlock()
return s.values.Size()
}
// Snapshot returns a read-only copy of the sample.
func (s *ExpDecaySample) Snapshot() Sample {
func (s *ExpDecaySample) Snapshot() SampleSnapshot {
s.mutex.Lock()
defer s.mutex.Unlock()
vals := s.values.Values()
values := make([]int64, len(vals))
for i, v := range vals {
values[i] = v.v
var (
samples = s.values.Values()
values = make([]int64, len(samples))
max int64 = math.MinInt64
min int64 = math.MaxInt64
sum int64
)
for i, item := range samples {
v := item.v
values[i] = v
sum += v
if v > max {
max = v
}
if v < min {
min = v
}
}
return &SampleSnapshot{
count: s.count,
values: values,
}
}
// StdDev returns the standard deviation of the values in the sample.
func (s *ExpDecaySample) StdDev() float64 {
return SampleStdDev(s.Values())
}
// Sum returns the sum of the values in the sample.
func (s *ExpDecaySample) Sum() int64 {
return SampleSum(s.Values())
return newSampleSnapshotPrecalculated(s.count, values, min, max, sum)
}
// Update samples a new value.
@ -150,23 +109,6 @@ func (s *ExpDecaySample) Update(v int64) {
s.update(time.Now(), v)
}
// Values returns a copy of the values in the sample.
func (s *ExpDecaySample) Values() []int64 {
s.mutex.Lock()
defer s.mutex.Unlock()
vals := s.values.Values()
values := make([]int64, len(vals))
for i, v := range vals {
values[i] = v.v
}
return values
}
// Variance returns the variance of the values in the sample.
func (s *ExpDecaySample) Variance() float64 {
return SampleVariance(s.Values())
}
// update samples a new value at a particular timestamp. This is a method all
// its own to facilitate testing.
func (s *ExpDecaySample) update(t time.Time, v int64) {
@ -202,207 +144,160 @@ func (s *ExpDecaySample) update(t time.Time, v int64) {
// NilSample is a no-op Sample.
type NilSample struct{}
// Clear is a no-op.
func (NilSample) Clear() {}
// Count is a no-op.
func (NilSample) Count() int64 { return 0 }
// Max is a no-op.
func (NilSample) Max() int64 { return 0 }
// Mean is a no-op.
func (NilSample) Mean() float64 { return 0.0 }
// Min is a no-op.
func (NilSample) Min() int64 { return 0 }
// Percentile is a no-op.
func (NilSample) Percentile(p float64) float64 { return 0.0 }
// Percentiles is a no-op.
func (NilSample) Percentiles(ps []float64) []float64 {
return make([]float64, len(ps))
}
// Size is a no-op.
func (NilSample) Size() int { return 0 }
// Sample is a no-op.
func (NilSample) Snapshot() Sample { return NilSample{} }
// StdDev is a no-op.
func (NilSample) StdDev() float64 { return 0.0 }
// Sum is a no-op.
func (NilSample) Sum() int64 { return 0 }
// Update is a no-op.
func (NilSample) Update(v int64) {}
// Values is a no-op.
func (NilSample) Values() []int64 { return []int64{} }
// Variance is a no-op.
func (NilSample) Variance() float64 { return 0.0 }
// SampleMax returns the maximum value of the slice of int64.
func SampleMax(values []int64) int64 {
if len(values) == 0 {
return 0
}
var max int64 = math.MinInt64
for _, v := range values {
if max < v {
max = v
}
}
return max
}
// SampleMean returns the mean value of the slice of int64.
func SampleMean(values []int64) float64 {
if len(values) == 0 {
return 0.0
}
return float64(SampleSum(values)) / float64(len(values))
}
// SampleMin returns the minimum value of the slice of int64.
func SampleMin(values []int64) int64 {
if len(values) == 0 {
return 0
}
var min int64 = math.MaxInt64
for _, v := range values {
if min > v {
min = v
}
}
return min
}
func (NilSample) Clear() {}
func (NilSample) Snapshot() SampleSnapshot { return (*emptySnapshot)(nil) }
func (NilSample) Update(v int64) {}
// SamplePercentile returns an arbitrary percentile of the slice of int64.
func SamplePercentile(values []int64, p float64) float64 {
return SamplePercentiles(values, []float64{p})[0]
return CalculatePercentiles(values, []float64{p})[0]
}
// SamplePercentiles returns a slice of arbitrary percentiles of the slice of
// int64.
func SamplePercentiles(values []int64, ps []float64) []float64 {
// CalculatePercentiles returns a slice of arbitrary percentiles of the slice of
// int64. This method returns interpolated results, so e.g. if there are only two
// values, [0, 10], a 50% percentile will land between them.
//
// Note: As a side-effect, this method will also sort the slice of values.
// Note2: The input format for percentiles is NOT percent! To express 50%, use 0.5, not 50.
func CalculatePercentiles(values []int64, ps []float64) []float64 {
scores := make([]float64, len(ps))
size := len(values)
if size > 0 {
slices.Sort(values)
for i, p := range ps {
pos := p * float64(size+1)
if pos < 1.0 {
scores[i] = float64(values[0])
} else if pos >= float64(size) {
scores[i] = float64(values[size-1])
} else {
lower := float64(values[int(pos)-1])
upper := float64(values[int(pos)])
scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
}
if size == 0 {
return scores
}
slices.Sort(values)
for i, p := range ps {
pos := p * float64(size+1)
if pos < 1.0 {
scores[i] = float64(values[0])
} else if pos >= float64(size) {
scores[i] = float64(values[size-1])
} else {
lower := float64(values[int(pos)-1])
upper := float64(values[int(pos)])
scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
}
}
return scores
}
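// Illustrative sketch (not part of this change): exercising the interpolation
// and clamping behaviour documented above. CalculatePercentiles sorts its
// input in place, so pass a copy if the original ordering matters.
//
//	ps := CalculatePercentiles([]int64{10, 0}, []float64{0.5, 0.99})
//	// ps[0] == 5  (interpolated midpoint of 0 and 10)
//	// ps[1] == 10 (clamped to the largest value)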
// SampleSnapshot is a read-only copy of another Sample.
type SampleSnapshot struct {
// sampleSnapshot is a read-only copy of another Sample.
type sampleSnapshot struct {
count int64
values []int64
max int64
min int64
mean float64
sum int64
variance float64
}
func NewSampleSnapshot(count int64, values []int64) *SampleSnapshot {
return &SampleSnapshot{
// newSampleSnapshotPrecalculated creates a read-only sampleSnapshot, using
// precalculated min/max/sum to avoid re-iterating the values.
func newSampleSnapshotPrecalculated(count int64, values []int64, min, max, sum int64) *sampleSnapshot {
if len(values) == 0 {
return &sampleSnapshot{
count: count,
values: values,
}
}
return &sampleSnapshot{
count: count,
values: values,
max: max,
min: min,
mean: float64(sum) / float64(len(values)),
sum: sum,
}
}
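// Minimal sketch (assumed usage, not part of this change): a caller that has
// already tracked count/min/max/sum while copying the values can build the
// snapshot without a second pass over them:
//
//	snap := newSampleSnapshotPrecalculated(count, values, min, max, sum)
//	_ = snap.Mean() // sum / len(values), computed once in the constructor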
// Clear panics.
func (*SampleSnapshot) Clear() {
panic("Clear called on a SampleSnapshot")
// newSampleSnapshot creates a read-only sampleSnapshot, calculating the min,
// max, sum and mean of the values up front.
func newSampleSnapshot(count int64, values []int64) *sampleSnapshot {
var (
max int64 = math.MinInt64
min int64 = math.MaxInt64
sum int64
)
for _, v := range values {
sum += v
if v > max {
max = v
}
if v < min {
min = v
}
}
return newSampleSnapshotPrecalculated(count, values, min, max, sum)
}
// Count returns the count of inputs at the time the snapshot was taken.
func (s *SampleSnapshot) Count() int64 { return s.count }
func (s *sampleSnapshot) Count() int64 { return s.count }
// Max returns the maximal value at the time the snapshot was taken.
func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) }
func (s *sampleSnapshot) Max() int64 { return s.max }
// Mean returns the mean value at the time the snapshot was taken.
func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) }
func (s *sampleSnapshot) Mean() float64 { return s.mean }
// Min returns the minimal value at the time the snapshot was taken.
func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) }
func (s *sampleSnapshot) Min() int64 { return s.min }
// Percentile returns an arbitrary percentile of values at the time the
// snapshot was taken.
func (s *SampleSnapshot) Percentile(p float64) float64 {
func (s *sampleSnapshot) Percentile(p float64) float64 {
return SamplePercentile(s.values, p)
}
// Percentiles returns a slice of arbitrary percentiles of values at the time
// the snapshot was taken.
func (s *SampleSnapshot) Percentiles(ps []float64) []float64 {
return SamplePercentiles(s.values, ps)
func (s *sampleSnapshot) Percentiles(ps []float64) []float64 {
return CalculatePercentiles(s.values, ps)
}
// Size returns the size of the sample at the time the snapshot was taken.
func (s *SampleSnapshot) Size() int { return len(s.values) }
func (s *sampleSnapshot) Size() int { return len(s.values) }
// Snapshot returns the snapshot.
func (s *SampleSnapshot) Snapshot() Sample { return s }
func (s *sampleSnapshot) Snapshot() SampleSnapshot { return s }
// StdDev returns the standard deviation of values at the time the snapshot was
// taken.
func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) }
// Sum returns the sum of values at the time the snapshot was taken.
func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) }
// Update panics.
func (*SampleSnapshot) Update(int64) {
panic("Update called on a SampleSnapshot")
func (s *sampleSnapshot) StdDev() float64 {
if s.variance == 0.0 {
s.variance = SampleVariance(s.mean, s.values)
}
return math.Sqrt(s.variance)
}
// Sum returns the sum of values at the time the snapshot was taken.
func (s *sampleSnapshot) Sum() int64 { return s.sum }
// Values returns a copy of the values in the sample.
func (s *SampleSnapshot) Values() []int64 {
func (s *sampleSnapshot) Values() []int64 {
values := make([]int64, len(s.values))
copy(values, s.values)
return values
}
// Variance returns the variance of values at the time the snapshot was taken.
func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) }
// SampleStdDev returns the standard deviation of the slice of int64.
func SampleStdDev(values []int64) float64 {
return math.Sqrt(SampleVariance(values))
}
// SampleSum returns the sum of the slice of int64.
func SampleSum(values []int64) int64 {
var sum int64
for _, v := range values {
sum += v
func (s *sampleSnapshot) Variance() float64 {
if s.variance == 0.0 {
s.variance = SampleVariance(s.mean, s.values)
}
return sum
return s.variance
}
// SampleVariance returns the variance of the slice of int64, using the caller-supplied mean.
func SampleVariance(values []int64) float64 {
func SampleVariance(mean float64, values []int64) float64 {
if len(values) == 0 {
return 0.0
}
m := SampleMean(values)
var sum float64
for _, v := range values {
d := float64(v) - m
d := float64(v) - mean
sum += d * d
}
return sum / float64(len(values))
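// Usage sketch (assumption, not part of this change): the caller now supplies
// the mean, typically computed in the same pass that produced the values:
//
//	var sum int64
//	for _, v := range values { // values assumed non-empty
//		sum += v
//	}
//	mean := float64(sum) / float64(len(values))
//	stdDev := math.Sqrt(SampleVariance(mean, values))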
@@ -445,83 +340,14 @@ func (s *UniformSample) Clear() {
s.values = make([]int64, 0, s.reservoirSize)
}
// Count returns the number of samples recorded, which may exceed the
// reservoir size.
func (s *UniformSample) Count() int64 {
s.mutex.Lock()
defer s.mutex.Unlock()
return s.count
}
// Max returns the maximum value in the sample, which may not be the maximum
// value ever to be part of the sample.
func (s *UniformSample) Max() int64 {
s.mutex.Lock()
defer s.mutex.Unlock()
return SampleMax(s.values)
}
// Mean returns the mean of the values in the sample.
func (s *UniformSample) Mean() float64 {
s.mutex.Lock()
defer s.mutex.Unlock()
return SampleMean(s.values)
}
// Min returns the minimum value in the sample, which may not be the minimum
// value ever to be part of the sample.
func (s *UniformSample) Min() int64 {
s.mutex.Lock()
defer s.mutex.Unlock()
return SampleMin(s.values)
}
// Percentile returns an arbitrary percentile of values in the sample.
func (s *UniformSample) Percentile(p float64) float64 {
s.mutex.Lock()
defer s.mutex.Unlock()
return SamplePercentile(s.values, p)
}
// Percentiles returns a slice of arbitrary percentiles of values in the
// sample.
func (s *UniformSample) Percentiles(ps []float64) []float64 {
s.mutex.Lock()
defer s.mutex.Unlock()
return SamplePercentiles(s.values, ps)
}
// Size returns the size of the sample, which is at most the reservoir size.
func (s *UniformSample) Size() int {
s.mutex.Lock()
defer s.mutex.Unlock()
return len(s.values)
}
// Snapshot returns a read-only copy of the sample.
func (s *UniformSample) Snapshot() Sample {
func (s *UniformSample) Snapshot() SampleSnapshot {
s.mutex.Lock()
defer s.mutex.Unlock()
values := make([]int64, len(s.values))
copy(values, s.values)
return &SampleSnapshot{
count: s.count,
values: values,
}
}
// StdDev returns the standard deviation of the values in the sample.
func (s *UniformSample) StdDev() float64 {
s.mutex.Lock()
defer s.mutex.Unlock()
return SampleStdDev(s.values)
}
// Sum returns the sum of the values in the sample.
func (s *UniformSample) Sum() int64 {
s.mutex.Lock()
defer s.mutex.Unlock()
return SampleSum(s.values)
count := s.count
s.mutex.Unlock()
return newSampleSnapshot(count, values)
}
// Update samples a new value.
@@ -544,22 +370,6 @@ func (s *UniformSample) Update(v int64) {
}
}
// Values returns a copy of the values in the sample.
func (s *UniformSample) Values() []int64 {
s.mutex.Lock()
defer s.mutex.Unlock()
values := make([]int64, len(s.values))
copy(values, s.values)
return values
}
// Variance returns the variance of the values in the sample.
func (s *UniformSample) Variance() float64 {
s.mutex.Lock()
defer s.mutex.Unlock()
return SampleVariance(s.values)
}
// expDecaySample represents an individual sample in a heap.
type expDecaySample struct {
k float64

View File

@@ -8,28 +8,36 @@ import (
"time"
)
const epsilonPercentile = .00000000001
// Benchmark{Compute,Copy}{1000,1000000} demonstrate that, even for relatively
// expensive computations like Variance, the cost of copying the Sample, as
// approximated by a make and copy, is much greater than the cost of the
// computation for small samples and only slightly less for large samples.
func BenchmarkCompute1000(b *testing.B) {
s := make([]int64, 1000)
var sum int64
for i := 0; i < len(s); i++ {
s[i] = int64(i)
sum += int64(i)
}
mean := float64(sum) / float64(len(s))
b.ResetTimer()
for i := 0; i < b.N; i++ {
SampleVariance(s)
SampleVariance(mean, s)
}
}
func BenchmarkCompute1000000(b *testing.B) {
s := make([]int64, 1000000)
var sum int64
for i := 0; i < len(s); i++ {
s[i] = int64(i)
sum += int64(i)
}
mean := float64(sum) / float64(len(s))
b.ResetTimer()
for i := 0; i < b.N; i++ {
SampleVariance(s)
SampleVariance(mean, s)
}
}
func BenchmarkCopy1000(b *testing.B) {
@@ -79,65 +87,42 @@ func BenchmarkUniformSample1028(b *testing.B) {
benchmarkSample(b, NewUniformSample(1028))
}
func TestExpDecaySample10(t *testing.T) {
s := NewExpDecaySample(100, 0.99)
for i := 0; i < 10; i++ {
s.Update(int64(i))
}
if size := s.Count(); size != 10 {
t.Errorf("s.Count(): 10 != %v\n", size)
}
if size := s.Size(); size != 10 {
t.Errorf("s.Size(): 10 != %v\n", size)
}
if l := len(s.Values()); l != 10 {
t.Errorf("len(s.Values()): 10 != %v\n", l)
}
for _, v := range s.Values() {
if v > 10 || v < 0 {
t.Errorf("out of range [0, 10): %v\n", v)
}
func min(a, b int) int {
if a < b {
return a
}
return b
}
func TestExpDecaySample100(t *testing.T) {
s := NewExpDecaySample(1000, 0.01)
for i := 0; i < 100; i++ {
s.Update(int64(i))
}
if size := s.Count(); size != 100 {
t.Errorf("s.Count(): 100 != %v\n", size)
}
if size := s.Size(); size != 100 {
t.Errorf("s.Size(): 100 != %v\n", size)
}
if l := len(s.Values()); l != 100 {
t.Errorf("len(s.Values()): 100 != %v\n", l)
}
for _, v := range s.Values() {
if v > 100 || v < 0 {
t.Errorf("out of range [0, 100): %v\n", v)
func TestExpDecaySample(t *testing.T) {
for _, tc := range []struct {
reservoirSize int
alpha float64
updates int
}{
{100, 0.99, 10},
{1000, 0.01, 100},
{100, 0.99, 1000},
} {
sample := NewExpDecaySample(tc.reservoirSize, tc.alpha)
for i := 0; i < tc.updates; i++ {
sample.Update(int64(i))
}
}
}
func TestExpDecaySample1000(t *testing.T) {
s := NewExpDecaySample(100, 0.99)
for i := 0; i < 1000; i++ {
s.Update(int64(i))
}
if size := s.Count(); size != 1000 {
t.Errorf("s.Count(): 1000 != %v\n", size)
}
if size := s.Size(); size != 100 {
t.Errorf("s.Size(): 100 != %v\n", size)
}
if l := len(s.Values()); l != 100 {
t.Errorf("len(s.Values()): 100 != %v\n", l)
}
for _, v := range s.Values() {
if v > 1000 || v < 0 {
t.Errorf("out of range [0, 1000): %v\n", v)
snap := sample.Snapshot()
if have, want := int(snap.Count()), tc.updates; have != want {
t.Errorf("have %d want %d", have, want)
}
if have, want := snap.Size(), min(tc.updates, tc.reservoirSize); have != want {
t.Errorf("have %d want %d", have, want)
}
values := snap.(*sampleSnapshot).values
if have, want := len(values), min(tc.updates, tc.reservoirSize); have != want {
t.Errorf("have %d want %d", have, want)
}
for _, v := range values {
if v > int64(tc.updates) || v < 0 {
t.Errorf("out of range [0, %d): %v", tc.updates, v)
}
}
}
}
@@ -147,15 +132,16 @@ func TestExpDecaySample1000(t *testing.T) {
// The priority becomes +Inf quickly after starting if this is done,
// effectively freezing the set of samples until a rescale step happens.
func TestExpDecaySampleNanosecondRegression(t *testing.T) {
s := NewExpDecaySample(100, 0.99)
sw := NewExpDecaySample(100, 0.99)
for i := 0; i < 100; i++ {
s.Update(10)
sw.Update(10)
}
time.Sleep(1 * time.Millisecond)
for i := 0; i < 100; i++ {
s.Update(20)
sw.Update(20)
}
v := s.Values()
s := sw.Snapshot()
v := s.(*sampleSnapshot).values
avg := float64(0)
for i := 0; i < len(v); i++ {
avg += float64(v[i])
@@ -194,24 +180,27 @@ func TestExpDecaySampleStatistics(t *testing.T) {
for i := 1; i <= 10000; i++ {
s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
}
testExpDecaySampleStatistics(t, s)
testExpDecaySampleStatistics(t, s.Snapshot())
}
func TestUniformSample(t *testing.T) {
s := NewUniformSample(100)
sw := NewUniformSample(100)
for i := 0; i < 1000; i++ {
s.Update(int64(i))
sw.Update(int64(i))
}
s := sw.Snapshot()
if size := s.Count(); size != 1000 {
t.Errorf("s.Count(): 1000 != %v\n", size)
}
if size := s.Size(); size != 100 {
t.Errorf("s.Size(): 100 != %v\n", size)
}
if l := len(s.Values()); l != 100 {
values := s.(*sampleSnapshot).values
if l := len(values); l != 100 {
t.Errorf("len(s.Values()): 100 != %v\n", l)
}
for _, v := range s.Values() {
for _, v := range values {
if v > 1000 || v < 0 {
t.Errorf("out of range [0, 100): %v\n", v)
}
@@ -219,12 +208,13 @@ func TestUniformSample(t *testing.T) {
}
func TestUniformSampleIncludesTail(t *testing.T) {
s := NewUniformSample(100)
sw := NewUniformSample(100)
max := 100
for i := 0; i < max; i++ {
s.Update(int64(i))
sw.Update(int64(i))
}
v := s.Values()
s := sw.Snapshot()
v := s.(*sampleSnapshot).values
sum := 0
exp := (max - 1) * max / 2
for i := 0; i < len(v); i++ {
@@ -250,7 +240,7 @@ func TestUniformSampleStatistics(t *testing.T) {
for i := 1; i <= 10000; i++ {
s.Update(int64(i))
}
testUniformSampleStatistics(t, s)
testUniformSampleStatistics(t, s.Snapshot())
}
func benchmarkSample(b *testing.B, s Sample) {
@@ -267,7 +257,7 @@ func benchmarkSample(b *testing.B, s Sample) {
b.Logf("GC cost: %d ns/op", int(memStats.PauseTotalNs-pauseTotalNs)/b.N)
}
func testExpDecaySampleStatistics(t *testing.T, s Sample) {
func testExpDecaySampleStatistics(t *testing.T, s SampleSnapshot) {
if count := s.Count(); count != 10000 {
t.Errorf("s.Count(): 10000 != %v\n", count)
}
@@ -295,7 +285,7 @@ func testExpDecaySampleStatistics(t *testing.T, s Sample) {
}
}
func testUniformSampleStatistics(t *testing.T, s Sample) {
func testUniformSampleStatistics(t *testing.T, s SampleSnapshot) {
if count := s.Count(); count != 10000 {
t.Errorf("s.Count(): 10000 != %v\n", count)
}
@@ -349,8 +339,22 @@ func TestUniformSampleConcurrentUpdateCount(t *testing.T) {
}
}()
for i := 0; i < 1000; i++ {
s.Count()
s.Snapshot().Count()
time.Sleep(5 * time.Millisecond)
}
quit <- struct{}{}
}
func BenchmarkCalculatePercentiles(b *testing.B) {
pss := []float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999}
var vals []int64
for i := 0; i < 1000; i++ {
vals = append(vals, int64(rand.Int31()))
}
v := make([]int64, len(vals))
b.ResetTimer()
for i := 0; i < b.N; i++ {
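// CalculatePercentiles sorts its input in place, so refresh the working copy each iteration.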
copy(v, vals)
_ = CalculatePercentiles(v, pss)
}
}

View File

@@ -16,15 +16,15 @@ func Syslog(r Registry, d time.Duration, w *syslog.Writer) {
r.Each(func(name string, i interface{}) {
switch metric := i.(type) {
case Counter:
w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Count()))
w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Snapshot().Count()))
case CounterFloat64:
w.Info(fmt.Sprintf("counter %s: count: %f", name, metric.Count()))
w.Info(fmt.Sprintf("counter %s: count: %f", name, metric.Snapshot().Count()))
case Gauge:
w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Value()))
w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Snapshot().Value()))
case GaugeFloat64:
w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Value()))
w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Snapshot().Value()))
case GaugeInfo:
w.Info(fmt.Sprintf("gauge %s: value: %s", name, metric.Value()))
w.Info(fmt.Sprintf("gauge %s: value: %s", name, metric.Snapshot().Value()))
case Healthcheck:
metric.Check()
w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error()))

View File

@@ -1,4 +1,4 @@
put pre.elite.count 978307200 0 host=hal9000
put pre.elite.count 978307200 1337 host=hal9000
put pre.elite.one-minute 978307200 0.00 host=hal9000
put pre.elite.five-minute 978307200 0.00 host=hal9000
put pre.elite.fifteen-minute 978307200 0.00 host=hal9000

View File

@@ -5,26 +5,18 @@ import (
"time"
)
type TimerSnapshot interface {
HistogramSnapshot
MeterSnapshot
}
// Timers capture the duration and rate of events.
type Timer interface {
Count() int64
Max() int64
Mean() float64
Min() int64
Percentile(float64) float64
Percentiles([]float64) []float64
Rate1() float64
Rate5() float64
Rate15() float64
RateMean() float64
Snapshot() Timer
StdDev() float64
Snapshot() TimerSnapshot
Stop()
Sum() int64
Time(func())
Update(time.Duration)
UpdateSince(time.Time)
Variance() float64
Update(time.Duration)
}
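// Usage sketch (assumption, not part of this change): with the snapshot-based
// API, readings go through Snapshot() instead of the live metric:
//
//	t := NewTimer()
//	t.Update(250 * time.Millisecond)
//	snap := t.Snapshot()
//	_ = snap.Count() // 1
//	_ = snap.Max()   // duration recorded in nanoseconds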
// GetOrRegisterTimer returns an existing Timer or constructs and registers a
@@ -78,61 +70,11 @@ func NewTimer() Timer {
// NilTimer is a no-op Timer.
type NilTimer struct{}
// Count is a no-op.
func (NilTimer) Count() int64 { return 0 }
// Max is a no-op.
func (NilTimer) Max() int64 { return 0 }
// Mean is a no-op.
func (NilTimer) Mean() float64 { return 0.0 }
// Min is a no-op.
func (NilTimer) Min() int64 { return 0 }
// Percentile is a no-op.
func (NilTimer) Percentile(p float64) float64 { return 0.0 }
// Percentiles is a no-op.
func (NilTimer) Percentiles(ps []float64) []float64 {
return make([]float64, len(ps))
}
// Rate1 is a no-op.
func (NilTimer) Rate1() float64 { return 0.0 }
// Rate5 is a no-op.
func (NilTimer) Rate5() float64 { return 0.0 }
// Rate15 is a no-op.
func (NilTimer) Rate15() float64 { return 0.0 }
// RateMean is a no-op.
func (NilTimer) RateMean() float64 { return 0.0 }
// Snapshot is a no-op.
func (NilTimer) Snapshot() Timer { return NilTimer{} }
// StdDev is a no-op.
func (NilTimer) StdDev() float64 { return 0.0 }
// Stop is a no-op.
func (NilTimer) Stop() {}
// Sum is a no-op.
func (NilTimer) Sum() int64 { return 0 }
// Time is a no-op.
func (NilTimer) Time(f func()) { f() }
// Update is a no-op.
func (NilTimer) Update(time.Duration) {}
// UpdateSince is a no-op.
func (NilTimer) UpdateSince(time.Time) {}
// Variance is a no-op.
func (NilTimer) Variance() float64 { return 0.0 }
func (NilTimer) Snapshot() TimerSnapshot { return (*emptySnapshot)(nil) }
func (NilTimer) Stop() {}
func (NilTimer) Time(f func()) { f() }
func (NilTimer) Update(time.Duration) {}
func (NilTimer) UpdateSince(time.Time) {}
// StandardTimer is the standard implementation of a Timer and uses a Histogram
// and Meter.
@@ -142,82 +84,21 @@ type StandardTimer struct {
mutex sync.Mutex
}
// Count returns the number of events recorded.
func (t *StandardTimer) Count() int64 {
return t.histogram.Count()
}
// Max returns the maximum value in the sample.
func (t *StandardTimer) Max() int64 {
return t.histogram.Max()
}
// Mean returns the mean of the values in the sample.
func (t *StandardTimer) Mean() float64 {
return t.histogram.Mean()
}
// Min returns the minimum value in the sample.
func (t *StandardTimer) Min() int64 {
return t.histogram.Min()
}
// Percentile returns an arbitrary percentile of the values in the sample.
func (t *StandardTimer) Percentile(p float64) float64 {
return t.histogram.Percentile(p)
}
// Percentiles returns a slice of arbitrary percentiles of the values in the
// sample.
func (t *StandardTimer) Percentiles(ps []float64) []float64 {
return t.histogram.Percentiles(ps)
}
// Rate1 returns the one-minute moving average rate of events per second.
func (t *StandardTimer) Rate1() float64 {
return t.meter.Rate1()
}
// Rate5 returns the five-minute moving average rate of events per second.
func (t *StandardTimer) Rate5() float64 {
return t.meter.Rate5()
}
// Rate15 returns the fifteen-minute moving average rate of events per second.
func (t *StandardTimer) Rate15() float64 {
return t.meter.Rate15()
}
// RateMean returns the meter's mean rate of events per second.
func (t *StandardTimer) RateMean() float64 {
return t.meter.RateMean()
}
// Snapshot returns a read-only copy of the timer.
func (t *StandardTimer) Snapshot() Timer {
func (t *StandardTimer) Snapshot() TimerSnapshot {
t.mutex.Lock()
defer t.mutex.Unlock()
return &TimerSnapshot{
histogram: t.histogram.Snapshot().(*HistogramSnapshot),
meter: t.meter.Snapshot().(*MeterSnapshot),
return &timerSnapshot{
histogram: t.histogram.Snapshot(),
meter: t.meter.Snapshot(),
}
}
// StdDev returns the standard deviation of the values in the sample.
func (t *StandardTimer) StdDev() float64 {
return t.histogram.StdDev()
}
// Stop stops the meter.
func (t *StandardTimer) Stop() {
t.meter.Stop()
}
// Sum returns the sum in the sample.
func (t *StandardTimer) Sum() int64 {
return t.histogram.Sum()
}
// Record the duration of the execution of the given function.
func (t *StandardTimer) Time(f func()) {
ts := time.Now()
@@ -241,86 +122,63 @@ func (t *StandardTimer) UpdateSince(ts time.Time) {
t.meter.Mark(1)
}
// Variance returns the variance of the values in the sample.
func (t *StandardTimer) Variance() float64 {
return t.histogram.Variance()
}
// TimerSnapshot is a read-only copy of another Timer.
type TimerSnapshot struct {
histogram *HistogramSnapshot
meter *MeterSnapshot
// timerSnapshot is a read-only copy of another Timer.
type timerSnapshot struct {
histogram HistogramSnapshot
meter MeterSnapshot
}
// Count returns the number of events recorded at the time the snapshot was
// taken.
func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() }
func (t *timerSnapshot) Count() int64 { return t.histogram.Count() }
// Max returns the maximum value at the time the snapshot was taken.
func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() }
func (t *timerSnapshot) Max() int64 { return t.histogram.Max() }
// Size returns the size of the sample at the time the snapshot was taken.
func (t *timerSnapshot) Size() int { return t.histogram.Size() }
// Mean returns the mean value at the time the snapshot was taken.
func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() }
func (t *timerSnapshot) Mean() float64 { return t.histogram.Mean() }
// Min returns the minimum value at the time the snapshot was taken.
func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() }
func (t *timerSnapshot) Min() int64 { return t.histogram.Min() }
// Percentile returns an arbitrary percentile of sampled values at the time the
// snapshot was taken.
func (t *TimerSnapshot) Percentile(p float64) float64 {
func (t *timerSnapshot) Percentile(p float64) float64 {
return t.histogram.Percentile(p)
}
// Percentiles returns a slice of arbitrary percentiles of sampled values at
// the time the snapshot was taken.
func (t *TimerSnapshot) Percentiles(ps []float64) []float64 {
func (t *timerSnapshot) Percentiles(ps []float64) []float64 {
return t.histogram.Percentiles(ps)
}
// Rate1 returns the one-minute moving average rate of events per second at the
// time the snapshot was taken.
func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() }
func (t *timerSnapshot) Rate1() float64 { return t.meter.Rate1() }
// Rate5 returns the five-minute moving average rate of events per second at
// the time the snapshot was taken.
func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() }
func (t *timerSnapshot) Rate5() float64 { return t.meter.Rate5() }
// Rate15 returns the fifteen-minute moving average rate of events per second
// at the time the snapshot was taken.
func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() }
func (t *timerSnapshot) Rate15() float64 { return t.meter.Rate15() }
// RateMean returns the meter's mean rate of events per second at the time the
// snapshot was taken.
func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() }
// Snapshot returns the snapshot.
func (t *TimerSnapshot) Snapshot() Timer { return t }
func (t *timerSnapshot) RateMean() float64 { return t.meter.RateMean() }
// StdDev returns the standard deviation of the values at the time the snapshot
// was taken.
func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() }
// Stop is a no-op.
func (t *TimerSnapshot) Stop() {}
func (t *timerSnapshot) StdDev() float64 { return t.histogram.StdDev() }
// Sum returns the sum at the time the snapshot was taken.
func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() }
// Time panics.
func (*TimerSnapshot) Time(func()) {
panic("Time called on a TimerSnapshot")
}
// Update panics.
func (*TimerSnapshot) Update(time.Duration) {
panic("Update called on a TimerSnapshot")
}
// UpdateSince panics.
func (*TimerSnapshot) UpdateSince(time.Time) {
panic("UpdateSince called on a TimerSnapshot")
}
func (t *timerSnapshot) Sum() int64 { return t.histogram.Sum() }
// Variance returns the variance of the values at the time the snapshot was
// taken.
func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() }
func (t *timerSnapshot) Variance() float64 { return t.histogram.Variance() }

View File

@@ -18,7 +18,7 @@ func BenchmarkTimer(b *testing.B) {
func TestGetOrRegisterTimer(t *testing.T) {
r := NewRegistry()
NewRegisteredTimer("foo", r).Update(47)
if tm := GetOrRegisterTimer("foo", r); tm.Count() != 1 {
if tm := GetOrRegisterTimer("foo", r).Snapshot(); tm.Count() != 1 {
t.Fatal(tm)
}
}
@@ -27,7 +27,7 @@ func TestTimerExtremes(t *testing.T) {
tm := NewTimer()
tm.Update(math.MaxInt64)
tm.Update(0)
if stdDev := tm.StdDev(); stdDev != 4.611686018427388e+18 {
if stdDev := tm.Snapshot().StdDev(); stdDev != 4.611686018427388e+18 {
t.Errorf("tm.StdDev(): 4.611686018427388e+18 != %v\n", stdDev)
}
}
@@ -56,7 +56,7 @@ func TestTimerFunc(t *testing.T) {
})
var (
drift = time.Millisecond * 2
measured = time.Duration(tm.Max())
measured = time.Duration(tm.Snapshot().Max())
ceil = actualTime + drift
floor = actualTime - drift
)
@@ -66,7 +66,7 @@ func TestTimerFunc(t *testing.T) {
}
func TestTimerZero(t *testing.T) {
tm := NewTimer()
tm := NewTimer().Snapshot()
if count := tm.Count(); count != 0 {
t.Errorf("tm.Count(): 0 != %v\n", count)
}
@@ -110,5 +110,5 @@ func ExampleGetOrRegisterTimer() {
m := "account.create.latency"
t := GetOrRegisterTimer(m, nil)
t.Update(47)
fmt.Println(t.Max()) // Output: 47
fmt.Println(t.Snapshot().Max()) // Output: 47
}

View File

@@ -29,19 +29,19 @@ func WriteOnce(r Registry, w io.Writer) {
switch metric := namedMetric.m.(type) {
case Counter:
fmt.Fprintf(w, "counter %s\n", namedMetric.name)
fmt.Fprintf(w, " count: %9d\n", metric.Count())
fmt.Fprintf(w, " count: %9d\n", metric.Snapshot().Count())
case CounterFloat64:
fmt.Fprintf(w, "counter %s\n", namedMetric.name)
fmt.Fprintf(w, " count: %f\n", metric.Count())
fmt.Fprintf(w, " count: %f\n", metric.Snapshot().Count())
case Gauge:
fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
fmt.Fprintf(w, " value: %9d\n", metric.Value())
fmt.Fprintf(w, " value: %9d\n", metric.Snapshot().Value())
case GaugeFloat64:
fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
fmt.Fprintf(w, " value: %f\n", metric.Value())
fmt.Fprintf(w, " value: %f\n", metric.Snapshot().Value())
case GaugeInfo:
fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
fmt.Fprintf(w, " value: %s\n", metric.Value().String())
fmt.Fprintf(w, " value: %s\n", metric.Snapshot().Value().String())
case Healthcheck:
metric.Check()
fmt.Fprintf(w, "healthcheck %s\n", namedMetric.name)

View File

@@ -358,7 +358,7 @@ func (c *CliqueConfig) String() string {
func (c *ChainConfig) Description() string {
var banner string
// Create some basinc network config output
// Create some basic network config output
network := NetworkNames[c.ChainID.String()]
if network == "" {
network = "unknown"

View File

@@ -23,7 +23,7 @@ import (
const (
VersionMajor = 1 // Major version component of the current release
VersionMinor = 13 // Minor version component of the current release
VersionPatch = 0 // Patch version component of the current release
VersionPatch = 1 // Patch version component of the current release
VersionMeta = "stable" // Version metadata to append to the version string
)

View File

@@ -73,9 +73,8 @@ type Config struct {
func (cfg *Config) process() (code []byte, err error) {
// Load packages.
pcfg := &packages.Config{
Mode: packages.NeedName | packages.NeedTypes | packages.NeedImports | packages.NeedDeps,
Dir: cfg.Dir,
BuildFlags: []string{"-tags", "norlpgen"},
Mode: packages.NeedName | packages.NeedTypes,
Dir: cfg.Dir,
}
ps, err := packages.Load(pcfg, pathOfPackageRLP, ".")
if err != nil {
@@ -117,8 +116,6 @@ func (cfg *Config) process() (code []byte, err error) {
// This is done here to avoid processing these lines with gofmt.
var header bytes.Buffer
fmt.Fprint(&header, "// Code generated by rlpgen. DO NOT EDIT.\n\n")
fmt.Fprint(&header, "//go:build !norlpgen\n")
fmt.Fprint(&header, "// +build !norlpgen\n\n")
return append(header.Bytes(), code...), nil
}