swarm/network: measure time of messages in priority queue (#19250)
This commit is contained in: parent c53c5e616f, commit baded64d88
@@ -40,6 +40,7 @@ var (
 	allhosts  string
 	hosts     []string
 	filesize  int
+	inputSeed int
 	syncDelay int
 	httpPort  int
 	wsPort    int
@@ -74,6 +75,12 @@ func main() {
 			Usage:       "ws port",
 			Destination: &wsPort,
 		},
+		cli.IntFlag{
+			Name:        "seed",
+			Value:       0,
+			Usage:       "input seed in case we need deterministic upload",
+			Destination: &inputSeed,
+		},
 		cli.IntFlag{
 			Name:  "filesize",
 			Value: 1024,
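The new seed flag follows the same urfave/cli pattern as the surrounding flags: the parsed value is written straight into a package-level variable via Destination. A minimal, self-contained sketch of that pattern (the app wiring, import path and fallback logic are illustrative assumptions, not code from this commit):

package main

import (
    "fmt"
    "math/rand"
    "os"

    cli "gopkg.in/urfave/cli.v1" // assumed v1 API, as used elsewhere in this repository
)

var inputSeed int

func main() {
    app := cli.NewApp()
    app.Flags = []cli.Flag{
        cli.IntFlag{
            Name:        "seed",
            Value:       0,
            Usage:       "input seed in case we need deterministic upload",
            Destination: &inputSeed, // parsed value lands directly in the package-level var
        },
    }
    app.Action = func(ctx *cli.Context) error {
        seed := inputSeed
        if seed == 0 {
            seed = int(rand.Int31()) // fall back to a random seed when none is given
        }
        fmt.Println("using seed", seed)
        return nil
    }
    if err := app.Run(os.Args); err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
}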
@@ -39,6 +39,11 @@ import (
 )
 
 func uploadAndSyncCmd(ctx *cli.Context, tuid string) error {
+	// use input seed if it has been set
+	if inputSeed != 0 {
+		seed = inputSeed
+	}
+
 	randomBytes := testutil.RandomBytes(seed, filesize*1000)
 
 	errc := make(chan error)
@@ -47,37 +52,28 @@ func uploadAndSyncCmd(ctx *cli.Context, tuid string) error {
 		errc <- uploadAndSync(ctx, randomBytes, tuid)
 	}()
 
+	var err error
 	select {
-	case err := <-errc:
+	case err = <-errc:
 		if err != nil {
 			metrics.GetOrRegisterCounter(fmt.Sprintf("%s.fail", commandName), nil).Inc(1)
 		}
-		return err
 	case <-time.After(time.Duration(timeout) * time.Second):
 		metrics.GetOrRegisterCounter(fmt.Sprintf("%s.timeout", commandName), nil).Inc(1)
 
-		e := fmt.Errorf("timeout after %v sec", timeout)
+		err = fmt.Errorf("timeout after %v sec", timeout)
+	}
 
-		// trigger debug functionality on randomBytes
-		err := trackChunks(randomBytes[:])
-		if err != nil {
-			e = fmt.Errorf("%v; triggerChunkDebug failed: %v", e, err)
-		}
+	// trigger debug functionality on randomBytes
+	e := trackChunks(randomBytes[:])
+	if e != nil {
+		log.Error(e.Error())
+	}
 
-		return e
-	}
-
-	// trigger debug functionality on randomBytes even on successful runs
-	err := trackChunks(randomBytes[:])
-	if err != nil {
-		log.Error(err.Error())
-	}
-
-	return nil
+	return err
 }
 
 func trackChunks(testData []byte) error {
-	log.Warn("Test timed out, running chunk debug sequence")
-
 	addrs, err := getAllRefs(testData)
 	if err != nil {
 		return err
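The refactor above keeps a single err across the select, drops the early return from the error case, and runs the chunk-debug hook once after the select so it fires on success, failure and timeout alike. A minimal standalone sketch of that pattern (function names are illustrative, not the smoke-test code):

package main

import (
    "errors"
    "fmt"
    "log"
    "time"
)

// runWithTimeout starts work in a goroutine and waits for either its result
// or a timeout; debugHook runs in every case before the error is returned.
func runWithTimeout(work func() error, debugHook func() error, timeout time.Duration) error {
    errc := make(chan error)
    go func() {
        errc <- work()
    }()

    var err error
    select {
    case err = <-errc:
        // work finished, possibly with an error
    case <-time.After(timeout):
        err = fmt.Errorf("timeout after %v", timeout)
    }

    // always run the debug/inspection hook, even on success
    if e := debugHook(); e != nil {
        log.Print(e)
    }

    return err
}

func main() {
    err := runWithTimeout(
        func() error { time.Sleep(50 * time.Millisecond); return nil },
        func() error { return errors.New("example debug output") },
        time.Second,
    )
    fmt.Println("result:", err)
}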
@@ -94,14 +90,14 @@ func trackChunks(testData []byte) error {
 
 		rpcClient, err := rpc.Dial(httpHost)
 		if err != nil {
-			log.Error("Error dialing host", "err", err)
+			log.Error("error dialing host", "err", err, "host", httpHost)
 			continue
 		}
 
 		var hasInfo []api.HasInfo
 		err = rpcClient.Call(&hasInfo, "bzz_has", addrs)
 		if err != nil {
-			log.Error("Error calling host", "err", err)
+			log.Error("error calling rpc client", "err", err, "host", httpHost)
 			continue
 		}
 
@@ -125,7 +121,6 @@ func trackChunks(testData []byte) error {
 }
 
 func getAllRefs(testData []byte) (storage.AddressCollection, error) {
-	log.Trace("Getting all references for given root hash")
 	datadir, err := ioutil.TempDir("", "chunk-debug")
 	if err != nil {
 		return nil, fmt.Errorf("unable to create temp dir: %v", err)
@@ -91,6 +91,7 @@ func (r *reporter) makeClient() (err error) {
 		URL:      r.url,
 		Username: r.username,
 		Password: r.password,
+		Timeout:  10 * time.Second,
 	})
 
 	return
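The added Timeout caps how long a metrics report can block on a slow or unreachable InfluxDB endpoint. The reporter sets the field on the vendored InfluxDB client config; in standard-library terms the effect is the same as giving an HTTP client a hard deadline, as in this sketch (the ping URL is an assumed local endpoint, not part of this commit):

package main

import (
    "fmt"
    "net/http"
    "time"
)

func main() {
    // A client with no Timeout can hang forever on a stuck endpoint;
    // a 10s cap turns that into a bounded error instead.
    client := &http.Client{Timeout: 10 * time.Second}

    resp, err := client.Get("http://127.0.0.1:8086/ping") // assumed local InfluxDB ping endpoint
    if err != nil {
        fmt.Println("report skipped:", err)
        return
    }
    defer resp.Body.Close()
    fmt.Println("status:", resp.Status)
}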
@@ -204,24 +204,24 @@ func (f *Fetcher) run(peers *sync.Map) {
 
 		// incoming request
 		case hopCount = <-f.requestC:
-			log.Trace("new request", "request addr", f.addr)
 			// 2) chunk is requested, set requested flag
 			// launch a request iff none been launched yet
 			doRequest = !requested
+			log.Trace("new request", "request addr", f.addr, "doRequest", doRequest)
 			requested = true
 
 		// peer we requested from is gone. fall back to another
 		// and remove the peer from the peers map
 		case id := <-gone:
-			log.Trace("peer gone", "peer id", id.String(), "request addr", f.addr)
 			peers.Delete(id.String())
 			doRequest = requested
+			log.Trace("peer gone", "peer id", id.String(), "request addr", f.addr, "doRequest", doRequest)
 
 		// search timeout: too much time passed since the last request,
 		// extend the search to a new peer if we can find one
 		case <-waitC:
-			log.Trace("search timed out: requesting", "request addr", f.addr)
 			doRequest = requested
+			log.Trace("search timed out: requesting", "request addr", f.addr, "doRequest", doRequest)
 
 		// all Fetcher context closed, can quit
 		case <-f.ctx.Done():

@@ -288,6 +288,7 @@ func (f *Fetcher) doRequest(gone chan *enode.ID, peersToSkip *sync.Map, sources
 	for i = 0; i < len(sources); i++ {
 		req.Source = sources[i]
 		var err error
+		log.Trace("fetcher.doRequest", "request addr", f.addr, "peer", req.Source.String())
 		sourceID, quit, err = f.protoRequestFunc(f.ctx, req)
 		if err == nil {
 			// remove the peer from known sources
@@ -28,8 +28,9 @@ package priorityqueue
 import (
 	"context"
 	"errors"
+	"time"
 
-	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/metrics"
 )
 
 var (

@@ -69,13 +70,16 @@ READ:
 		case <-ctx.Done():
 			return
 		case x := <-q:
-			log.Trace("priority.queue f(x)", "p", p, "len(Queues[p])", len(pq.Queues[p]))
-			f(x)
+			val := x.(struct {
+				v interface{}
+				t time.Time
+			})
+			f(val.v)
+			metrics.GetOrRegisterResettingTimer("pq.run", nil).UpdateSince(val.t)
 			p = top
 		default:
 			if p > 0 {
 				p--
-				log.Trace("priority.queue p > 0", "p", p)
 				continue READ
 			}
 			p = top

@@ -83,7 +87,6 @@ READ:
 			case <-ctx.Done():
 				return
 			case <-pq.wakeup:
-				log.Trace("priority.queue wakeup", "p", p)
 			}
 		}
 	}

@@ -95,9 +98,15 @@ func (pq *PriorityQueue) Push(x interface{}, p int) error {
 	if p < 0 || p >= len(pq.Queues) {
 		return errBadPriority
 	}
-	log.Trace("priority.queue push", "p", p, "len(Queues[p])", len(pq.Queues[p]))
+	val := struct {
+		v interface{}
+		t time.Time
+	}{
+		x,
+		time.Now(),
+	}
 	select {
-	case pq.Queues[p] <- x:
+	case pq.Queues[p] <- val:
 	default:
 		return ErrContention
 	}
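This is the change the commit title refers to: Push wraps every value together with its enqueue time, and the run loop unwraps it, calls the consumer, and records how long the message sat in the queue under the pq.run resetting timer. A minimal standalone sketch of the wrap-and-measure idea, without the go-ethereum metrics dependency (all names below are illustrative):

package main

import (
    "fmt"
    "time"
)

// queuedItem pairs a value with the time it entered the queue, the same idea
// as the anonymous struct pushed in the diff above.
type queuedItem struct {
    v interface{}
    t time.Time
}

func handle(v interface{}) {
    fmt.Println("handled:", v)
}

func main() {
    q := make(chan queuedItem, 16)

    // producer: wrap the value with an enqueue timestamp
    q <- queuedItem{v: "chunk-request", t: time.Now()}

    time.Sleep(25 * time.Millisecond) // simulate the item waiting in the queue

    // consumer: unwrap, handle, and record how long the item sat in the queue
    item := <-q
    handle(item.v)
    fmt.Printf("time in queue: %v\n", time.Since(item.t)) // fed into a resetting timer in the real code
}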
@@ -185,6 +185,7 @@ func (d *Delivery) handleRetrieveRequestMsg(ctx context.Context, sp *Peer, req *
 			if err != nil {
 				log.Warn("ERROR in handleRetrieveRequestMsg", "err", err)
 			}
+			osp.LogFields(olog.Bool("delivered", true))
 			return
 		}
 		osp.LogFields(olog.Bool("skipCheck", false))

@@ -216,6 +217,10 @@ type ChunkDeliveryMsgSyncing ChunkDeliveryMsg
 
 // chunk delivery msg is response to retrieverequest msg
 func (d *Delivery) handleChunkDeliveryMsg(ctx context.Context, sp *Peer, req *ChunkDeliveryMsg) error {
+	var osp opentracing.Span
+	ctx, osp = spancontext.StartSpan(
+		ctx,
+		"handle.chunk.delivery")
 
 	processReceivedChunksCount.Inc(1)
 

@@ -223,13 +228,18 @@ func (d *Delivery) handleChunkDeliveryMsg(ctx context.Context, sp *Peer, req *Ch
 	spanId := fmt.Sprintf("stream.send.request.%v.%v", sp.ID(), req.Addr)
 	span := tracing.ShiftSpanByKey(spanId)
 
+	log.Trace("handle.chunk.delivery", "ref", req.Addr, "from peer", sp.ID())
+
 	go func() {
+		defer osp.Finish()
+
 		if span != nil {
 			span.LogFields(olog.String("finish", "from handleChunkDeliveryMsg"))
 			defer span.Finish()
 		}
 
 		req.peer = sp
+		log.Trace("handle.chunk.delivery", "put", req.Addr)
 		err := d.chunkStore.Put(ctx, storage.NewChunk(req.Addr, req.SData))
 		if err != nil {
 			if err == storage.ErrChunkInvalid {

@@ -239,6 +249,7 @@ func (d *Delivery) handleChunkDeliveryMsg(ctx context.Context, sp *Peer, req *Ch
 				req.peer.Drop(err)
 			}
 		}
+		log.Trace("handle.chunk.delivery", "done put", req.Addr, "err", err)
 	}()
 	return nil
 }

@@ -284,6 +295,7 @@ func (d *Delivery) RequestFromPeers(ctx context.Context, req *network.Request) (
 	// this span will finish only when delivery is handled (or times out)
 	ctx = context.WithValue(ctx, tracing.StoreLabelId, "stream.send.request")
 	ctx = context.WithValue(ctx, tracing.StoreLabelMeta, fmt.Sprintf("%v.%v", sp.ID(), req.Addr))
+	log.Trace("request.from.peers", "peer", sp.ID(), "ref", req.Addr)
 	err := sp.SendPriority(ctx, &RetrieveRequestMsg{
 		Addr:      req.Addr,
 		SkipCheck: req.SkipCheck,
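handleChunkDeliveryMsg now opens a span when the message arrives and finishes it with defer osp.Finish() inside the goroutine, so the span covers the asynchronous chunk store write rather than only the synchronous handler body. A rough sketch of that shape using the public opentracing-go helper instead of swarm's spancontext package (names and timings are illustrative):

package main

import (
    "context"
    "fmt"
    "time"

    opentracing "github.com/opentracing/opentracing-go"
)

// handle starts a span for an incoming message and finishes it only when the
// asynchronous store operation is done.
func handle(ctx context.Context, payload string) {
    // The returned context (ignored here) would normally carry the span downstream.
    osp, _ := opentracing.StartSpanFromContext(ctx, "handle.chunk.delivery")

    go func() {
        defer osp.Finish()                // span covers the async work, not just the handler body
        time.Sleep(10 * time.Millisecond) // stand-in for the chunk store Put
        fmt.Println("stored", payload)
    }()
}

func main() {
    handle(context.Background(), "example")
    time.Sleep(50 * time.Millisecond) // give the goroutine time to finish
}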
@@ -910,7 +910,7 @@ func (r *Registry) APIs() []rpc.API {
 			Namespace: "stream",
 			Version:   "3.0",
 			Service:   r.api,
-			Public:    true,
+			Public:    false,
 		},
 	}
 }
@@ -536,7 +536,6 @@ func (r *LazyChunkReader) join(ctx context.Context, b []byte, off int64, eoff in
 			chunkData, err := r.getter.Get(ctx, Reference(childAddress))
 			if err != nil {
 				metrics.GetOrRegisterResettingTimer("lcr.getter.get.err", nil).UpdateSince(startTime)
-				log.Debug("lazychunkreader.join", "key", fmt.Sprintf("%x", childAddress), "err", err)
 				select {
 				case errC <- fmt.Errorf("chunk %v-%v not found; key: %s", off, off+treeSize, fmt.Sprintf("%x", childAddress)):
 				case <-quitC:

@@ -561,12 +560,12 @@ func (r *LazyChunkReader) join(ctx context.Context, b []byte, off int64, eoff in
 
 // Read keeps a cursor so cannot be called simulateously, see ReadAt
 func (r *LazyChunkReader) Read(b []byte) (read int, err error) {
-	log.Debug("lazychunkreader.read", "key", r.addr)
+	log.Trace("lazychunkreader.read", "key", r.addr)
 	metrics.GetOrRegisterCounter("lazychunkreader.read", nil).Inc(1)
 
 	read, err = r.ReadAt(b, r.off)
 	if err != nil && err != io.EOF {
-		log.Debug("lazychunkreader.readat", "read", read, "err", err)
+		log.Trace("lazychunkreader.readat", "read", read, "err", err)
 		metrics.GetOrRegisterCounter("lazychunkreader.read.err", nil).Inc(1)
 	}
 
@@ -87,7 +87,9 @@ func (n *NetStore) Put(ctx context.Context, ch Chunk) error {
 
 	// if chunk is now put in the store, check if there was an active fetcher and call deliver on it
 	// (this delivers the chunk to requestors via the fetcher)
+	log.Trace("n.getFetcher", "ref", ch.Address())
 	if f := n.getFetcher(ch.Address()); f != nil {
+		log.Trace("n.getFetcher deliver", "ref", ch.Address())
 		f.deliver(ctx, ch)
 	}
 	return nil

@@ -341,5 +343,6 @@ func (f *fetcher) deliver(ctx context.Context, ch Chunk) {
 		f.chunk = ch
 		// closing the deliveredC channel will terminate ongoing requests
 		close(f.deliveredC)
+		log.Trace("n.getFetcher close deliveredC", "ref", ch.Address())
 	})
 }
@@ -522,6 +522,8 @@ func (s *Swarm) APIs() []rpc.API {
 
 	apis = append(apis, s.bzz.APIs()...)
 
+	apis = append(apis, s.streamer.APIs()...)
+
 	if s.ps != nil {
 		apis = append(apis, s.ps.APIs()...)
 	}
vendor/github.com/opentracing/opentracing-go/CHANGELOG.md (generated, vendored)
@@ -10,5 +10,5 @@ Changes by Version
 1.0.0 (2016-09-26)
 -------------------
 
-- This release implements OpenTracing Specification 1.0 (http://opentracing.io/spec)
+- This release implements OpenTracing Specification 1.0 (https://opentracing.io/spec)
 
vendor/github.com/opentracing/opentracing-go/Makefile (generated, vendored)
@@ -1,26 +1,15 @@
-PACKAGES := . ./mocktracer/... ./ext/...
-
 .DEFAULT_GOAL := test-and-lint
 
-.PHONE: test-and-lint
-
+.PHONY: test-and-lint
 test-and-lint: test lint
 
 .PHONY: test
 test:
 	go test -v -cover -race ./...
 
+.PHONY: cover
 cover:
-	@rm -rf cover-all.out
-	$(foreach pkg, $(PACKAGES), $(MAKE) cover-pkg PKG=$(pkg) || true;)
-	@grep mode: cover.out > coverage.out
-	@cat cover-all.out >> coverage.out
-	go tool cover -html=coverage.out -o cover.html
-	@rm -rf cover.out cover-all.out coverage.out
-
-cover-pkg:
-	go test -coverprofile cover.out $(PKG)
-	@grep -v mode: cover.out >> cover-all.out
+	go test -v -coverprofile=coverage.txt -covermode=atomic -race ./...
 
 .PHONY: lint
 lint:

@@ -29,4 +18,3 @@ lint:
 	@# Run again with magic to exit non-zero if golint outputs anything.
 	@! (golint ./... | read dummy)
 	go vet ./...
-
vendor/github.com/opentracing/opentracing-go/README.md (generated, vendored)
@@ -8,8 +8,8 @@ This package is a Go platform API for OpenTracing.
 ## Required Reading
 
 In order to understand the Go platform API, one must first be familiar with the
-[OpenTracing project](http://opentracing.io) and
-[terminology](http://opentracing.io/documentation/pages/spec.html) more specifically.
+[OpenTracing project](https://opentracing.io) and
+[terminology](https://opentracing.io/specification/) more specifically.
 
 ## API overview for those adding instrumentation
 
vendor/github.com/opentracing/opentracing-go/globaltracer.go (generated, vendored)
@@ -1,7 +1,12 @@
 package opentracing
 
+type registeredTracer struct {
+	tracer       Tracer
+	isRegistered bool
+}
+
 var (
-	globalTracer Tracer = NoopTracer{}
+	globalTracer = registeredTracer{NoopTracer{}, false}
 )
 
 // SetGlobalTracer sets the [singleton] opentracing.Tracer returned by

@@ -11,22 +16,27 @@ var (
 // Prior to calling `SetGlobalTracer`, any Spans started via the `StartSpan`
 // (etc) globals are noops.
 func SetGlobalTracer(tracer Tracer) {
-	globalTracer = tracer
+	globalTracer = registeredTracer{tracer, true}
 }
 
 // GlobalTracer returns the global singleton `Tracer` implementation.
 // Before `SetGlobalTracer()` is called, the `GlobalTracer()` is a noop
 // implementation that drops all data handed to it.
 func GlobalTracer() Tracer {
-	return globalTracer
+	return globalTracer.tracer
 }
 
 // StartSpan defers to `Tracer.StartSpan`. See `GlobalTracer()`.
 func StartSpan(operationName string, opts ...StartSpanOption) Span {
-	return globalTracer.StartSpan(operationName, opts...)
+	return globalTracer.tracer.StartSpan(operationName, opts...)
 }
 
 // InitGlobalTracer is deprecated. Please use SetGlobalTracer.
 func InitGlobalTracer(tracer Tracer) {
 	SetGlobalTracer(tracer)
 }
+
+// IsGlobalTracerRegistered returns a `bool` to indicate if a tracer has been globally registered
+func IsGlobalTracerRegistered() bool {
+	return globalTracer.isRegistered
+}
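Wrapping the global tracer in registeredTracer lets callers distinguish the default NoopTracer from a tracer that was explicitly registered. A small usage sketch against the vendored API shown above:

package main

import (
    "fmt"

    opentracing "github.com/opentracing/opentracing-go"
)

func main() {
    // Before SetGlobalTracer, the global tracer is a NoopTracer and is not
    // considered "registered".
    fmt.Println(opentracing.IsGlobalTracerRegistered()) // false

    opentracing.SetGlobalTracer(opentracing.NoopTracer{})
    fmt.Println(opentracing.IsGlobalTracerRegistered()) // true: any explicit registration counts
}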
vendor/github.com/opentracing/opentracing-go/propagation.go (generated, vendored)
@@ -160,7 +160,7 @@ type HTTPHeadersCarrier http.Header
 // Set conforms to the TextMapWriter interface.
 func (c HTTPHeadersCarrier) Set(key, val string) {
 	h := http.Header(c)
-	h.Add(key, val)
+	h.Set(key, val)
 }
 
 // ForeachKey conforms to the TextMapReader interface.
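Switching the carrier from Add to Set makes repeated injection of the same key idempotent: Add appends another value, while Set replaces whatever is already there. A small standard-library illustration:

package main

import (
    "fmt"
    "net/http"
)

func main() {
    h := http.Header{}

    h.Add("Trace-Id", "a")
    h.Add("Trace-Id", "b")
    fmt.Println(h["Trace-Id"]) // [a b]: duplicates accumulate

    h.Set("Trace-Id", "c")
    fmt.Println(h["Trace-Id"]) // [c]: Set replaces all previous values
}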
vendor/vendor.json (vendored)
@@ -352,10 +352,10 @@
 			"revisionTime": "2017-01-28T05:05:32Z"
 		},
 		{
-			"checksumSHA1": "wIcN7tZiF441h08RHAm4NV8cYO4=",
+			"checksumSHA1": "a/DHmc9bdsYlZZcwp6i3xhvV7Pk=",
 			"path": "github.com/opentracing/opentracing-go",
-			"revision": "bd9c3193394760d98b2fa6ebb2291f0cd1d06a7d",
-			"revisionTime": "2018-06-06T20:41:48Z"
+			"revision": "25a84ff92183e2f8ac018ba1db54f8a07b3c0e04",
+			"revisionTime": "2019-02-18T02:30:34Z"
 		},
 		{
 			"checksumSHA1": "uhDxBvLEqRAMZKgpTZ8MFuLIIM8=",