> start) & ((1 << length) - 1)
+}
+
+func bextr32(v uint32, start, length uint8) uint32 {
+ return (v >> start) & ((1 << length) - 1)
+}
+
+func hashFunc(e []byte) uint64 {
+ return metro.Hash64(e, 1337)
+}
diff --git a/vendor/github.com/cenkalti/backoff/v4/.gitignore b/vendor/github.com/cenkalti/backoff/v4/.gitignore
new file mode 100644
index 00000000..50d95c54
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/.gitignore
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+# IDEs
+.idea/
diff --git a/vendor/github.com/cenkalti/backoff/v4/LICENSE b/vendor/github.com/cenkalti/backoff/v4/LICENSE
new file mode 100644
index 00000000..89b81799
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Cenk Altı
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/cenkalti/backoff/v4/README.md b/vendor/github.com/cenkalti/backoff/v4/README.md
new file mode 100644
index 00000000..16abdfc0
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/README.md
@@ -0,0 +1,32 @@
+# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls]
+
+This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].
+
+[Exponential backoff][exponential backoff wiki]
+is an algorithm that uses feedback to multiplicatively decrease the rate of some process,
+in order to gradually find an acceptable rate.
+The retry intervals increase exponentially and stop increasing once a certain threshold is met.
+
+## Usage
+
+Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end.
+
+Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.
+
+## Contributing
+
+* I would like to keep this library as small as possible.
+* Please don't send a PR without opening an issue and discussing it first.
+* If the proposed change is not a common use case, I will probably not accept it.
+
+[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4
+[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
+[travis]: https://travis-ci.org/cenkalti/backoff
+[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master
+[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master
+[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master
+
+[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java
+[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff
+
+[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples
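A minimal usage sketch of the Retry helper with the default exponential policy, as described in the README above; the health-check URL, the operation body, and the failure conditions are made up for illustration:

```go
package main

import (
	"errors"
	"log"
	"net/http"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	// Hypothetical operation: succeeds once the endpoint responds with 200.
	operation := func() error {
		resp, err := http.Get("https://example.com/health") // placeholder URL
		if err != nil {
			return err // transient failure, retried after the next backoff interval
		}
		defer resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			return errors.New("service not ready")
		}
		return nil
	}

	// Retry with the library's default exponential policy
	// (500ms initial interval, 1.5x multiplier, 15m max elapsed time).
	if err := backoff.Retry(operation, backoff.NewExponentialBackOff()); err != nil {
		log.Fatalf("giving up: %v", err)
	}
	log.Println("operation succeeded")
}
```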
diff --git a/vendor/github.com/cenkalti/backoff/v4/backoff.go b/vendor/github.com/cenkalti/backoff/v4/backoff.go
new file mode 100644
index 00000000..3676ee40
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/backoff.go
@@ -0,0 +1,66 @@
+// Package backoff implements backoff algorithms for retrying operations.
+//
+// Use Retry function for retrying operations that may fail.
+// If Retry does not meet your needs,
+// copy/paste the function into your project and modify as you wish.
+//
+// There is also Ticker type similar to time.Ticker.
+// You can use it if you need to work with channels.
+//
+// See Examples section below for usage examples.
+package backoff
+
+import "time"
+
+// BackOff is a backoff policy for retrying an operation.
+type BackOff interface {
+ // NextBackOff returns the duration to wait before retrying the operation,
+ // or backoff.Stop to indicate that no more retries should be made.
+ //
+ // Example usage:
+ //
+ // duration := backoff.NextBackOff();
+ // if (duration == backoff.Stop) {
+ // // Do not retry operation.
+ // } else {
+ // // Sleep for duration and retry operation.
+ // }
+ //
+ NextBackOff() time.Duration
+
+ // Reset to initial state.
+ Reset()
+}
+
+// Stop indicates that no more retries should be made for use in NextBackOff().
+const Stop time.Duration = -1
+
+// ZeroBackOff is a fixed backoff policy whose backoff time is always zero,
+// meaning that the operation is retried immediately without waiting, indefinitely.
+type ZeroBackOff struct{}
+
+func (b *ZeroBackOff) Reset() {}
+
+func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 }
+
+// StopBackOff is a fixed backoff policy that always returns backoff.Stop for
+// NextBackOff(), meaning that the operation should never be retried.
+type StopBackOff struct{}
+
+func (b *StopBackOff) Reset() {}
+
+func (b *StopBackOff) NextBackOff() time.Duration { return Stop }
+
+// ConstantBackOff is a backoff policy that always returns the same backoff delay.
+// This is in contrast to an exponential backoff policy,
+// which returns a delay that grows longer as you call NextBackOff() over and over again.
+type ConstantBackOff struct {
+ Interval time.Duration
+}
+
+func (b *ConstantBackOff) Reset() {}
+func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval }
+
+func NewConstantBackOff(d time.Duration) *ConstantBackOff {
+ return &ConstantBackOff{Interval: d}
+}
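The BackOff interface defined above is small enough to implement directly. A sketch of a hypothetical linear policy; the `linearBackOff` type, its fields, and the growth rule are illustrative and not part of the library:

```go
package main

import (
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

// linearBackOff is a hypothetical policy: each retry waits Step longer than
// the previous one, and Stop is returned once the wait would exceed Max.
type linearBackOff struct {
	Step, Max time.Duration
	current   time.Duration
}

func (l *linearBackOff) Reset() { l.current = 0 }

func (l *linearBackOff) NextBackOff() time.Duration {
	l.current += l.Step
	if l.current > l.Max {
		return backoff.Stop
	}
	return l.current
}

func main() {
	var b backoff.BackOff = &linearBackOff{Step: 100 * time.Millisecond, Max: 500 * time.Millisecond}
	b.Reset()
	// Prints 100ms, 200ms, ..., 500ms, then the loop ends when Stop is returned.
	for d := b.NextBackOff(); d != backoff.Stop; d = b.NextBackOff() {
		fmt.Println("next wait:", d)
	}
}
```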
diff --git a/vendor/github.com/cenkalti/backoff/v4/context.go b/vendor/github.com/cenkalti/backoff/v4/context.go
new file mode 100644
index 00000000..48482330
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/context.go
@@ -0,0 +1,62 @@
+package backoff
+
+import (
+ "context"
+ "time"
+)
+
+// BackOffContext is a backoff policy that stops retrying after the context
+// is canceled.
+type BackOffContext interface { // nolint: golint
+ BackOff
+ Context() context.Context
+}
+
+type backOffContext struct {
+ BackOff
+ ctx context.Context
+}
+
+// WithContext returns a BackOffContext with context ctx
+//
+// ctx must not be nil
+func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint
+ if ctx == nil {
+ panic("nil context")
+ }
+
+ if b, ok := b.(*backOffContext); ok {
+ return &backOffContext{
+ BackOff: b.BackOff,
+ ctx: ctx,
+ }
+ }
+
+ return &backOffContext{
+ BackOff: b,
+ ctx: ctx,
+ }
+}
+
+func getContext(b BackOff) context.Context {
+ if cb, ok := b.(BackOffContext); ok {
+ return cb.Context()
+ }
+ if tb, ok := b.(*backOffTries); ok {
+ return getContext(tb.delegate)
+ }
+ return context.Background()
+}
+
+func (b *backOffContext) Context() context.Context {
+ return b.ctx
+}
+
+func (b *backOffContext) NextBackOff() time.Duration {
+ select {
+ case <-b.ctx.Done():
+ return Stop
+ default:
+ return b.BackOff.NextBackOff()
+ }
+}
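A sketch of how WithContext is typically combined with Retry so the whole retry loop obeys a deadline; the always-failing operation is a stand-in for real work:

```go
package main

import (
	"context"
	"errors"
	"log"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	// Give up after 5 seconds regardless of how much backoff budget remains.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Wrap an exponential policy so NextBackOff returns Stop once ctx is done.
	b := backoff.WithContext(backoff.NewExponentialBackOff(), ctx)

	err := backoff.Retry(func() error {
		return errors.New("still failing") // hypothetical operation that never succeeds
	}, b)

	// Once the timeout fires, Retry returns the context error.
	log.Println(err) // context.DeadlineExceeded
}
```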
diff --git a/vendor/github.com/cenkalti/backoff/v4/exponential.go b/vendor/github.com/cenkalti/backoff/v4/exponential.go
new file mode 100644
index 00000000..2c56c1e7
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/exponential.go
@@ -0,0 +1,161 @@
+package backoff
+
+import (
+ "math/rand"
+ "time"
+)
+
+/*
+ExponentialBackOff is a backoff implementation that increases the backoff
+period for each retry attempt using a randomization function that grows exponentially.
+
+NextBackOff() is calculated using the following formula:
+
+ randomized interval =
+ RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])
+
+In other words NextBackOff() will range between the randomization factor
+percentage below and above the retry interval.
+
+For example, given the following parameters:
+
+ RetryInterval = 2
+ RandomizationFactor = 0.5
+ Multiplier = 2
+
+the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
+multiplied by the exponential, that is, between 2 and 6 seconds.
+
+Note: MaxInterval caps the RetryInterval and not the randomized interval.
+
+If the time elapsed since an ExponentialBackOff instance is created goes past the
+MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.
+
+The elapsed time can be reset by calling Reset().
+
+Example: Given the following default arguments, and assuming we go over the
+MaxElapsedTime on the 10th try, the sequence for 10 tries will be:
+
+ Request # RetryInterval (seconds) Randomized Interval (seconds)
+
+ 1 0.5 [0.25, 0.75]
+ 2 0.75 [0.375, 1.125]
+ 3 1.125 [0.562, 1.687]
+ 4 1.687 [0.8435, 2.53]
+ 5 2.53 [1.265, 3.795]
+ 6 3.795 [1.897, 5.692]
+ 7 5.692 [2.846, 8.538]
+ 8 8.538 [4.269, 12.807]
+ 9 12.807 [6.403, 19.210]
+ 10 19.210 backoff.Stop
+
+Note: Implementation is not thread-safe.
+*/
+type ExponentialBackOff struct {
+ InitialInterval time.Duration
+ RandomizationFactor float64
+ Multiplier float64
+ MaxInterval time.Duration
+ // After MaxElapsedTime the ExponentialBackOff returns Stop.
+ // It never stops if MaxElapsedTime == 0.
+ MaxElapsedTime time.Duration
+ Stop time.Duration
+ Clock Clock
+
+ currentInterval time.Duration
+ startTime time.Time
+}
+
+// Clock is an interface that returns current time for BackOff.
+type Clock interface {
+ Now() time.Time
+}
+
+// Default values for ExponentialBackOff.
+const (
+ DefaultInitialInterval = 500 * time.Millisecond
+ DefaultRandomizationFactor = 0.5
+ DefaultMultiplier = 1.5
+ DefaultMaxInterval = 60 * time.Second
+ DefaultMaxElapsedTime = 15 * time.Minute
+)
+
+// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
+func NewExponentialBackOff() *ExponentialBackOff {
+ b := &ExponentialBackOff{
+ InitialInterval: DefaultInitialInterval,
+ RandomizationFactor: DefaultRandomizationFactor,
+ Multiplier: DefaultMultiplier,
+ MaxInterval: DefaultMaxInterval,
+ MaxElapsedTime: DefaultMaxElapsedTime,
+ Stop: Stop,
+ Clock: SystemClock,
+ }
+ b.Reset()
+ return b
+}
+
+type systemClock struct{}
+
+func (t systemClock) Now() time.Time {
+ return time.Now()
+}
+
+// SystemClock implements Clock interface that uses time.Now().
+var SystemClock = systemClock{}
+
+// Reset sets the interval back to the initial retry interval and restarts the timer.
+// Reset must be called before using b.
+func (b *ExponentialBackOff) Reset() {
+ b.currentInterval = b.InitialInterval
+ b.startTime = b.Clock.Now()
+}
+
+// NextBackOff calculates the next backoff interval using the formula:
+// Randomized interval = RetryInterval * (1 ± RandomizationFactor)
+func (b *ExponentialBackOff) NextBackOff() time.Duration {
+ // Make sure we have not gone over the maximum elapsed time.
+ elapsed := b.GetElapsedTime()
+ next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
+ b.incrementCurrentInterval()
+ if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime {
+ return b.Stop
+ }
+ return next
+}
+
+// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance
+// is created and is reset when Reset() is called.
+//
+// The elapsed time is computed using time.Now().UnixNano(). It is
+// safe to call even while the backoff policy is used by a running
+// ticker.
+func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
+ return b.Clock.Now().Sub(b.startTime)
+}
+
+// Increments the current interval by multiplying it with the multiplier.
+func (b *ExponentialBackOff) incrementCurrentInterval() {
+ // Check for overflow, if overflow is detected set the current interval to the max interval.
+ if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
+ b.currentInterval = b.MaxInterval
+ } else {
+ b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
+ }
+}
+
+// Returns a random value from the following interval:
+// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
+func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
+ if randomizationFactor == 0 {
+ return currentInterval // make sure no randomness is used when randomizationFactor is 0.
+ }
+ var delta = randomizationFactor * float64(currentInterval)
+ var minInterval = float64(currentInterval) - delta
+ var maxInterval = float64(currentInterval) + delta
+
+ // Get a random value from the range [minInterval, maxInterval].
+ // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
+ // we want a 33% chance for selecting either 1, 2 or 3.
+ return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
+}
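A sketch of tuning the ExponentialBackOff fields documented above and observing the randomized intervals; the specific values are arbitrary examples:

```go
package main

import (
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	// Override the defaults: start at 1s, double each time, never wait more
	// than 10s per attempt, and stop retrying entirely after one minute.
	b := backoff.NewExponentialBackOff()
	b.InitialInterval = 1 * time.Second
	b.Multiplier = 2
	b.MaxInterval = 10 * time.Second
	b.MaxElapsedTime = 1 * time.Minute
	b.Reset() // required after changing settings so currentInterval picks up InitialInterval

	// Each printed value falls within
	// currentInterval * [1 - RandomizationFactor, 1 + RandomizationFactor].
	for i := 0; i < 5; i++ {
		fmt.Println(b.NextBackOff())
	}
}
```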
diff --git a/vendor/github.com/cenkalti/backoff/v4/retry.go b/vendor/github.com/cenkalti/backoff/v4/retry.go
new file mode 100644
index 00000000..b9c0c51c
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/retry.go
@@ -0,0 +1,146 @@
+package backoff
+
+import (
+ "errors"
+ "time"
+)
+
+// An OperationWithData is executed by RetryWithData() or RetryNotifyWithData().
+// The operation will be retried using a backoff policy if it returns an error.
+type OperationWithData[T any] func() (T, error)
+
+// An Operation is executed by Retry() or RetryNotify().
+// The operation will be retried using a backoff policy if it returns an error.
+type Operation func() error
+
+func (o Operation) withEmptyData() OperationWithData[struct{}] {
+ return func() (struct{}, error) {
+ return struct{}{}, o()
+ }
+}
+
+// Notify is a notify-on-error function. It receives an operation error and
+// backoff delay if the operation failed (with an error).
+//
+// NOTE that if the backoff policy says to stop retrying,
+// the notify function isn't called.
+type Notify func(error, time.Duration)
+
+// Retry the operation o until it does not return error or BackOff stops.
+// o is guaranteed to be run at least once.
+//
+// If o returns a *PermanentError, the operation is not retried, and the
+// wrapped error is returned.
+//
+// Retry sleeps the goroutine for the duration returned by BackOff after a
+// failed operation returns.
+func Retry(o Operation, b BackOff) error {
+ return RetryNotify(o, b, nil)
+}
+
+// RetryWithData is like Retry but returns data in the response too.
+func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) {
+ return RetryNotifyWithData(o, b, nil)
+}
+
+// RetryNotify calls notify function with the error and wait duration
+// for each failed attempt before sleep.
+func RetryNotify(operation Operation, b BackOff, notify Notify) error {
+ return RetryNotifyWithTimer(operation, b, notify, nil)
+}
+
+// RetryNotifyWithData is like RetryNotify but returns data in the response too.
+func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) {
+ return doRetryNotify(operation, b, notify, nil)
+}
+
+// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer
+// for each failed attempt before sleep.
+// A default timer that uses system timer is used when nil is passed.
+func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error {
+ _, err := doRetryNotify(operation.withEmptyData(), b, notify, t)
+ return err
+}
+
+// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too.
+func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
+ return doRetryNotify(operation, b, notify, t)
+}
+
+func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
+ var (
+ err error
+ next time.Duration
+ res T
+ )
+ if t == nil {
+ t = &defaultTimer{}
+ }
+
+ defer func() {
+ t.Stop()
+ }()
+
+ ctx := getContext(b)
+
+ b.Reset()
+ for {
+ res, err = operation()
+ if err == nil {
+ return res, nil
+ }
+
+ var permanent *PermanentError
+ if errors.As(err, &permanent) {
+ return res, permanent.Err
+ }
+
+ if next = b.NextBackOff(); next == Stop {
+ if cerr := ctx.Err(); cerr != nil {
+ return res, cerr
+ }
+
+ return res, err
+ }
+
+ if notify != nil {
+ notify(err, next)
+ }
+
+ t.Start(next)
+
+ select {
+ case <-ctx.Done():
+ return res, ctx.Err()
+ case <-t.C():
+ }
+ }
+}
+
+// PermanentError signals that the operation should not be retried.
+type PermanentError struct {
+ Err error
+}
+
+func (e *PermanentError) Error() string {
+ return e.Err.Error()
+}
+
+func (e *PermanentError) Unwrap() error {
+ return e.Err
+}
+
+func (e *PermanentError) Is(target error) bool {
+ _, ok := target.(*PermanentError)
+ return ok
+}
+
+// Permanent wraps the given err in a *PermanentError.
+func Permanent(err error) error {
+ if err == nil {
+ return nil
+ }
+ return &PermanentError{
+ Err: err,
+ }
+}
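A sketch combining RetryNotify with Permanent, which short-circuits retries for errors that can never succeed; the `lookup` helper and `errNotFound` sentinel are hypothetical:

```go
package main

import (
	"errors"
	"log"
	"time"

	"github.com/cenkalti/backoff/v4"
)

var errNotFound = errors.New("not found") // hypothetical non-retryable condition

// lookup is a placeholder for the real operation.
func lookup() error { return errNotFound }

func main() {
	operation := func() error {
		if err := lookup(); errors.Is(err, errNotFound) {
			// A not-found result will never succeed on retry, so wrap it in
			// Permanent: Retry stops immediately and returns the wrapped error.
			return backoff.Permanent(err)
		} else if err != nil {
			return err // transient, will be retried
		}
		return nil
	}

	// notify is called with the error and the wait duration before each sleep.
	notify := func(err error, wait time.Duration) {
		log.Printf("attempt failed (%v), retrying in %s", err, wait)
	}

	if err := backoff.RetryNotify(operation, backoff.NewExponentialBackOff(), notify); err != nil {
		log.Fatalf("giving up: %v", err)
	}
}
```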
diff --git a/vendor/github.com/cenkalti/backoff/v4/ticker.go b/vendor/github.com/cenkalti/backoff/v4/ticker.go
new file mode 100644
index 00000000..df9d68bc
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/ticker.go
@@ -0,0 +1,97 @@
+package backoff
+
+import (
+ "context"
+ "sync"
+ "time"
+)
+
+// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff.
+//
+// Ticks will continue to arrive when the previous operation is still running,
+// so operations that take a while to fail could run in quick succession.
+type Ticker struct {
+ C <-chan time.Time
+ c chan time.Time
+ b BackOff
+ ctx context.Context
+ timer Timer
+ stop chan struct{}
+ stopOnce sync.Once
+}
+
+// NewTicker returns a new Ticker containing a channel that will send
+// the time at times specified by the BackOff argument. Ticker is
+// guaranteed to tick at least once. The channel is closed when Stop
+// method is called or BackOff stops. It is not safe to manipulate the
+// provided backoff policy (notably calling NextBackOff or Reset)
+// while the ticker is running.
+func NewTicker(b BackOff) *Ticker {
+ return NewTickerWithTimer(b, &defaultTimer{})
+}
+
+// NewTickerWithTimer returns a new Ticker with a custom timer.
+// A default timer that uses system timer is used when nil is passed.
+func NewTickerWithTimer(b BackOff, timer Timer) *Ticker {
+ if timer == nil {
+ timer = &defaultTimer{}
+ }
+ c := make(chan time.Time)
+ t := &Ticker{
+ C: c,
+ c: c,
+ b: b,
+ ctx: getContext(b),
+ timer: timer,
+ stop: make(chan struct{}),
+ }
+ t.b.Reset()
+ go t.run()
+ return t
+}
+
+// Stop turns off a ticker. After Stop, no more ticks will be sent.
+func (t *Ticker) Stop() {
+ t.stopOnce.Do(func() { close(t.stop) })
+}
+
+func (t *Ticker) run() {
+ c := t.c
+ defer close(c)
+
+ // Ticker is guaranteed to tick at least once.
+ afterC := t.send(time.Now())
+
+ for {
+ if afterC == nil {
+ return
+ }
+
+ select {
+ case tick := <-afterC:
+ afterC = t.send(tick)
+ case <-t.stop:
+ t.c = nil // Prevent future ticks from being sent to the channel.
+ return
+ case <-t.ctx.Done():
+ return
+ }
+ }
+}
+
+func (t *Ticker) send(tick time.Time) <-chan time.Time {
+ select {
+ case t.c <- tick:
+ case <-t.stop:
+ return nil
+ }
+
+ next := t.b.NextBackOff()
+ if next == Stop {
+ t.Stop()
+ return nil
+ }
+
+ t.timer.Start(next)
+ return t.timer.C()
+}
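A sketch of the channel-based Ticker variant; the `doWork` function is a hypothetical always-failing operation standing in for real work:

```go
package main

import (
	"errors"
	"log"

	"github.com/cenkalti/backoff/v4"
)

// doWork is a placeholder for the real operation.
func doWork() error { return errors.New("transient failure") }

func main() {
	// Each tick is an invitation to retry; the channel closes when the
	// policy returns Stop or Stop() is called on the ticker.
	ticker := backoff.NewTicker(backoff.NewExponentialBackOff())
	defer ticker.Stop()

	var err error
	for range ticker.C {
		if err = doWork(); err != nil {
			log.Printf("attempt failed: %v", err)
			continue // wait for the next tick
		}
		ticker.Stop() // success: stop ticking and exit the loop
		break
	}
	if err != nil {
		log.Fatalf("all attempts failed: %v", err)
	}
}
```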
diff --git a/vendor/github.com/cenkalti/backoff/v4/timer.go b/vendor/github.com/cenkalti/backoff/v4/timer.go
new file mode 100644
index 00000000..8120d021
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/timer.go
@@ -0,0 +1,35 @@
+package backoff
+
+import "time"
+
+type Timer interface {
+ Start(duration time.Duration)
+ Stop()
+ C() <-chan time.Time
+}
+
+// defaultTimer implements Timer interface using time.Timer
+type defaultTimer struct {
+ timer *time.Timer
+}
+
+// C returns the timer's channel, which receives the current time when the timer fires.
+func (t *defaultTimer) C() <-chan time.Time {
+ return t.timer.C
+}
+
+// Start starts the timer to fire after the given duration
+func (t *defaultTimer) Start(duration time.Duration) {
+ if t.timer == nil {
+ t.timer = time.NewTimer(duration)
+ } else {
+ t.timer.Reset(duration)
+ }
+}
+
+// Stop is called when the timer is not used anymore and resources may be freed.
+func (t *defaultTimer) Stop() {
+ if t.timer != nil {
+ t.timer.Stop()
+ }
+}
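The Timer interface above lets callers substitute their own timer, e.g. to drive retries instantly in tests. A sketch of a hypothetical `instantTimer` used through RetryNotifyWithTimer; the type and its behavior are illustrative only:

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

// instantTimer is a hypothetical Timer that fires immediately, so retry loops
// can be exercised in tests without real waiting.
type instantTimer struct {
	ch chan time.Time
}

func (t *instantTimer) Start(d time.Duration) {
	t.ch = make(chan time.Time, 1)
	t.ch <- time.Now() // fire right away instead of after d
}

func (t *instantTimer) Stop() {}

func (t *instantTimer) C() <-chan time.Time { return t.ch }

func main() {
	attempts := 0
	op := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("not yet")
		}
		return nil
	}

	// RetryNotifyWithTimer accepts the custom timer in place of the default one.
	err := backoff.RetryNotifyWithTimer(op, backoff.NewExponentialBackOff(), nil, &instantTimer{})
	fmt.Println(attempts, err) // 3 <nil>, with no real delays
}
```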
diff --git a/vendor/github.com/cenkalti/backoff/v4/tries.go b/vendor/github.com/cenkalti/backoff/v4/tries.go
new file mode 100644
index 00000000..28d58ca3
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/tries.go
@@ -0,0 +1,38 @@
+package backoff
+
+import "time"
+
+/*
+WithMaxRetries creates a wrapper around another BackOff, which will
+return Stop if NextBackOff() has been called too many times since
+the last time Reset() was called
+
+Note: Implementation is not thread-safe.
+*/
+func WithMaxRetries(b BackOff, max uint64) BackOff {
+ return &backOffTries{delegate: b, maxTries: max}
+}
+
+type backOffTries struct {
+ delegate BackOff
+ maxTries uint64
+ numTries uint64
+}
+
+func (b *backOffTries) NextBackOff() time.Duration {
+ if b.maxTries == 0 {
+ return Stop
+ }
+ if b.maxTries > 0 {
+ if b.maxTries <= b.numTries {
+ return Stop
+ }
+ b.numTries++
+ }
+ return b.delegate.NextBackOff()
+}
+
+func (b *backOffTries) Reset() {
+ b.numTries = 0
+ b.delegate.Reset()
+}
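A sketch of capping attempts with WithMaxRetries wrapped around a constant policy; the always-failing operation is a stand-in:

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	attempts := 0
	op := func() error {
		attempts++
		return errors.New("always failing") // hypothetical operation
	}

	// At most 3 retries after the initial attempt, i.e. 4 calls in total.
	b := backoff.WithMaxRetries(backoff.NewConstantBackOff(10*time.Millisecond), 3)

	err := backoff.Retry(op, b)
	fmt.Println(attempts, err) // 4 always failing
}
```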
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS b/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS
new file mode 100644
index 00000000..e068e731
--- /dev/null
+++ b/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS
@@ -0,0 +1 @@
+Google Inc.
\ No newline at end of file
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE b/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go
new file mode 100644
index 00000000..9cacb32a
--- /dev/null
+++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go
@@ -0,0 +1,570 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.17.3
+// source: opencensus/proto/agent/common/v1/common.proto
+
+// NOTE: This proto is experimental and is subject to change at this point.
+// Please do not use it at the moment.
+
+package v1
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type LibraryInfo_Language int32
+
+const (
+ LibraryInfo_LANGUAGE_UNSPECIFIED LibraryInfo_Language = 0
+ LibraryInfo_CPP LibraryInfo_Language = 1
+ LibraryInfo_C_SHARP LibraryInfo_Language = 2
+ LibraryInfo_ERLANG LibraryInfo_Language = 3
+ LibraryInfo_GO_LANG LibraryInfo_Language = 4
+ LibraryInfo_JAVA LibraryInfo_Language = 5
+ LibraryInfo_NODE_JS LibraryInfo_Language = 6
+ LibraryInfo_PHP LibraryInfo_Language = 7
+ LibraryInfo_PYTHON LibraryInfo_Language = 8
+ LibraryInfo_RUBY LibraryInfo_Language = 9
+ LibraryInfo_WEB_JS LibraryInfo_Language = 10
+)
+
+// Enum value maps for LibraryInfo_Language.
+var (
+ LibraryInfo_Language_name = map[int32]string{
+ 0: "LANGUAGE_UNSPECIFIED",
+ 1: "CPP",
+ 2: "C_SHARP",
+ 3: "ERLANG",
+ 4: "GO_LANG",
+ 5: "JAVA",
+ 6: "NODE_JS",
+ 7: "PHP",
+ 8: "PYTHON",
+ 9: "RUBY",
+ 10: "WEB_JS",
+ }
+ LibraryInfo_Language_value = map[string]int32{
+ "LANGUAGE_UNSPECIFIED": 0,
+ "CPP": 1,
+ "C_SHARP": 2,
+ "ERLANG": 3,
+ "GO_LANG": 4,
+ "JAVA": 5,
+ "NODE_JS": 6,
+ "PHP": 7,
+ "PYTHON": 8,
+ "RUBY": 9,
+ "WEB_JS": 10,
+ }
+)
+
+func (x LibraryInfo_Language) Enum() *LibraryInfo_Language {
+ p := new(LibraryInfo_Language)
+ *p = x
+ return p
+}
+
+func (x LibraryInfo_Language) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (LibraryInfo_Language) Descriptor() protoreflect.EnumDescriptor {
+ return file_opencensus_proto_agent_common_v1_common_proto_enumTypes[0].Descriptor()
+}
+
+func (LibraryInfo_Language) Type() protoreflect.EnumType {
+ return &file_opencensus_proto_agent_common_v1_common_proto_enumTypes[0]
+}
+
+func (x LibraryInfo_Language) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use LibraryInfo_Language.Descriptor instead.
+func (LibraryInfo_Language) EnumDescriptor() ([]byte, []int) {
+ return file_opencensus_proto_agent_common_v1_common_proto_rawDescGZIP(), []int{2, 0}
+}
+
+// Identifier metadata of the Node that produces the span or tracing data.
+// Note, this is not the metadata about the Node or service that is described by associated spans.
+// In the future we plan to extend the identifier proto definition to support
+// additional information (e.g cloud id, etc.)
+type Node struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Identifier that uniquely identifies a process within a VM/container.
+ Identifier *ProcessIdentifier `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"`
+ // Information on the OpenCensus Library that initiates the stream.
+ LibraryInfo *LibraryInfo `protobuf:"bytes,2,opt,name=library_info,json=libraryInfo,proto3" json:"library_info,omitempty"`
+ // Additional information on service.
+ ServiceInfo *ServiceInfo `protobuf:"bytes,3,opt,name=service_info,json=serviceInfo,proto3" json:"service_info,omitempty"`
+ // Additional attributes.
+ Attributes map[string]string `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *Node) Reset() {
+ *x = Node{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_agent_common_v1_common_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Node) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Node) ProtoMessage() {}
+
+func (x *Node) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_agent_common_v1_common_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Node.ProtoReflect.Descriptor instead.
+func (*Node) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_agent_common_v1_common_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Node) GetIdentifier() *ProcessIdentifier {
+ if x != nil {
+ return x.Identifier
+ }
+ return nil
+}
+
+func (x *Node) GetLibraryInfo() *LibraryInfo {
+ if x != nil {
+ return x.LibraryInfo
+ }
+ return nil
+}
+
+func (x *Node) GetServiceInfo() *ServiceInfo {
+ if x != nil {
+ return x.ServiceInfo
+ }
+ return nil
+}
+
+func (x *Node) GetAttributes() map[string]string {
+ if x != nil {
+ return x.Attributes
+ }
+ return nil
+}
+
+// Identifier that uniquely identifies a process within a VM/container.
+type ProcessIdentifier struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The host name. Usually refers to the machine/container name.
+ // For example: os.Hostname() in Go, socket.gethostname() in Python.
+ HostName string `protobuf:"bytes,1,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"`
+ // Process id.
+ Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
+ // Start time of this ProcessIdentifier. Represented in epoch time.
+ StartTimestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"`
+}
+
+func (x *ProcessIdentifier) Reset() {
+ *x = ProcessIdentifier{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_agent_common_v1_common_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ProcessIdentifier) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ProcessIdentifier) ProtoMessage() {}
+
+func (x *ProcessIdentifier) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_agent_common_v1_common_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ProcessIdentifier.ProtoReflect.Descriptor instead.
+func (*ProcessIdentifier) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_agent_common_v1_common_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ProcessIdentifier) GetHostName() string {
+ if x != nil {
+ return x.HostName
+ }
+ return ""
+}
+
+func (x *ProcessIdentifier) GetPid() uint32 {
+ if x != nil {
+ return x.Pid
+ }
+ return 0
+}
+
+func (x *ProcessIdentifier) GetStartTimestamp() *timestamppb.Timestamp {
+ if x != nil {
+ return x.StartTimestamp
+ }
+ return nil
+}
+
+// Information on OpenCensus Library.
+type LibraryInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Language of OpenCensus Library.
+ Language LibraryInfo_Language `protobuf:"varint,1,opt,name=language,proto3,enum=opencensus.proto.agent.common.v1.LibraryInfo_Language" json:"language,omitempty"`
+ // Version of Agent exporter of Library.
+ ExporterVersion string `protobuf:"bytes,2,opt,name=exporter_version,json=exporterVersion,proto3" json:"exporter_version,omitempty"`
+ // Version of OpenCensus Library.
+ CoreLibraryVersion string `protobuf:"bytes,3,opt,name=core_library_version,json=coreLibraryVersion,proto3" json:"core_library_version,omitempty"`
+}
+
+func (x *LibraryInfo) Reset() {
+ *x = LibraryInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_agent_common_v1_common_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LibraryInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LibraryInfo) ProtoMessage() {}
+
+func (x *LibraryInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_agent_common_v1_common_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LibraryInfo.ProtoReflect.Descriptor instead.
+func (*LibraryInfo) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_agent_common_v1_common_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *LibraryInfo) GetLanguage() LibraryInfo_Language {
+ if x != nil {
+ return x.Language
+ }
+ return LibraryInfo_LANGUAGE_UNSPECIFIED
+}
+
+func (x *LibraryInfo) GetExporterVersion() string {
+ if x != nil {
+ return x.ExporterVersion
+ }
+ return ""
+}
+
+func (x *LibraryInfo) GetCoreLibraryVersion() string {
+ if x != nil {
+ return x.CoreLibraryVersion
+ }
+ return ""
+}
+
+// Additional service information.
+type ServiceInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Name of the service.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *ServiceInfo) Reset() {
+ *x = ServiceInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_agent_common_v1_common_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ServiceInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServiceInfo) ProtoMessage() {}
+
+func (x *ServiceInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_agent_common_v1_common_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServiceInfo.ProtoReflect.Descriptor instead.
+func (*ServiceInfo) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_agent_common_v1_common_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *ServiceInfo) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+var File_opencensus_proto_agent_common_v1_common_proto protoreflect.FileDescriptor
+
+var file_opencensus_proto_agent_common_v1_common_proto_rawDesc = []byte{
+ 0x0a, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f,
+ 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
+ 0x20, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76,
+ 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x22, 0x96, 0x03, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x53, 0x0a, 0x0a, 0x69,
+ 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x33, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e,
+ 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69,
+ 0x66, 0x69, 0x65, 0x72, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72,
+ 0x12, 0x50, 0x0a, 0x0c, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x69, 0x6e, 0x66, 0x6f,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e,
+ 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72,
+ 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0b, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x49, 0x6e,
+ 0x66, 0x6f, 0x12, 0x50, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x6e,
+ 0x66, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63,
+ 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x67, 0x65, 0x6e,
+ 0x74, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x56, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74,
+ 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63,
+ 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x67, 0x65, 0x6e,
+ 0x74, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65,
+ 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f,
+ 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
+ 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65,
+ 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x87, 0x01, 0x0a, 0x11,
+ 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65,
+ 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x10,
+ 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64,
+ 0x12, 0x43, 0x0a, 0x0f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74,
+ 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xd6, 0x02, 0x0a, 0x0b, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72,
+ 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x52, 0x0a, 0x08, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65,
+ 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x62, 0x72, 0x61,
+ 0x72, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x52,
+ 0x08, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x70,
+ 0x6f, 0x72, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0f, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x56, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x14, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x6c, 0x69, 0x62,
+ 0x72, 0x61, 0x72, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x12, 0x63, 0x6f, 0x72, 0x65, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x56,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x95, 0x01, 0x0a, 0x08, 0x4c, 0x61, 0x6e, 0x67, 0x75,
+ 0x61, 0x67, 0x65, 0x12, 0x18, 0x0a, 0x14, 0x4c, 0x41, 0x4e, 0x47, 0x55, 0x41, 0x47, 0x45, 0x5f,
+ 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a,
+ 0x03, 0x43, 0x50, 0x50, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x5f, 0x53, 0x48, 0x41, 0x52,
+ 0x50, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x45, 0x52, 0x4c, 0x41, 0x4e, 0x47, 0x10, 0x03, 0x12,
+ 0x0b, 0x0a, 0x07, 0x47, 0x4f, 0x5f, 0x4c, 0x41, 0x4e, 0x47, 0x10, 0x04, 0x12, 0x08, 0x0a, 0x04,
+ 0x4a, 0x41, 0x56, 0x41, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x4e, 0x4f, 0x44, 0x45, 0x5f, 0x4a,
+ 0x53, 0x10, 0x06, 0x12, 0x07, 0x0a, 0x03, 0x50, 0x48, 0x50, 0x10, 0x07, 0x12, 0x0a, 0x0a, 0x06,
+ 0x50, 0x59, 0x54, 0x48, 0x4f, 0x4e, 0x10, 0x08, 0x12, 0x08, 0x0a, 0x04, 0x52, 0x55, 0x42, 0x59,
+ 0x10, 0x09, 0x12, 0x0a, 0x0a, 0x06, 0x57, 0x45, 0x42, 0x5f, 0x4a, 0x53, 0x10, 0x0a, 0x22, 0x21,
+ 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x42, 0xa6, 0x01, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e,
+ 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f,
+ 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x49, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x69, 0x6e, 0x73, 0x74,
+ 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6f, 0x70, 0x65, 0x6e,
+ 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x65, 0x6e,
+ 0x2d, 0x67, 0x6f, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
+ 0x2f, 0x76, 0x31, 0xea, 0x02, 0x24, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73,
+ 0x3a, 0x3a, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x3a, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x3a, 0x3a,
+ 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
+}
+
+var (
+ file_opencensus_proto_agent_common_v1_common_proto_rawDescOnce sync.Once
+ file_opencensus_proto_agent_common_v1_common_proto_rawDescData = file_opencensus_proto_agent_common_v1_common_proto_rawDesc
+)
+
+func file_opencensus_proto_agent_common_v1_common_proto_rawDescGZIP() []byte {
+ file_opencensus_proto_agent_common_v1_common_proto_rawDescOnce.Do(func() {
+ file_opencensus_proto_agent_common_v1_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_opencensus_proto_agent_common_v1_common_proto_rawDescData)
+ })
+ return file_opencensus_proto_agent_common_v1_common_proto_rawDescData
+}
+
+var file_opencensus_proto_agent_common_v1_common_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_opencensus_proto_agent_common_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_opencensus_proto_agent_common_v1_common_proto_goTypes = []interface{}{
+ (LibraryInfo_Language)(0), // 0: opencensus.proto.agent.common.v1.LibraryInfo.Language
+ (*Node)(nil), // 1: opencensus.proto.agent.common.v1.Node
+ (*ProcessIdentifier)(nil), // 2: opencensus.proto.agent.common.v1.ProcessIdentifier
+ (*LibraryInfo)(nil), // 3: opencensus.proto.agent.common.v1.LibraryInfo
+ (*ServiceInfo)(nil), // 4: opencensus.proto.agent.common.v1.ServiceInfo
+ nil, // 5: opencensus.proto.agent.common.v1.Node.AttributesEntry
+ (*timestamppb.Timestamp)(nil), // 6: google.protobuf.Timestamp
+}
+var file_opencensus_proto_agent_common_v1_common_proto_depIdxs = []int32{
+ 2, // 0: opencensus.proto.agent.common.v1.Node.identifier:type_name -> opencensus.proto.agent.common.v1.ProcessIdentifier
+ 3, // 1: opencensus.proto.agent.common.v1.Node.library_info:type_name -> opencensus.proto.agent.common.v1.LibraryInfo
+ 4, // 2: opencensus.proto.agent.common.v1.Node.service_info:type_name -> opencensus.proto.agent.common.v1.ServiceInfo
+ 5, // 3: opencensus.proto.agent.common.v1.Node.attributes:type_name -> opencensus.proto.agent.common.v1.Node.AttributesEntry
+ 6, // 4: opencensus.proto.agent.common.v1.ProcessIdentifier.start_timestamp:type_name -> google.protobuf.Timestamp
+ 0, // 5: opencensus.proto.agent.common.v1.LibraryInfo.language:type_name -> opencensus.proto.agent.common.v1.LibraryInfo.Language
+ 6, // [6:6] is the sub-list for method output_type
+ 6, // [6:6] is the sub-list for method input_type
+ 6, // [6:6] is the sub-list for extension type_name
+ 6, // [6:6] is the sub-list for extension extendee
+ 0, // [0:6] is the sub-list for field type_name
+}
+
+func init() { file_opencensus_proto_agent_common_v1_common_proto_init() }
+func file_opencensus_proto_agent_common_v1_common_proto_init() {
+ if File_opencensus_proto_agent_common_v1_common_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_opencensus_proto_agent_common_v1_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Node); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_agent_common_v1_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ProcessIdentifier); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_agent_common_v1_common_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LibraryInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_agent_common_v1_common_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ServiceInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_opencensus_proto_agent_common_v1_common_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 5,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_opencensus_proto_agent_common_v1_common_proto_goTypes,
+ DependencyIndexes: file_opencensus_proto_agent_common_v1_common_proto_depIdxs,
+ EnumInfos: file_opencensus_proto_agent_common_v1_common_proto_enumTypes,
+ MessageInfos: file_opencensus_proto_agent_common_v1_common_proto_msgTypes,
+ }.Build()
+ File_opencensus_proto_agent_common_v1_common_proto = out.File
+ file_opencensus_proto_agent_common_v1_common_proto_rawDesc = nil
+ file_opencensus_proto_agent_common_v1_common_proto_goTypes = nil
+ file_opencensus_proto_agent_common_v1_common_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go
new file mode 100644
index 00000000..4f9faffd
--- /dev/null
+++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go
@@ -0,0 +1,410 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.17.3
+// source: opencensus/proto/agent/metrics/v1/metrics_service.proto
+
+package v1
+
+import (
+ context "context"
+ v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
+ v11 "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
+ v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type ExportMetricsServiceRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // This is required only in the first message on the stream or if the
+ // previous sent ExportMetricsServiceRequest message has a different Node (e.g.
+ // when the same RPC is used to send Metrics from multiple Applications).
+ Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
+ // A list of metrics that belong to the last received Node.
+ Metrics []*v11.Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"`
+ // The resource for the metrics in this message that do not have an explicit
+ // resource set.
+ // If unset, the most recently set resource in the RPC stream applies. It is
+ // valid to never be set within a stream, e.g. when no resource info is known
+ // at all or when all sent metrics have an explicit resource set.
+ Resource *v12.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"`
+}
+
+func (x *ExportMetricsServiceRequest) Reset() {
+ *x = ExportMetricsServiceRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_agent_metrics_v1_metrics_service_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ExportMetricsServiceRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExportMetricsServiceRequest) ProtoMessage() {}
+
+func (x *ExportMetricsServiceRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_agent_metrics_v1_metrics_service_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExportMetricsServiceRequest.ProtoReflect.Descriptor instead.
+func (*ExportMetricsServiceRequest) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_agent_metrics_v1_metrics_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ExportMetricsServiceRequest) GetNode() *v1.Node {
+ if x != nil {
+ return x.Node
+ }
+ return nil
+}
+
+func (x *ExportMetricsServiceRequest) GetMetrics() []*v11.Metric {
+ if x != nil {
+ return x.Metrics
+ }
+ return nil
+}
+
+func (x *ExportMetricsServiceRequest) GetResource() *v12.Resource {
+ if x != nil {
+ return x.Resource
+ }
+ return nil
+}
+
+type ExportMetricsServiceResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *ExportMetricsServiceResponse) Reset() {
+ *x = ExportMetricsServiceResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_agent_metrics_v1_metrics_service_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ExportMetricsServiceResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExportMetricsServiceResponse) ProtoMessage() {}
+
+func (x *ExportMetricsServiceResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_agent_metrics_v1_metrics_service_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExportMetricsServiceResponse.ProtoReflect.Descriptor instead.
+func (*ExportMetricsServiceResponse) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_agent_metrics_v1_metrics_service_proto_rawDescGZIP(), []int{1}
+}
+
+var File_opencensus_proto_agent_metrics_v1_metrics_service_proto protoreflect.FileDescriptor
+
+var file_opencensus_proto_agent_metrics_v1_metrics_service_proto_rawDesc = []byte{
+ 0x0a, 0x37, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73,
+ 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x21, 0x6f, 0x70, 0x65, 0x6e, 0x63,
+ 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x67, 0x65, 0x6e,
+ 0x74, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x2d, 0x6f, 0x70,
+ 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x61,
+ 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x63,
+ 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x29, 0x6f, 0x70, 0x65,
+ 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2b, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73,
+ 0x75, 0x73, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x22, 0xdc, 0x01, 0x0a, 0x1b, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x26, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f,
+ 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x12,
+ 0x3d, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x23, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d,
+ 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x42,
+ 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x26, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e,
+ 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x22, 0x1e, 0x0a, 0x1c, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65, 0x74, 0x72,
+ 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x32, 0xa2, 0x01, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x8f, 0x01, 0x0a, 0x06, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74,
+ 0x12, 0x3e, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63,
+ 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69,
+ 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x3f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63,
+ 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69,
+ 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0xb1, 0x01, 0x0a, 0x24, 0x69, 0x6f, 0x2e, 0x6f,
+ 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
+ 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31,
+ 0x42, 0x13, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x69, 0x6e, 0x73, 0x74, 0x72,
+ 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x63,
+ 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x65, 0x6e, 0x2d,
+ 0x67, 0x6f, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73,
+ 0x2f, 0x76, 0x31, 0xea, 0x02, 0x25, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73,
+ 0x3a, 0x3a, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x3a, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x3a, 0x3a,
+ 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_opencensus_proto_agent_metrics_v1_metrics_service_proto_rawDescOnce sync.Once
+ file_opencensus_proto_agent_metrics_v1_metrics_service_proto_rawDescData = file_opencensus_proto_agent_metrics_v1_metrics_service_proto_rawDesc
+)
+
+func file_opencensus_proto_agent_metrics_v1_metrics_service_proto_rawDescGZIP() []byte {
+ file_opencensus_proto_agent_metrics_v1_metrics_service_proto_rawDescOnce.Do(func() {
+ file_opencensus_proto_agent_metrics_v1_metrics_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_opencensus_proto_agent_metrics_v1_metrics_service_proto_rawDescData)
+ })
+ return file_opencensus_proto_agent_metrics_v1_metrics_service_proto_rawDescData
+}
+
+var file_opencensus_proto_agent_metrics_v1_metrics_service_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_opencensus_proto_agent_metrics_v1_metrics_service_proto_goTypes = []interface{}{
+ (*ExportMetricsServiceRequest)(nil), // 0: opencensus.proto.agent.metrics.v1.ExportMetricsServiceRequest
+ (*ExportMetricsServiceResponse)(nil), // 1: opencensus.proto.agent.metrics.v1.ExportMetricsServiceResponse
+ (*v1.Node)(nil), // 2: opencensus.proto.agent.common.v1.Node
+ (*v11.Metric)(nil), // 3: opencensus.proto.metrics.v1.Metric
+ (*v12.Resource)(nil), // 4: opencensus.proto.resource.v1.Resource
+}
+var file_opencensus_proto_agent_metrics_v1_metrics_service_proto_depIdxs = []int32{
+ 2, // 0: opencensus.proto.agent.metrics.v1.ExportMetricsServiceRequest.node:type_name -> opencensus.proto.agent.common.v1.Node
+ 3, // 1: opencensus.proto.agent.metrics.v1.ExportMetricsServiceRequest.metrics:type_name -> opencensus.proto.metrics.v1.Metric
+ 4, // 2: opencensus.proto.agent.metrics.v1.ExportMetricsServiceRequest.resource:type_name -> opencensus.proto.resource.v1.Resource
+ 0, // 3: opencensus.proto.agent.metrics.v1.MetricsService.Export:input_type -> opencensus.proto.agent.metrics.v1.ExportMetricsServiceRequest
+ 1, // 4: opencensus.proto.agent.metrics.v1.MetricsService.Export:output_type -> opencensus.proto.agent.metrics.v1.ExportMetricsServiceResponse
+ 4, // [4:5] is the sub-list for method output_type
+ 3, // [3:4] is the sub-list for method input_type
+ 3, // [3:3] is the sub-list for extension type_name
+ 3, // [3:3] is the sub-list for extension extendee
+ 0, // [0:3] is the sub-list for field type_name
+}
+
+func init() { file_opencensus_proto_agent_metrics_v1_metrics_service_proto_init() }
+func file_opencensus_proto_agent_metrics_v1_metrics_service_proto_init() {
+ if File_opencensus_proto_agent_metrics_v1_metrics_service_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_opencensus_proto_agent_metrics_v1_metrics_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ExportMetricsServiceRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_agent_metrics_v1_metrics_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ExportMetricsServiceResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_opencensus_proto_agent_metrics_v1_metrics_service_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_opencensus_proto_agent_metrics_v1_metrics_service_proto_goTypes,
+ DependencyIndexes: file_opencensus_proto_agent_metrics_v1_metrics_service_proto_depIdxs,
+ MessageInfos: file_opencensus_proto_agent_metrics_v1_metrics_service_proto_msgTypes,
+ }.Build()
+ File_opencensus_proto_agent_metrics_v1_metrics_service_proto = out.File
+ file_opencensus_proto_agent_metrics_v1_metrics_service_proto_rawDesc = nil
+ file_opencensus_proto_agent_metrics_v1_metrics_service_proto_goTypes = nil
+ file_opencensus_proto_agent_metrics_v1_metrics_service_proto_depIdxs = nil
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConnInterface
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion6
+
+// MetricsServiceClient is the client API for MetricsService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type MetricsServiceClient interface {
+ // For performance reasons, it is recommended to keep this RPC
+ // alive for the entire life of the application.
+ Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error)
+}
+
+type metricsServiceClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewMetricsServiceClient(cc grpc.ClientConnInterface) MetricsServiceClient {
+ return &metricsServiceClient{cc}
+}
+
+func (c *metricsServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_MetricsService_serviceDesc.Streams[0], "/opencensus.proto.agent.metrics.v1.MetricsService/Export", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &metricsServiceExportClient{stream}
+ return x, nil
+}
+
+type MetricsService_ExportClient interface {
+ Send(*ExportMetricsServiceRequest) error
+ Recv() (*ExportMetricsServiceResponse, error)
+ grpc.ClientStream
+}
+
+type metricsServiceExportClient struct {
+ grpc.ClientStream
+}
+
+func (x *metricsServiceExportClient) Send(m *ExportMetricsServiceRequest) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *metricsServiceExportClient) Recv() (*ExportMetricsServiceResponse, error) {
+ m := new(ExportMetricsServiceResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+// MetricsServiceServer is the server API for MetricsService service.
+type MetricsServiceServer interface {
+ // For performance reasons, it is recommended to keep this RPC
+ // alive for the entire life of the application.
+ Export(MetricsService_ExportServer) error
+}
+
+// UnimplementedMetricsServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedMetricsServiceServer struct {
+}
+
+func (*UnimplementedMetricsServiceServer) Export(MetricsService_ExportServer) error {
+ return status.Errorf(codes.Unimplemented, "method Export not implemented")
+}
+
+func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) {
+ s.RegisterService(&_MetricsService_serviceDesc, srv)
+}
+
+func _MetricsService_Export_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(MetricsServiceServer).Export(&metricsServiceExportServer{stream})
+}
+
+type MetricsService_ExportServer interface {
+ Send(*ExportMetricsServiceResponse) error
+ Recv() (*ExportMetricsServiceRequest, error)
+ grpc.ServerStream
+}
+
+type metricsServiceExportServer struct {
+ grpc.ServerStream
+}
+
+func (x *metricsServiceExportServer) Send(m *ExportMetricsServiceResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *metricsServiceExportServer) Recv() (*ExportMetricsServiceRequest, error) {
+ m := new(ExportMetricsServiceRequest)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+var _MetricsService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "opencensus.proto.agent.metrics.v1.MetricsService",
+ HandlerType: (*MetricsServiceServer)(nil),
+ Methods: []grpc.MethodDesc{},
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "Export",
+ Handler: _MetricsService_Export_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
+ },
+ Metadata: "opencensus/proto/agent/metrics/v1/metrics_service.proto",
+}
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.gw.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.gw.go
new file mode 100644
index 00000000..3cc9bae4
--- /dev/null
+++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.gw.go
@@ -0,0 +1,160 @@
+// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
+// source: opencensus/proto/agent/metrics/v1/metrics_service.proto
+
+/*
+Package v1 is a reverse proxy.
+
+It translates gRPC into RESTful JSON APIs.
+*/
+package v1
+
+import (
+ "context"
+ "io"
+ "net/http"
+
+ "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
+ "github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
+)
+
+// Suppress "imported and not used" errors
+var _ codes.Code
+var _ io.Reader
+var _ status.Status
+var _ = runtime.String
+var _ = utilities.NewDoubleArray
+var _ = metadata.Join
+
+func request_MetricsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client MetricsServiceClient, req *http.Request, pathParams map[string]string) (MetricsService_ExportClient, runtime.ServerMetadata, error) {
+ var metadata runtime.ServerMetadata
+ stream, err := client.Export(ctx)
+ if err != nil {
+ grpclog.Infof("Failed to start streaming: %v", err)
+ return nil, metadata, err
+ }
+ dec := marshaler.NewDecoder(req.Body)
+ handleSend := func() error {
+ var protoReq ExportMetricsServiceRequest
+ err := dec.Decode(&protoReq)
+ if err == io.EOF {
+ return err
+ }
+ if err != nil {
+ grpclog.Infof("Failed to decode request: %v", err)
+ return err
+ }
+ if err := stream.Send(&protoReq); err != nil {
+ grpclog.Infof("Failed to send request: %v", err)
+ return err
+ }
+ return nil
+ }
+ go func() {
+ for {
+ if err := handleSend(); err != nil {
+ break
+ }
+ }
+ if err := stream.CloseSend(); err != nil {
+ grpclog.Infof("Failed to terminate client stream: %v", err)
+ }
+ }()
+ header, err := stream.Header()
+ if err != nil {
+ grpclog.Infof("Failed to get header from client: %v", err)
+ return nil, metadata, err
+ }
+ metadata.HeaderMD = header
+ return stream, metadata, nil
+}
+
+// RegisterMetricsServiceHandlerServer registers the http handlers for service MetricsService to "mux".
+// UnaryRPC :call MetricsServiceServer directly.
+// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
+// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterMetricsServiceHandlerFromEndpoint instead.
+func RegisterMetricsServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server MetricsServiceServer) error {
+
+ mux.Handle("POST", pattern_MetricsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport")
+ _, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ })
+
+ return nil
+}
+
+// RegisterMetricsServiceHandlerFromEndpoint is same as RegisterMetricsServiceHandler but
+// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
+func RegisterMetricsServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+ conn, err := grpc.Dial(endpoint, opts...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ return
+ }
+ go func() {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ }()
+ }()
+
+ return RegisterMetricsServiceHandler(ctx, mux, conn)
+}
+
+// RegisterMetricsServiceHandler registers the http handlers for service MetricsService to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterMetricsServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+ return RegisterMetricsServiceHandlerClient(ctx, mux, NewMetricsServiceClient(conn))
+}
+
+// RegisterMetricsServiceHandlerClient registers the http handlers for service MetricsService
+// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "MetricsServiceClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "MetricsServiceClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "MetricsServiceClient" to call the correct interceptors.
+func RegisterMetricsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client MetricsServiceClient) error {
+
+ mux.Handle("POST", pattern_MetricsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req, "/opencensus.proto.agent.metrics.v1.MetricsService/Export", runtime.WithHTTPPathPattern("/v1/metrics"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_MetricsService_Export_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_MetricsService_Export_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
+
+ })
+
+ return nil
+}
+
+var (
+ pattern_MetricsService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "metrics"}, ""))
+)
+
+var (
+ forward_MetricsService_Export_0 = runtime.ForwardResponseStream
+)
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go
new file mode 100644
index 00000000..c713bfb4
--- /dev/null
+++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go
@@ -0,0 +1,664 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.17.3
+// source: opencensus/proto/agent/trace/v1/trace_service.proto
+
+// NOTE: This proto is experimental and is subject to change at this point.
+// Please do not use it at the moment.
+
+package v1
+
+import (
+ context "context"
+ v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
+ v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
+ v11 "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type CurrentLibraryConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // This is required only in the first message on the stream or if the
+ // previous sent CurrentLibraryConfig message has a different Node (e.g.
+ // when the same RPC is used to configure multiple Applications).
+ Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
+ // Current configuration.
+ Config *v11.TraceConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
+}
+
+func (x *CurrentLibraryConfig) Reset() {
+ *x = CurrentLibraryConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_agent_trace_v1_trace_service_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CurrentLibraryConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CurrentLibraryConfig) ProtoMessage() {}
+
+func (x *CurrentLibraryConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_agent_trace_v1_trace_service_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CurrentLibraryConfig.ProtoReflect.Descriptor instead.
+func (*CurrentLibraryConfig) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_agent_trace_v1_trace_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *CurrentLibraryConfig) GetNode() *v1.Node {
+ if x != nil {
+ return x.Node
+ }
+ return nil
+}
+
+func (x *CurrentLibraryConfig) GetConfig() *v11.TraceConfig {
+ if x != nil {
+ return x.Config
+ }
+ return nil
+}
+
+type UpdatedLibraryConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // This field is ignored when the RPC is used to configure only one Application.
+ // This is required only in the first message on the stream or if the
+ // previous sent UpdatedLibraryConfig message has a different Node.
+ Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
+ // Requested updated configuration.
+ Config *v11.TraceConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
+}
+
+func (x *UpdatedLibraryConfig) Reset() {
+ *x = UpdatedLibraryConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_agent_trace_v1_trace_service_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UpdatedLibraryConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdatedLibraryConfig) ProtoMessage() {}
+
+func (x *UpdatedLibraryConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_agent_trace_v1_trace_service_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdatedLibraryConfig.ProtoReflect.Descriptor instead.
+func (*UpdatedLibraryConfig) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_agent_trace_v1_trace_service_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *UpdatedLibraryConfig) GetNode() *v1.Node {
+ if x != nil {
+ return x.Node
+ }
+ return nil
+}
+
+func (x *UpdatedLibraryConfig) GetConfig() *v11.TraceConfig {
+ if x != nil {
+ return x.Config
+ }
+ return nil
+}
+
+type ExportTraceServiceRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // This is required only in the first message on the stream or if the
+ // previous sent ExportTraceServiceRequest message has a different Node (e.g.
+ // when the same RPC is used to send Spans from multiple Applications).
+ Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
+ // A list of Spans that belong to the last received Node.
+ Spans []*v11.Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"`
+ // The resource for the spans in this message that do not have an explicit
+ // resource set.
+ // If unset, the most recently set resource in the RPC stream applies. It is
+ // valid to never be set within a stream, e.g. when no resource info is known.
+ Resource *v12.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"`
+}
+
+func (x *ExportTraceServiceRequest) Reset() {
+ *x = ExportTraceServiceRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_agent_trace_v1_trace_service_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ExportTraceServiceRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExportTraceServiceRequest) ProtoMessage() {}
+
+func (x *ExportTraceServiceRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_agent_trace_v1_trace_service_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExportTraceServiceRequest.ProtoReflect.Descriptor instead.
+func (*ExportTraceServiceRequest) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_agent_trace_v1_trace_service_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ExportTraceServiceRequest) GetNode() *v1.Node {
+ if x != nil {
+ return x.Node
+ }
+ return nil
+}
+
+func (x *ExportTraceServiceRequest) GetSpans() []*v11.Span {
+ if x != nil {
+ return x.Spans
+ }
+ return nil
+}
+
+func (x *ExportTraceServiceRequest) GetResource() *v12.Resource {
+ if x != nil {
+ return x.Resource
+ }
+ return nil
+}
+
+type ExportTraceServiceResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *ExportTraceServiceResponse) Reset() {
+ *x = ExportTraceServiceResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_agent_trace_v1_trace_service_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ExportTraceServiceResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExportTraceServiceResponse) ProtoMessage() {}
+
+func (x *ExportTraceServiceResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_agent_trace_v1_trace_service_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExportTraceServiceResponse.ProtoReflect.Descriptor instead.
+func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_agent_trace_v1_trace_service_proto_rawDescGZIP(), []int{3}
+}
+
+var File_opencensus_proto_agent_trace_v1_trace_service_proto protoreflect.FileDescriptor
+
+var file_opencensus_proto_agent_trace_v1_trace_service_proto_rawDesc = []byte{
+ 0x0a, 0x33, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76,
+ 0x31, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1f, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75,
+ 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x74, 0x72,
+ 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73,
+ 0x75, 0x73, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x63,
+ 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2b, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75,
+ 0x73, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x25, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x72,
+ 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x6f, 0x70, 0x65, 0x6e, 0x63,
+ 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63,
+ 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x92, 0x01, 0x0a, 0x14, 0x43, 0x75, 0x72, 0x72,
+ 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x12, 0x3a, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26,
+ 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76,
+ 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x3e, 0x0a, 0x06,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6f,
+ 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
+ 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x92, 0x01, 0x0a,
+ 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3a, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64,
+ 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x26, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72,
+ 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x22, 0xd2, 0x01, 0x0a, 0x19, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63,
+ 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x3a, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e,
+ 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31,
+ 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x35, 0x0a, 0x05, 0x73,
+ 0x70, 0x61, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65,
+ 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72,
+ 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x05, 0x73, 0x70, 0x61,
+ 0x6e, 0x73, 0x12, 0x42, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75,
+ 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0x1c, 0x0a, 0x1a, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74,
+ 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x96, 0x02, 0x0a, 0x0c, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x7c, 0x0a, 0x06, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12,
+ 0x35, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76,
+ 0x31, 0x2e, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x35, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e,
+ 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e,
+ 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64,
+ 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x00, 0x28,
+ 0x01, 0x30, 0x01, 0x12, 0x87, 0x01, 0x0a, 0x06, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x3a,
+ 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31,
+ 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3b, 0x2e, 0x6f, 0x70, 0x65,
+ 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x67,
+ 0x65, 0x6e, 0x74, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70,
+ 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0xa9, 0x01,
+ 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x74, 0x72, 0x61, 0x63,
+ 0x65, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x48, 0x67, 0x69, 0x74, 0x68, 0x75,
+ 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x69, 0x6e, 0x73,
+ 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6f, 0x70, 0x65,
+ 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x65,
+ 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65,
+ 0x2f, 0x76, 0x31, 0xea, 0x02, 0x23, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73,
+ 0x3a, 0x3a, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x3a, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x3a, 0x3a,
+ 0x54, 0x72, 0x61, 0x63, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var (
+ file_opencensus_proto_agent_trace_v1_trace_service_proto_rawDescOnce sync.Once
+ file_opencensus_proto_agent_trace_v1_trace_service_proto_rawDescData = file_opencensus_proto_agent_trace_v1_trace_service_proto_rawDesc
+)
+
+func file_opencensus_proto_agent_trace_v1_trace_service_proto_rawDescGZIP() []byte {
+ file_opencensus_proto_agent_trace_v1_trace_service_proto_rawDescOnce.Do(func() {
+ file_opencensus_proto_agent_trace_v1_trace_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_opencensus_proto_agent_trace_v1_trace_service_proto_rawDescData)
+ })
+ return file_opencensus_proto_agent_trace_v1_trace_service_proto_rawDescData
+}
+
+var file_opencensus_proto_agent_trace_v1_trace_service_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
+var file_opencensus_proto_agent_trace_v1_trace_service_proto_goTypes = []interface{}{
+ (*CurrentLibraryConfig)(nil), // 0: opencensus.proto.agent.trace.v1.CurrentLibraryConfig
+ (*UpdatedLibraryConfig)(nil), // 1: opencensus.proto.agent.trace.v1.UpdatedLibraryConfig
+ (*ExportTraceServiceRequest)(nil), // 2: opencensus.proto.agent.trace.v1.ExportTraceServiceRequest
+ (*ExportTraceServiceResponse)(nil), // 3: opencensus.proto.agent.trace.v1.ExportTraceServiceResponse
+ (*v1.Node)(nil), // 4: opencensus.proto.agent.common.v1.Node
+ (*v11.TraceConfig)(nil), // 5: opencensus.proto.trace.v1.TraceConfig
+ (*v11.Span)(nil), // 6: opencensus.proto.trace.v1.Span
+ (*v12.Resource)(nil), // 7: opencensus.proto.resource.v1.Resource
+}
+var file_opencensus_proto_agent_trace_v1_trace_service_proto_depIdxs = []int32{
+ 4, // 0: opencensus.proto.agent.trace.v1.CurrentLibraryConfig.node:type_name -> opencensus.proto.agent.common.v1.Node
+ 5, // 1: opencensus.proto.agent.trace.v1.CurrentLibraryConfig.config:type_name -> opencensus.proto.trace.v1.TraceConfig
+ 4, // 2: opencensus.proto.agent.trace.v1.UpdatedLibraryConfig.node:type_name -> opencensus.proto.agent.common.v1.Node
+ 5, // 3: opencensus.proto.agent.trace.v1.UpdatedLibraryConfig.config:type_name -> opencensus.proto.trace.v1.TraceConfig
+ 4, // 4: opencensus.proto.agent.trace.v1.ExportTraceServiceRequest.node:type_name -> opencensus.proto.agent.common.v1.Node
+ 6, // 5: opencensus.proto.agent.trace.v1.ExportTraceServiceRequest.spans:type_name -> opencensus.proto.trace.v1.Span
+ 7, // 6: opencensus.proto.agent.trace.v1.ExportTraceServiceRequest.resource:type_name -> opencensus.proto.resource.v1.Resource
+ 0, // 7: opencensus.proto.agent.trace.v1.TraceService.Config:input_type -> opencensus.proto.agent.trace.v1.CurrentLibraryConfig
+ 2, // 8: opencensus.proto.agent.trace.v1.TraceService.Export:input_type -> opencensus.proto.agent.trace.v1.ExportTraceServiceRequest
+ 1, // 9: opencensus.proto.agent.trace.v1.TraceService.Config:output_type -> opencensus.proto.agent.trace.v1.UpdatedLibraryConfig
+ 3, // 10: opencensus.proto.agent.trace.v1.TraceService.Export:output_type -> opencensus.proto.agent.trace.v1.ExportTraceServiceResponse
+ 9, // [9:11] is the sub-list for method output_type
+ 7, // [7:9] is the sub-list for method input_type
+ 7, // [7:7] is the sub-list for extension type_name
+ 7, // [7:7] is the sub-list for extension extendee
+ 0, // [0:7] is the sub-list for field type_name
+}
+
+func init() { file_opencensus_proto_agent_trace_v1_trace_service_proto_init() }
+func file_opencensus_proto_agent_trace_v1_trace_service_proto_init() {
+ if File_opencensus_proto_agent_trace_v1_trace_service_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_opencensus_proto_agent_trace_v1_trace_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CurrentLibraryConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_agent_trace_v1_trace_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UpdatedLibraryConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_agent_trace_v1_trace_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ExportTraceServiceRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_agent_trace_v1_trace_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ExportTraceServiceResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_opencensus_proto_agent_trace_v1_trace_service_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 4,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_opencensus_proto_agent_trace_v1_trace_service_proto_goTypes,
+ DependencyIndexes: file_opencensus_proto_agent_trace_v1_trace_service_proto_depIdxs,
+ MessageInfos: file_opencensus_proto_agent_trace_v1_trace_service_proto_msgTypes,
+ }.Build()
+ File_opencensus_proto_agent_trace_v1_trace_service_proto = out.File
+ file_opencensus_proto_agent_trace_v1_trace_service_proto_rawDesc = nil
+ file_opencensus_proto_agent_trace_v1_trace_service_proto_goTypes = nil
+ file_opencensus_proto_agent_trace_v1_trace_service_proto_depIdxs = nil
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConnInterface
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion6
+
+// TraceServiceClient is the client API for TraceService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type TraceServiceClient interface {
+ // After initialization, this RPC must be kept alive for the entire life of
+ // the application. The agent pushes configs down to applications via a
+ // stream.
+ Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error)
+ // For performance reasons, it is recommended to keep this RPC
+ // alive for the entire life of the application.
+ Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error)
+}
+
+type traceServiceClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewTraceServiceClient(cc grpc.ClientConnInterface) TraceServiceClient {
+ return &traceServiceClient{cc}
+}
+
+func (c *traceServiceClient) Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[0], "/opencensus.proto.agent.trace.v1.TraceService/Config", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &traceServiceConfigClient{stream}
+ return x, nil
+}
+
+type TraceService_ConfigClient interface {
+ Send(*CurrentLibraryConfig) error
+ Recv() (*UpdatedLibraryConfig, error)
+ grpc.ClientStream
+}
+
+type traceServiceConfigClient struct {
+ grpc.ClientStream
+}
+
+func (x *traceServiceConfigClient) Send(m *CurrentLibraryConfig) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *traceServiceConfigClient) Recv() (*UpdatedLibraryConfig, error) {
+ m := new(UpdatedLibraryConfig)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *traceServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[1], "/opencensus.proto.agent.trace.v1.TraceService/Export", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &traceServiceExportClient{stream}
+ return x, nil
+}
+
+type TraceService_ExportClient interface {
+ Send(*ExportTraceServiceRequest) error
+ Recv() (*ExportTraceServiceResponse, error)
+ grpc.ClientStream
+}
+
+type traceServiceExportClient struct {
+ grpc.ClientStream
+}
+
+func (x *traceServiceExportClient) Send(m *ExportTraceServiceRequest) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *traceServiceExportClient) Recv() (*ExportTraceServiceResponse, error) {
+ m := new(ExportTraceServiceResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+// TraceServiceServer is the server API for TraceService service.
+type TraceServiceServer interface {
+ // After initialization, this RPC must be kept alive for the entire life of
+ // the application. The agent pushes configs down to applications via a
+ // stream.
+ Config(TraceService_ConfigServer) error
+ // For performance reasons, it is recommended to keep this RPC
+ // alive for the entire life of the application.
+ Export(TraceService_ExportServer) error
+}
+
+// UnimplementedTraceServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedTraceServiceServer struct {
+}
+
+func (*UnimplementedTraceServiceServer) Config(TraceService_ConfigServer) error {
+ return status.Errorf(codes.Unimplemented, "method Config not implemented")
+}
+func (*UnimplementedTraceServiceServer) Export(TraceService_ExportServer) error {
+ return status.Errorf(codes.Unimplemented, "method Export not implemented")
+}
+
+func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) {
+ s.RegisterService(&_TraceService_serviceDesc, srv)
+}
+
+func _TraceService_Config_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(TraceServiceServer).Config(&traceServiceConfigServer{stream})
+}
+
+type TraceService_ConfigServer interface {
+ Send(*UpdatedLibraryConfig) error
+ Recv() (*CurrentLibraryConfig, error)
+ grpc.ServerStream
+}
+
+type traceServiceConfigServer struct {
+ grpc.ServerStream
+}
+
+func (x *traceServiceConfigServer) Send(m *UpdatedLibraryConfig) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *traceServiceConfigServer) Recv() (*CurrentLibraryConfig, error) {
+ m := new(CurrentLibraryConfig)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func _TraceService_Export_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(TraceServiceServer).Export(&traceServiceExportServer{stream})
+}
+
+type TraceService_ExportServer interface {
+ Send(*ExportTraceServiceResponse) error
+ Recv() (*ExportTraceServiceRequest, error)
+ grpc.ServerStream
+}
+
+type traceServiceExportServer struct {
+ grpc.ServerStream
+}
+
+func (x *traceServiceExportServer) Send(m *ExportTraceServiceResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *traceServiceExportServer) Recv() (*ExportTraceServiceRequest, error) {
+ m := new(ExportTraceServiceRequest)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+var _TraceService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "opencensus.proto.agent.trace.v1.TraceService",
+ HandlerType: (*TraceServiceServer)(nil),
+ Methods: []grpc.MethodDesc{},
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "Config",
+ Handler: _TraceService_Config_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
+ {
+ StreamName: "Export",
+ Handler: _TraceService_Export_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
+ },
+ Metadata: "opencensus/proto/agent/trace/v1/trace_service.proto",
+}
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go
new file mode 100644
index 00000000..7808e9fb
--- /dev/null
+++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go
@@ -0,0 +1,160 @@
+// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
+// source: opencensus/proto/agent/trace/v1/trace_service.proto
+
+/*
+Package v1 is a reverse proxy.
+
+It translates gRPC into RESTful JSON APIs.
+*/
+package v1
+
+import (
+ "context"
+ "io"
+ "net/http"
+
+ "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
+ "github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
+)
+
+// Suppress "imported and not used" errors
+var _ codes.Code
+var _ io.Reader
+var _ status.Status
+var _ = runtime.String
+var _ = utilities.NewDoubleArray
+var _ = metadata.Join
+
+func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client TraceServiceClient, req *http.Request, pathParams map[string]string) (TraceService_ExportClient, runtime.ServerMetadata, error) {
+ var metadata runtime.ServerMetadata
+ stream, err := client.Export(ctx)
+ if err != nil {
+ grpclog.Infof("Failed to start streaming: %v", err)
+ return nil, metadata, err
+ }
+ dec := marshaler.NewDecoder(req.Body)
+ handleSend := func() error {
+ var protoReq ExportTraceServiceRequest
+ err := dec.Decode(&protoReq)
+ if err == io.EOF {
+ return err
+ }
+ if err != nil {
+ grpclog.Infof("Failed to decode request: %v", err)
+ return err
+ }
+ if err := stream.Send(&protoReq); err != nil {
+ grpclog.Infof("Failed to send request: %v", err)
+ return err
+ }
+ return nil
+ }
+ go func() {
+ for {
+ if err := handleSend(); err != nil {
+ break
+ }
+ }
+ if err := stream.CloseSend(); err != nil {
+ grpclog.Infof("Failed to terminate client stream: %v", err)
+ }
+ }()
+ header, err := stream.Header()
+ if err != nil {
+ grpclog.Infof("Failed to get header from client: %v", err)
+ return nil, metadata, err
+ }
+ metadata.HeaderMD = header
+ return stream, metadata, nil
+}
+
+// RegisterTraceServiceHandlerServer registers the http handlers for service TraceService to "mux".
+// UnaryRPC: calls TraceServiceServer directly.
+// StreamingRPC: currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
+// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterTraceServiceHandlerFromEndpoint instead.
+func RegisterTraceServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server TraceServiceServer) error {
+
+ mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport")
+ _, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ })
+
+ return nil
+}
+
+// RegisterTraceServiceHandlerFromEndpoint is the same as RegisterTraceServiceHandler but
+// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
+func RegisterTraceServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+ conn, err := grpc.Dial(endpoint, opts...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ return
+ }
+ go func() {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ }()
+ }()
+
+ return RegisterTraceServiceHandler(ctx, mux, conn)
+}
+
+// RegisterTraceServiceHandler registers the http handlers for service TraceService to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterTraceServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+ return RegisterTraceServiceHandlerClient(ctx, mux, NewTraceServiceClient(conn))
+}
+
+// RegisterTraceServiceHandlerClient registers the http handlers for service TraceService
+// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "TraceServiceClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "TraceServiceClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "TraceServiceClient" to call the correct interceptors.
+func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client TraceServiceClient) error {
+
+ mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req, "/opencensus.proto.agent.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/trace"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_TraceService_Export_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
+
+ })
+
+ return nil
+}
+
+var (
+ pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "trace"}, ""))
+)
+
+var (
+ forward_TraceService_Export_0 = runtime.ForwardResponseStream
+)
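Nothing in the generated gateway starts an HTTP server; the caller is expected to mount the ServeMux themselves. A minimal sketch, assuming a gRPC agent reachable on localhost:55678 and plaintext credentials (both assumptions, not stated by this file), that serves the POST /v1/trace route defined by pattern_TraceService_Export_0:

package main

import (
	"context"
	"log"
	"net/http"

	agenttracepb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1"
	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}

	// Dials the agent and registers the /v1/trace handler on the mux; the
	// connection is closed when ctx is cancelled, as documented above.
	if err := agenttracepb.RegisterTraceServiceHandlerFromEndpoint(ctx, mux, "localhost:55678", opts); err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.ListenAndServe(":8080", mux))
}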
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go
new file mode 100644
index 00000000..0cac88b2
--- /dev/null
+++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go
@@ -0,0 +1,1632 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This package describes the Metrics data model. It is currently experimental
+// but may eventually become the wire format for metrics. Please see
+// https://github.com/census-instrumentation/opencensus-specs/blob/master/stats/Metrics.md
+// for more details.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.17.3
+// source: opencensus/proto/metrics/v1/metrics.proto
+
+package v1
+
+import (
+ v1 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The kind of metric. It describes how the data is reported.
+//
+// A gauge is an instantaneous measurement of a value.
+//
+// A cumulative measurement is a value accumulated over a time interval. In
+// a time series, cumulative measurements should have the same start time,
+// increasing values and increasing end times, until an event resets the
+// cumulative value to zero and sets a new start time for the following
+// points.
+type MetricDescriptor_Type int32
+
+const (
+ // Do not use this default value.
+ MetricDescriptor_UNSPECIFIED MetricDescriptor_Type = 0
+ // Integer gauge. The value can go both up and down.
+ MetricDescriptor_GAUGE_INT64 MetricDescriptor_Type = 1
+ // Floating point gauge. The value can go both up and down.
+ MetricDescriptor_GAUGE_DOUBLE MetricDescriptor_Type = 2
+ // Distribution gauge measurement. The count and sum can go both up and
+ // down. Recorded values are always >= 0.
+ // Used in scenarios like a snapshot of the time that items currently in a
+ // queue have spent there.
+ MetricDescriptor_GAUGE_DISTRIBUTION MetricDescriptor_Type = 3
+ // Integer cumulative measurement. The value cannot decrease; if it resets,
+ // then the start_time should also be reset.
+ MetricDescriptor_CUMULATIVE_INT64 MetricDescriptor_Type = 4
+ // Floating point cumulative measurement. The value cannot decrease; if it
+ // resets, then the start_time should also be reset. Recorded values are
+ // always >= 0.
+ MetricDescriptor_CUMULATIVE_DOUBLE MetricDescriptor_Type = 5
+ // Distribution cumulative measurement. The count and sum cannot decrease;
+ // if they reset, then the start_time should also be reset.
+ MetricDescriptor_CUMULATIVE_DISTRIBUTION MetricDescriptor_Type = 6
+ // Some frameworks implement histograms as a summary of observations
+ // (usually things like request durations and response sizes). While a
+ // summary also provides a total count of observations and a sum of all
+ // observed values, it calculates configurable percentiles over a sliding
+ // time window. This is not recommended, since it cannot be aggregated.
+ MetricDescriptor_SUMMARY MetricDescriptor_Type = 7
+)
+
+// Enum value maps for MetricDescriptor_Type.
+var (
+ MetricDescriptor_Type_name = map[int32]string{
+ 0: "UNSPECIFIED",
+ 1: "GAUGE_INT64",
+ 2: "GAUGE_DOUBLE",
+ 3: "GAUGE_DISTRIBUTION",
+ 4: "CUMULATIVE_INT64",
+ 5: "CUMULATIVE_DOUBLE",
+ 6: "CUMULATIVE_DISTRIBUTION",
+ 7: "SUMMARY",
+ }
+ MetricDescriptor_Type_value = map[string]int32{
+ "UNSPECIFIED": 0,
+ "GAUGE_INT64": 1,
+ "GAUGE_DOUBLE": 2,
+ "GAUGE_DISTRIBUTION": 3,
+ "CUMULATIVE_INT64": 4,
+ "CUMULATIVE_DOUBLE": 5,
+ "CUMULATIVE_DISTRIBUTION": 6,
+ "SUMMARY": 7,
+ }
+)
+
+func (x MetricDescriptor_Type) Enum() *MetricDescriptor_Type {
+ p := new(MetricDescriptor_Type)
+ *p = x
+ return p
+}
+
+func (x MetricDescriptor_Type) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (MetricDescriptor_Type) Descriptor() protoreflect.EnumDescriptor {
+ return file_opencensus_proto_metrics_v1_metrics_proto_enumTypes[0].Descriptor()
+}
+
+func (MetricDescriptor_Type) Type() protoreflect.EnumType {
+ return &file_opencensus_proto_metrics_v1_metrics_proto_enumTypes[0]
+}
+
+func (x MetricDescriptor_Type) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use MetricDescriptor_Type.Descriptor instead.
+func (MetricDescriptor_Type) EnumDescriptor() ([]byte, []int) {
+ return file_opencensus_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{1, 0}
+}
+
+// Defines a Metric which has one or more timeseries.
+type Metric struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The descriptor of the Metric.
+ // TODO(issue #152): consider only sending the name of descriptor for
+ // optimization.
+ MetricDescriptor *MetricDescriptor `protobuf:"bytes,1,opt,name=metric_descriptor,json=metricDescriptor,proto3" json:"metric_descriptor,omitempty"`
+ // One or more timeseries for a single metric, where each timeseries has
+ // one or more points.
+ Timeseries []*TimeSeries `protobuf:"bytes,2,rep,name=timeseries,proto3" json:"timeseries,omitempty"`
+ // The resource for the metric. If unset, it may be set to a default value
+ // provided for a sequence of messages in an RPC stream.
+ Resource *v1.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"`
+}
+
+func (x *Metric) Reset() {
+ *x = Metric{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Metric) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Metric) ProtoMessage() {}
+
+func (x *Metric) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Metric.ProtoReflect.Descriptor instead.
+func (*Metric) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Metric) GetMetricDescriptor() *MetricDescriptor {
+ if x != nil {
+ return x.MetricDescriptor
+ }
+ return nil
+}
+
+func (x *Metric) GetTimeseries() []*TimeSeries {
+ if x != nil {
+ return x.Timeseries
+ }
+ return nil
+}
+
+func (x *Metric) GetResource() *v1.Resource {
+ if x != nil {
+ return x.Resource
+ }
+ return nil
+}
+
+// Defines a metric type and its schema.
+type MetricDescriptor struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The metric type, including its DNS name prefix. It must be unique.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // A detailed description of the metric, which can be used in documentation.
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ // The unit in which the metric value is reported. Follows the format
+ // described by http://unitsofmeasure.org/ucum.html.
+ Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"`
+ Type MetricDescriptor_Type `protobuf:"varint,4,opt,name=type,proto3,enum=opencensus.proto.metrics.v1.MetricDescriptor_Type" json:"type,omitempty"`
+ // The label keys associated with the metric descriptor.
+ LabelKeys []*LabelKey `protobuf:"bytes,5,rep,name=label_keys,json=labelKeys,proto3" json:"label_keys,omitempty"`
+}
+
+func (x *MetricDescriptor) Reset() {
+ *x = MetricDescriptor{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MetricDescriptor) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MetricDescriptor) ProtoMessage() {}
+
+func (x *MetricDescriptor) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MetricDescriptor.ProtoReflect.Descriptor instead.
+func (*MetricDescriptor) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *MetricDescriptor) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *MetricDescriptor) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *MetricDescriptor) GetUnit() string {
+ if x != nil {
+ return x.Unit
+ }
+ return ""
+}
+
+func (x *MetricDescriptor) GetType() MetricDescriptor_Type {
+ if x != nil {
+ return x.Type
+ }
+ return MetricDescriptor_UNSPECIFIED
+}
+
+func (x *MetricDescriptor) GetLabelKeys() []*LabelKey {
+ if x != nil {
+ return x.LabelKeys
+ }
+ return nil
+}
+
+// Defines a label key associated with a metric descriptor.
+type LabelKey struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The key for the label.
+ Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // A human-readable description of what this label key represents.
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+}
+
+func (x *LabelKey) Reset() {
+ *x = LabelKey{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LabelKey) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LabelKey) ProtoMessage() {}
+
+func (x *LabelKey) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LabelKey.ProtoReflect.Descriptor instead.
+func (*LabelKey) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *LabelKey) GetKey() string {
+ if x != nil {
+ return x.Key
+ }
+ return ""
+}
+
+func (x *LabelKey) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+// A collection of data points that describes the time-varying values
+// of a metric.
+type TimeSeries struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Must be present for cumulative metrics. The time when the cumulative value
+ // was reset to zero. Exclusive. The cumulative value is over the time interval
+ // (start_timestamp, timestamp]. If not specified, the backend can use the
+ // previous recorded value.
+ StartTimestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"`
+ // The set of label values that uniquely identify this timeseries. Applies to
+ // all points. The order of label values must match that of label keys in the
+ // metric descriptor.
+ LabelValues []*LabelValue `protobuf:"bytes,2,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"`
+ // The data points of this timeseries. Point.value type MUST match the
+ // MetricDescriptor.type.
+ Points []*Point `protobuf:"bytes,3,rep,name=points,proto3" json:"points,omitempty"`
+}
+
+func (x *TimeSeries) Reset() {
+ *x = TimeSeries{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TimeSeries) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TimeSeries) ProtoMessage() {}
+
+func (x *TimeSeries) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TimeSeries.ProtoReflect.Descriptor instead.
+func (*TimeSeries) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *TimeSeries) GetStartTimestamp() *timestamppb.Timestamp {
+ if x != nil {
+ return x.StartTimestamp
+ }
+ return nil
+}
+
+func (x *TimeSeries) GetLabelValues() []*LabelValue {
+ if x != nil {
+ return x.LabelValues
+ }
+ return nil
+}
+
+func (x *TimeSeries) GetPoints() []*Point {
+ if x != nil {
+ return x.Points
+ }
+ return nil
+}
+
+type LabelValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The value for the label.
+ Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+ // If false the value field is ignored and considered not set.
+ // This is used to differentiate a missing label from an empty string.
+ HasValue bool `protobuf:"varint,2,opt,name=has_value,json=hasValue,proto3" json:"has_value,omitempty"`
+}
+
+func (x *LabelValue) Reset() {
+ *x = LabelValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LabelValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LabelValue) ProtoMessage() {}
+
+func (x *LabelValue) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LabelValue.ProtoReflect.Descriptor instead.
+func (*LabelValue) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *LabelValue) GetValue() string {
+ if x != nil {
+ return x.Value
+ }
+ return ""
+}
+
+func (x *LabelValue) GetHasValue() bool {
+ if x != nil {
+ return x.HasValue
+ }
+ return false
+}
+
+// A timestamped measurement.
+type Point struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The moment when this point was recorded. Inclusive.
+ // If not specified, the timestamp will be decided by the backend.
+ Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ // The actual point value.
+ //
+ // Types that are assignable to Value:
+ // *Point_Int64Value
+ // *Point_DoubleValue
+ // *Point_DistributionValue
+ // *Point_SummaryValue
+ Value isPoint_Value `protobuf_oneof:"value"`
+}
+
+func (x *Point) Reset() {
+ *x = Point{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Point) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Point) ProtoMessage() {}
+
+func (x *Point) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Point.ProtoReflect.Descriptor instead.
+func (*Point) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *Point) GetTimestamp() *timestamppb.Timestamp {
+ if x != nil {
+ return x.Timestamp
+ }
+ return nil
+}
+
+func (m *Point) GetValue() isPoint_Value {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (x *Point) GetInt64Value() int64 {
+ if x, ok := x.GetValue().(*Point_Int64Value); ok {
+ return x.Int64Value
+ }
+ return 0
+}
+
+func (x *Point) GetDoubleValue() float64 {
+ if x, ok := x.GetValue().(*Point_DoubleValue); ok {
+ return x.DoubleValue
+ }
+ return 0
+}
+
+func (x *Point) GetDistributionValue() *DistributionValue {
+ if x, ok := x.GetValue().(*Point_DistributionValue); ok {
+ return x.DistributionValue
+ }
+ return nil
+}
+
+func (x *Point) GetSummaryValue() *SummaryValue {
+ if x, ok := x.GetValue().(*Point_SummaryValue); ok {
+ return x.SummaryValue
+ }
+ return nil
+}
+
+type isPoint_Value interface {
+ isPoint_Value()
+}
+
+type Point_Int64Value struct {
+ // A 64-bit integer.
+ Int64Value int64 `protobuf:"varint,2,opt,name=int64_value,json=int64Value,proto3,oneof"`
+}
+
+type Point_DoubleValue struct {
+ // A 64-bit double-precision floating-point number.
+ DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+type Point_DistributionValue struct {
+ // A distribution value.
+ DistributionValue *DistributionValue `protobuf:"bytes,4,opt,name=distribution_value,json=distributionValue,proto3,oneof"`
+}
+
+type Point_SummaryValue struct {
+ // A summary value. This is not recommended, since it cannot be aggregated.
+ SummaryValue *SummaryValue `protobuf:"bytes,5,opt,name=summary_value,json=summaryValue,proto3,oneof"`
+}
+
+func (*Point_Int64Value) isPoint_Value() {}
+
+func (*Point_DoubleValue) isPoint_Value() {}
+
+func (*Point_DistributionValue) isPoint_Value() {}
+
+func (*Point_SummaryValue) isPoint_Value() {}
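The Value oneof above is represented by one wrapper struct per case; callers set exactly one wrapper and read it back either through the typed getters or a type switch on GetValue. An illustrative snippet, assumed to live in this package (the literal values are invented):

// pointValueKind is illustrative only: it shows how the oneof wrappers above
// are populated and inspected.
func pointValueKind() string {
	p := &Point{
		Timestamp: timestamppb.Now(),
		Value:     &Point_DoubleValue{DoubleValue: 0.25},
	}
	switch p.GetValue().(type) {
	case *Point_Int64Value:
		return "int64"
	case *Point_DoubleValue:
		return "double" // this branch is taken for the point built above
	case *Point_DistributionValue, *Point_SummaryValue:
		return "aggregate"
	default:
		return "unset"
	}
}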
+
+// Distribution contains summary statistics for a population of values. It
+// optionally contains a histogram representing the distribution of those
+// values across a set of buckets.
+type DistributionValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The number of values in the population. Must be non-negative. This value
+ // must equal the sum of the values in bucket_counts if a histogram is
+ // provided.
+ Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"`
+ // The sum of the values in the population. If count is zero then this field
+ // must be zero.
+ Sum float64 `protobuf:"fixed64,2,opt,name=sum,proto3" json:"sum,omitempty"`
+ // The sum of squared deviations from the mean of the values in the
+ // population. For values x_i this is:
+ //
+ // Sum[i=1..n]((x_i - mean)^2)
+ //
+ // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition
+ // describes Welford's method for accumulating this sum in one pass.
+ //
+ // If count is zero then this field must be zero.
+ SumOfSquaredDeviation float64 `protobuf:"fixed64,3,opt,name=sum_of_squared_deviation,json=sumOfSquaredDeviation,proto3" json:"sum_of_squared_deviation,omitempty"`
+ // Don't change bucket boundaries within a TimeSeries if your backend doesn't
+ // support this.
+ // TODO(issue #152): consider not required to send bucket options for
+ // optimization.
+ BucketOptions *DistributionValue_BucketOptions `protobuf:"bytes,4,opt,name=bucket_options,json=bucketOptions,proto3" json:"bucket_options,omitempty"`
+ // If the distribution does not have a histogram, then omit this field.
+ // If there is a histogram, then the sum of the values in the Bucket counts
+ // must equal the value in the count field of the distribution.
+ Buckets []*DistributionValue_Bucket `protobuf:"bytes,5,rep,name=buckets,proto3" json:"buckets,omitempty"`
+}
+
+func (x *DistributionValue) Reset() {
+ *x = DistributionValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DistributionValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DistributionValue) ProtoMessage() {}
+
+func (x *DistributionValue) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DistributionValue.ProtoReflect.Descriptor instead.
+func (*DistributionValue) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *DistributionValue) GetCount() int64 {
+ if x != nil {
+ return x.Count
+ }
+ return 0
+}
+
+func (x *DistributionValue) GetSum() float64 {
+ if x != nil {
+ return x.Sum
+ }
+ return 0
+}
+
+func (x *DistributionValue) GetSumOfSquaredDeviation() float64 {
+ if x != nil {
+ return x.SumOfSquaredDeviation
+ }
+ return 0
+}
+
+func (x *DistributionValue) GetBucketOptions() *DistributionValue_BucketOptions {
+ if x != nil {
+ return x.BucketOptions
+ }
+ return nil
+}
+
+func (x *DistributionValue) GetBuckets() []*DistributionValue_Bucket {
+ if x != nil {
+ return x.Buckets
+ }
+ return nil
+}
+
+// The start_timestamp only applies to the count and sum in the SummaryValue.
+type SummaryValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The total number of recorded values since start_time. Optional since
+ // some systems don't expose this.
+ Count *wrapperspb.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"`
+ // The total sum of recorded values since start_time. Optional since some
+ // systems don't expose this. If count is zero then this field must be zero.
+ // This field must be unset if the sum is not available.
+ Sum *wrapperspb.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"`
+ // Values calculated over an arbitrary time window.
+ Snapshot *SummaryValue_Snapshot `protobuf:"bytes,3,opt,name=snapshot,proto3" json:"snapshot,omitempty"`
+}
+
+func (x *SummaryValue) Reset() {
+ *x = SummaryValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SummaryValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SummaryValue) ProtoMessage() {}
+
+func (x *SummaryValue) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SummaryValue.ProtoReflect.Descriptor instead.
+func (*SummaryValue) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *SummaryValue) GetCount() *wrapperspb.Int64Value {
+ if x != nil {
+ return x.Count
+ }
+ return nil
+}
+
+func (x *SummaryValue) GetSum() *wrapperspb.DoubleValue {
+ if x != nil {
+ return x.Sum
+ }
+ return nil
+}
+
+func (x *SummaryValue) GetSnapshot() *SummaryValue_Snapshot {
+ if x != nil {
+ return x.Snapshot
+ }
+ return nil
+}
+
+// A Distribution may optionally contain a histogram of the values in the
+// population. The bucket boundaries for that histogram are described by
+// BucketOptions.
+//
+// If bucket_options has no type, then there is no histogram associated with
+// the Distribution.
+type DistributionValue_BucketOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Type:
+ // *DistributionValue_BucketOptions_Explicit_
+ Type isDistributionValue_BucketOptions_Type `protobuf_oneof:"type"`
+}
+
+func (x *DistributionValue_BucketOptions) Reset() {
+ *x = DistributionValue_BucketOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DistributionValue_BucketOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DistributionValue_BucketOptions) ProtoMessage() {}
+
+func (x *DistributionValue_BucketOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DistributionValue_BucketOptions.ProtoReflect.Descriptor instead.
+func (*DistributionValue_BucketOptions) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{6, 0}
+}
+
+func (m *DistributionValue_BucketOptions) GetType() isDistributionValue_BucketOptions_Type {
+ if m != nil {
+ return m.Type
+ }
+ return nil
+}
+
+func (x *DistributionValue_BucketOptions) GetExplicit() *DistributionValue_BucketOptions_Explicit {
+ if x, ok := x.GetType().(*DistributionValue_BucketOptions_Explicit_); ok {
+ return x.Explicit
+ }
+ return nil
+}
+
+type isDistributionValue_BucketOptions_Type interface {
+ isDistributionValue_BucketOptions_Type()
+}
+
+type DistributionValue_BucketOptions_Explicit_ struct {
+ // Bucket with explicit bounds.
+ Explicit *DistributionValue_BucketOptions_Explicit `protobuf:"bytes,1,opt,name=explicit,proto3,oneof"`
+}
+
+func (*DistributionValue_BucketOptions_Explicit_) isDistributionValue_BucketOptions_Type() {}
+
+type DistributionValue_Bucket struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The number of values in each bucket of the histogram, as described in
+ // bucket_bounds.
+ Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"`
+ // If the distribution does not have a histogram, then omit this field.
+ Exemplar *DistributionValue_Exemplar `protobuf:"bytes,2,opt,name=exemplar,proto3" json:"exemplar,omitempty"`
+}
+
+func (x *DistributionValue_Bucket) Reset() {
+ *x = DistributionValue_Bucket{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DistributionValue_Bucket) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DistributionValue_Bucket) ProtoMessage() {}
+
+func (x *DistributionValue_Bucket) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DistributionValue_Bucket.ProtoReflect.Descriptor instead.
+func (*DistributionValue_Bucket) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{6, 1}
+}
+
+func (x *DistributionValue_Bucket) GetCount() int64 {
+ if x != nil {
+ return x.Count
+ }
+ return 0
+}
+
+func (x *DistributionValue_Bucket) GetExemplar() *DistributionValue_Exemplar {
+ if x != nil {
+ return x.Exemplar
+ }
+ return nil
+}
+
+// Exemplars are example points that may be used to annotate aggregated
+// Distribution values. They are metadata that gives information about a
+// particular value added to a Distribution bucket.
+type DistributionValue_Exemplar struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Value of the exemplar point. It determines which bucket the exemplar
+ // belongs to.
+ Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
+ // The observation (sampling) time of the above value.
+ Timestamp *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ // Contextual information about the example value.
+ Attachments map[string]string `protobuf:"bytes,3,rep,name=attachments,proto3" json:"attachments,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *DistributionValue_Exemplar) Reset() {
+ *x = DistributionValue_Exemplar{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DistributionValue_Exemplar) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DistributionValue_Exemplar) ProtoMessage() {}
+
+func (x *DistributionValue_Exemplar) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DistributionValue_Exemplar.ProtoReflect.Descriptor instead.
+func (*DistributionValue_Exemplar) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{6, 2}
+}
+
+func (x *DistributionValue_Exemplar) GetValue() float64 {
+ if x != nil {
+ return x.Value
+ }
+ return 0
+}
+
+func (x *DistributionValue_Exemplar) GetTimestamp() *timestamppb.Timestamp {
+ if x != nil {
+ return x.Timestamp
+ }
+ return nil
+}
+
+func (x *DistributionValue_Exemplar) GetAttachments() map[string]string {
+ if x != nil {
+ return x.Attachments
+ }
+ return nil
+}
+
+// Specifies a set of buckets with arbitrary upper-bounds.
+// This defines size(bounds) + 1 (= N) buckets. The boundaries for bucket
+// index i are:
+//
+// [0, bucket_bounds[i]) for i == 0
+// [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-1
+ // [bucket_bounds[i-1], +infinity) for i == N-1
+type DistributionValue_BucketOptions_Explicit struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The values must be strictly increasing and > 0.
+ Bounds []float64 `protobuf:"fixed64,1,rep,packed,name=bounds,proto3" json:"bounds,omitempty"`
+}
+
+func (x *DistributionValue_BucketOptions_Explicit) Reset() {
+ *x = DistributionValue_BucketOptions_Explicit{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DistributionValue_BucketOptions_Explicit) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DistributionValue_BucketOptions_Explicit) ProtoMessage() {}
+
+func (x *DistributionValue_BucketOptions_Explicit) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DistributionValue_BucketOptions_Explicit.ProtoReflect.Descriptor instead.
+func (*DistributionValue_BucketOptions_Explicit) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{6, 0, 0}
+}
+
+func (x *DistributionValue_BucketOptions_Explicit) GetBounds() []float64 {
+ if x != nil {
+ return x.Bounds
+ }
+ return nil
+}
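Given the boundary scheme documented above (len(bounds) + 1 buckets with half-open intervals and an unbounded last bucket), mapping a value to its bucket index is a search for the first upper bound greater than the value. A hedged sketch, assumed to live in this package and to import the standard library sort package (it is not part of the generated file):

// bucketIndexFor is illustrative only: it returns the histogram bucket index
// for v under the explicit bounds above, i.e. [0, bounds[0]) is bucket 0,
// [bounds[i-1], bounds[i]) is bucket i, and values >= the last bound land in
// the overflow bucket len(bounds).
func bucketIndexFor(e *DistributionValue_BucketOptions_Explicit, v float64) int {
	bounds := e.GetBounds()
	// sort.Search returns the smallest index whose bound is strictly greater
	// than v, which is exactly the bucket index; len(bounds) means overflow.
	return sort.Search(len(bounds), func(i int) bool { return v < bounds[i] })
}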
+
+// The values in this message can be reset at arbitrary unknown times, with
+// the requirement that all of them are reset at the same time.
+type SummaryValue_Snapshot struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The number of values in the snapshot. Optional since some systems don't
+ // expose this.
+ Count *wrapperspb.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"`
+ // The sum of values in the snapshot. Optional since some systems don't
+ // expose this. If count is zero then this field must be zero or not set
+ // (if not supported).
+ Sum *wrapperspb.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"`
+ // A list of values at different percentiles of the distribution calculated
+ // from the current snapshot. The percentiles must be strictly increasing.
+ PercentileValues []*SummaryValue_Snapshot_ValueAtPercentile `protobuf:"bytes,3,rep,name=percentile_values,json=percentileValues,proto3" json:"percentile_values,omitempty"`
+}
+
+func (x *SummaryValue_Snapshot) Reset() {
+ *x = SummaryValue_Snapshot{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SummaryValue_Snapshot) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SummaryValue_Snapshot) ProtoMessage() {}
+
+func (x *SummaryValue_Snapshot) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SummaryValue_Snapshot.ProtoReflect.Descriptor instead.
+func (*SummaryValue_Snapshot) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{7, 0}
+}
+
+func (x *SummaryValue_Snapshot) GetCount() *wrapperspb.Int64Value {
+ if x != nil {
+ return x.Count
+ }
+ return nil
+}
+
+func (x *SummaryValue_Snapshot) GetSum() *wrapperspb.DoubleValue {
+ if x != nil {
+ return x.Sum
+ }
+ return nil
+}
+
+func (x *SummaryValue_Snapshot) GetPercentileValues() []*SummaryValue_Snapshot_ValueAtPercentile {
+ if x != nil {
+ return x.PercentileValues
+ }
+ return nil
+}
+
+// Represents the value at a given percentile of a distribution.
+type SummaryValue_Snapshot_ValueAtPercentile struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The percentile of a distribution. Must be in the interval
+ // (0.0, 100.0].
+ Percentile float64 `protobuf:"fixed64,1,opt,name=percentile,proto3" json:"percentile,omitempty"`
+ // The value at the given percentile of a distribution.
+ Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *SummaryValue_Snapshot_ValueAtPercentile) Reset() {
+ *x = SummaryValue_Snapshot_ValueAtPercentile{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SummaryValue_Snapshot_ValueAtPercentile) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SummaryValue_Snapshot_ValueAtPercentile) ProtoMessage() {}
+
+func (x *SummaryValue_Snapshot_ValueAtPercentile) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SummaryValue_Snapshot_ValueAtPercentile.ProtoReflect.Descriptor instead.
+func (*SummaryValue_Snapshot_ValueAtPercentile) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{7, 0, 0}
+}
+
+func (x *SummaryValue_Snapshot_ValueAtPercentile) GetPercentile() float64 {
+ if x != nil {
+ return x.Percentile
+ }
+ return 0
+}
+
+func (x *SummaryValue_Snapshot_ValueAtPercentile) GetValue() float64 {
+ if x != nil {
+ return x.Value
+ }
+ return 0
+}
+
+var File_opencensus_proto_metrics_v1_metrics_proto protoreflect.FileDescriptor
+
+var file_opencensus_proto_metrics_v1_metrics_proto_rawDesc = []byte{
+ 0x0a, 0x29, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1b, 0x6f, 0x70, 0x65,
+ 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74,
+ 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70,
+ 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2b, 0x6f, 0x70, 0x65, 0x6e, 0x63,
+ 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf1, 0x01, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69,
+ 0x63, 0x12, 0x5a, 0x0a, 0x11, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6f,
+ 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
+ 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69,
+ 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x10, 0x6d, 0x65, 0x74,
+ 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x47, 0x0a,
+ 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e,
+ 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65,
+ 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63,
+ 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0x96, 0x03, 0x0a, 0x10, 0x4d,
+ 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12,
+ 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x12, 0x46, 0x0a, 0x04, 0x74, 0x79, 0x70,
+ 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x32, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65,
+ 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69,
+ 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70,
+ 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18,
+ 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73,
+ 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73,
+ 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x4b, 0x65, 0x79, 0x52, 0x09, 0x6c, 0x61,
+ 0x62, 0x65, 0x6c, 0x4b, 0x65, 0x79, 0x73, 0x22, 0xa9, 0x01, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65,
+ 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
+ 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34,
+ 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42,
+ 0x4c, 0x45, 0x10, 0x02, 0x12, 0x16, 0x0a, 0x12, 0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x44, 0x49,
+ 0x53, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x12, 0x14, 0x0a, 0x10,
+ 0x43, 0x55, 0x4d, 0x55, 0x4c, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34,
+ 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x55, 0x4d, 0x55, 0x4c, 0x41, 0x54, 0x49, 0x56, 0x45,
+ 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x05, 0x12, 0x1b, 0x0a, 0x17, 0x43, 0x55, 0x4d,
+ 0x55, 0x4c, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x44, 0x49, 0x53, 0x54, 0x52, 0x49, 0x42, 0x55,
+ 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x06, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52,
+ 0x59, 0x10, 0x07, 0x22, 0x3e, 0x0a, 0x08, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x4b, 0x65, 0x79, 0x12,
+ 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65,
+ 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x22, 0xd9, 0x01, 0x0a, 0x0a, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69,
+ 0x65, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x4a, 0x0a, 0x0c, 0x6c, 0x61, 0x62, 0x65, 0x6c,
+ 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e,
+ 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x61, 0x62, 0x65,
+ 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x56, 0x61, 0x6c,
+ 0x75, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76,
+ 0x31, 0x2e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x06, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x22,
+ 0x3f, 0x0a, 0x0a, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x68, 0x61, 0x73, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x68, 0x61, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x22, 0xc5, 0x02, 0x0a, 0x05, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73,
+ 0x74, 0x61, 0x6d, 0x70, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74,
+ 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c,
+ 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52,
+ 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x5f, 0x0a, 0x12,
+ 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63,
+ 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72,
+ 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74,
+ 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x11, 0x64, 0x69, 0x73, 0x74,
+ 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x50, 0x0a,
+ 0x0d, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75,
+ 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e,
+ 0x76, 0x31, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48,
+ 0x00, 0x52, 0x0c, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42,
+ 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xcb, 0x06, 0x0a, 0x11, 0x44, 0x69, 0x73,
+ 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14,
+ 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x63,
+ 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x01, 0x52, 0x03, 0x73, 0x75, 0x6d, 0x12, 0x37, 0x0a, 0x18, 0x73, 0x75, 0x6d, 0x5f, 0x6f, 0x66,
+ 0x5f, 0x73, 0x71, 0x75, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x64, 0x65, 0x76, 0x69, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x15, 0x73, 0x75, 0x6d, 0x4f, 0x66, 0x53,
+ 0x71, 0x75, 0x61, 0x72, 0x65, 0x64, 0x44, 0x65, 0x76, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x63, 0x0a, 0x0e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65,
+ 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69,
+ 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69,
+ 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4f, 0x0a, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18,
+ 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73,
+ 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73,
+ 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x07, 0x62, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x73, 0x1a, 0xa0, 0x01, 0x0a, 0x0d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x63, 0x0a, 0x08, 0x65, 0x78, 0x70, 0x6c, 0x69,
+ 0x63, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x6f, 0x70, 0x65, 0x6e,
+ 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74,
+ 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75,
+ 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74,
+ 0x48, 0x00, 0x52, 0x08, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x1a, 0x22, 0x0a, 0x08,
+ 0x45, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x6f, 0x75, 0x6e,
+ 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x01, 0x52, 0x06, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x73,
+ 0x42, 0x06, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x73, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x53, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d,
+ 0x70, 0x6c, 0x61, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x6f, 0x70, 0x65,
+ 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62,
+ 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70,
+ 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x1a, 0x86, 0x02,
+ 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52,
+ 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x6a, 0x0a, 0x0b, 0x61, 0x74,
+ 0x74, 0x61, 0x63, 0x68, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x48, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69,
+ 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e,
+ 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x2e, 0x41, 0x74, 0x74, 0x61, 0x63, 0x68, 0x6d,
+ 0x65, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x74, 0x74, 0x61, 0x63,
+ 0x68, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x74, 0x74, 0x61, 0x63, 0x68,
+ 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xef, 0x03, 0x0a, 0x0c, 0x53, 0x75, 0x6d, 0x6d, 0x61,
+ 0x72, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x03, 0x73, 0x75,
+ 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03, 0x73, 0x75, 0x6d, 0x12, 0x4e, 0x0a, 0x08, 0x73, 0x6e,
+ 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x6f,
+ 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
+ 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61,
+ 0x72, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74,
+ 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x1a, 0xab, 0x02, 0x0a, 0x08, 0x53,
+ 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x31, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x03, 0x73, 0x75,
+ 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03, 0x73, 0x75, 0x6d, 0x12, 0x71, 0x0a, 0x11, 0x70, 0x65,
+ 0x72, 0x63, 0x65, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18,
+ 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73,
+ 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73,
+ 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x41,
+ 0x74, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x10, 0x70, 0x65, 0x72,
+ 0x63, 0x65, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x1a, 0x49, 0x0a,
+ 0x11, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x41, 0x74, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x69,
+ 0x6c, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x69, 0x6c, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x69,
+ 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x97, 0x01, 0x0a, 0x1e, 0x69, 0x6f, 0x2e,
+ 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x0c, 0x4d, 0x65, 0x74,
+ 0x72, 0x69, 0x63, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, 0x74,
+ 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x69,
+ 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6f,
+ 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
+ 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f, 0x76,
+ 0x31, 0xea, 0x02, 0x1e, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x3a, 0x3a,
+ 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x3a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x3a, 0x3a,
+ 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_opencensus_proto_metrics_v1_metrics_proto_rawDescOnce sync.Once
+ file_opencensus_proto_metrics_v1_metrics_proto_rawDescData = file_opencensus_proto_metrics_v1_metrics_proto_rawDesc
+)
+
+func file_opencensus_proto_metrics_v1_metrics_proto_rawDescGZIP() []byte {
+ file_opencensus_proto_metrics_v1_metrics_proto_rawDescOnce.Do(func() {
+ file_opencensus_proto_metrics_v1_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_opencensus_proto_metrics_v1_metrics_proto_rawDescData)
+ })
+ return file_opencensus_proto_metrics_v1_metrics_proto_rawDescData
+}
+
+var file_opencensus_proto_metrics_v1_metrics_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_opencensus_proto_metrics_v1_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 15)
+var file_opencensus_proto_metrics_v1_metrics_proto_goTypes = []interface{}{
+ (MetricDescriptor_Type)(0), // 0: opencensus.proto.metrics.v1.MetricDescriptor.Type
+ (*Metric)(nil), // 1: opencensus.proto.metrics.v1.Metric
+ (*MetricDescriptor)(nil), // 2: opencensus.proto.metrics.v1.MetricDescriptor
+ (*LabelKey)(nil), // 3: opencensus.proto.metrics.v1.LabelKey
+ (*TimeSeries)(nil), // 4: opencensus.proto.metrics.v1.TimeSeries
+ (*LabelValue)(nil), // 5: opencensus.proto.metrics.v1.LabelValue
+ (*Point)(nil), // 6: opencensus.proto.metrics.v1.Point
+ (*DistributionValue)(nil), // 7: opencensus.proto.metrics.v1.DistributionValue
+ (*SummaryValue)(nil), // 8: opencensus.proto.metrics.v1.SummaryValue
+ (*DistributionValue_BucketOptions)(nil), // 9: opencensus.proto.metrics.v1.DistributionValue.BucketOptions
+ (*DistributionValue_Bucket)(nil), // 10: opencensus.proto.metrics.v1.DistributionValue.Bucket
+ (*DistributionValue_Exemplar)(nil), // 11: opencensus.proto.metrics.v1.DistributionValue.Exemplar
+ (*DistributionValue_BucketOptions_Explicit)(nil), // 12: opencensus.proto.metrics.v1.DistributionValue.BucketOptions.Explicit
+ nil, // 13: opencensus.proto.metrics.v1.DistributionValue.Exemplar.AttachmentsEntry
+ (*SummaryValue_Snapshot)(nil), // 14: opencensus.proto.metrics.v1.SummaryValue.Snapshot
+ (*SummaryValue_Snapshot_ValueAtPercentile)(nil), // 15: opencensus.proto.metrics.v1.SummaryValue.Snapshot.ValueAtPercentile
+ (*v1.Resource)(nil), // 16: opencensus.proto.resource.v1.Resource
+ (*timestamppb.Timestamp)(nil), // 17: google.protobuf.Timestamp
+ (*wrapperspb.Int64Value)(nil), // 18: google.protobuf.Int64Value
+ (*wrapperspb.DoubleValue)(nil), // 19: google.protobuf.DoubleValue
+}
+var file_opencensus_proto_metrics_v1_metrics_proto_depIdxs = []int32{
+ 2, // 0: opencensus.proto.metrics.v1.Metric.metric_descriptor:type_name -> opencensus.proto.metrics.v1.MetricDescriptor
+ 4, // 1: opencensus.proto.metrics.v1.Metric.timeseries:type_name -> opencensus.proto.metrics.v1.TimeSeries
+ 16, // 2: opencensus.proto.metrics.v1.Metric.resource:type_name -> opencensus.proto.resource.v1.Resource
+ 0, // 3: opencensus.proto.metrics.v1.MetricDescriptor.type:type_name -> opencensus.proto.metrics.v1.MetricDescriptor.Type
+ 3, // 4: opencensus.proto.metrics.v1.MetricDescriptor.label_keys:type_name -> opencensus.proto.metrics.v1.LabelKey
+ 17, // 5: opencensus.proto.metrics.v1.TimeSeries.start_timestamp:type_name -> google.protobuf.Timestamp
+ 5, // 6: opencensus.proto.metrics.v1.TimeSeries.label_values:type_name -> opencensus.proto.metrics.v1.LabelValue
+ 6, // 7: opencensus.proto.metrics.v1.TimeSeries.points:type_name -> opencensus.proto.metrics.v1.Point
+ 17, // 8: opencensus.proto.metrics.v1.Point.timestamp:type_name -> google.protobuf.Timestamp
+ 7, // 9: opencensus.proto.metrics.v1.Point.distribution_value:type_name -> opencensus.proto.metrics.v1.DistributionValue
+ 8, // 10: opencensus.proto.metrics.v1.Point.summary_value:type_name -> opencensus.proto.metrics.v1.SummaryValue
+ 9, // 11: opencensus.proto.metrics.v1.DistributionValue.bucket_options:type_name -> opencensus.proto.metrics.v1.DistributionValue.BucketOptions
+ 10, // 12: opencensus.proto.metrics.v1.DistributionValue.buckets:type_name -> opencensus.proto.metrics.v1.DistributionValue.Bucket
+ 18, // 13: opencensus.proto.metrics.v1.SummaryValue.count:type_name -> google.protobuf.Int64Value
+ 19, // 14: opencensus.proto.metrics.v1.SummaryValue.sum:type_name -> google.protobuf.DoubleValue
+ 14, // 15: opencensus.proto.metrics.v1.SummaryValue.snapshot:type_name -> opencensus.proto.metrics.v1.SummaryValue.Snapshot
+ 12, // 16: opencensus.proto.metrics.v1.DistributionValue.BucketOptions.explicit:type_name -> opencensus.proto.metrics.v1.DistributionValue.BucketOptions.Explicit
+ 11, // 17: opencensus.proto.metrics.v1.DistributionValue.Bucket.exemplar:type_name -> opencensus.proto.metrics.v1.DistributionValue.Exemplar
+ 17, // 18: opencensus.proto.metrics.v1.DistributionValue.Exemplar.timestamp:type_name -> google.protobuf.Timestamp
+ 13, // 19: opencensus.proto.metrics.v1.DistributionValue.Exemplar.attachments:type_name -> opencensus.proto.metrics.v1.DistributionValue.Exemplar.AttachmentsEntry
+ 18, // 20: opencensus.proto.metrics.v1.SummaryValue.Snapshot.count:type_name -> google.protobuf.Int64Value
+ 19, // 21: opencensus.proto.metrics.v1.SummaryValue.Snapshot.sum:type_name -> google.protobuf.DoubleValue
+ 15, // 22: opencensus.proto.metrics.v1.SummaryValue.Snapshot.percentile_values:type_name -> opencensus.proto.metrics.v1.SummaryValue.Snapshot.ValueAtPercentile
+ 23, // [23:23] is the sub-list for method output_type
+ 23, // [23:23] is the sub-list for method input_type
+ 23, // [23:23] is the sub-list for extension type_name
+ 23, // [23:23] is the sub-list for extension extendee
+ 0, // [0:23] is the sub-list for field type_name
+}
+
+func init() { file_opencensus_proto_metrics_v1_metrics_proto_init() }
+func file_opencensus_proto_metrics_v1_metrics_proto_init() {
+ if File_opencensus_proto_metrics_v1_metrics_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Metric); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MetricDescriptor); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LabelKey); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TimeSeries); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LabelValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Point); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DistributionValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SummaryValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DistributionValue_BucketOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DistributionValue_Bucket); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DistributionValue_Exemplar); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DistributionValue_BucketOptions_Explicit); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SummaryValue_Snapshot); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SummaryValue_Snapshot_ValueAtPercentile); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[5].OneofWrappers = []interface{}{
+ (*Point_Int64Value)(nil),
+ (*Point_DoubleValue)(nil),
+ (*Point_DistributionValue)(nil),
+ (*Point_SummaryValue)(nil),
+ }
+ file_opencensus_proto_metrics_v1_metrics_proto_msgTypes[8].OneofWrappers = []interface{}{
+ (*DistributionValue_BucketOptions_Explicit_)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_opencensus_proto_metrics_v1_metrics_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 15,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_opencensus_proto_metrics_v1_metrics_proto_goTypes,
+ DependencyIndexes: file_opencensus_proto_metrics_v1_metrics_proto_depIdxs,
+ EnumInfos: file_opencensus_proto_metrics_v1_metrics_proto_enumTypes,
+ MessageInfos: file_opencensus_proto_metrics_v1_metrics_proto_msgTypes,
+ }.Build()
+ File_opencensus_proto_metrics_v1_metrics_proto = out.File
+ file_opencensus_proto_metrics_v1_metrics_proto_rawDesc = nil
+ file_opencensus_proto_metrics_v1_metrics_proto_goTypes = nil
+ file_opencensus_proto_metrics_v1_metrics_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go
new file mode 100644
index 00000000..194dd70d
--- /dev/null
+++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go
@@ -0,0 +1,189 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.17.3
+// source: opencensus/proto/resource/v1/resource.proto
+
+package v1
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Resource information.
+type Resource struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Type identifier for the resource.
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ // Set of labels that describe the resource.
+ Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *Resource) Reset() {
+ *x = Resource{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_resource_v1_resource_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Resource) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Resource) ProtoMessage() {}
+
+func (x *Resource) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_resource_v1_resource_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Resource.ProtoReflect.Descriptor instead.
+func (*Resource) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_resource_v1_resource_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Resource) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+func (x *Resource) GetLabels() map[string]string {
+ if x != nil {
+ return x.Labels
+ }
+ return nil
+}
+
+var File_opencensus_proto_resource_v1_resource_proto protoreflect.FileDescriptor
+
+var file_opencensus_proto_resource_v1_resource_proto_rawDesc = []byte{
+ 0x0a, 0x2b, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1c, 0x6f,
+ 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
+ 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x22, 0xa5, 0x01, 0x0a, 0x08,
+ 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x4a, 0x0a, 0x06,
+ 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x6f,
+ 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
+ 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65,
+ 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a,
+ 0x02, 0x38, 0x01, 0x42, 0x9b, 0x01, 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63,
+ 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x45, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x69, 0x6e, 0x73, 0x74,
+ 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6f, 0x70, 0x65, 0x6e,
+ 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x65, 0x6e,
+ 0x2d, 0x67, 0x6f, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xea,
+ 0x02, 0x1f, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x3a, 0x3a, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x3a, 0x3a, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3a, 0x3a, 0x56,
+ 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_opencensus_proto_resource_v1_resource_proto_rawDescOnce sync.Once
+ file_opencensus_proto_resource_v1_resource_proto_rawDescData = file_opencensus_proto_resource_v1_resource_proto_rawDesc
+)
+
+func file_opencensus_proto_resource_v1_resource_proto_rawDescGZIP() []byte {
+ file_opencensus_proto_resource_v1_resource_proto_rawDescOnce.Do(func() {
+ file_opencensus_proto_resource_v1_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_opencensus_proto_resource_v1_resource_proto_rawDescData)
+ })
+ return file_opencensus_proto_resource_v1_resource_proto_rawDescData
+}
+
+var file_opencensus_proto_resource_v1_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_opencensus_proto_resource_v1_resource_proto_goTypes = []interface{}{
+ (*Resource)(nil), // 0: opencensus.proto.resource.v1.Resource
+ nil, // 1: opencensus.proto.resource.v1.Resource.LabelsEntry
+}
+var file_opencensus_proto_resource_v1_resource_proto_depIdxs = []int32{
+ 1, // 0: opencensus.proto.resource.v1.Resource.labels:type_name -> opencensus.proto.resource.v1.Resource.LabelsEntry
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_opencensus_proto_resource_v1_resource_proto_init() }
+func file_opencensus_proto_resource_v1_resource_proto_init() {
+ if File_opencensus_proto_resource_v1_resource_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_opencensus_proto_resource_v1_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Resource); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_opencensus_proto_resource_v1_resource_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_opencensus_proto_resource_v1_resource_proto_goTypes,
+ DependencyIndexes: file_opencensus_proto_resource_v1_resource_proto_depIdxs,
+ MessageInfos: file_opencensus_proto_resource_v1_resource_proto_msgTypes,
+ }.Build()
+ File_opencensus_proto_resource_v1_resource_proto = out.File
+ file_opencensus_proto_resource_v1_resource_proto_rawDesc = nil
+ file_opencensus_proto_resource_v1_resource_proto_goTypes = nil
+ file_opencensus_proto_resource_v1_resource_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go
new file mode 100644
index 00000000..d35612ca
--- /dev/null
+++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go
@@ -0,0 +1,2235 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.17.3
+// source: opencensus/proto/trace/v1/trace.proto
+
+package v1
+
+import (
+ v1 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Type of span. Can be used to specify additional relationships between spans
+// in addition to a parent/child relationship.
+type Span_SpanKind int32
+
+const (
+ // Unspecified.
+ Span_SPAN_KIND_UNSPECIFIED Span_SpanKind = 0
+ // Indicates that the span covers server-side handling of an RPC or other
+ // remote network request.
+ Span_SERVER Span_SpanKind = 1
+ // Indicates that the span covers the client-side wrapper around an RPC or
+ // other remote request.
+ Span_CLIENT Span_SpanKind = 2
+)
+
+// Enum value maps for Span_SpanKind.
+var (
+ Span_SpanKind_name = map[int32]string{
+ 0: "SPAN_KIND_UNSPECIFIED",
+ 1: "SERVER",
+ 2: "CLIENT",
+ }
+ Span_SpanKind_value = map[string]int32{
+ "SPAN_KIND_UNSPECIFIED": 0,
+ "SERVER": 1,
+ "CLIENT": 2,
+ }
+)
+
+func (x Span_SpanKind) Enum() *Span_SpanKind {
+ p := new(Span_SpanKind)
+ *p = x
+ return p
+}
+
+func (x Span_SpanKind) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Span_SpanKind) Descriptor() protoreflect.EnumDescriptor {
+ return file_opencensus_proto_trace_v1_trace_proto_enumTypes[0].Descriptor()
+}
+
+func (Span_SpanKind) Type() protoreflect.EnumType {
+ return &file_opencensus_proto_trace_v1_trace_proto_enumTypes[0]
+}
+
+func (x Span_SpanKind) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Span_SpanKind.Descriptor instead.
+func (Span_SpanKind) EnumDescriptor() ([]byte, []int) {
+ return file_opencensus_proto_trace_v1_trace_proto_rawDescGZIP(), []int{0, 0}
+}
+
+// Indicates whether the message was sent or received.
+type Span_TimeEvent_MessageEvent_Type int32
+
+const (
+ // Unknown event type.
+ Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED Span_TimeEvent_MessageEvent_Type = 0
+ // Indicates a sent message.
+ Span_TimeEvent_MessageEvent_SENT Span_TimeEvent_MessageEvent_Type = 1
+ // Indicates a received message.
+ Span_TimeEvent_MessageEvent_RECEIVED Span_TimeEvent_MessageEvent_Type = 2
+)
+
+// Enum value maps for Span_TimeEvent_MessageEvent_Type.
+var (
+ Span_TimeEvent_MessageEvent_Type_name = map[int32]string{
+ 0: "TYPE_UNSPECIFIED",
+ 1: "SENT",
+ 2: "RECEIVED",
+ }
+ Span_TimeEvent_MessageEvent_Type_value = map[string]int32{
+ "TYPE_UNSPECIFIED": 0,
+ "SENT": 1,
+ "RECEIVED": 2,
+ }
+)
+
+func (x Span_TimeEvent_MessageEvent_Type) Enum() *Span_TimeEvent_MessageEvent_Type {
+ p := new(Span_TimeEvent_MessageEvent_Type)
+ *p = x
+ return p
+}
+
+func (x Span_TimeEvent_MessageEvent_Type) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Span_TimeEvent_MessageEvent_Type) Descriptor() protoreflect.EnumDescriptor {
+ return file_opencensus_proto_trace_v1_trace_proto_enumTypes[1].Descriptor()
+}
+
+func (Span_TimeEvent_MessageEvent_Type) Type() protoreflect.EnumType {
+ return &file_opencensus_proto_trace_v1_trace_proto_enumTypes[1]
+}
+
+func (x Span_TimeEvent_MessageEvent_Type) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Span_TimeEvent_MessageEvent_Type.Descriptor instead.
+func (Span_TimeEvent_MessageEvent_Type) EnumDescriptor() ([]byte, []int) {
+ return file_opencensus_proto_trace_v1_trace_proto_rawDescGZIP(), []int{0, 2, 1, 0}
+}
+
+// The relationship of the current span relative to the linked span: child,
+// parent, or unspecified.
+type Span_Link_Type int32
+
+const (
+ // The relationship of the two spans is unknown, or known but other
+ // than parent-child.
+ Span_Link_TYPE_UNSPECIFIED Span_Link_Type = 0
+ // The linked span is a child of the current span.
+ Span_Link_CHILD_LINKED_SPAN Span_Link_Type = 1
+ // The linked span is a parent of the current span.
+ Span_Link_PARENT_LINKED_SPAN Span_Link_Type = 2
+)
+
+// Enum value maps for Span_Link_Type.
+var (
+ Span_Link_Type_name = map[int32]string{
+ 0: "TYPE_UNSPECIFIED",
+ 1: "CHILD_LINKED_SPAN",
+ 2: "PARENT_LINKED_SPAN",
+ }
+ Span_Link_Type_value = map[string]int32{
+ "TYPE_UNSPECIFIED": 0,
+ "CHILD_LINKED_SPAN": 1,
+ "PARENT_LINKED_SPAN": 2,
+ }
+)
+
+func (x Span_Link_Type) Enum() *Span_Link_Type {
+ p := new(Span_Link_Type)
+ *p = x
+ return p
+}
+
+func (x Span_Link_Type) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Span_Link_Type) Descriptor() protoreflect.EnumDescriptor {
+ return file_opencensus_proto_trace_v1_trace_proto_enumTypes[2].Descriptor()
+}
+
+func (Span_Link_Type) Type() protoreflect.EnumType {
+ return &file_opencensus_proto_trace_v1_trace_proto_enumTypes[2]
+}
+
+func (x Span_Link_Type) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Span_Link_Type.Descriptor instead.
+func (Span_Link_Type) EnumDescriptor() ([]byte, []int) {
+ return file_opencensus_proto_trace_v1_trace_proto_rawDescGZIP(), []int{0, 4, 0}
+}
+
+// A span represents a single operation within a trace. Spans can be
+// nested to form a trace tree. Spans may also be linked to other spans
+// from the same or a different trace, forming graphs. Often, a trace
+// contains a root span that describes the end-to-end latency, and one
+// or more subspans for its sub-operations. A trace can also contain
+// multiple root spans, or none at all. Spans do not need to be
+// contiguous; there may be gaps or overlaps between spans in a trace.
+//
+// The next id is 17.
+// TODO(bdrutu): Add an example.
+type Span struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A unique identifier for a trace. All spans from the same trace share
+ // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes
+ // is considered invalid.
+ //
+ // This field is semantically required. Receiver should generate new
+ // random trace_id if empty or invalid trace_id was received.
+ //
+ // This field is required.
+ TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
+ // A unique identifier for a span within a trace, assigned when the span
+ // is created. The ID is an 8-byte array. An ID with all zeroes is considered
+ // invalid.
+ //
+ // This field is semantically required. Receiver should generate new
+ // random span_id if empty or invalid span_id was received.
+ //
+ // This field is required.
+ SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
+ // The Tracestate on the span.
+ Tracestate *Span_Tracestate `protobuf:"bytes,15,opt,name=tracestate,proto3" json:"tracestate,omitempty"`
+ // The `span_id` of this span's parent span. If this is a root span, then this
+ // field must be empty. The ID is an 8-byte array.
+ ParentSpanId []byte `protobuf:"bytes,3,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"`
+ // A description of the span's operation.
+ //
+ // For example, the name can be a qualified method name or a file name
+ // and a line number where the operation is called. A best practice is to use
+ // the same display name at the same call point in an application.
+ // This makes it easier to correlate spans in different traces.
+ //
+ // This field is semantically required to be set to a non-empty string.
+ // When a null or empty string is received, the receiver may use the
+ // string "name" as a replacement. The receiver might also implement
+ // smarter algorithms to fix the empty span name.
+ //
+ // This field is required.
+ Name *TruncatableString `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
+ // Distinguishes between spans generated in a particular context. For example,
+ // two spans with the same name may be distinguished using `CLIENT` (caller)
+ // and `SERVER` (callee) to identify queueing latency associated with the span.
+ Kind Span_SpanKind `protobuf:"varint,14,opt,name=kind,proto3,enum=opencensus.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"`
+ // The start time of the span. On the client side, this is the time kept by
+ // the local machine where the span execution starts. On the server side, this
+ // is the time when the server's application handler starts running.
+ //
+ // This field is semantically required. When not set on receive, the
+ // receiver should set it to the value of the end_time field if that was
+ // set, or to the current time if neither was set. It is important to
+ // keep end_time > start_time for consistency.
+ //
+ // This field is required.
+ StartTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
+ // The end time of the span. On the client side, this is the time kept by
+ // the local machine where the span execution ends. On the server side, this
+ // is the time when the server application handler stops running.
+ //
+ // This field is semantically required. When not set on receive, the
+ // receiver should set it to the start_time value. It is important to
+ // keep end_time > start_time for consistency.
+ //
+ // This field is required.
+ EndTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
+ // A set of attributes on the span.
+ Attributes *Span_Attributes `protobuf:"bytes,7,opt,name=attributes,proto3" json:"attributes,omitempty"`
+ // A stack trace captured at the start of the span.
+ StackTrace *StackTrace `protobuf:"bytes,8,opt,name=stack_trace,json=stackTrace,proto3" json:"stack_trace,omitempty"`
+ // The included time events.
+ TimeEvents *Span_TimeEvents `protobuf:"bytes,9,opt,name=time_events,json=timeEvents,proto3" json:"time_events,omitempty"`
+ // The included links.
+ Links *Span_Links `protobuf:"bytes,10,opt,name=links,proto3" json:"links,omitempty"`
+ // An optional final status for this span. Semantically, when Status
+ // is not set it means the span ended without errors, and Status.Ok
+ // (code = 0) is assumed.
+ Status *Status `protobuf:"bytes,11,opt,name=status,proto3" json:"status,omitempty"`
+ // An optional resource that is associated with this span. If not set, this span
+ // should be part of a batch that does include the resource information, unless resource
+ // information is unknown.
+ Resource *v1.Resource `protobuf:"bytes,16,opt,name=resource,proto3" json:"resource,omitempty"`
+ // A highly recommended but not required flag that identifies when a
+ // trace crosses a process boundary. True when the parent_span belongs
+ // to the same process as the current span. This flag is most commonly
+ // used to indicate the need to adjust time as clocks in different
+ // processes may not be synchronized.
+ SameProcessAsParentSpan *wrapperspb.BoolValue `protobuf:"bytes,12,opt,name=same_process_as_parent_span,json=sameProcessAsParentSpan,proto3" json:"same_process_as_parent_span,omitempty"`
+ // An optional number of child spans that were generated while this span
+ // was active. If set, allows an implementation to detect missing child spans.
+ ChildSpanCount *wrapperspb.UInt32Value `protobuf:"bytes,13,opt,name=child_span_count,json=childSpanCount,proto3" json:"child_span_count,omitempty"`
+}
+
+func (x *Span) Reset() {
+ *x = Span{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Span) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Span) ProtoMessage() {}
+
+func (x *Span) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Span.ProtoReflect.Descriptor instead.
+func (*Span) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_trace_v1_trace_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Span) GetTraceId() []byte {
+ if x != nil {
+ return x.TraceId
+ }
+ return nil
+}
+
+func (x *Span) GetSpanId() []byte {
+ if x != nil {
+ return x.SpanId
+ }
+ return nil
+}
+
+func (x *Span) GetTracestate() *Span_Tracestate {
+ if x != nil {
+ return x.Tracestate
+ }
+ return nil
+}
+
+func (x *Span) GetParentSpanId() []byte {
+ if x != nil {
+ return x.ParentSpanId
+ }
+ return nil
+}
+
+func (x *Span) GetName() *TruncatableString {
+ if x != nil {
+ return x.Name
+ }
+ return nil
+}
+
+func (x *Span) GetKind() Span_SpanKind {
+ if x != nil {
+ return x.Kind
+ }
+ return Span_SPAN_KIND_UNSPECIFIED
+}
+
+func (x *Span) GetStartTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.StartTime
+ }
+ return nil
+}
+
+func (x *Span) GetEndTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.EndTime
+ }
+ return nil
+}
+
+func (x *Span) GetAttributes() *Span_Attributes {
+ if x != nil {
+ return x.Attributes
+ }
+ return nil
+}
+
+func (x *Span) GetStackTrace() *StackTrace {
+ if x != nil {
+ return x.StackTrace
+ }
+ return nil
+}
+
+func (x *Span) GetTimeEvents() *Span_TimeEvents {
+ if x != nil {
+ return x.TimeEvents
+ }
+ return nil
+}
+
+func (x *Span) GetLinks() *Span_Links {
+ if x != nil {
+ return x.Links
+ }
+ return nil
+}
+
+func (x *Span) GetStatus() *Status {
+ if x != nil {
+ return x.Status
+ }
+ return nil
+}
+
+func (x *Span) GetResource() *v1.Resource {
+ if x != nil {
+ return x.Resource
+ }
+ return nil
+}
+
+func (x *Span) GetSameProcessAsParentSpan() *wrapperspb.BoolValue {
+ if x != nil {
+ return x.SameProcessAsParentSpan
+ }
+ return nil
+}
+
+func (x *Span) GetChildSpanCount() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.ChildSpanCount
+ }
+ return nil
+}
+
+// The `Status` type defines a logical error model that is suitable for different
+// programming environments, including REST APIs and RPC APIs. This proto's fields
+// are a subset of those of
+// [google.rpc.Status](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto),
+// which is used by [gRPC](https://github.com/grpc).
+type Status struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The status code. This is an optional field. It is safe to assume 0 (OK)
+ // when not set.
+ Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
+ // A developer-facing error message, which should be in English.
+ Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+}
+
+func (x *Status) Reset() {
+ *x = Status{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Status) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Status) ProtoMessage() {}
+
+func (x *Status) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Status.ProtoReflect.Descriptor instead.
+func (*Status) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_trace_v1_trace_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Status) GetCode() int32 {
+ if x != nil {
+ return x.Code
+ }
+ return 0
+}
+
+func (x *Status) GetMessage() string {
+ if x != nil {
+ return x.Message
+ }
+ return ""
+}
+
+// The value of an Attribute.
+type AttributeValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The type of the value.
+ //
+ // Types that are assignable to Value:
+ // *AttributeValue_StringValue
+ // *AttributeValue_IntValue
+ // *AttributeValue_BoolValue
+ // *AttributeValue_DoubleValue
+ Value isAttributeValue_Value `protobuf_oneof:"value"`
+}
+
+func (x *AttributeValue) Reset() {
+ *x = AttributeValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AttributeValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AttributeValue) ProtoMessage() {}
+
+func (x *AttributeValue) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AttributeValue.ProtoReflect.Descriptor instead.
+func (*AttributeValue) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_trace_v1_trace_proto_rawDescGZIP(), []int{2}
+}
+
+func (m *AttributeValue) GetValue() isAttributeValue_Value {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (x *AttributeValue) GetStringValue() *TruncatableString {
+ if x, ok := x.GetValue().(*AttributeValue_StringValue); ok {
+ return x.StringValue
+ }
+ return nil
+}
+
+func (x *AttributeValue) GetIntValue() int64 {
+ if x, ok := x.GetValue().(*AttributeValue_IntValue); ok {
+ return x.IntValue
+ }
+ return 0
+}
+
+func (x *AttributeValue) GetBoolValue() bool {
+ if x, ok := x.GetValue().(*AttributeValue_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (x *AttributeValue) GetDoubleValue() float64 {
+ if x, ok := x.GetValue().(*AttributeValue_DoubleValue); ok {
+ return x.DoubleValue
+ }
+ return 0
+}
+
+type isAttributeValue_Value interface {
+ isAttributeValue_Value()
+}
+
+type AttributeValue_StringValue struct {
+ // A string up to 256 bytes long.
+ StringValue *TruncatableString `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type AttributeValue_IntValue struct {
+ // A 64-bit signed integer.
+ IntValue int64 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof"`
+}
+
+type AttributeValue_BoolValue struct {
+ // A Boolean value represented by `true` or `false`.
+ BoolValue bool `protobuf:"varint,3,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type AttributeValue_DoubleValue struct {
+ // A double value.
+ DoubleValue float64 `protobuf:"fixed64,4,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+func (*AttributeValue_StringValue) isAttributeValue_Value() {}
+
+func (*AttributeValue_IntValue) isAttributeValue_Value() {}
+
+func (*AttributeValue_BoolValue) isAttributeValue_Value() {}
+
+func (*AttributeValue_DoubleValue) isAttributeValue_Value() {}
+
+// The call stack which originated this span.
+type StackTrace struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Stack frames in this stack trace.
+ StackFrames *StackTrace_StackFrames `protobuf:"bytes,1,opt,name=stack_frames,json=stackFrames,proto3" json:"stack_frames,omitempty"`
+ // The hash ID is used to conserve network bandwidth for duplicate
+ // stack traces within a single trace.
+ //
+ // Often multiple spans will have identical stack traces.
+ // The first occurrence of a stack trace should contain both
+ // `stack_frames` and a value in `stack_trace_hash_id`.
+ //
+ // Subsequent spans within the same request can refer
+ // to that stack trace by setting only `stack_trace_hash_id`.
+ //
+ // TODO: describe how to deal with the case where stack_trace_hash_id is
+ // zero because it was not set.
+ StackTraceHashId uint64 `protobuf:"varint,2,opt,name=stack_trace_hash_id,json=stackTraceHashId,proto3" json:"stack_trace_hash_id,omitempty"`
+}
+
+func (x *StackTrace) Reset() {
+ *x = StackTrace{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StackTrace) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StackTrace) ProtoMessage() {}
+
+func (x *StackTrace) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StackTrace.ProtoReflect.Descriptor instead.
+func (*StackTrace) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *StackTrace) GetStackFrames() *StackTrace_StackFrames {
+ if x != nil {
+ return x.StackFrames
+ }
+ return nil
+}
+
+func (x *StackTrace) GetStackTraceHashId() uint64 {
+ if x != nil {
+ return x.StackTraceHashId
+ }
+ return 0
+}
+
+// A description of a binary module.
+type Module struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // TODO: document the meaning of this field.
+ // For example: main binary, kernel modules, and dynamic libraries
+ // such as libc.so, sharedlib.so.
+ Module *TruncatableString `protobuf:"bytes,1,opt,name=module,proto3" json:"module,omitempty"`
+ // A unique identifier for the module, usually a hash of its
+ // contents.
+ BuildId *TruncatableString `protobuf:"bytes,2,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"`
+}
+
+func (x *Module) Reset() {
+ *x = Module{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Module) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Module) ProtoMessage() {}
+
+func (x *Module) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Module.ProtoReflect.Descriptor instead.
+func (*Module) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_trace_v1_trace_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *Module) GetModule() *TruncatableString {
+ if x != nil {
+ return x.Module
+ }
+ return nil
+}
+
+func (x *Module) GetBuildId() *TruncatableString {
+ if x != nil {
+ return x.BuildId
+ }
+ return nil
+}
+
+// A string that might be shortened to a specified length.
+type TruncatableString struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The shortened string. For example, if the original string was 500 bytes long and
+ // the limit of the string was 128 bytes, then this value contains the first 128
+ // bytes of the 500-byte string. Note that truncation always happens on a
+ // character boundary, to ensure that a truncated string is still valid UTF-8.
+ // Because it may contain multi-byte characters, the size of the truncated string
+ // may be less than the truncation limit.
+ Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+ // The number of bytes removed from the original string. If this
+ // value is 0, then the string was not shortened.
+ TruncatedByteCount int32 `protobuf:"varint,2,opt,name=truncated_byte_count,json=truncatedByteCount,proto3" json:"truncated_byte_count,omitempty"`
+}
+
+func (x *TruncatableString) Reset() {
+ *x = TruncatableString{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TruncatableString) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TruncatableString) ProtoMessage() {}
+
+func (x *TruncatableString) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TruncatableString.ProtoReflect.Descriptor instead.
+func (*TruncatableString) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_trace_v1_trace_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *TruncatableString) GetValue() string {
+ if x != nil {
+ return x.Value
+ }
+ return ""
+}
+
+func (x *TruncatableString) GetTruncatedByteCount() int32 {
+ if x != nil {
+ return x.TruncatedByteCount
+ }
+ return 0
+}
+
+// This field conveys information about request position in multiple distributed tracing graphs.
+// It is a list of Tracestate.Entry with a maximum of 32 members in the list.
+//
+// See https://github.com/w3c/distributed-tracing for more details about this field.
+type Span_Tracestate struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A list of entries that represent the Tracestate.
+ Entries []*Span_Tracestate_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"`
+}
+
+func (x *Span_Tracestate) Reset() {
+ *x = Span_Tracestate{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Span_Tracestate) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Span_Tracestate) ProtoMessage() {}
+
+func (x *Span_Tracestate) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Span_Tracestate.ProtoReflect.Descriptor instead.
+func (*Span_Tracestate) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_trace_v1_trace_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *Span_Tracestate) GetEntries() []*Span_Tracestate_Entry {
+ if x != nil {
+ return x.Entries
+ }
+ return nil
+}
+
+// A set of attributes, each with a key and a value.
+type Span_Attributes struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The set of attributes. The value can be a string, an integer, a double
+ // or the Boolean values `true` or `false`. Note, global attributes like
+ // server name can be set as tags using resource API. Examples of attributes:
+ //
+ // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
+ // "/http/server_latency": 300
+ // "abc.com/myattribute": true
+ // "abc.com/score": 10.239
+ AttributeMap map[string]*AttributeValue `protobuf:"bytes,1,rep,name=attribute_map,json=attributeMap,proto3" json:"attribute_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // The number of attributes that were discarded. Attributes can be discarded
+ // because their keys are too long or because there are too many attributes.
+ // If this value is 0, then no attributes were dropped.
+ DroppedAttributesCount int32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
+}
+
+func (x *Span_Attributes) Reset() {
+ *x = Span_Attributes{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Span_Attributes) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Span_Attributes) ProtoMessage() {}
+
+func (x *Span_Attributes) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Span_Attributes.ProtoReflect.Descriptor instead.
+func (*Span_Attributes) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_trace_v1_trace_proto_rawDescGZIP(), []int{0, 1}
+}
+
+func (x *Span_Attributes) GetAttributeMap() map[string]*AttributeValue {
+ if x != nil {
+ return x.AttributeMap
+ }
+ return nil
+}
+
+func (x *Span_Attributes) GetDroppedAttributesCount() int32 {
+ if x != nil {
+ return x.DroppedAttributesCount
+ }
+ return 0
+}
+
+// A time-stamped annotation or message event in the Span.
+type Span_TimeEvent struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The time the event occurred.
+ Time *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"`
+ // A `TimeEvent` can contain either an `Annotation` object or a
+ // `MessageEvent` object, but not both.
+ //
+ // Types that are assignable to Value:
+ // *Span_TimeEvent_Annotation_
+ // *Span_TimeEvent_MessageEvent_
+ Value isSpan_TimeEvent_Value `protobuf_oneof:"value"`
+}
+
+func (x *Span_TimeEvent) Reset() {
+ *x = Span_TimeEvent{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Span_TimeEvent) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Span_TimeEvent) ProtoMessage() {}
+
+func (x *Span_TimeEvent) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Span_TimeEvent.ProtoReflect.Descriptor instead.
+func (*Span_TimeEvent) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_trace_v1_trace_proto_rawDescGZIP(), []int{0, 2}
+}
+
+func (x *Span_TimeEvent) GetTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.Time
+ }
+ return nil
+}
+
+func (m *Span_TimeEvent) GetValue() isSpan_TimeEvent_Value {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (x *Span_TimeEvent) GetAnnotation() *Span_TimeEvent_Annotation {
+ if x, ok := x.GetValue().(*Span_TimeEvent_Annotation_); ok {
+ return x.Annotation
+ }
+ return nil
+}
+
+func (x *Span_TimeEvent) GetMessageEvent() *Span_TimeEvent_MessageEvent {
+ if x, ok := x.GetValue().(*Span_TimeEvent_MessageEvent_); ok {
+ return x.MessageEvent
+ }
+ return nil
+}
+
+type isSpan_TimeEvent_Value interface {
+ isSpan_TimeEvent_Value()
+}
+
+type Span_TimeEvent_Annotation_ struct {
+ // A text annotation with a set of attributes.
+ Annotation *Span_TimeEvent_Annotation `protobuf:"bytes,2,opt,name=annotation,proto3,oneof"`
+}
+
+type Span_TimeEvent_MessageEvent_ struct {
+ // An event describing a message sent/received between Spans.
+ MessageEvent *Span_TimeEvent_MessageEvent `protobuf:"bytes,3,opt,name=message_event,json=messageEvent,proto3,oneof"`
+}
+
+func (*Span_TimeEvent_Annotation_) isSpan_TimeEvent_Value() {}
+
+func (*Span_TimeEvent_MessageEvent_) isSpan_TimeEvent_Value() {}
+
+// A collection of `TimeEvent`s. A `TimeEvent` is a time-stamped annotation
+// on the span, consisting of either user-supplied key-value pairs, or
+// details of a message sent/received between Spans.
+type Span_TimeEvents struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A collection of `TimeEvent`s.
+ TimeEvent []*Span_TimeEvent `protobuf:"bytes,1,rep,name=time_event,json=timeEvent,proto3" json:"time_event,omitempty"`
+ // The number of dropped annotations in all the included time events.
+ // If the value is 0, then no annotations were dropped.
+ DroppedAnnotationsCount int32 `protobuf:"varint,2,opt,name=dropped_annotations_count,json=droppedAnnotationsCount,proto3" json:"dropped_annotations_count,omitempty"`
+ // The number of dropped message events in all the included time events.
+ // If the value is 0, then no message events were dropped.
+ DroppedMessageEventsCount int32 `protobuf:"varint,3,opt,name=dropped_message_events_count,json=droppedMessageEventsCount,proto3" json:"dropped_message_events_count,omitempty"`
+}
+
+func (x *Span_TimeEvents) Reset() {
+ *x = Span_TimeEvents{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Span_TimeEvents) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Span_TimeEvents) ProtoMessage() {}
+
+func (x *Span_TimeEvents) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Span_TimeEvents.ProtoReflect.Descriptor instead.
+func (*Span_TimeEvents) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_trace_v1_trace_proto_rawDescGZIP(), []int{0, 3}
+}
+
+func (x *Span_TimeEvents) GetTimeEvent() []*Span_TimeEvent {
+ if x != nil {
+ return x.TimeEvent
+ }
+ return nil
+}
+
+func (x *Span_TimeEvents) GetDroppedAnnotationsCount() int32 {
+ if x != nil {
+ return x.DroppedAnnotationsCount
+ }
+ return 0
+}
+
+func (x *Span_TimeEvents) GetDroppedMessageEventsCount() int32 {
+ if x != nil {
+ return x.DroppedMessageEventsCount
+ }
+ return 0
+}
+
+// A pointer from the current span to another span in the same trace or in a
+// different trace. For example, this can be used in batching operations,
+// where a single batch handler processes multiple requests from different
+// traces or when the handler receives a request from a different project.
+type Span_Link struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A unique identifier of a trace that this linked span is part of. The ID is a
+ // 16-byte array.
+ TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
+ // A unique identifier for the linked span. The ID is an 8-byte array.
+ SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
+ // The relationship of the current span relative to the linked span.
+ Type Span_Link_Type `protobuf:"varint,3,opt,name=type,proto3,enum=opencensus.proto.trace.v1.Span_Link_Type" json:"type,omitempty"`
+ // A set of attributes on the link.
+ Attributes *Span_Attributes `protobuf:"bytes,4,opt,name=attributes,proto3" json:"attributes,omitempty"`
+ // The Tracestate associated with the link.
+ Tracestate *Span_Tracestate `protobuf:"bytes,5,opt,name=tracestate,proto3" json:"tracestate,omitempty"`
+}
+
+func (x *Span_Link) Reset() {
+ *x = Span_Link{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Span_Link) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Span_Link) ProtoMessage() {}
+
+func (x *Span_Link) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Span_Link.ProtoReflect.Descriptor instead.
+func (*Span_Link) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_trace_v1_trace_proto_rawDescGZIP(), []int{0, 4}
+}
+
+func (x *Span_Link) GetTraceId() []byte {
+ if x != nil {
+ return x.TraceId
+ }
+ return nil
+}
+
+func (x *Span_Link) GetSpanId() []byte {
+ if x != nil {
+ return x.SpanId
+ }
+ return nil
+}
+
+func (x *Span_Link) GetType() Span_Link_Type {
+ if x != nil {
+ return x.Type
+ }
+ return Span_Link_TYPE_UNSPECIFIED
+}
+
+func (x *Span_Link) GetAttributes() *Span_Attributes {
+ if x != nil {
+ return x.Attributes
+ }
+ return nil
+}
+
+func (x *Span_Link) GetTracestate() *Span_Tracestate {
+ if x != nil {
+ return x.Tracestate
+ }
+ return nil
+}
+
+// A collection of links, which are references from this span to a span
+// in the same or different trace.
+type Span_Links struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A collection of links.
+ Link []*Span_Link `protobuf:"bytes,1,rep,name=link,proto3" json:"link,omitempty"`
+ // The number of dropped links after the maximum size was enforced. If
+ // this value is 0, then no links were dropped.
+ DroppedLinksCount int32 `protobuf:"varint,2,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"`
+}
+
+func (x *Span_Links) Reset() {
+ *x = Span_Links{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Span_Links) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Span_Links) ProtoMessage() {}
+
+func (x *Span_Links) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Span_Links.ProtoReflect.Descriptor instead.
+func (*Span_Links) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_trace_v1_trace_proto_rawDescGZIP(), []int{0, 5}
+}
+
+func (x *Span_Links) GetLink() []*Span_Link {
+ if x != nil {
+ return x.Link
+ }
+ return nil
+}
+
+func (x *Span_Links) GetDroppedLinksCount() int32 {
+ if x != nil {
+ return x.DroppedLinksCount
+ }
+ return 0
+}
+
+type Span_Tracestate_Entry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The key must begin with a lowercase letter, and can only contain
+ // lowercase letters 'a'-'z', digits '0'-'9', underscores '_', dashes
+ // '-', asterisks '*', and forward slashes '/'.
+ Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // The value is an opaque string of up to 256 printable ASCII (RFC0020)
+ // characters (i.e., the range 0x20 to 0x7E), excluding ',' and '='.
+ // Note that this also excludes tabs, newlines, carriage returns, etc.
+ Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *Span_Tracestate_Entry) Reset() {
+ *x = Span_Tracestate_Entry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Span_Tracestate_Entry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Span_Tracestate_Entry) ProtoMessage() {}
+
+func (x *Span_Tracestate_Entry) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Span_Tracestate_Entry.ProtoReflect.Descriptor instead.
+func (*Span_Tracestate_Entry) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_trace_v1_trace_proto_rawDescGZIP(), []int{0, 0, 0}
+}
+
+func (x *Span_Tracestate_Entry) GetKey() string {
+ if x != nil {
+ return x.Key
+ }
+ return ""
+}
+
+func (x *Span_Tracestate_Entry) GetValue() string {
+ if x != nil {
+ return x.Value
+ }
+ return ""
+}
+
+// A text annotation with a set of attributes.
+type Span_TimeEvent_Annotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A user-supplied message describing the event.
+ Description *TruncatableString `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+ // A set of attributes on the annotation.
+ Attributes *Span_Attributes `protobuf:"bytes,2,opt,name=attributes,proto3" json:"attributes,omitempty"`
+}
+
+func (x *Span_TimeEvent_Annotation) Reset() {
+ *x = Span_TimeEvent_Annotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Span_TimeEvent_Annotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Span_TimeEvent_Annotation) ProtoMessage() {}
+
+func (x *Span_TimeEvent_Annotation) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Span_TimeEvent_Annotation.ProtoReflect.Descriptor instead.
+func (*Span_TimeEvent_Annotation) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_trace_v1_trace_proto_rawDescGZIP(), []int{0, 2, 0}
+}
+
+func (x *Span_TimeEvent_Annotation) GetDescription() *TruncatableString {
+ if x != nil {
+ return x.Description
+ }
+ return nil
+}
+
+func (x *Span_TimeEvent_Annotation) GetAttributes() *Span_Attributes {
+ if x != nil {
+ return x.Attributes
+ }
+ return nil
+}
+
+// An event describing a message sent/received between Spans.
+type Span_TimeEvent_MessageEvent struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The type of MessageEvent. Indicates whether the message was sent or
+ // received.
+ Type Span_TimeEvent_MessageEvent_Type `protobuf:"varint,1,opt,name=type,proto3,enum=opencensus.proto.trace.v1.Span_TimeEvent_MessageEvent_Type" json:"type,omitempty"`
+ // An identifier for the MessageEvent's message that can be used to match
+ // SENT and RECEIVED MessageEvents. For example, this field could
+ // represent a sequence ID for a streaming RPC. It is recommended to be
+ // unique within a Span.
+ Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"`
+ // The number of uncompressed bytes sent or received.
+ UncompressedSize uint64 `protobuf:"varint,3,opt,name=uncompressed_size,json=uncompressedSize,proto3" json:"uncompressed_size,omitempty"`
+ // The number of compressed bytes sent or received. If zero, assumed to
+ // be the same size as uncompressed.
+ CompressedSize uint64 `protobuf:"varint,4,opt,name=compressed_size,json=compressedSize,proto3" json:"compressed_size,omitempty"`
+}
+
+func (x *Span_TimeEvent_MessageEvent) Reset() {
+ *x = Span_TimeEvent_MessageEvent{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Span_TimeEvent_MessageEvent) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Span_TimeEvent_MessageEvent) ProtoMessage() {}
+
+func (x *Span_TimeEvent_MessageEvent) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Span_TimeEvent_MessageEvent.ProtoReflect.Descriptor instead.
+func (*Span_TimeEvent_MessageEvent) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_trace_v1_trace_proto_rawDescGZIP(), []int{0, 2, 1}
+}
+
+func (x *Span_TimeEvent_MessageEvent) GetType() Span_TimeEvent_MessageEvent_Type {
+ if x != nil {
+ return x.Type
+ }
+ return Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED
+}
+
+func (x *Span_TimeEvent_MessageEvent) GetId() uint64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (x *Span_TimeEvent_MessageEvent) GetUncompressedSize() uint64 {
+ if x != nil {
+ return x.UncompressedSize
+ }
+ return 0
+}
+
+func (x *Span_TimeEvent_MessageEvent) GetCompressedSize() uint64 {
+ if x != nil {
+ return x.CompressedSize
+ }
+ return 0
+}
+
+// A single stack frame in a stack trace.
+type StackTrace_StackFrame struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The fully-qualified name that uniquely identifies the function or
+ // method that is active in this frame.
+ FunctionName *TruncatableString `protobuf:"bytes,1,opt,name=function_name,json=functionName,proto3" json:"function_name,omitempty"`
+ // An un-mangled function name, if `function_name` is
+ // [mangled](http://www.avabodh.com/cxxin/namemangling.html). The name can
+ // be fully qualified.
+ OriginalFunctionName *TruncatableString `protobuf:"bytes,2,opt,name=original_function_name,json=originalFunctionName,proto3" json:"original_function_name,omitempty"`
+ // The name of the source file where the function call appears.
+ FileName *TruncatableString `protobuf:"bytes,3,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"`
+ // The line number in `file_name` where the function call appears.
+ LineNumber int64 `protobuf:"varint,4,opt,name=line_number,json=lineNumber,proto3" json:"line_number,omitempty"`
+ // The column number where the function call appears, if available.
+ // This is important in JavaScript because of its anonymous functions.
+ ColumnNumber int64 `protobuf:"varint,5,opt,name=column_number,json=columnNumber,proto3" json:"column_number,omitempty"`
+ // The binary module from where the code was loaded.
+ LoadModule *Module `protobuf:"bytes,6,opt,name=load_module,json=loadModule,proto3" json:"load_module,omitempty"`
+ // The version of the deployed source code.
+ SourceVersion *TruncatableString `protobuf:"bytes,7,opt,name=source_version,json=sourceVersion,proto3" json:"source_version,omitempty"`
+}
+
+func (x *StackTrace_StackFrame) Reset() {
+ *x = StackTrace_StackFrame{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StackTrace_StackFrame) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StackTrace_StackFrame) ProtoMessage() {}
+
+func (x *StackTrace_StackFrame) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[16]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StackTrace_StackFrame.ProtoReflect.Descriptor instead.
+func (*StackTrace_StackFrame) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 0}
+}
+
+func (x *StackTrace_StackFrame) GetFunctionName() *TruncatableString {
+ if x != nil {
+ return x.FunctionName
+ }
+ return nil
+}
+
+func (x *StackTrace_StackFrame) GetOriginalFunctionName() *TruncatableString {
+ if x != nil {
+ return x.OriginalFunctionName
+ }
+ return nil
+}
+
+func (x *StackTrace_StackFrame) GetFileName() *TruncatableString {
+ if x != nil {
+ return x.FileName
+ }
+ return nil
+}
+
+func (x *StackTrace_StackFrame) GetLineNumber() int64 {
+ if x != nil {
+ return x.LineNumber
+ }
+ return 0
+}
+
+func (x *StackTrace_StackFrame) GetColumnNumber() int64 {
+ if x != nil {
+ return x.ColumnNumber
+ }
+ return 0
+}
+
+func (x *StackTrace_StackFrame) GetLoadModule() *Module {
+ if x != nil {
+ return x.LoadModule
+ }
+ return nil
+}
+
+func (x *StackTrace_StackFrame) GetSourceVersion() *TruncatableString {
+ if x != nil {
+ return x.SourceVersion
+ }
+ return nil
+}
+
+// A collection of stack frames, which can be truncated.
+type StackTrace_StackFrames struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Stack frames in this call stack.
+ Frame []*StackTrace_StackFrame `protobuf:"bytes,1,rep,name=frame,proto3" json:"frame,omitempty"`
+ // The number of stack frames that were dropped because there
+ // were too many stack frames.
+ // If this value is 0, then no stack frames were dropped.
+ DroppedFramesCount int32 `protobuf:"varint,2,opt,name=dropped_frames_count,json=droppedFramesCount,proto3" json:"dropped_frames_count,omitempty"`
+}
+
+func (x *StackTrace_StackFrames) Reset() {
+ *x = StackTrace_StackFrames{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StackTrace_StackFrames) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StackTrace_StackFrames) ProtoMessage() {}
+
+func (x *StackTrace_StackFrames) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_trace_v1_trace_proto_msgTypes[17]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StackTrace_StackFrames.ProtoReflect.Descriptor instead.
+func (*StackTrace_StackFrames) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 1}
+}
+
+func (x *StackTrace_StackFrames) GetFrame() []*StackTrace_StackFrame {
+ if x != nil {
+ return x.Frame
+ }
+ return nil
+}
+
+func (x *StackTrace_StackFrames) GetDroppedFramesCount() int32 {
+ if x != nil {
+ return x.DroppedFramesCount
+ }
+ return 0
+}
+
+var File_opencensus_proto_trace_v1_trace_proto protoreflect.FileDescriptor
+
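+// The byte slice below is the serialized FileDescriptorProto for
+// opencensus/proto/trace/v1/trace.proto; the protoimpl runtime parses it at
+// init time to build reflection metadata, and rawDescGZIP lazily compresses
+// it for the deprecated Descriptor methods. (Descriptive note added for
+// readability; not emitted by protoc-gen-go.)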
+var file_opencensus_proto_trace_v1_trace_proto_rawDesc = []byte{
+ 0x0a, 0x25, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x72, 0x61, 0x63,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e,
+ 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e,
+ 0x76, 0x31, 0x1a, 0x2b, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, 0x31,
+ 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x22, 0x91, 0x16, 0x0a, 0x04, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61,
+ 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x74, 0x72, 0x61,
+ 0x63, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x4a, 0x0a,
+ 0x0a, 0x74, 0x72, 0x61, 0x63, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x2a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70,
+ 0x61, 0x6e, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, 0x74,
+ 0x72, 0x61, 0x63, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x61, 0x72,
+ 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0c, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12,
+ 0x40, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e,
+ 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61,
+ 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x12, 0x3c, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32,
+ 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e,
+ 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12,
+ 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52,
+ 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x65, 0x6e,
+ 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d,
+ 0x65, 0x12, 0x4a, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73,
+ 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76,
+ 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65,
+ 0x73, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x46, 0x0a,
+ 0x0b, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x18, 0x08, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53,
+ 0x74, 0x61, 0x63, 0x6b, 0x54, 0x72, 0x61, 0x63, 0x65, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x63, 0x6b,
+ 0x54, 0x72, 0x61, 0x63, 0x65, 0x12, 0x4b, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x65, 0x76,
+ 0x65, 0x6e, 0x74, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6f, 0x70, 0x65,
+ 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72,
+ 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x54, 0x69, 0x6d, 0x65,
+ 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x45, 0x76, 0x65, 0x6e,
+ 0x74, 0x73, 0x12, 0x3b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x25, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70,
+ 0x61, 0x6e, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x73, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x12,
+ 0x39, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x21, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x42, 0x0a, 0x08, 0x72, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6f,
+ 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
+ 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x58,
+ 0x0a, 0x1b, 0x73, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x61,
+ 0x73, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
+ 0x17, 0x73, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x41, 0x73, 0x50, 0x61,
+ 0x72, 0x65, 0x6e, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x46, 0x0a, 0x10, 0x63, 0x68, 0x69, 0x6c,
+ 0x64, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0d, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x52, 0x0e, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x53, 0x70, 0x61, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74,
+ 0x1a, 0x89, 0x01, 0x0a, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12,
+ 0x4a, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x30, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61,
+ 0x6e, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x45, 0x6e, 0x74,
+ 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x2f, 0x0a, 0x05, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x95, 0x02, 0x0a,
+ 0x0a, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x61, 0x0a, 0x0d, 0x61,
+ 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53,
+ 0x70, 0x61, 0x6e, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x2e, 0x41,
+ 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x52, 0x0c, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x70, 0x12, 0x38,
+ 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62,
+ 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05,
+ 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75,
+ 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0x6a, 0x0a, 0x11, 0x41, 0x74, 0x74, 0x72,
+ 0x69, 0x62, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29,
+ 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69,
+ 0x62, 0x75, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xa4, 0x05, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x45, 0x76, 0x65,
+ 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69,
+ 0x6d, 0x65, 0x12, 0x56, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e,
+ 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e,
+ 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x45, 0x76, 0x65, 0x6e,
+ 0x74, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0a,
+ 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5d, 0x0a, 0x0d, 0x6d, 0x65,
+ 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x36, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70,
+ 0x61, 0x6e, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0c, 0x6d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x1a, 0xa8, 0x01, 0x0a, 0x0a, 0x41, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e,
+ 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61,
+ 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x64, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72,
+ 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6f,
+ 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
+ 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x41, 0x74,
+ 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62,
+ 0x75, 0x74, 0x65, 0x73, 0x1a, 0xfb, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+ 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x4f, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0e, 0x32, 0x3b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e,
+ 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x4d,
+ 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x54, 0x79, 0x70, 0x65,
+ 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2b, 0x0a, 0x11, 0x75, 0x6e, 0x63, 0x6f, 0x6d, 0x70,
+ 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x04, 0x52, 0x10, 0x75, 0x6e, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x53,
+ 0x69, 0x7a, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65,
+ 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x63, 0x6f,
+ 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x34, 0x0a, 0x04,
+ 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53,
+ 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x45,
+ 0x4e, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x45, 0x43, 0x45, 0x49, 0x56, 0x45, 0x44,
+ 0x10, 0x02, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0xd3, 0x01, 0x0a, 0x0a,
+ 0x54, 0x69, 0x6d, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x48, 0x0a, 0x0a, 0x74, 0x69,
+ 0x6d, 0x65, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29,
+ 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e,
+ 0x54, 0x69, 0x6d, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x45,
+ 0x76, 0x65, 0x6e, 0x74, 0x12, 0x3a, 0x0a, 0x19, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f,
+ 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e,
+ 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x17, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64,
+ 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74,
+ 0x12, 0x3f, 0x0a, 0x1c, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73,
+ 0x61, 0x67, 0x65, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x19, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4d,
+ 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x75, 0x6e,
+ 0x74, 0x1a, 0xde, 0x02, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72,
+ 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x74, 0x72,
+ 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x3d,
+ 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x6f,
+ 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
+ 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4c, 0x69,
+ 0x6e, 0x6b, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x4a, 0x0a,
+ 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x2a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70,
+ 0x61, 0x6e, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x52, 0x0a, 0x61,
+ 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x4a, 0x0a, 0x0a, 0x74, 0x72, 0x61,
+ 0x63, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e,
+ 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x54,
+ 0x72, 0x61, 0x63, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, 0x74, 0x72, 0x61, 0x63, 0x65,
+ 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0x4b, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a,
+ 0x10, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
+ 0x44, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x48, 0x49, 0x4c, 0x44, 0x5f, 0x4c, 0x49, 0x4e,
+ 0x4b, 0x45, 0x44, 0x5f, 0x53, 0x50, 0x41, 0x4e, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x50, 0x41,
+ 0x52, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x4e, 0x4b, 0x45, 0x44, 0x5f, 0x53, 0x50, 0x41, 0x4e,
+ 0x10, 0x02, 0x1a, 0x71, 0x0a, 0x05, 0x4c, 0x69, 0x6e, 0x6b, 0x73, 0x12, 0x38, 0x0a, 0x04, 0x6c,
+ 0x69, 0x6e, 0x6b, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e,
+ 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61,
+ 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52,
+ 0x04, 0x6c, 0x69, 0x6e, 0x6b, 0x12, 0x2e, 0x0a, 0x13, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64,
+ 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x05, 0x52, 0x11, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x69, 0x6e, 0x6b, 0x73,
+ 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x3d, 0x0a, 0x08, 0x53, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e,
+ 0x64, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55,
+ 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06,
+ 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x49, 0x45,
+ 0x4e, 0x54, 0x10, 0x02, 0x22, 0x36, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12,
+ 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f,
+ 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0xd1, 0x01, 0x0a,
+ 0x0e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12,
+ 0x51, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73,
+ 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76,
+ 0x31, 0x2e, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x72,
+ 0x69, 0x6e, 0x67, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c,
+ 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c,
+ 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62,
+ 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x22, 0x8b, 0x06, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x54, 0x72, 0x61, 0x63, 0x65, 0x12,
+ 0x54, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73,
+ 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76,
+ 0x31, 0x2e, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x54, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x74, 0x61,
+ 0x63, 0x6b, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x46,
+ 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x13, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x5f, 0x74,
+ 0x72, 0x61, 0x63, 0x65, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x10, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x54, 0x72, 0x61, 0x63, 0x65, 0x48, 0x61,
+ 0x73, 0x68, 0x49, 0x64, 0x1a, 0xed, 0x03, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x46, 0x72,
+ 0x61, 0x6d, 0x65, 0x12, 0x51, 0x0a, 0x0d, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6f, 0x70, 0x65,
+ 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72,
+ 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x0c, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x62, 0x0a, 0x16, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e,
+ 0x61, 0x6c, 0x5f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e,
+ 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e,
+ 0x76, 0x31, 0x2e, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74,
+ 0x72, 0x69, 0x6e, 0x67, 0x52, 0x14, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x46, 0x75,
+ 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x49, 0x0a, 0x09, 0x66, 0x69,
+ 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e,
+ 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61,
+ 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x08, 0x66, 0x69, 0x6c,
+ 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6e, 0x75,
+ 0x6d, 0x62, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x6c, 0x69, 0x6e, 0x65,
+ 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e,
+ 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x63,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0b, 0x6c,
+ 0x6f, 0x61, 0x64, 0x5f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x21, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x64,
+ 0x75, 0x6c, 0x65, 0x52, 0x0a, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x12,
+ 0x53, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65,
+ 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65,
+ 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53,
+ 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x56, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x87, 0x01, 0x0a, 0x0b, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x46, 0x72,
+ 0x61, 0x6d, 0x65, 0x73, 0x12, 0x46, 0x0a, 0x05, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e,
+ 0x53, 0x74, 0x61, 0x63, 0x6b, 0x54, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x63, 0x6b,
+ 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x05, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x14,
+ 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x63,
+ 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x64, 0x72, 0x6f, 0x70,
+ 0x70, 0x65, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x97,
+ 0x01, 0x0a, 0x06, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x12, 0x44, 0x0a, 0x06, 0x6d, 0x6f, 0x64,
+ 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6f, 0x70, 0x65, 0x6e,
+ 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61,
+ 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x06, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x12,
+ 0x47, 0x0a, 0x08, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x2c, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72,
+ 0x75, 0x6e, 0x63, 0x61, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52,
+ 0x07, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x22, 0x5b, 0x0a, 0x11, 0x54, 0x72, 0x75, 0x6e,
+ 0x63, 0x61, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x14, 0x0a,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64,
+ 0x5f, 0x62, 0x79, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x05, 0x52, 0x12, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x42, 0x79, 0x74, 0x65,
+ 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x8f, 0x01, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65,
+ 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72,
+ 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x72, 0x6f,
+ 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x69, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65,
+ 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73,
+ 0x75, 0x73, 0x2d, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f,
+ 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xea, 0x02, 0x1c, 0x4f, 0x70, 0x65, 0x6e, 0x43,
+ 0x65, 0x6e, 0x73, 0x75, 0x73, 0x3a, 0x3a, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x3a, 0x54, 0x72,
+ 0x61, 0x63, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_opencensus_proto_trace_v1_trace_proto_rawDescOnce sync.Once
+ file_opencensus_proto_trace_v1_trace_proto_rawDescData = file_opencensus_proto_trace_v1_trace_proto_rawDesc
+)
+
+func file_opencensus_proto_trace_v1_trace_proto_rawDescGZIP() []byte {
+ file_opencensus_proto_trace_v1_trace_proto_rawDescOnce.Do(func() {
+ file_opencensus_proto_trace_v1_trace_proto_rawDescData = protoimpl.X.CompressGZIP(file_opencensus_proto_trace_v1_trace_proto_rawDescData)
+ })
+ return file_opencensus_proto_trace_v1_trace_proto_rawDescData
+}
+
+var file_opencensus_proto_trace_v1_trace_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
+var file_opencensus_proto_trace_v1_trace_proto_msgTypes = make([]protoimpl.MessageInfo, 18)
+var file_opencensus_proto_trace_v1_trace_proto_goTypes = []interface{}{
+ (Span_SpanKind)(0), // 0: opencensus.proto.trace.v1.Span.SpanKind
+ (Span_TimeEvent_MessageEvent_Type)(0), // 1: opencensus.proto.trace.v1.Span.TimeEvent.MessageEvent.Type
+ (Span_Link_Type)(0), // 2: opencensus.proto.trace.v1.Span.Link.Type
+ (*Span)(nil), // 3: opencensus.proto.trace.v1.Span
+ (*Status)(nil), // 4: opencensus.proto.trace.v1.Status
+ (*AttributeValue)(nil), // 5: opencensus.proto.trace.v1.AttributeValue
+ (*StackTrace)(nil), // 6: opencensus.proto.trace.v1.StackTrace
+ (*Module)(nil), // 7: opencensus.proto.trace.v1.Module
+ (*TruncatableString)(nil), // 8: opencensus.proto.trace.v1.TruncatableString
+ (*Span_Tracestate)(nil), // 9: opencensus.proto.trace.v1.Span.Tracestate
+ (*Span_Attributes)(nil), // 10: opencensus.proto.trace.v1.Span.Attributes
+ (*Span_TimeEvent)(nil), // 11: opencensus.proto.trace.v1.Span.TimeEvent
+ (*Span_TimeEvents)(nil), // 12: opencensus.proto.trace.v1.Span.TimeEvents
+ (*Span_Link)(nil), // 13: opencensus.proto.trace.v1.Span.Link
+ (*Span_Links)(nil), // 14: opencensus.proto.trace.v1.Span.Links
+ (*Span_Tracestate_Entry)(nil), // 15: opencensus.proto.trace.v1.Span.Tracestate.Entry
+ nil, // 16: opencensus.proto.trace.v1.Span.Attributes.AttributeMapEntry
+ (*Span_TimeEvent_Annotation)(nil), // 17: opencensus.proto.trace.v1.Span.TimeEvent.Annotation
+ (*Span_TimeEvent_MessageEvent)(nil), // 18: opencensus.proto.trace.v1.Span.TimeEvent.MessageEvent
+ (*StackTrace_StackFrame)(nil), // 19: opencensus.proto.trace.v1.StackTrace.StackFrame
+ (*StackTrace_StackFrames)(nil), // 20: opencensus.proto.trace.v1.StackTrace.StackFrames
+ (*timestamppb.Timestamp)(nil), // 21: google.protobuf.Timestamp
+ (*v1.Resource)(nil), // 22: opencensus.proto.resource.v1.Resource
+ (*wrapperspb.BoolValue)(nil), // 23: google.protobuf.BoolValue
+ (*wrapperspb.UInt32Value)(nil), // 24: google.protobuf.UInt32Value
+}
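+// Each entry in depIdxs below is an index into goTypes above; for example,
+// the first entry (9) records that Span.tracestate resolves to goTypes[9],
+// *Span_Tracestate. The bracketed entries at the end delimit the sub-lists
+// used for methods and extensions. (Descriptive note added for readability;
+// not emitted by protoc-gen-go.)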
+var file_opencensus_proto_trace_v1_trace_proto_depIdxs = []int32{
+ 9, // 0: opencensus.proto.trace.v1.Span.tracestate:type_name -> opencensus.proto.trace.v1.Span.Tracestate
+ 8, // 1: opencensus.proto.trace.v1.Span.name:type_name -> opencensus.proto.trace.v1.TruncatableString
+ 0, // 2: opencensus.proto.trace.v1.Span.kind:type_name -> opencensus.proto.trace.v1.Span.SpanKind
+ 21, // 3: opencensus.proto.trace.v1.Span.start_time:type_name -> google.protobuf.Timestamp
+ 21, // 4: opencensus.proto.trace.v1.Span.end_time:type_name -> google.protobuf.Timestamp
+ 10, // 5: opencensus.proto.trace.v1.Span.attributes:type_name -> opencensus.proto.trace.v1.Span.Attributes
+ 6, // 6: opencensus.proto.trace.v1.Span.stack_trace:type_name -> opencensus.proto.trace.v1.StackTrace
+ 12, // 7: opencensus.proto.trace.v1.Span.time_events:type_name -> opencensus.proto.trace.v1.Span.TimeEvents
+ 14, // 8: opencensus.proto.trace.v1.Span.links:type_name -> opencensus.proto.trace.v1.Span.Links
+ 4, // 9: opencensus.proto.trace.v1.Span.status:type_name -> opencensus.proto.trace.v1.Status
+ 22, // 10: opencensus.proto.trace.v1.Span.resource:type_name -> opencensus.proto.resource.v1.Resource
+ 23, // 11: opencensus.proto.trace.v1.Span.same_process_as_parent_span:type_name -> google.protobuf.BoolValue
+ 24, // 12: opencensus.proto.trace.v1.Span.child_span_count:type_name -> google.protobuf.UInt32Value
+ 8, // 13: opencensus.proto.trace.v1.AttributeValue.string_value:type_name -> opencensus.proto.trace.v1.TruncatableString
+ 20, // 14: opencensus.proto.trace.v1.StackTrace.stack_frames:type_name -> opencensus.proto.trace.v1.StackTrace.StackFrames
+ 8, // 15: opencensus.proto.trace.v1.Module.module:type_name -> opencensus.proto.trace.v1.TruncatableString
+ 8, // 16: opencensus.proto.trace.v1.Module.build_id:type_name -> opencensus.proto.trace.v1.TruncatableString
+ 15, // 17: opencensus.proto.trace.v1.Span.Tracestate.entries:type_name -> opencensus.proto.trace.v1.Span.Tracestate.Entry
+ 16, // 18: opencensus.proto.trace.v1.Span.Attributes.attribute_map:type_name -> opencensus.proto.trace.v1.Span.Attributes.AttributeMapEntry
+ 21, // 19: opencensus.proto.trace.v1.Span.TimeEvent.time:type_name -> google.protobuf.Timestamp
+ 17, // 20: opencensus.proto.trace.v1.Span.TimeEvent.annotation:type_name -> opencensus.proto.trace.v1.Span.TimeEvent.Annotation
+ 18, // 21: opencensus.proto.trace.v1.Span.TimeEvent.message_event:type_name -> opencensus.proto.trace.v1.Span.TimeEvent.MessageEvent
+ 11, // 22: opencensus.proto.trace.v1.Span.TimeEvents.time_event:type_name -> opencensus.proto.trace.v1.Span.TimeEvent
+ 2, // 23: opencensus.proto.trace.v1.Span.Link.type:type_name -> opencensus.proto.trace.v1.Span.Link.Type
+ 10, // 24: opencensus.proto.trace.v1.Span.Link.attributes:type_name -> opencensus.proto.trace.v1.Span.Attributes
+ 9, // 25: opencensus.proto.trace.v1.Span.Link.tracestate:type_name -> opencensus.proto.trace.v1.Span.Tracestate
+ 13, // 26: opencensus.proto.trace.v1.Span.Links.link:type_name -> opencensus.proto.trace.v1.Span.Link
+ 5, // 27: opencensus.proto.trace.v1.Span.Attributes.AttributeMapEntry.value:type_name -> opencensus.proto.trace.v1.AttributeValue
+ 8, // 28: opencensus.proto.trace.v1.Span.TimeEvent.Annotation.description:type_name -> opencensus.proto.trace.v1.TruncatableString
+ 10, // 29: opencensus.proto.trace.v1.Span.TimeEvent.Annotation.attributes:type_name -> opencensus.proto.trace.v1.Span.Attributes
+ 1, // 30: opencensus.proto.trace.v1.Span.TimeEvent.MessageEvent.type:type_name -> opencensus.proto.trace.v1.Span.TimeEvent.MessageEvent.Type
+ 8, // 31: opencensus.proto.trace.v1.StackTrace.StackFrame.function_name:type_name -> opencensus.proto.trace.v1.TruncatableString
+ 8, // 32: opencensus.proto.trace.v1.StackTrace.StackFrame.original_function_name:type_name -> opencensus.proto.trace.v1.TruncatableString
+ 8, // 33: opencensus.proto.trace.v1.StackTrace.StackFrame.file_name:type_name -> opencensus.proto.trace.v1.TruncatableString
+ 7, // 34: opencensus.proto.trace.v1.StackTrace.StackFrame.load_module:type_name -> opencensus.proto.trace.v1.Module
+ 8, // 35: opencensus.proto.trace.v1.StackTrace.StackFrame.source_version:type_name -> opencensus.proto.trace.v1.TruncatableString
+ 19, // 36: opencensus.proto.trace.v1.StackTrace.StackFrames.frame:type_name -> opencensus.proto.trace.v1.StackTrace.StackFrame
+ 37, // [37:37] is the sub-list for method output_type
+ 37, // [37:37] is the sub-list for method input_type
+ 37, // [37:37] is the sub-list for extension type_name
+ 37, // [37:37] is the sub-list for extension extendee
+ 0, // [0:37] is the sub-list for field type_name
+}
+
+func init() { file_opencensus_proto_trace_v1_trace_proto_init() }
+func file_opencensus_proto_trace_v1_trace_proto_init() {
+ if File_opencensus_proto_trace_v1_trace_proto != nil {
+ return
+ }
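+ // When the unsafe-based fast path is disabled, these exporter functions give
+ // the reflection runtime access to the unexported state, sizeCache, and
+ // unknownFields of each message type. (Descriptive note added for
+ // readability; not emitted by protoc-gen-go.)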
+ if !protoimpl.UnsafeEnabled {
+ file_opencensus_proto_trace_v1_trace_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Span); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_trace_v1_trace_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Status); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_trace_v1_trace_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AttributeValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_trace_v1_trace_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StackTrace); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_trace_v1_trace_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Module); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_trace_v1_trace_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TruncatableString); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_trace_v1_trace_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Span_Tracestate); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_trace_v1_trace_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Span_Attributes); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_trace_v1_trace_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Span_TimeEvent); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_trace_v1_trace_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Span_TimeEvents); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_trace_v1_trace_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Span_Link); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_trace_v1_trace_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Span_Links); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_trace_v1_trace_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Span_Tracestate_Entry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_trace_v1_trace_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Span_TimeEvent_Annotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_trace_v1_trace_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Span_TimeEvent_MessageEvent); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_trace_v1_trace_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StackTrace_StackFrame); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_trace_v1_trace_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StackTrace_StackFrames); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_opencensus_proto_trace_v1_trace_proto_msgTypes[2].OneofWrappers = []interface{}{
+ (*AttributeValue_StringValue)(nil),
+ (*AttributeValue_IntValue)(nil),
+ (*AttributeValue_BoolValue)(nil),
+ (*AttributeValue_DoubleValue)(nil),
+ }
+ file_opencensus_proto_trace_v1_trace_proto_msgTypes[8].OneofWrappers = []interface{}{
+ (*Span_TimeEvent_Annotation_)(nil),
+ (*Span_TimeEvent_MessageEvent_)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_opencensus_proto_trace_v1_trace_proto_rawDesc,
+ NumEnums: 3,
+ NumMessages: 18,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_opencensus_proto_trace_v1_trace_proto_goTypes,
+ DependencyIndexes: file_opencensus_proto_trace_v1_trace_proto_depIdxs,
+ EnumInfos: file_opencensus_proto_trace_v1_trace_proto_enumTypes,
+ MessageInfos: file_opencensus_proto_trace_v1_trace_proto_msgTypes,
+ }.Build()
+ File_opencensus_proto_trace_v1_trace_proto = out.File
+ file_opencensus_proto_trace_v1_trace_proto_rawDesc = nil
+ file_opencensus_proto_trace_v1_trace_proto_goTypes = nil
+ file_opencensus_proto_trace_v1_trace_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go
new file mode 100644
index 00000000..ee62b2e3
--- /dev/null
+++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go
@@ -0,0 +1,555 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.17.3
+// source: opencensus/proto/trace/v1/trace_config.proto
+
+package v1
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// How spans should be sampled:
+// - Always off
+// - Always on
+// - Always follow the parent Span's decision (off if no parent).
+type ConstantSampler_ConstantDecision int32
+
+const (
+ ConstantSampler_ALWAYS_OFF ConstantSampler_ConstantDecision = 0
+ ConstantSampler_ALWAYS_ON ConstantSampler_ConstantDecision = 1
+ ConstantSampler_ALWAYS_PARENT ConstantSampler_ConstantDecision = 2
+)
+
+// Enum value maps for ConstantSampler_ConstantDecision.
+var (
+ ConstantSampler_ConstantDecision_name = map[int32]string{
+ 0: "ALWAYS_OFF",
+ 1: "ALWAYS_ON",
+ 2: "ALWAYS_PARENT",
+ }
+ ConstantSampler_ConstantDecision_value = map[string]int32{
+ "ALWAYS_OFF": 0,
+ "ALWAYS_ON": 1,
+ "ALWAYS_PARENT": 2,
+ }
+)
+
+func (x ConstantSampler_ConstantDecision) Enum() *ConstantSampler_ConstantDecision {
+ p := new(ConstantSampler_ConstantDecision)
+ *p = x
+ return p
+}
+
+func (x ConstantSampler_ConstantDecision) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ConstantSampler_ConstantDecision) Descriptor() protoreflect.EnumDescriptor {
+ return file_opencensus_proto_trace_v1_trace_config_proto_enumTypes[0].Descriptor()
+}
+
+func (ConstantSampler_ConstantDecision) Type() protoreflect.EnumType {
+ return &file_opencensus_proto_trace_v1_trace_config_proto_enumTypes[0]
+}
+
+func (x ConstantSampler_ConstantDecision) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ConstantSampler_ConstantDecision.Descriptor instead.
+func (ConstantSampler_ConstantDecision) EnumDescriptor() ([]byte, []int) {
+ return file_opencensus_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{2, 0}
+}
+
+// Global configuration of the trace service. All fields must be specified, or
+// the default (zero) values will be used for each type.
+type TraceConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The global default sampler used to make decisions on span sampling.
+ //
+ // Types that are assignable to Sampler:
+ // *TraceConfig_ProbabilitySampler
+ // *TraceConfig_ConstantSampler
+ // *TraceConfig_RateLimitingSampler
+ Sampler isTraceConfig_Sampler `protobuf_oneof:"sampler"`
+ // The global default max number of attributes per span.
+ MaxNumberOfAttributes int64 `protobuf:"varint,4,opt,name=max_number_of_attributes,json=maxNumberOfAttributes,proto3" json:"max_number_of_attributes,omitempty"`
+ // The global default max number of annotation events per span.
+ MaxNumberOfAnnotations int64 `protobuf:"varint,5,opt,name=max_number_of_annotations,json=maxNumberOfAnnotations,proto3" json:"max_number_of_annotations,omitempty"`
+ // The global default max number of message events per span.
+ MaxNumberOfMessageEvents int64 `protobuf:"varint,6,opt,name=max_number_of_message_events,json=maxNumberOfMessageEvents,proto3" json:"max_number_of_message_events,omitempty"`
+ // The global default max number of link entries per span.
+ MaxNumberOfLinks int64 `protobuf:"varint,7,opt,name=max_number_of_links,json=maxNumberOfLinks,proto3" json:"max_number_of_links,omitempty"`
+}
+
+func (x *TraceConfig) Reset() {
+ *x = TraceConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_trace_v1_trace_config_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TraceConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TraceConfig) ProtoMessage() {}
+
+func (x *TraceConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_trace_v1_trace_config_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TraceConfig.ProtoReflect.Descriptor instead.
+func (*TraceConfig) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *TraceConfig) GetSampler() isTraceConfig_Sampler {
+ if m != nil {
+ return m.Sampler
+ }
+ return nil
+}
+
+func (x *TraceConfig) GetProbabilitySampler() *ProbabilitySampler {
+ if x, ok := x.GetSampler().(*TraceConfig_ProbabilitySampler); ok {
+ return x.ProbabilitySampler
+ }
+ return nil
+}
+
+func (x *TraceConfig) GetConstantSampler() *ConstantSampler {
+ if x, ok := x.GetSampler().(*TraceConfig_ConstantSampler); ok {
+ return x.ConstantSampler
+ }
+ return nil
+}
+
+func (x *TraceConfig) GetRateLimitingSampler() *RateLimitingSampler {
+ if x, ok := x.GetSampler().(*TraceConfig_RateLimitingSampler); ok {
+ return x.RateLimitingSampler
+ }
+ return nil
+}
+
+func (x *TraceConfig) GetMaxNumberOfAttributes() int64 {
+ if x != nil {
+ return x.MaxNumberOfAttributes
+ }
+ return 0
+}
+
+func (x *TraceConfig) GetMaxNumberOfAnnotations() int64 {
+ if x != nil {
+ return x.MaxNumberOfAnnotations
+ }
+ return 0
+}
+
+func (x *TraceConfig) GetMaxNumberOfMessageEvents() int64 {
+ if x != nil {
+ return x.MaxNumberOfMessageEvents
+ }
+ return 0
+}
+
+func (x *TraceConfig) GetMaxNumberOfLinks() int64 {
+ if x != nil {
+ return x.MaxNumberOfLinks
+ }
+ return 0
+}
+
+type isTraceConfig_Sampler interface {
+ isTraceConfig_Sampler()
+}
+
+type TraceConfig_ProbabilitySampler struct {
+ ProbabilitySampler *ProbabilitySampler `protobuf:"bytes,1,opt,name=probability_sampler,json=probabilitySampler,proto3,oneof"`
+}
+
+type TraceConfig_ConstantSampler struct {
+ ConstantSampler *ConstantSampler `protobuf:"bytes,2,opt,name=constant_sampler,json=constantSampler,proto3,oneof"`
+}
+
+type TraceConfig_RateLimitingSampler struct {
+ RateLimitingSampler *RateLimitingSampler `protobuf:"bytes,3,opt,name=rate_limiting_sampler,json=rateLimitingSampler,proto3,oneof"`
+}
+
+func (*TraceConfig_ProbabilitySampler) isTraceConfig_Sampler() {}
+
+func (*TraceConfig_ConstantSampler) isTraceConfig_Sampler() {}
+
+func (*TraceConfig_RateLimitingSampler) isTraceConfig_Sampler() {}
+
+// Sampler that tries to uniformly sample traces with a given probability.
+// The probability of sampling a trace is equal to the specified probability.
+type ProbabilitySampler struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The desired probability of sampling. Must be within [0.0, 1.0].
+ SamplingProbability float64 `protobuf:"fixed64,1,opt,name=samplingProbability,proto3" json:"samplingProbability,omitempty"`
+}
+
+func (x *ProbabilitySampler) Reset() {
+ *x = ProbabilitySampler{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_trace_v1_trace_config_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ProbabilitySampler) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ProbabilitySampler) ProtoMessage() {}
+
+func (x *ProbabilitySampler) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_trace_v1_trace_config_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ProbabilitySampler.ProtoReflect.Descriptor instead.
+func (*ProbabilitySampler) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ProbabilitySampler) GetSamplingProbability() float64 {
+ if x != nil {
+ return x.SamplingProbability
+ }
+ return 0
+}
+
+// Sampler that always makes a constant decision on span sampling.
+type ConstantSampler struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Decision ConstantSampler_ConstantDecision `protobuf:"varint,1,opt,name=decision,proto3,enum=opencensus.proto.trace.v1.ConstantSampler_ConstantDecision" json:"decision,omitempty"`
+}
+
+func (x *ConstantSampler) Reset() {
+ *x = ConstantSampler{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_trace_v1_trace_config_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ConstantSampler) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ConstantSampler) ProtoMessage() {}
+
+func (x *ConstantSampler) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_trace_v1_trace_config_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ConstantSampler.ProtoReflect.Descriptor instead.
+func (*ConstantSampler) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ConstantSampler) GetDecision() ConstantSampler_ConstantDecision {
+ if x != nil {
+ return x.Decision
+ }
+ return ConstantSampler_ALWAYS_OFF
+}
+
+// Sampler that tries to sample with a rate per time window.
+type RateLimitingSampler struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Rate per second.
+ Qps int64 `protobuf:"varint,1,opt,name=qps,proto3" json:"qps,omitempty"`
+}
+
+func (x *RateLimitingSampler) Reset() {
+ *x = RateLimitingSampler{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opencensus_proto_trace_v1_trace_config_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RateLimitingSampler) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RateLimitingSampler) ProtoMessage() {}
+
+func (x *RateLimitingSampler) ProtoReflect() protoreflect.Message {
+ mi := &file_opencensus_proto_trace_v1_trace_config_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RateLimitingSampler.ProtoReflect.Descriptor instead.
+func (*RateLimitingSampler) Descriptor() ([]byte, []int) {
+ return file_opencensus_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *RateLimitingSampler) GetQps() int64 {
+ if x != nil {
+ return x.Qps
+ }
+ return 0
+}
+
+var File_opencensus_proto_trace_v1_trace_config_proto protoreflect.FileDescriptor
+
+var file_opencensus_proto_trace_v1_trace_config_proto_rawDesc = []byte{
+ 0x0a, 0x2c, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x72, 0x61, 0x63,
+ 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19,
+ 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x22, 0x9c, 0x04, 0x0a, 0x0b, 0x54, 0x72,
+ 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x60, 0x0a, 0x13, 0x70, 0x72, 0x6f,
+ 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e,
+ 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e,
+ 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x53, 0x61,
+ 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x48, 0x00, 0x52, 0x12, 0x70, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69,
+ 0x6c, 0x69, 0x74, 0x79, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x12, 0x57, 0x0a, 0x10, 0x63,
+ 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73,
+ 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76,
+ 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65,
+ 0x72, 0x48, 0x00, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x53, 0x61, 0x6d,
+ 0x70, 0x6c, 0x65, 0x72, 0x12, 0x64, 0x0a, 0x15, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d,
+ 0x69, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e,
+ 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x61, 0x6d, 0x70,
+ 0x6c, 0x65, 0x72, 0x48, 0x00, 0x52, 0x13, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74,
+ 0x69, 0x6e, 0x67, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x18, 0x6d, 0x61,
+ 0x78, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x61, 0x74, 0x74, 0x72,
+ 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x6d, 0x61,
+ 0x78, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75,
+ 0x74, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x19, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65,
+ 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x16, 0x6d, 0x61, 0x78, 0x4e, 0x75, 0x6d, 0x62, 0x65,
+ 0x72, 0x4f, 0x66, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3e,
+ 0x0a, 0x1c, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f,
+ 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x03, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f,
+ 0x66, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2d,
+ 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f,
+ 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6d, 0x61, 0x78,
+ 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x4c, 0x69, 0x6e, 0x6b, 0x73, 0x42, 0x09, 0x0a,
+ 0x07, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x22, 0x46, 0x0a, 0x12, 0x50, 0x72, 0x6f, 0x62,
+ 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x12, 0x30,
+ 0x0a, 0x13, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x62, 0x61, 0x62,
+ 0x69, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x13, 0x73, 0x61, 0x6d,
+ 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79,
+ 0x22, 0xb0, 0x01, 0x0a, 0x0f, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x53, 0x61, 0x6d,
+ 0x70, 0x6c, 0x65, 0x72, 0x12, 0x57, 0x0a, 0x08, 0x64, 0x65, 0x63, 0x69, 0x73, 0x69, 0x6f, 0x6e,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e,
+ 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e,
+ 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c,
+ 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x44, 0x65, 0x63, 0x69, 0x73,
+ 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x65, 0x63, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x44, 0x0a,
+ 0x10, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x44, 0x65, 0x63, 0x69, 0x73, 0x69, 0x6f,
+ 0x6e, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x4c, 0x57, 0x41, 0x59, 0x53, 0x5f, 0x4f, 0x46, 0x46, 0x10,
+ 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x41, 0x4c, 0x57, 0x41, 0x59, 0x53, 0x5f, 0x4f, 0x4e, 0x10, 0x01,
+ 0x12, 0x11, 0x0a, 0x0d, 0x41, 0x4c, 0x57, 0x41, 0x59, 0x53, 0x5f, 0x50, 0x41, 0x52, 0x45, 0x4e,
+ 0x54, 0x10, 0x02, 0x22, 0x27, 0x0a, 0x13, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74,
+ 0x69, 0x6e, 0x67, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x71, 0x70,
+ 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x71, 0x70, 0x73, 0x42, 0x95, 0x01, 0x0a,
+ 0x1c, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x10, 0x54,
+ 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+ 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x65,
+ 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x69, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x74, 0x72, 0x61,
+ 0x63, 0x65, 0x2f, 0x76, 0x31, 0xea, 0x02, 0x1c, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73,
+ 0x75, 0x73, 0x3a, 0x3a, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x3a, 0x54, 0x72, 0x61, 0x63, 0x65,
+ 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_opencensus_proto_trace_v1_trace_config_proto_rawDescOnce sync.Once
+ file_opencensus_proto_trace_v1_trace_config_proto_rawDescData = file_opencensus_proto_trace_v1_trace_config_proto_rawDesc
+)
+
+func file_opencensus_proto_trace_v1_trace_config_proto_rawDescGZIP() []byte {
+ file_opencensus_proto_trace_v1_trace_config_proto_rawDescOnce.Do(func() {
+ file_opencensus_proto_trace_v1_trace_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_opencensus_proto_trace_v1_trace_config_proto_rawDescData)
+ })
+ return file_opencensus_proto_trace_v1_trace_config_proto_rawDescData
+}
+
+var file_opencensus_proto_trace_v1_trace_config_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_opencensus_proto_trace_v1_trace_config_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
+var file_opencensus_proto_trace_v1_trace_config_proto_goTypes = []interface{}{
+ (ConstantSampler_ConstantDecision)(0), // 0: opencensus.proto.trace.v1.ConstantSampler.ConstantDecision
+ (*TraceConfig)(nil), // 1: opencensus.proto.trace.v1.TraceConfig
+ (*ProbabilitySampler)(nil), // 2: opencensus.proto.trace.v1.ProbabilitySampler
+ (*ConstantSampler)(nil), // 3: opencensus.proto.trace.v1.ConstantSampler
+ (*RateLimitingSampler)(nil), // 4: opencensus.proto.trace.v1.RateLimitingSampler
+}
+var file_opencensus_proto_trace_v1_trace_config_proto_depIdxs = []int32{
+ 2, // 0: opencensus.proto.trace.v1.TraceConfig.probability_sampler:type_name -> opencensus.proto.trace.v1.ProbabilitySampler
+ 3, // 1: opencensus.proto.trace.v1.TraceConfig.constant_sampler:type_name -> opencensus.proto.trace.v1.ConstantSampler
+ 4, // 2: opencensus.proto.trace.v1.TraceConfig.rate_limiting_sampler:type_name -> opencensus.proto.trace.v1.RateLimitingSampler
+ 0, // 3: opencensus.proto.trace.v1.ConstantSampler.decision:type_name -> opencensus.proto.trace.v1.ConstantSampler.ConstantDecision
+ 4, // [4:4] is the sub-list for method output_type
+ 4, // [4:4] is the sub-list for method input_type
+ 4, // [4:4] is the sub-list for extension type_name
+ 4, // [4:4] is the sub-list for extension extendee
+ 0, // [0:4] is the sub-list for field type_name
+}
+
+func init() { file_opencensus_proto_trace_v1_trace_config_proto_init() }
+func file_opencensus_proto_trace_v1_trace_config_proto_init() {
+ if File_opencensus_proto_trace_v1_trace_config_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_opencensus_proto_trace_v1_trace_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TraceConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_trace_v1_trace_config_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ProbabilitySampler); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_trace_v1_trace_config_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ConstantSampler); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opencensus_proto_trace_v1_trace_config_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RateLimitingSampler); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_opencensus_proto_trace_v1_trace_config_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*TraceConfig_ProbabilitySampler)(nil),
+ (*TraceConfig_ConstantSampler)(nil),
+ (*TraceConfig_RateLimitingSampler)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_opencensus_proto_trace_v1_trace_config_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 4,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_opencensus_proto_trace_v1_trace_config_proto_goTypes,
+ DependencyIndexes: file_opencensus_proto_trace_v1_trace_config_proto_depIdxs,
+ EnumInfos: file_opencensus_proto_trace_v1_trace_config_proto_enumTypes,
+ MessageInfos: file_opencensus_proto_trace_v1_trace_config_proto_msgTypes,
+ }.Build()
+ File_opencensus_proto_trace_v1_trace_config_proto = out.File
+ file_opencensus_proto_trace_v1_trace_config_proto_rawDesc = nil
+ file_opencensus_proto_trace_v1_trace_config_proto_goTypes = nil
+ file_opencensus_proto_trace_v1_trace_config_proto_depIdxs = nil
+}
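The generated trace_config types above are used by building a TraceConfig and selecting exactly one sampler through its oneof wrapper types. A minimal sketch, not part of the vendored file, assuming the generated package is imported under the illustrative alias tracev1:

    package example

    import (
    	"fmt"

    	tracev1 "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
    )

    func probabilityConfig() *tracev1.TraceConfig {
    	// Wrap the chosen sampler in its generated oneof wrapper type.
    	cfg := &tracev1.TraceConfig{
    		Sampler: &tracev1.TraceConfig_ProbabilitySampler{
    			ProbabilitySampler: &tracev1.ProbabilitySampler{SamplingProbability: 0.1},
    		},
    		MaxNumberOfAttributes: 32,
    	}
    	// Generated getters are nil-safe and return zero values when a different
    	// oneof member is set.
    	fmt.Println(cfg.GetProbabilitySampler().GetSamplingProbability()) // 0.1
    	return cfg
    }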
diff --git a/vendor/github.com/coreos/go-oidc/v3/LICENSE b/vendor/github.com/coreos/go-oidc/v3/LICENSE
new file mode 100644
index 00000000..e06d2081
--- /dev/null
+++ b/vendor/github.com/coreos/go-oidc/v3/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/vendor/github.com/coreos/go-oidc/v3/NOTICE b/vendor/github.com/coreos/go-oidc/v3/NOTICE
new file mode 100644
index 00000000..b39ddfa5
--- /dev/null
+++ b/vendor/github.com/coreos/go-oidc/v3/NOTICE
@@ -0,0 +1,5 @@
+CoreOS Project
+Copyright 2014 CoreOS, Inc
+
+This product includes software developed at CoreOS, Inc.
+(http://www.coreos.com/).
diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/jose.go b/vendor/github.com/coreos/go-oidc/v3/oidc/jose.go
new file mode 100644
index 00000000..f42d37d4
--- /dev/null
+++ b/vendor/github.com/coreos/go-oidc/v3/oidc/jose.go
@@ -0,0 +1,32 @@
+package oidc
+
+import jose "github.com/go-jose/go-jose/v4"
+
+// JOSE asymmetric signing algorithm values as defined by RFC 7518
+//
+// see: https://tools.ietf.org/html/rfc7518#section-3.1
+const (
+ RS256 = "RS256" // RSASSA-PKCS-v1.5 using SHA-256
+ RS384 = "RS384" // RSASSA-PKCS-v1.5 using SHA-384
+ RS512 = "RS512" // RSASSA-PKCS-v1.5 using SHA-512
+ ES256 = "ES256" // ECDSA using P-256 and SHA-256
+ ES384 = "ES384" // ECDSA using P-384 and SHA-384
+ ES512 = "ES512" // ECDSA using P-521 and SHA-512
+ PS256 = "PS256" // RSASSA-PSS using SHA256 and MGF1-SHA256
+ PS384 = "PS384" // RSASSA-PSS using SHA384 and MGF1-SHA384
+ PS512 = "PS512" // RSASSA-PSS using SHA512 and MGF1-SHA512
+ EdDSA = "EdDSA" // Ed25519 using SHA-512
+)
+
+var allAlgs = []jose.SignatureAlgorithm{
+ jose.RS256,
+ jose.RS384,
+ jose.RS512,
+ jose.ES256,
+ jose.ES384,
+ jose.ES512,
+ jose.PS256,
+ jose.PS384,
+ jose.PS512,
+ jose.EdDSA,
+}
diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go b/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go
new file mode 100644
index 00000000..6a846ece
--- /dev/null
+++ b/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go
@@ -0,0 +1,269 @@
+package oidc
+
+import (
+ "context"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rsa"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "sync"
+ "time"
+
+ jose "github.com/go-jose/go-jose/v4"
+)
+
+// StaticKeySet is a verifier that validates JWT against a static set of public keys.
+type StaticKeySet struct {
+ // PublicKeys used to verify the JWT. Supported types are *rsa.PublicKey and
+ // *ecdsa.PublicKey.
+ PublicKeys []crypto.PublicKey
+}
+
+// VerifySignature compares the signature against a static set of public keys.
+func (s *StaticKeySet) VerifySignature(ctx context.Context, jwt string) ([]byte, error) {
+ // Algorithms are already checked by Verifier, so this parse method accepts
+ // any algorithm.
+ jws, err := jose.ParseSigned(jwt, allAlgs)
+ if err != nil {
+ return nil, fmt.Errorf("parsing jwt: %v", err)
+ }
+ for _, pub := range s.PublicKeys {
+ switch pub.(type) {
+ case *rsa.PublicKey:
+ case *ecdsa.PublicKey:
+ case ed25519.PublicKey:
+ default:
+ return nil, fmt.Errorf("invalid public key type provided: %T", pub)
+ }
+ payload, err := jws.Verify(pub)
+ if err != nil {
+ continue
+ }
+ return payload, nil
+ }
+ return nil, fmt.Errorf("no public keys able to verify jwt")
+}
+
+// NewRemoteKeySet returns a KeySet that can validate JSON web tokens by using HTTP
+// GETs to fetch JSON web token sets hosted at a remote URL. This is automatically
+// used by NewProvider using the URLs returned by OpenID Connect discovery, but is
+// exposed for providers that don't support discovery or to prevent round trips to the
+// discovery URL.
+//
+// The returned KeySet is a long-lived verifier that caches keys and refreshes them
+// when the remote set changes. Reuse a common remote key set instead of creating new ones as needed.
+func NewRemoteKeySet(ctx context.Context, jwksURL string) *RemoteKeySet {
+ return newRemoteKeySet(ctx, jwksURL, time.Now)
+}
+
+func newRemoteKeySet(ctx context.Context, jwksURL string, now func() time.Time) *RemoteKeySet {
+ if now == nil {
+ now = time.Now
+ }
+ return &RemoteKeySet{
+ jwksURL: jwksURL,
+ now: now,
+ // For historical reasons, this package uses contexts for configuration, not just
+ // cancellation. In hindsight, this was a bad idea.
+ //
+ // Attempts to reason about how cancels should work with background requests have
+ // largely led to confusion. Use the context here as a config bag-of-values and
+ // ignore the cancel function.
+ ctx: context.WithoutCancel(ctx),
+ }
+}
+
+// RemoteKeySet is a KeySet implementation that validates JSON web tokens against
+// a jwks_uri endpoint.
+type RemoteKeySet struct {
+ jwksURL string
+ now func() time.Time
+
+ // Used for configuration. Cancelation is ignored.
+ ctx context.Context
+
+ // guard all other fields
+ mu sync.RWMutex
+
+ // inflight suppresses parallel execution of updateKeys and allows
+ // multiple goroutines to wait for its result.
+ inflight *inflight
+
+ // A set of cached keys.
+ cachedKeys []jose.JSONWebKey
+}
+
+// inflight is used to wait on some in-flight request from multiple goroutines.
+type inflight struct {
+ doneCh chan struct{}
+
+ keys []jose.JSONWebKey
+ err error
+}
+
+func newInflight() *inflight {
+ return &inflight{doneCh: make(chan struct{})}
+}
+
+// wait returns a channel that multiple goroutines can receive on. Once it returns
+// a value, the inflight request is done and result() can be inspected.
+func (i *inflight) wait() <-chan struct{} {
+ return i.doneCh
+}
+
+// done can only be called by a single goroutine. It records the result of the
+// inflight request and signals other goroutines that the result is safe to
+// inspect.
+func (i *inflight) done(keys []jose.JSONWebKey, err error) {
+ i.keys = keys
+ i.err = err
+ close(i.doneCh)
+}
+
+// result cannot be called until the wait() channel has returned a value.
+func (i *inflight) result() ([]jose.JSONWebKey, error) {
+ return i.keys, i.err
+}
+
+// parsedJWTKey is a context key that allows common setups to avoid parsing the
+// JWT twice. It holds a *jose.JSONWebSignature value.
+var parsedJWTKey contextKey
+
+// VerifySignature validates a payload against a signature from the jwks_uri.
+//
+// Users MUST NOT call this method directly and should use an IDTokenVerifier
+// instead. This method skips critical validations such as 'alg' values and is
+// only exported to implement the KeySet interface.
+func (r *RemoteKeySet) VerifySignature(ctx context.Context, jwt string) ([]byte, error) {
+ jws, ok := ctx.Value(parsedJWTKey).(*jose.JSONWebSignature)
+ if !ok {
+ // The algorithm values are already enforced by the Validator, which also sets
+ // the context value above to the pre-parsed signature.
+ //
+ // Practically, this codepath isn't called in normal use of this package, but
+ // if it is, the algorithms have already been checked.
+ var err error
+ jws, err = jose.ParseSigned(jwt, allAlgs)
+ if err != nil {
+ return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
+ }
+ }
+ return r.verify(ctx, jws)
+}
+
+func (r *RemoteKeySet) verify(ctx context.Context, jws *jose.JSONWebSignature) ([]byte, error) {
+ // We don't support JWTs signed with multiple signatures.
+ keyID := ""
+ for _, sig := range jws.Signatures {
+ keyID = sig.Header.KeyID
+ break
+ }
+
+ keys := r.keysFromCache()
+ for _, key := range keys {
+ if keyID == "" || key.KeyID == keyID {
+ if payload, err := jws.Verify(&key); err == nil {
+ return payload, nil
+ }
+ }
+ }
+
+ // If the kid doesn't match, check for new keys from the remote. This is the
+ // strategy recommended by the spec.
+ //
+ // https://openid.net/specs/openid-connect-core-1_0.html#RotateSigKeys
+ keys, err := r.keysFromRemote(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("fetching keys %w", err)
+ }
+
+ for _, key := range keys {
+ if keyID == "" || key.KeyID == keyID {
+ if payload, err := jws.Verify(&key); err == nil {
+ return payload, nil
+ }
+ }
+ }
+ return nil, errors.New("failed to verify id token signature")
+}
+
+func (r *RemoteKeySet) keysFromCache() (keys []jose.JSONWebKey) {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ return r.cachedKeys
+}
+
+// keysFromRemote syncs the key set from the remote set, records the values in the
+// cache, and returns the key set.
+func (r *RemoteKeySet) keysFromRemote(ctx context.Context) ([]jose.JSONWebKey, error) {
+ // Need to lock to inspect the inflight request field.
+ r.mu.Lock()
+ // If there's not a current inflight request, create one.
+ if r.inflight == nil {
+ r.inflight = newInflight()
+
+ // This goroutine has exclusive ownership over the current inflight
+ // request. It releases the resource by nil'ing the inflight field
+ // once the goroutine is done.
+ go func() {
+ // Sync keys and finish inflight when that's done.
+ keys, err := r.updateKeys()
+
+ r.inflight.done(keys, err)
+
+ // Lock to update the keys and indicate that there is no longer an
+ // inflight request.
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ if err == nil {
+ r.cachedKeys = keys
+ }
+
+ // Free inflight so a different request can run.
+ r.inflight = nil
+ }()
+ }
+ inflight := r.inflight
+ r.mu.Unlock()
+
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ case <-inflight.wait():
+ return inflight.result()
+ }
+}
+
+func (r *RemoteKeySet) updateKeys() ([]jose.JSONWebKey, error) {
+ req, err := http.NewRequest("GET", r.jwksURL, nil)
+ if err != nil {
+ return nil, fmt.Errorf("oidc: can't create request: %v", err)
+ }
+
+ resp, err := doRequest(r.ctx, req)
+ if err != nil {
+ return nil, fmt.Errorf("oidc: get keys failed %w", err)
+ }
+ defer resp.Body.Close()
+
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("unable to read response body: %v", err)
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("oidc: get keys failed: %s %s", resp.Status, body)
+ }
+
+ var keySet jose.JSONWebKeySet
+ err = unmarshalResp(resp, body, &keySet)
+ if err != nil {
+ return nil, fmt.Errorf("oidc: failed to decode keys: %v %s", err, body)
+ }
+ return keySet.Keys, nil
+}
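The RemoteKeySet above is normally consumed through the package's verifier rather than by calling VerifySignature directly. A minimal sketch under that assumption, not part of the vendored file, using go-oidc's NewVerifier and Config defined elsewhere in the package; the issuer, jwks_uri, and client ID below are hypothetical:

    package example

    import (
    	"context"
    	"log"

    	"github.com/coreos/go-oidc/v3/oidc"
    )

    func verifyIDToken(ctx context.Context, rawIDToken string) {
    	// Reuse one RemoteKeySet per jwks_uri; it caches keys and fetches new ones
    	// when it sees an unknown key ID.
    	keySet := oidc.NewRemoteKeySet(ctx, "https://issuer.example.com/keys") // hypothetical jwks_uri

    	verifier := oidc.NewVerifier("https://issuer.example.com", keySet, &oidc.Config{
    		ClientID: "my-client-id", // hypothetical
    	})

    	idToken, err := verifier.Verify(ctx, rawIDToken)
    	if err != nil {
    		log.Printf("verify: %v", err)
    		return
    	}
    	log.Printf("verified token for subject %s", idToken.Subject)
    }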
diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go b/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go
new file mode 100644
index 00000000..17419f38
--- /dev/null
+++ b/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go
@@ -0,0 +1,554 @@
+// Package oidc implements OpenID Connect client logic for the golang.org/x/oauth2 package.
+package oidc
+
+import (
+ "context"
+ "crypto/sha256"
+ "crypto/sha512"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "hash"
+ "io"
+ "mime"
+ "net/http"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/oauth2"
+)
+
+const (
+ // ScopeOpenID is the mandatory scope for all OpenID Connect OAuth2 requests.
+ ScopeOpenID = "openid"
+
+ // ScopeOfflineAccess is an optional scope defined by OpenID Connect for requesting
+ // OAuth2 refresh tokens.
+ //
+ // Support for this scope differs between OpenID Connect providers. For instance
+ // Google rejects it, favoring appending "access_type=offline" as part of the
+ // authorization request instead.
+ //
+ // See: https://openid.net/specs/openid-connect-core-1_0.html#OfflineAccess
+ ScopeOfflineAccess = "offline_access"
+)
+
+var (
+ errNoAtHash = errors.New("id token did not have an access token hash")
+ errInvalidAtHash = errors.New("access token hash does not match value in ID token")
+)
+
+type contextKey int
+
+var issuerURLKey contextKey
+
+// ClientContext returns a new Context that carries the provided HTTP client.
+//
+// This method sets the same context key used by the golang.org/x/oauth2 package,
+// so the returned context works for that package too.
+//
+// myClient := &http.Client{}
+// ctx := oidc.ClientContext(parentContext, myClient)
+//
+// // This will use the custom client
+// provider, err := oidc.NewProvider(ctx, "https://accounts.example.com")
+func ClientContext(ctx context.Context, client *http.Client) context.Context {
+ return context.WithValue(ctx, oauth2.HTTPClient, client)
+}
+
+func getClient(ctx context.Context) *http.Client {
+ if c, ok := ctx.Value(oauth2.HTTPClient).(*http.Client); ok {
+ return c
+ }
+ return nil
+}
+
+// InsecureIssuerURLContext allows discovery to work when the issuer_url reported
+// by upstream is mismatched with the discovery URL. This is meant for integration
+// with off-spec providers such as Azure.
+//
+// discoveryBaseURL := "https://login.microsoftonline.com/organizations/v2.0"
+// issuerURL := "https://login.microsoftonline.com/my-tenantid/v2.0"
+//
+// ctx := oidc.InsecureIssuerURLContext(parentContext, issuerURL)
+//
+// // Provider will be discovered with the discoveryBaseURL, but use issuerURL
+// // for future issuer validation.
+// provider, err := oidc.NewProvider(ctx, discoveryBaseURL)
+//
+// This is insecure because validating the correct issuer is critical for multi-tenant
+// providers. Any overrides here MUST be carefully reviewed.
+func InsecureIssuerURLContext(ctx context.Context, issuerURL string) context.Context {
+ return context.WithValue(ctx, issuerURLKey, issuerURL)
+}
+
+func doRequest(ctx context.Context, req *http.Request) (*http.Response, error) {
+ client := http.DefaultClient
+ if c := getClient(ctx); c != nil {
+ client = c
+ }
+ return client.Do(req.WithContext(ctx))
+}
+
+// Provider represents an OpenID Connect server's configuration.
+type Provider struct {
+ issuer string
+ authURL string
+ tokenURL string
+ deviceAuthURL string
+ userInfoURL string
+ jwksURL string
+ algorithms []string
+
+ // Raw claims returned by the server.
+ rawClaims []byte
+
+ // Guards all of the following fields.
+ mu sync.Mutex
+ // HTTP client specified from the initial NewProvider request. This is used
+ // when creating the common key set.
+ client *http.Client
+ // A key set that uses context.Background() and is shared between all code paths
+ // that don't have a convenient way of supplying a unique context.
+ commonRemoteKeySet KeySet
+}
+
+func (p *Provider) remoteKeySet() KeySet {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.commonRemoteKeySet == nil {
+ ctx := context.Background()
+ if p.client != nil {
+ ctx = ClientContext(ctx, p.client)
+ }
+ p.commonRemoteKeySet = NewRemoteKeySet(ctx, p.jwksURL)
+ }
+ return p.commonRemoteKeySet
+}
+
+type providerJSON struct {
+ Issuer string `json:"issuer"`
+ AuthURL string `json:"authorization_endpoint"`
+ TokenURL string `json:"token_endpoint"`
+ DeviceAuthURL string `json:"device_authorization_endpoint"`
+ JWKSURL string `json:"jwks_uri"`
+ UserInfoURL string `json:"userinfo_endpoint"`
+ Algorithms []string `json:"id_token_signing_alg_values_supported"`
+}
+
+// supportedAlgorithms is a list of algorithms explicitly supported by this
+// package. If a provider supports other algorithms, such as HS256 or none,
+// those values won't be passed to the IDTokenVerifier.
+var supportedAlgorithms = map[string]bool{
+ RS256: true,
+ RS384: true,
+ RS512: true,
+ ES256: true,
+ ES384: true,
+ ES512: true,
+ PS256: true,
+ PS384: true,
+ PS512: true,
+ EdDSA: true,
+}
+
+// ProviderConfig allows creating providers when discovery isn't supported. It's
+// generally easier to use NewProvider directly.
+type ProviderConfig struct {
+ // IssuerURL is the identity of the provider, and the string it uses to sign
+ // ID tokens with. For example "https://accounts.google.com". This value MUST
+ // match ID tokens exactly.
+ IssuerURL string
+ // AuthURL is the endpoint used by the provider to support the OAuth 2.0
+ // authorization endpoint.
+ AuthURL string
+ // TokenURL is the endpoint used by the provider to support the OAuth 2.0
+ // token endpoint.
+ TokenURL string
+ // DeviceAuthURL is the endpoint used by the provider to support the OAuth 2.0
+ // device authorization endpoint.
+ DeviceAuthURL string
+ // UserInfoURL is the endpoint used by the provider to support the OpenID
+ // Connect UserInfo flow.
+ //
+ // https://openid.net/specs/openid-connect-core-1_0.html#UserInfo
+ UserInfoURL string
+ // JWKSURL is the endpoint used by the provider to advertise public keys to
+ // verify issued ID tokens. This endpoint is polled as new keys are made
+ // available.
+ JWKSURL string
+
+ // Algorithms, if provided, indicate a list of JWT algorithms allowed to sign
+ // ID tokens. If not provided, this defaults to the algorithms advertised by
+ // the JWK endpoint, then the set of algorithms supported by this package.
+ Algorithms []string
+}
+
+// NewProvider initializes a provider from a set of endpoints, rather than
+// through discovery.
+func (p *ProviderConfig) NewProvider(ctx context.Context) *Provider {
+ return &Provider{
+ issuer: p.IssuerURL,
+ authURL: p.AuthURL,
+ tokenURL: p.TokenURL,
+ deviceAuthURL: p.DeviceAuthURL,
+ userInfoURL: p.UserInfoURL,
+ jwksURL: p.JWKSURL,
+ algorithms: p.Algorithms,
+ client: getClient(ctx),
+ }
+}
+
+// NewProvider uses the OpenID Connect discovery mechanism to construct a Provider.
+//
+// The issuer is the URL identifier for the service. For example: "https://accounts.google.com"
+// or "https://login.salesforce.com".
+func NewProvider(ctx context.Context, issuer string) (*Provider, error) {
+ wellKnown := strings.TrimSuffix(issuer, "/") + "/.well-known/openid-configuration"
+ req, err := http.NewRequest("GET", wellKnown, nil)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := doRequest(ctx, req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("unable to read response body: %v", err)
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("%s: %s", resp.Status, body)
+ }
+
+ var p providerJSON
+ err = unmarshalResp(resp, body, &p)
+ if err != nil {
+ return nil, fmt.Errorf("oidc: failed to decode provider discovery object: %v", err)
+ }
+
+ issuerURL, skipIssuerValidation := ctx.Value(issuerURLKey).(string)
+ if !skipIssuerValidation {
+ issuerURL = issuer
+ }
+ if p.Issuer != issuerURL && !skipIssuerValidation {
+ return nil, fmt.Errorf("oidc: issuer did not match the issuer returned by provider, expected %q got %q", issuer, p.Issuer)
+ }
+ var algs []string
+ for _, a := range p.Algorithms {
+ if supportedAlgorithms[a] {
+ algs = append(algs, a)
+ }
+ }
+ return &Provider{
+ issuer: issuerURL,
+ authURL: p.AuthURL,
+ tokenURL: p.TokenURL,
+ deviceAuthURL: p.DeviceAuthURL,
+ userInfoURL: p.UserInfoURL,
+ jwksURL: p.JWKSURL,
+ algorithms: algs,
+ rawClaims: body,
+ client: getClient(ctx),
+ }, nil
+}
+
+// Claims unmarshals raw fields returned by the server during discovery.
+//
+// var claims struct {
+// ScopesSupported []string `json:"scopes_supported"`
+// ClaimsSupported []string `json:"claims_supported"`
+// }
+//
+// if err := provider.Claims(&claims); err != nil {
+// // handle unmarshaling error
+// }
+//
+// For a list of fields defined by the OpenID Connect spec see:
+// https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
+func (p *Provider) Claims(v interface{}) error {
+ if p.rawClaims == nil {
+ return errors.New("oidc: claims not set")
+ }
+ return json.Unmarshal(p.rawClaims, v)
+}
+
+// Endpoint returns the OAuth2 auth and token endpoints for the given provider.
+func (p *Provider) Endpoint() oauth2.Endpoint {
+ return oauth2.Endpoint{AuthURL: p.authURL, DeviceAuthURL: p.deviceAuthURL, TokenURL: p.tokenURL}
+}
+
+// UserInfoEndpoint returns the OpenID Connect userinfo endpoint for the given
+// provider.
+func (p *Provider) UserInfoEndpoint() string {
+ return p.userInfoURL
+}
+
+// UserInfo represents the OpenID Connect userinfo claims.
+type UserInfo struct {
+ Subject string `json:"sub"`
+ Profile string `json:"profile"`
+ Email string `json:"email"`
+ EmailVerified bool `json:"email_verified"`
+
+ claims []byte
+}
+
+type userInfoRaw struct {
+ Subject string `json:"sub"`
+ Profile string `json:"profile"`
+ Email string `json:"email"`
+ // Handle providers that return email_verified as a string
+ // https://forums.aws.amazon.com/thread.jspa?messageID=949441 and
+ // https://discuss.elastic.co/t/openid-error-after-authenticating-against-aws-cognito/206018/11
+ EmailVerified stringAsBool `json:"email_verified"`
+}
+
+// Claims unmarshals the raw JSON object claims into the provided object.
+func (u *UserInfo) Claims(v interface{}) error {
+ if u.claims == nil {
+ return errors.New("oidc: claims not set")
+ }
+ return json.Unmarshal(u.claims, v)
+}
+
+// UserInfo uses the token source to query the provider's user info endpoint.
+func (p *Provider) UserInfo(ctx context.Context, tokenSource oauth2.TokenSource) (*UserInfo, error) {
+ if p.userInfoURL == "" {
+ return nil, errors.New("oidc: user info endpoint is not supported by this provider")
+ }
+
+ req, err := http.NewRequest("GET", p.userInfoURL, nil)
+ if err != nil {
+ return nil, fmt.Errorf("oidc: create GET request: %v", err)
+ }
+
+ token, err := tokenSource.Token()
+ if err != nil {
+ return nil, fmt.Errorf("oidc: get access token: %v", err)
+ }
+ token.SetAuthHeader(req)
+
+ resp, err := doRequest(ctx, req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("%s: %s", resp.Status, body)
+ }
+
+ ct := resp.Header.Get("Content-Type")
+ mediaType, _, parseErr := mime.ParseMediaType(ct)
+ if parseErr == nil && mediaType == "application/jwt" {
+ payload, err := p.remoteKeySet().VerifySignature(ctx, string(body))
+ if err != nil {
+ return nil, fmt.Errorf("oidc: invalid userinfo jwt signature %v", err)
+ }
+ body = payload
+ }
+
+ var userInfo userInfoRaw
+ if err := json.Unmarshal(body, &userInfo); err != nil {
+ return nil, fmt.Errorf("oidc: failed to decode userinfo: %v", err)
+ }
+ return &UserInfo{
+ Subject: userInfo.Subject,
+ Profile: userInfo.Profile,
+ Email: userInfo.Email,
+ EmailVerified: bool(userInfo.EmailVerified),
+ claims: body,
+ }, nil
+}
+
+// IDToken is an OpenID Connect extension that provides a predictable representation
+// of an authorization event.
+//
+// The ID Token only holds fields OpenID Connect requires. To access additional
+// claims returned by the server, use the Claims method.
+type IDToken struct {
+ // The URL of the server which issued this token. OpenID Connect
+ // requires this value always be identical to the URL used for
+ // initial discovery.
+ //
+ // Note: Because of a known issue with Google Accounts' implementation
+ // this value may differ when using Google.
+ //
+ // See: https://developers.google.com/identity/protocols/OpenIDConnect#obtainuserinfo
+ Issuer string
+
+ // The client ID, or set of client IDs, that this token is issued for. For
+ // common uses, this is the client that initialized the auth flow.
+ //
+ // This package ensures the audience contains an expected value.
+ Audience []string
+
+ // A unique string which identifies the end user.
+ Subject string
+
+ // Expiry of the token. This package will not process tokens that have
+ // expired unless that validation is explicitly turned off.
+ Expiry time.Time
+ // When the token was issued by the provider.
+ IssuedAt time.Time
+
+ // Initial nonce provided during the authentication redirect.
+ //
+ // This package does NOT provide verification of the value of this field,
+ // and it is the user's responsibility to ensure it contains a valid value.
+ Nonce string
+
+ // at_hash claim, if set in the ID token. Callers can verify an access token
+ // that corresponds to the ID token using the VerifyAccessToken method.
+ AccessTokenHash string
+
+ // signature algorithm used for ID token, needed to compute a verification hash of an
+ // access token
+ sigAlgorithm string
+
+ // Raw payload of the id_token.
+ claims []byte
+
+ // Map of distributed claim names to claim sources
+ distributedClaims map[string]claimSource
+}
+
+// Claims unmarshals the raw JSON payload of the ID Token into a provided struct.
+//
+// idToken, err := idTokenVerifier.Verify(rawIDToken)
+// if err != nil {
+// // handle error
+// }
+// var claims struct {
+// Email string `json:"email"`
+// EmailVerified bool `json:"email_verified"`
+// }
+// if err := idToken.Claims(&claims); err != nil {
+// // handle error
+// }
+func (i *IDToken) Claims(v interface{}) error {
+ if i.claims == nil {
+ return errors.New("oidc: claims not set")
+ }
+ return json.Unmarshal(i.claims, v)
+}
+
+// VerifyAccessToken verifies that the hash of the access token that corresponds to the ID token
+// matches the hash in the ID token. It returns an error if the hashes don't match.
+// It is the caller's responsibility to ensure that the optional access token hash is present for the ID token
+// before calling this method. See https://openid.net/specs/openid-connect-core-1_0.html#CodeIDToken
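+//
+// A typical call site, assuming idToken and oauth2Token came from the same token
+// response (a sketch):
+//
+// if idToken.AccessTokenHash != "" {
+// if err := idToken.VerifyAccessToken(oauth2Token.AccessToken); err != nil {
+// // handle error
+// }
+// }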
+func (i *IDToken) VerifyAccessToken(accessToken string) error {
+ if i.AccessTokenHash == "" {
+ return errNoAtHash
+ }
+ var h hash.Hash
+ switch i.sigAlgorithm {
+ case RS256, ES256, PS256:
+ h = sha256.New()
+ case RS384, ES384, PS384:
+ h = sha512.New384()
+ case RS512, ES512, PS512, EdDSA:
+ h = sha512.New()
+ default:
+ return fmt.Errorf("oidc: unsupported signing algorithm %q", i.sigAlgorithm)
+ }
+ h.Write([]byte(accessToken)) // hash documents that Write will never return an error
+ sum := h.Sum(nil)[:h.Size()/2]
+ actual := base64.RawURLEncoding.EncodeToString(sum)
+ if actual != i.AccessTokenHash {
+ return errInvalidAtHash
+ }
+ return nil
+}
+
+type idToken struct {
+ Issuer string `json:"iss"`
+ Subject string `json:"sub"`
+ Audience audience `json:"aud"`
+ Expiry jsonTime `json:"exp"`
+ IssuedAt jsonTime `json:"iat"`
+ NotBefore *jsonTime `json:"nbf"`
+ Nonce string `json:"nonce"`
+ AtHash string `json:"at_hash"`
+ ClaimNames map[string]string `json:"_claim_names"`
+ ClaimSources map[string]claimSource `json:"_claim_sources"`
+}
+
+type claimSource struct {
+ Endpoint string `json:"endpoint"`
+ AccessToken string `json:"access_token"`
+}
+
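+// stringAsBool accepts both JSON booleans and the quoted strings "true" and
+// "false", which some providers (for example AWS Cognito, as noted above) return
+// for the email_verified claim.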
+type stringAsBool bool
+
+func (sb *stringAsBool) UnmarshalJSON(b []byte) error {
+ switch string(b) {
+ case "true", `"true"`:
+ *sb = true
+ case "false", `"false"`:
+ *sb = false
+ default:
+ return errors.New("invalid value for boolean")
+ }
+ return nil
+}
+
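+// audience accepts the "aud" claim as either a single JSON string or an array of
+// strings, both of which are permitted by RFC 7519 section 4.1.3.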
+type audience []string
+
+func (a *audience) UnmarshalJSON(b []byte) error {
+ var s string
+ if json.Unmarshal(b, &s) == nil {
+ *a = audience{s}
+ return nil
+ }
+ var auds []string
+ if err := json.Unmarshal(b, &auds); err != nil {
+ return err
+ }
+ *a = auds
+ return nil
+}
+
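+// jsonTime decodes NumericDate claims such as exp, iat, and nbf, which are Unix
+// timestamps that providers may encode as either integers or floating point
+// numbers.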
+type jsonTime time.Time
+
+func (j *jsonTime) UnmarshalJSON(b []byte) error {
+ var n json.Number
+ if err := json.Unmarshal(b, &n); err != nil {
+ return err
+ }
+ var unix int64
+
+ if t, err := n.Int64(); err == nil {
+ unix = t
+ } else {
+ f, err := n.Float64()
+ if err != nil {
+ return err
+ }
+ unix = int64(f)
+ }
+ *j = jsonTime(time.Unix(unix, 0))
+ return nil
+}
+
+func unmarshalResp(r *http.Response, body []byte, v interface{}) error {
+ err := json.Unmarshal(body, &v)
+ if err == nil {
+ return nil
+ }
+ ct := r.Header.Get("Content-Type")
+ mediaType, _, parseErr := mime.ParseMediaType(ct)
+ if parseErr == nil && mediaType == "application/json" {
+ return fmt.Errorf("got Content-Type = application/json, but could not unmarshal as JSON: %v", err)
+ }
+ return fmt.Errorf("expected Content-Type = application/json, got %q: %v", ct, err)
+}
diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go b/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go
new file mode 100644
index 00000000..52b27b74
--- /dev/null
+++ b/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go
@@ -0,0 +1,355 @@
+package oidc
+
+import (
+ "bytes"
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+ "time"
+
+ jose "github.com/go-jose/go-jose/v4"
+ "golang.org/x/oauth2"
+)
+
+const (
+ issuerGoogleAccounts = "https://accounts.google.com"
+ issuerGoogleAccountsNoScheme = "accounts.google.com"
+)
+
+// TokenExpiredError indicates that Verify failed because the token was expired. This
+// error does NOT indicate that the token is not also invalid for other reasons. Other
+// checks might have failed if the expiration check had not failed.
+type TokenExpiredError struct {
+ // Expiry is the time when the token expired.
+ Expiry time.Time
+}
+
+func (e *TokenExpiredError) Error() string {
+ return fmt.Sprintf("oidc: token is expired (Token Expiry: %v)", e.Expiry)
+}
+
+// KeySet is a set of public JSON Web Keys that can be used to validate the signature
+// of JSON web tokens. This is expected to be backed by a remote key set through
+// provider metadata discovery or an in-memory set of keys delivered out-of-band.
+type KeySet interface {
+ // VerifySignature parses the JSON web token, verifies the signature, and returns
+ // the raw payload. Header and claim fields are validated by other parts of the
+ // package. For example, the KeySet does not need to check values such as signature
+ // algorithm, issuer, and audience since the IDTokenVerifier validates these values
+ // independently.
+ //
+ // If VerifySignature makes HTTP requests to verify the token, it's expected to
+ // use any HTTP client associated with the context through ClientContext.
+ VerifySignature(ctx context.Context, jwt string) (payload []byte, err error)
+}
+
+// IDTokenVerifier provides verification for ID Tokens.
+type IDTokenVerifier struct {
+ keySet KeySet
+ config *Config
+ issuer string
+}
+
+// NewVerifier returns a verifier manually constructed from a key set and issuer URL.
+//
+// It's easier to use provider discovery to construct an IDTokenVerifier than to create
+// one directly. This method is intended for providers that don't support metadata
+// discovery, or for avoiding round trips when the key set URL is already known.
+//
+// This constructor can be used to create a verifier directly using the issuer URL and
+// JSON Web Key Set URL without using discovery:
+//
+// keySet := oidc.NewRemoteKeySet(ctx, "https://www.googleapis.com/oauth2/v3/certs")
+// verifier := oidc.NewVerifier("https://accounts.google.com", keySet, config)
+//
+// Or a static key set (e.g. for testing):
+//
+// keySet := &oidc.StaticKeySet{PublicKeys: []crypto.PublicKey{pub1, pub2}}
+// verifier := oidc.NewVerifier("https://accounts.google.com", keySet, config)
+func NewVerifier(issuerURL string, keySet KeySet, config *Config) *IDTokenVerifier {
+ return &IDTokenVerifier{keySet: keySet, config: config, issuer: issuerURL}
+}
+
+// Config is the configuration for an IDTokenVerifier.
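+//
+// For most clients, setting only ClientID is sufficient (a sketch; clientID is a
+// placeholder for the OAuth2 client ID registered with the provider):
+//
+// verifier := provider.Verifier(&oidc.Config{ClientID: clientID})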
+type Config struct {
+ // Expected audience of the token. For a majority of the cases this is expected to be
+ // the ID of the client that initialized the login flow. It may occasionally differ if
+ // the provider supports the authorizing party (azp) claim.
+ //
+ // If not provided, users must explicitly set SkipClientIDCheck.
+ ClientID string
+ // If specified, only this set of algorithms may be used to sign the JWT.
+ //
+ // If the IDTokenVerifier is created from a provider with (*Provider).Verifier, this
+ // defaults to the set of algorithms the provider supports. Otherwise this value
+ // defaults to RS256.
+ SupportedSigningAlgs []string
+
+ // If true, no ClientID check is performed. Must be true if the ClientID field is empty.
+ SkipClientIDCheck bool
+ // If true, token expiry is not checked.
+ SkipExpiryCheck bool
+
+ // SkipIssuerCheck is intended for specialized cases where the caller wishes to
+ // defer issuer validation. When enabled, callers MUST independently verify the Token's
+ // Issuer is a known good value.
+ //
+ // Mismatched issuers often indicate client mis-configuration. If mismatches are
+ // unexpected, evaluate if the provided issuer URL is incorrect instead of enabling
+ // this option.
+ SkipIssuerCheck bool
+
+ // Time function to check Token expiry. Defaults to time.Now
+ Now func() time.Time
+
+ // InsecureSkipSignatureCheck causes this package to skip JWT signature validation.
+ // It's intended for special cases where providers (such as Azure) use the "none"
+ // algorithm.
+ //
+ // This option can only be enabled safely when the ID Token is received directly
+ // from the provider after the token exchange.
+ //
+ // This option MUST NOT be used when receiving an ID Token from sources other
+ // than the token endpoint.
+ InsecureSkipSignatureCheck bool
+}
+
+// VerifierContext returns an IDTokenVerifier that uses the provider's key set to
+// verify JWTs. As opposed to Verifier, the context is used to configure requests
+// to the upstream JWKs endpoint. The provided context's cancellation is ignored.
+func (p *Provider) VerifierContext(ctx context.Context, config *Config) *IDTokenVerifier {
+ return p.newVerifier(NewRemoteKeySet(ctx, p.jwksURL), config)
+}
+
+// Verifier returns an IDTokenVerifier that uses the provider's key set to verify JWTs.
+//
+// The returned verifier uses a background context for all requests to the upstream
+// JWKs endpoint. To control that context, use VerifierContext instead.
+func (p *Provider) Verifier(config *Config) *IDTokenVerifier {
+ return p.newVerifier(p.remoteKeySet(), config)
+}
+
+func (p *Provider) newVerifier(keySet KeySet, config *Config) *IDTokenVerifier {
+ if len(config.SupportedSigningAlgs) == 0 && len(p.algorithms) > 0 {
+ // Make a copy so we don't modify the config values.
+ cp := &Config{}
+ *cp = *config
+ cp.SupportedSigningAlgs = p.algorithms
+ config = cp
+ }
+ return NewVerifier(p.issuer, keySet, config)
+}
+
+func parseJWT(p string) ([]byte, error) {
+ parts := strings.Split(p, ".")
+ if len(parts) < 2 {
+ return nil, fmt.Errorf("oidc: malformed jwt, expected 3 parts got %d", len(parts))
+ }
+ payload, err := base64.RawURLEncoding.DecodeString(parts[1])
+ if err != nil {
+ return nil, fmt.Errorf("oidc: malformed jwt payload: %v", err)
+ }
+ return payload, nil
+}
+
+func contains(sli []string, ele string) bool {
+ for _, s := range sli {
+ if s == ele {
+ return true
+ }
+ }
+ return false
+}
+
+// resolveDistributedClaim fetches and verifies a distributed claim JWT and returns its raw claims.
+func resolveDistributedClaim(ctx context.Context, verifier *IDTokenVerifier, src claimSource) ([]byte, error) {
+ req, err := http.NewRequest("GET", src.Endpoint, nil)
+ if err != nil {
+ return nil, fmt.Errorf("malformed request: %v", err)
+ }
+ if src.AccessToken != "" {
+ req.Header.Set("Authorization", "Bearer "+src.AccessToken)
+ }
+
+ resp, err := doRequest(ctx, req)
+ if err != nil {
+ return nil, fmt.Errorf("oidc: Request to endpoint failed: %v", err)
+ }
+ defer resp.Body.Close()
+
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("unable to read response body: %v", err)
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("oidc: request failed: %v", resp.StatusCode)
+ }
+
+ token, err := verifier.Verify(ctx, string(body))
+ if err != nil {
+ return nil, fmt.Errorf("malformed response body: %v", err)
+ }
+
+ return token.claims, nil
+}
+
+// Verify parses a raw ID Token, verifies it's been signed by the provider, performs
+// any additional checks depending on the Config, and returns the payload.
+//
+// Verify does NOT do nonce validation, which is the caller's responsibility.
+//
+// See: https://openid.net/specs/openid-connect-core-1_0.html#IDTokenValidation
+//
+// oauth2Token, err := oauth2Config.Exchange(ctx, r.URL.Query().Get("code"))
+// if err != nil {
+// // handle error
+// }
+//
+// // Extract the ID Token from oauth2 token.
+// rawIDToken, ok := oauth2Token.Extra("id_token").(string)
+// if !ok {
+// // handle error
+// }
+//
+// token, err := verifier.Verify(ctx, rawIDToken)
+func (v *IDTokenVerifier) Verify(ctx context.Context, rawIDToken string) (*IDToken, error) {
+ // Throw out tokens with invalid claims before trying to verify the token. This lets
+ // us do cheap checks before possibly re-syncing keys.
+ payload, err := parseJWT(rawIDToken)
+ if err != nil {
+ return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
+ }
+ var token idToken
+ if err := json.Unmarshal(payload, &token); err != nil {
+ return nil, fmt.Errorf("oidc: failed to unmarshal claims: %v", err)
+ }
+
+ distributedClaims := make(map[string]claimSource)
+
+ // Step through the token to map claim names to claim sources.
+ for cn, src := range token.ClaimNames {
+ if src == "" {
+ return nil, fmt.Errorf("oidc: failed to obtain source from claim name")
+ }
+ s, ok := token.ClaimSources[src]
+ if !ok {
+ return nil, fmt.Errorf("oidc: source does not exist")
+ }
+ distributedClaims[cn] = s
+ }
+
+ t := &IDToken{
+ Issuer: token.Issuer,
+ Subject: token.Subject,
+ Audience: []string(token.Audience),
+ Expiry: time.Time(token.Expiry),
+ IssuedAt: time.Time(token.IssuedAt),
+ Nonce: token.Nonce,
+ AccessTokenHash: token.AtHash,
+ claims: payload,
+ distributedClaims: distributedClaims,
+ }
+
+ // Check issuer.
+ if !v.config.SkipIssuerCheck && t.Issuer != v.issuer {
+ // Google sometimes returns "accounts.google.com" as the issuer claim instead of
+ // the required "https://accounts.google.com". Detect this case and allow it only
+ // for Google.
+ //
+ // We will not add hooks to let other providers go off spec like this.
+ if !(v.issuer == issuerGoogleAccounts && t.Issuer == issuerGoogleAccountsNoScheme) {
+ return nil, fmt.Errorf("oidc: id token issued by a different provider, expected %q got %q", v.issuer, t.Issuer)
+ }
+ }
+
+ // If a client ID has been provided, make sure it's part of the audience. SkipClientIDCheck must be true if ClientID is empty.
+ //
+ // This check DOES NOT ensure that the ClientID is the party to which the ID Token was issued (i.e. Authorized party).
+ if !v.config.SkipClientIDCheck {
+ if v.config.ClientID != "" {
+ if !contains(t.Audience, v.config.ClientID) {
+ return nil, fmt.Errorf("oidc: expected audience %q got %q", v.config.ClientID, t.Audience)
+ }
+ } else {
+ return nil, fmt.Errorf("oidc: invalid configuration, clientID must be provided or SkipClientIDCheck must be set")
+ }
+ }
+
+ // If SkipExpiryCheck is false, make sure the token is not expired.
+ if !v.config.SkipExpiryCheck {
+ now := time.Now
+ if v.config.Now != nil {
+ now = v.config.Now
+ }
+ nowTime := now()
+
+ if t.Expiry.Before(nowTime) {
+ return nil, &TokenExpiredError{Expiry: t.Expiry}
+ }
+
+ // If nbf claim is provided in token, ensure that it is indeed in the past.
+ if token.NotBefore != nil {
+ nbfTime := time.Time(*token.NotBefore)
+ // Set to 5 minutes since this is what other OpenID Connect providers do to deal with clock skew.
+ // https://github.com/AzureAD/azure-activedirectory-identitymodel-extensions-for-dotnet/blob/6.12.2/src/Microsoft.IdentityModel.Tokens/TokenValidationParameters.cs#L149-L153
+ leeway := 5 * time.Minute
+
+ if nowTime.Add(leeway).Before(nbfTime) {
+ return nil, fmt.Errorf("oidc: current time %v before the nbf (not before) time: %v", nowTime, nbfTime)
+ }
+ }
+ }
+
+ if v.config.InsecureSkipSignatureCheck {
+ return t, nil
+ }
+
+ var supportedSigAlgs []jose.SignatureAlgorithm
+ for _, alg := range v.config.SupportedSigningAlgs {
+ supportedSigAlgs = append(supportedSigAlgs, jose.SignatureAlgorithm(alg))
+ }
+ if len(supportedSigAlgs) == 0 {
+ // If no algorithms were specified by either the config or discovery, default
+ // to the one mandatory algorithm, RS256.
+ supportedSigAlgs = []jose.SignatureAlgorithm{jose.RS256}
+ }
+ jws, err := jose.ParseSigned(rawIDToken, supportedSigAlgs)
+ if err != nil {
+ return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
+ }
+
+ switch len(jws.Signatures) {
+ case 0:
+ return nil, fmt.Errorf("oidc: id token not signed")
+ case 1:
+ default:
+ return nil, fmt.Errorf("oidc: multiple signatures on id token not supported")
+ }
+ sig := jws.Signatures[0]
+ t.sigAlgorithm = sig.Header.Algorithm
+
+ ctx = context.WithValue(ctx, parsedJWTKey, jws)
+ gotPayload, err := v.keySet.VerifySignature(ctx, rawIDToken)
+ if err != nil {
+ return nil, fmt.Errorf("failed to verify signature: %v", err)
+ }
+
+ // Ensure that the payload returned by signature verification actually matches the payload parsed earlier.
+ if !bytes.Equal(gotPayload, payload) {
+ return nil, errors.New("oidc: internal error, payload parsed did not match previous payload")
+ }
+
+ return t, nil
+}
+
+// Nonce returns an auth code option which requires the ID Token created by the
+// OpenID Connect provider to contain the specified nonce.
+func Nonce(nonce string) oauth2.AuthCodeOption {
+ return oauth2.SetAuthURLParam("nonce", nonce)
+}
diff --git a/vendor/github.com/dgryski/go-metro/LICENSE b/vendor/github.com/dgryski/go-metro/LICENSE
new file mode 100644
index 00000000..6243b617
--- /dev/null
+++ b/vendor/github.com/dgryski/go-metro/LICENSE
@@ -0,0 +1,24 @@
+This package is a mechanical translation of the reference C++ code for
+MetroHash, available at https://github.com/jandrewrogers/MetroHash
+
+The MIT License (MIT)
+
+Copyright (c) 2016 Damian Gryski
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/dgryski/go-metro/README b/vendor/github.com/dgryski/go-metro/README
new file mode 100644
index 00000000..5ecebb38
--- /dev/null
+++ b/vendor/github.com/dgryski/go-metro/README
@@ -0,0 +1,6 @@
+MetroHash
+
+This package is a mechanical translation of the reference C++ code for
+MetroHash, available at https://github.com/jandrewrogers/MetroHash
+
+I claim no additional copyright over the original implementation.
diff --git a/vendor/github.com/dgryski/go-metro/metro.py b/vendor/github.com/dgryski/go-metro/metro.py
new file mode 100644
index 00000000..8dd4d26e
--- /dev/null
+++ b/vendor/github.com/dgryski/go-metro/metro.py
@@ -0,0 +1,199 @@
+import peachpy.x86_64
+
+k0 = 0xD6D018F5
+k1 = 0xA2AA033B
+k2 = 0x62992FC1
+k3 = 0x30BC5B29
+
+def advance(p,l,c):
+ ADD(p,c)
+ SUB(l,c)
+
+def imul(r,k):
+ t = GeneralPurposeRegister64()
+ MOV(t, k)
+ IMUL(r, t)
+
+def update32(v, p,idx, k, vadd):
+ r = GeneralPurposeRegister64()
+ MOV(r, [p + idx])
+ imul(r, k)
+ ADD(v, r)
+ ROR(v, 29)
+ ADD(v, vadd)
+
+def final32(v, regs, keys):
+ r = GeneralPurposeRegister64()
+ MOV(r, v[regs[1]])
+ ADD(r, v[regs[2]])
+ imul(r, keys[0])
+ ADD(r, v[regs[3]])
+ ROR(r, 37)
+ imul(r, keys[1])
+ XOR(v[regs[0]], r)
+
+seed = Argument(uint64_t)
+buffer_base = Argument(ptr())
+buffer_len = Argument(int64_t)
+buffer_cap = Argument(int64_t)
+
+def makeHash(name, args):
+ with Function(name, args, uint64_t) as function:
+
+ reg_ptr = GeneralPurposeRegister64()
+ reg_ptr_len = GeneralPurposeRegister64()
+ reg_hash = GeneralPurposeRegister64()
+
+ LOAD.ARGUMENT(reg_hash, seed)
+ LOAD.ARGUMENT(reg_ptr, buffer_base)
+ LOAD.ARGUMENT(reg_ptr_len, buffer_len)
+
+ imul(reg_hash, k0)
+ r = GeneralPurposeRegister64()
+ MOV(r, k2*k0)
+ ADD(reg_hash, r)
+
+ after32 = Label("after32")
+
+ CMP(reg_ptr_len, 32)
+ JL(after32)
+ v = [GeneralPurposeRegister64() for _ in range(4)]
+ for i in range(4):
+ MOV(v[i], reg_hash)
+
+ with Loop() as loop:
+ update32(v[0], reg_ptr, 0, k0, v[2])
+ update32(v[1], reg_ptr, 8, k1, v[3])
+ update32(v[2], reg_ptr, 16, k2, v[0])
+ update32(v[3], reg_ptr, 24, k3, v[1])
+
+ ADD(reg_ptr, 32)
+ SUB(reg_ptr_len, 32)
+ CMP(reg_ptr_len, 32)
+ JGE(loop.begin)
+
+ final32(v, [2,0,3,1], [k0, k1])
+ final32(v, [3,1,2,0], [k1, k0])
+ final32(v, [0,0,2,3], [k0, k1])
+ final32(v, [1,1,3,2], [k1, k0])
+
+ XOR(v[0], v[1])
+ ADD(reg_hash, v[0])
+
+ LABEL(after32)
+
+ after16 = Label("after16")
+ CMP(reg_ptr_len, 16)
+ JL(after16)
+
+ for i in range(2):
+ MOV(v[i], [reg_ptr])
+ imul(v[i], k2)
+ ADD(v[i], reg_hash)
+
+ advance(reg_ptr, reg_ptr_len, 8)
+
+ ROR(v[i], 29)
+ imul(v[i], k3)
+
+ r = GeneralPurposeRegister64()
+ MOV(r, v[0])
+ imul(r, k0)
+ ROR(r, 21)
+ ADD(r, v[1])
+ XOR(v[0], r)
+
+ MOV(r, v[1])
+ imul(r, k3)
+ ROR(r, 21)
+ ADD(r, v[0])
+ XOR(v[1], r)
+
+ ADD(reg_hash, v[1])
+
+ LABEL(after16)
+
+ after8 = Label("after8")
+ CMP(reg_ptr_len, 8)
+ JL(after8)
+
+ r = GeneralPurposeRegister64()
+ MOV(r, [reg_ptr])
+ imul(r, k3)
+ ADD(reg_hash, r)
+ advance(reg_ptr, reg_ptr_len, 8)
+
+ MOV(r, reg_hash)
+ ROR(r, 55)
+ imul(r, k1)
+ XOR(reg_hash, r)
+
+ LABEL(after8)
+
+ after4 = Label("after4")
+ CMP(reg_ptr_len, 4)
+ JL(after4)
+
+ r = GeneralPurposeRegister64()
+ XOR(r, r)
+ MOV(r.as_dword, dword[reg_ptr])
+ imul(r, k3)
+ ADD(reg_hash, r)
+ advance(reg_ptr, reg_ptr_len, 4)
+
+ MOV(r, reg_hash)
+ ROR(r, 26)
+ imul(r, k1)
+ XOR(reg_hash, r)
+
+ LABEL(after4)
+
+ after2 = Label("after2")
+ CMP(reg_ptr_len, 2)
+ JL(after2)
+
+ r = GeneralPurposeRegister64()
+ XOR(r,r)
+ MOV(r.as_word, word[reg_ptr])
+ imul(r, k3)
+ ADD(reg_hash, r)
+ advance(reg_ptr, reg_ptr_len, 2)
+
+ MOV(r, reg_hash)
+ ROR(r, 48)
+ imul(r, k1)
+ XOR(reg_hash, r)
+
+ LABEL(after2)
+
+ after1 = Label("after1")
+ CMP(reg_ptr_len, 1)
+ JL(after1)
+
+ r = GeneralPurposeRegister64()
+ MOVZX(r, byte[reg_ptr])
+ imul(r, k3)
+ ADD(reg_hash, r)
+
+ MOV(r, reg_hash)
+ ROR(r, 37)
+ imul(r, k1)
+ XOR(reg_hash, r)
+
+ LABEL(after1)
+
+ r = GeneralPurposeRegister64()
+ MOV(r, reg_hash)
+ ROR(r, 28)
+ XOR(reg_hash, r)
+
+ imul(reg_hash, k0)
+
+ MOV(r, reg_hash)
+ ROR(r, 29)
+ XOR(reg_hash, r)
+
+ RETURN(reg_hash)
+
+makeHash("Hash64", (buffer_base, buffer_len, buffer_cap, seed))
+makeHash("Hash64Str", (buffer_base, buffer_len, seed))
\ No newline at end of file
diff --git a/vendor/github.com/dgryski/go-metro/metro128.go b/vendor/github.com/dgryski/go-metro/metro128.go
new file mode 100644
index 00000000..e8dd8ddb
--- /dev/null
+++ b/vendor/github.com/dgryski/go-metro/metro128.go
@@ -0,0 +1,94 @@
+package metro
+
+import "encoding/binary"
+
+func rotate_right(v uint64, k uint) uint64 {
+ return (v >> k) | (v << (64 - k))
+}
+
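+// Hash128 computes the 128-bit MetroHash of buffer with the given seed and
+// returns the result as two uint64 halves.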
+func Hash128(buffer []byte, seed uint64) (uint64, uint64) {
+
+ const (
+ k0 = 0xC83A91E1
+ k1 = 0x8648DBDB
+ k2 = 0x7BDEC03B
+ k3 = 0x2F5870A5
+ )
+
+ ptr := buffer
+
+ var v [4]uint64
+
+ v[0] = (seed - k0) * k3
+ v[1] = (seed + k1) * k2
+
+ if len(ptr) >= 32 {
+ v[2] = (seed + k0) * k2
+ v[3] = (seed - k1) * k3
+
+ for len(ptr) >= 32 {
+ v[0] += binary.LittleEndian.Uint64(ptr) * k0
+ ptr = ptr[8:]
+ v[0] = rotate_right(v[0], 29) + v[2]
+ v[1] += binary.LittleEndian.Uint64(ptr) * k1
+ ptr = ptr[8:]
+ v[1] = rotate_right(v[1], 29) + v[3]
+ v[2] += binary.LittleEndian.Uint64(ptr) * k2
+ ptr = ptr[8:]
+ v[2] = rotate_right(v[2], 29) + v[0]
+ v[3] += binary.LittleEndian.Uint64(ptr) * k3
+ ptr = ptr[8:]
+ v[3] = rotate_right(v[3], 29) + v[1]
+ }
+
+ v[2] ^= rotate_right(((v[0]+v[3])*k0)+v[1], 21) * k1
+ v[3] ^= rotate_right(((v[1]+v[2])*k1)+v[0], 21) * k0
+ v[0] ^= rotate_right(((v[0]+v[2])*k0)+v[3], 21) * k1
+ v[1] ^= rotate_right(((v[1]+v[3])*k1)+v[2], 21) * k0
+ }
+
+ if len(ptr) >= 16 {
+ v[0] += binary.LittleEndian.Uint64(ptr) * k2
+ ptr = ptr[8:]
+ v[0] = rotate_right(v[0], 33) * k3
+ v[1] += binary.LittleEndian.Uint64(ptr) * k2
+ ptr = ptr[8:]
+ v[1] = rotate_right(v[1], 33) * k3
+ v[0] ^= rotate_right((v[0]*k2)+v[1], 45) * k1
+ v[1] ^= rotate_right((v[1]*k3)+v[0], 45) * k0
+ }
+
+ if len(ptr) >= 8 {
+ v[0] += binary.LittleEndian.Uint64(ptr) * k2
+ ptr = ptr[8:]
+ v[0] = rotate_right(v[0], 33) * k3
+ v[0] ^= rotate_right((v[0]*k2)+v[1], 27) * k1
+ }
+
+ if len(ptr) >= 4 {
+ v[1] += uint64(binary.LittleEndian.Uint32(ptr)) * k2
+ ptr = ptr[4:]
+ v[1] = rotate_right(v[1], 33) * k3
+ v[1] ^= rotate_right((v[1]*k3)+v[0], 46) * k0
+ }
+
+ if len(ptr) >= 2 {
+ v[0] += uint64(binary.LittleEndian.Uint16(ptr)) * k2
+ ptr = ptr[2:]
+ v[0] = rotate_right(v[0], 33) * k3
+ v[0] ^= rotate_right((v[0]*k2)+v[1], 22) * k1
+ }
+
+ if len(ptr) >= 1 {
+ v[1] += uint64(ptr[0]) * k2
+ v[1] = rotate_right(v[1], 33) * k3
+ v[1] ^= rotate_right((v[1]*k3)+v[0], 58) * k0
+ }
+
+ v[0] += rotate_right((v[0]*k0)+v[1], 13)
+ v[1] += rotate_right((v[1]*k1)+v[0], 37)
+ v[0] += rotate_right((v[0]*k2)+v[1], 13)
+ v[1] += rotate_right((v[1]*k3)+v[0], 37)
+
+ return v[0], v[1]
+}
diff --git a/vendor/github.com/dgryski/go-metro/metro64.go b/vendor/github.com/dgryski/go-metro/metro64.go
new file mode 100644
index 00000000..7901ab6c
--- /dev/null
+++ b/vendor/github.com/dgryski/go-metro/metro64.go
@@ -0,0 +1,85 @@
+// +build noasm !amd64 gccgo
+
+package metro
+
+import "encoding/binary"
+
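+// Hash64 computes the 64-bit MetroHash of buffer with the given seed. This is
+// the pure Go implementation, selected by the build constraints above when the
+// amd64 assembly version is not used.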
+func Hash64(buffer []byte, seed uint64) uint64 {
+
+ const (
+ k0 = 0xD6D018F5
+ k1 = 0xA2AA033B
+ k2 = 0x62992FC1
+ k3 = 0x30BC5B29
+ )
+
+ ptr := buffer
+
+ hash := (seed + k2) * k0
+
+ if len(ptr) >= 32 {
+ v := [4]uint64{hash, hash, hash, hash}
+
+ for len(ptr) >= 32 {
+ v[0] += binary.LittleEndian.Uint64(ptr[:8]) * k0
+ v[0] = rotate_right(v[0], 29) + v[2]
+ v[1] += binary.LittleEndian.Uint64(ptr[8:16]) * k1
+ v[1] = rotate_right(v[1], 29) + v[3]
+ v[2] += binary.LittleEndian.Uint64(ptr[16:24]) * k2
+ v[2] = rotate_right(v[2], 29) + v[0]
+ v[3] += binary.LittleEndian.Uint64(ptr[24:32]) * k3
+ v[3] = rotate_right(v[3], 29) + v[1]
+ ptr = ptr[32:]
+ }
+
+ v[2] ^= rotate_right(((v[0]+v[3])*k0)+v[1], 37) * k1
+ v[3] ^= rotate_right(((v[1]+v[2])*k1)+v[0], 37) * k0
+ v[0] ^= rotate_right(((v[0]+v[2])*k0)+v[3], 37) * k1
+ v[1] ^= rotate_right(((v[1]+v[3])*k1)+v[2], 37) * k0
+ hash += v[0] ^ v[1]
+ }
+
+ if len(ptr) >= 16 {
+ v0 := hash + (binary.LittleEndian.Uint64(ptr[:8]) * k2)
+ v0 = rotate_right(v0, 29) * k3
+ v1 := hash + (binary.LittleEndian.Uint64(ptr[8:16]) * k2)
+ v1 = rotate_right(v1, 29) * k3
+ v0 ^= rotate_right(v0*k0, 21) + v1
+ v1 ^= rotate_right(v1*k3, 21) + v0
+ hash += v1
+ ptr = ptr[16:]
+ }
+
+ if len(ptr) >= 8 {
+ hash += binary.LittleEndian.Uint64(ptr[:8]) * k3
+ ptr = ptr[8:]
+ hash ^= rotate_right(hash, 55) * k1
+ }
+
+ if len(ptr) >= 4 {
+ hash += uint64(binary.LittleEndian.Uint32(ptr[:4])) * k3
+ hash ^= rotate_right(hash, 26) * k1
+ ptr = ptr[4:]
+ }
+
+ if len(ptr) >= 2 {
+ hash += uint64(binary.LittleEndian.Uint16(ptr[:2])) * k3
+ ptr = ptr[2:]
+ hash ^= rotate_right(hash, 48) * k1
+ }
+
+ if len(ptr) >= 1 {
+ hash += uint64(ptr[0]) * k3
+ hash ^= rotate_right(hash, 37) * k1
+ }
+
+ hash ^= rotate_right(hash, 28)
+ hash *= k0
+ hash ^= rotate_right(hash, 29)
+
+ return hash
+}
+
+func Hash64Str(buffer string, seed uint64) uint64 {
+ return Hash64([]byte(buffer), seed)
+}
diff --git a/vendor/github.com/dgryski/go-metro/metro_amd64.s b/vendor/github.com/dgryski/go-metro/metro_amd64.s
new file mode 100644
index 00000000..7fa4730a
--- /dev/null
+++ b/vendor/github.com/dgryski/go-metro/metro_amd64.s
@@ -0,0 +1,372 @@
+// +build !noasm
+// +build !gccgo
+
+// Generated by PeachPy 0.2.0 from metro.py
+
+// func Hash64(buffer_base uintptr, buffer_len int64, buffer_cap int64, seed uint64) uint64
+TEXT ·Hash64(SB),4,$0-40
+ MOVQ seed+24(FP), AX
+ MOVQ buffer_base+0(FP), BX
+ MOVQ buffer_len+8(FP), CX
+ MOVQ $3603962101, DX
+ IMULQ DX, AX
+ MOVQ $5961697176435608501, DX
+ ADDQ DX, AX
+ CMPQ CX, $32
+ JLT after32
+ MOVQ AX, DX
+ MOVQ AX, DI
+ MOVQ AX, SI
+ MOVQ AX, BP
+loop_begin:
+ MOVQ 0(BX), R8
+ MOVQ $3603962101, R9
+ IMULQ R9, R8
+ ADDQ R8, DX
+ RORQ $29, DX
+ ADDQ SI, DX
+ MOVQ 8(BX), R8
+ MOVQ $2729050939, R9
+ IMULQ R9, R8
+ ADDQ R8, DI
+ RORQ $29, DI
+ ADDQ BP, DI
+ MOVQ 16(BX), R8
+ MOVQ $1654206401, R9
+ IMULQ R9, R8
+ ADDQ R8, SI
+ RORQ $29, SI
+ ADDQ DX, SI
+ MOVQ 24(BX), R8
+ MOVQ $817650473, R9
+ IMULQ R9, R8
+ ADDQ R8, BP
+ RORQ $29, BP
+ ADDQ DI, BP
+ ADDQ $32, BX
+ SUBQ $32, CX
+ CMPQ CX, $32
+ JGE loop_begin
+ MOVQ DX, R8
+ ADDQ BP, R8
+ MOVQ $3603962101, R9
+ IMULQ R9, R8
+ ADDQ DI, R8
+ RORQ $37, R8
+ MOVQ $2729050939, R9
+ IMULQ R9, R8
+ XORQ R8, SI
+ MOVQ DI, R8
+ ADDQ SI, R8
+ MOVQ $2729050939, R9
+ IMULQ R9, R8
+ ADDQ DX, R8
+ RORQ $37, R8
+ MOVQ $3603962101, R9
+ IMULQ R9, R8
+ XORQ R8, BP
+ MOVQ DX, R8
+ ADDQ SI, R8
+ MOVQ $3603962101, R9
+ IMULQ R9, R8
+ ADDQ BP, R8
+ RORQ $37, R8
+ MOVQ $2729050939, R9
+ IMULQ R9, R8
+ XORQ R8, DX
+ MOVQ DI, R8
+ ADDQ BP, R8
+ MOVQ $2729050939, BP
+ IMULQ BP, R8
+ ADDQ SI, R8
+ RORQ $37, R8
+ MOVQ $3603962101, SI
+ IMULQ SI, R8
+ XORQ R8, DI
+ XORQ DI, DX
+ ADDQ DX, AX
+after32:
+ CMPQ CX, $16
+ JLT after16
+ MOVQ 0(BX), DX
+ MOVQ $1654206401, DI
+ IMULQ DI, DX
+ ADDQ AX, DX
+ ADDQ $8, BX
+ SUBQ $8, CX
+ RORQ $29, DX
+ MOVQ $817650473, DI
+ IMULQ DI, DX
+ MOVQ 0(BX), DI
+ MOVQ $1654206401, SI
+ IMULQ SI, DI
+ ADDQ AX, DI
+ ADDQ $8, BX
+ SUBQ $8, CX
+ RORQ $29, DI
+ MOVQ $817650473, SI
+ IMULQ SI, DI
+ MOVQ DX, SI
+ MOVQ $3603962101, BP
+ IMULQ BP, SI
+ RORQ $21, SI
+ ADDQ DI, SI
+ XORQ SI, DX
+ MOVQ DI, SI
+ MOVQ $817650473, BP
+ IMULQ BP, SI
+ RORQ $21, SI
+ ADDQ DX, SI
+ XORQ SI, DI
+ ADDQ DI, AX
+after16:
+ CMPQ CX, $8
+ JLT after8
+ MOVQ 0(BX), DX
+ MOVQ $817650473, DI
+ IMULQ DI, DX
+ ADDQ DX, AX
+ ADDQ $8, BX
+ SUBQ $8, CX
+ MOVQ AX, DX
+ RORQ $55, DX
+ MOVQ $2729050939, DI
+ IMULQ DI, DX
+ XORQ DX, AX
+after8:
+ CMPQ CX, $4
+ JLT after4
+ XORQ DX, DX
+ MOVL 0(BX), DX
+ MOVQ $817650473, DI
+ IMULQ DI, DX
+ ADDQ DX, AX
+ ADDQ $4, BX
+ SUBQ $4, CX
+ MOVQ AX, DX
+ RORQ $26, DX
+ MOVQ $2729050939, DI
+ IMULQ DI, DX
+ XORQ DX, AX
+after4:
+ CMPQ CX, $2
+ JLT after2
+ XORQ DX, DX
+ MOVW 0(BX), DX
+ MOVQ $817650473, DI
+ IMULQ DI, DX
+ ADDQ DX, AX
+ ADDQ $2, BX
+ SUBQ $2, CX
+ MOVQ AX, DX
+ RORQ $48, DX
+ MOVQ $2729050939, DI
+ IMULQ DI, DX
+ XORQ DX, AX
+after2:
+ CMPQ CX, $1
+ JLT after1
+ MOVBQZX 0(BX), BX
+ MOVQ $817650473, CX
+ IMULQ CX, BX
+ ADDQ BX, AX
+ MOVQ AX, BX
+ RORQ $37, BX
+ MOVQ $2729050939, CX
+ IMULQ CX, BX
+ XORQ BX, AX
+after1:
+ MOVQ AX, BX
+ RORQ $28, BX
+ XORQ BX, AX
+ MOVQ $3603962101, BX
+ IMULQ BX, AX
+ MOVQ AX, BX
+ RORQ $29, BX
+ XORQ BX, AX
+ MOVQ AX, ret+32(FP)
+ RET
+
+// func Hash64Str(buffer_base uintptr, buffer_len int64, seed uint64) uint64
+TEXT ·Hash64Str(SB),4,$0-32
+ MOVQ seed+16(FP), AX
+ MOVQ buffer_base+0(FP), BX
+ MOVQ buffer_len+8(FP), CX
+ MOVQ $3603962101, DX
+ IMULQ DX, AX
+ MOVQ $5961697176435608501, DX
+ ADDQ DX, AX
+ CMPQ CX, $32
+ JLT after32
+ MOVQ AX, DX
+ MOVQ AX, DI
+ MOVQ AX, SI
+ MOVQ AX, BP
+loop_begin:
+ MOVQ 0(BX), R8
+ MOVQ $3603962101, R9
+ IMULQ R9, R8
+ ADDQ R8, DX
+ RORQ $29, DX
+ ADDQ SI, DX
+ MOVQ 8(BX), R8
+ MOVQ $2729050939, R9
+ IMULQ R9, R8
+ ADDQ R8, DI
+ RORQ $29, DI
+ ADDQ BP, DI
+ MOVQ 16(BX), R8
+ MOVQ $1654206401, R9
+ IMULQ R9, R8
+ ADDQ R8, SI
+ RORQ $29, SI
+ ADDQ DX, SI
+ MOVQ 24(BX), R8
+ MOVQ $817650473, R9
+ IMULQ R9, R8
+ ADDQ R8, BP
+ RORQ $29, BP
+ ADDQ DI, BP
+ ADDQ $32, BX
+ SUBQ $32, CX
+ CMPQ CX, $32
+ JGE loop_begin
+ MOVQ DX, R8
+ ADDQ BP, R8
+ MOVQ $3603962101, R9
+ IMULQ R9, R8
+ ADDQ DI, R8
+ RORQ $37, R8
+ MOVQ $2729050939, R9
+ IMULQ R9, R8
+ XORQ R8, SI
+ MOVQ DI, R8
+ ADDQ SI, R8
+ MOVQ $2729050939, R9
+ IMULQ R9, R8
+ ADDQ DX, R8
+ RORQ $37, R8
+ MOVQ $3603962101, R9
+ IMULQ R9, R8
+ XORQ R8, BP
+ MOVQ DX, R8
+ ADDQ SI, R8
+ MOVQ $3603962101, R9
+ IMULQ R9, R8
+ ADDQ BP, R8
+ RORQ $37, R8
+ MOVQ $2729050939, R9
+ IMULQ R9, R8
+ XORQ R8, DX
+ MOVQ DI, R8
+ ADDQ BP, R8
+ MOVQ $2729050939, BP
+ IMULQ BP, R8
+ ADDQ SI, R8
+ RORQ $37, R8
+ MOVQ $3603962101, SI
+ IMULQ SI, R8
+ XORQ R8, DI
+ XORQ DI, DX
+ ADDQ DX, AX
+after32:
+ CMPQ CX, $16
+ JLT after16
+ MOVQ 0(BX), DX
+ MOVQ $1654206401, DI
+ IMULQ DI, DX
+ ADDQ AX, DX
+ ADDQ $8, BX
+ SUBQ $8, CX
+ RORQ $29, DX
+ MOVQ $817650473, DI
+ IMULQ DI, DX
+ MOVQ 0(BX), DI
+ MOVQ $1654206401, SI
+ IMULQ SI, DI
+ ADDQ AX, DI
+ ADDQ $8, BX
+ SUBQ $8, CX
+ RORQ $29, DI
+ MOVQ $817650473, SI
+ IMULQ SI, DI
+ MOVQ DX, SI
+ MOVQ $3603962101, BP
+ IMULQ BP, SI
+ RORQ $21, SI
+ ADDQ DI, SI
+ XORQ SI, DX
+ MOVQ DI, SI
+ MOVQ $817650473, BP
+ IMULQ BP, SI
+ RORQ $21, SI
+ ADDQ DX, SI
+ XORQ SI, DI
+ ADDQ DI, AX
+after16:
+ CMPQ CX, $8
+ JLT after8
+ MOVQ 0(BX), DX
+ MOVQ $817650473, DI
+ IMULQ DI, DX
+ ADDQ DX, AX
+ ADDQ $8, BX
+ SUBQ $8, CX
+ MOVQ AX, DX
+ RORQ $55, DX
+ MOVQ $2729050939, DI
+ IMULQ DI, DX
+ XORQ DX, AX
+after8:
+ CMPQ CX, $4
+ JLT after4
+ XORQ DX, DX
+ MOVL 0(BX), DX
+ MOVQ $817650473, DI
+ IMULQ DI, DX
+ ADDQ DX, AX
+ ADDQ $4, BX
+ SUBQ $4, CX
+ MOVQ AX, DX
+ RORQ $26, DX
+ MOVQ $2729050939, DI
+ IMULQ DI, DX
+ XORQ DX, AX
+after4:
+ CMPQ CX, $2
+ JLT after2
+ XORQ DX, DX
+ MOVW 0(BX), DX
+ MOVQ $817650473, DI
+ IMULQ DI, DX
+ ADDQ DX, AX
+ ADDQ $2, BX
+ SUBQ $2, CX
+ MOVQ AX, DX
+ RORQ $48, DX
+ MOVQ $2729050939, DI
+ IMULQ DI, DX
+ XORQ DX, AX
+after2:
+ CMPQ CX, $1
+ JLT after1
+ MOVBQZX 0(BX), BX
+ MOVQ $817650473, CX
+ IMULQ CX, BX
+ ADDQ BX, AX
+ MOVQ AX, BX
+ RORQ $37, BX
+ MOVQ $2729050939, CX
+ IMULQ CX, BX
+ XORQ BX, AX
+after1:
+ MOVQ AX, BX
+ RORQ $28, BX
+ XORQ BX, AX
+ MOVQ $3603962101, BX
+ IMULQ BX, AX
+ MOVQ AX, BX
+ RORQ $29, BX
+ XORQ BX, AX
+ MOVQ AX, ret+24(FP)
+ RET
diff --git a/vendor/github.com/dgryski/go-metro/metro_stub.go b/vendor/github.com/dgryski/go-metro/metro_stub.go
new file mode 100644
index 00000000..86ddcb47
--- /dev/null
+++ b/vendor/github.com/dgryski/go-metro/metro_stub.go
@@ -0,0 +1,10 @@
+// +build !noasm,amd64
+// +build !gccgo
+
+package metro
+
+//go:generate python -m peachpy.x86_64 metro.py -S -o metro_amd64.s -mabi=goasm
+//go:noescape
+
+func Hash64(buffer []byte, seed uint64) uint64
+func Hash64Str(buffer string, seed uint64) uint64
diff --git a/vendor/github.com/go-chi/chi/.gitignore b/vendor/github.com/go-chi/chi/.gitignore
new file mode 100644
index 00000000..ba22c99a
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/.gitignore
@@ -0,0 +1,3 @@
+.idea
+*.sw?
+.vscode
diff --git a/vendor/github.com/go-chi/chi/.travis.yml b/vendor/github.com/go-chi/chi/.travis.yml
new file mode 100644
index 00000000..7b8e26bc
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/.travis.yml
@@ -0,0 +1,20 @@
+language: go
+
+go:
+ - 1.10.x
+ - 1.11.x
+ - 1.12.x
+ - 1.13.x
+ - 1.14.x
+
+script:
+ - go get -d -t ./...
+ - go vet ./...
+ - go test ./...
+ - >
+ go_version=$(go version);
+ if [ ${go_version:13:4} = "1.12" ]; then
+ go get -u golang.org/x/tools/cmd/goimports;
+ goimports -d -e ./ | grep '.*' && { echo; echo "Aborting due to non-empty goimports output."; exit 1; } || :;
+ fi
+
diff --git a/vendor/github.com/go-chi/chi/CHANGELOG.md b/vendor/github.com/go-chi/chi/CHANGELOG.md
new file mode 100644
index 00000000..9a64a72e
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/CHANGELOG.md
@@ -0,0 +1,190 @@
+# Changelog
+
+## v4.1.2 (2020-06-02)
+
+- fix handling of MethodNotAllowed with path variables, thank you @caseyhadden for your contribution
+- fix to replace nested wildcards correctly in RoutePattern, thank you @unmultimedio for your contribution
+- History of changes: see https://github.com/go-chi/chi/compare/v4.1.1...v4.1.2
+
+
+## v4.1.1 (2020-04-16)
+
+- fix for issue https://github.com/go-chi/chi/issues/411 which allows for overlapping regexp
+ route to the correct handler through a recursive tree search, thanks to @Jahaja for the PR/fix!
+- new middleware.RouteHeaders as a simple router for request headers with wildcard support
+- History of changes: see https://github.com/go-chi/chi/compare/v4.1.0...v4.1.1
+
+
+## v4.1.0 (2020-04-01)
+
+- middleware.LogEntry: Write method on interface now passes the response header
+ and an extra interface type useful for custom logger implementations.
+- middleware.WrapResponseWriter: minor fix
+- middleware.Recoverer: a bit prettier
+- History of changes: see https://github.com/go-chi/chi/compare/v4.0.4...v4.1.0
+
+
+## v4.0.4 (2020-03-24)
+
+- middleware.Recoverer: new pretty stack trace printing (https://github.com/go-chi/chi/pull/496)
+- a few minor improvements and fixes
+- History of changes: see https://github.com/go-chi/chi/compare/v4.0.3...v4.0.4
+
+
+## v4.0.3 (2020-01-09)
+
+- core: fix regexp routing to include default value when param is not matched
+- middleware: rewrite of middleware.Compress
+- middleware: suppress http.ErrAbortHandler in middleware.Recoverer
+- History of changes: see https://github.com/go-chi/chi/compare/v4.0.2...v4.0.3
+
+
+## v4.0.2 (2019-02-26)
+
+- Minor fixes
+- History of changes: see https://github.com/go-chi/chi/compare/v4.0.1...v4.0.2
+
+
+## v4.0.1 (2019-01-21)
+
+- Fixes issue with compress middleware: #382 #385
+- History of changes: see https://github.com/go-chi/chi/compare/v4.0.0...v4.0.1
+
+
+## v4.0.0 (2019-01-10)
+
+- chi v4 requires Go 1.10.3+ (or Go 1.9.7+) - we have deprecated support for Go 1.7 and 1.8
+- router: respond with 404 on router with no routes (#362)
+- router: additional check to ensure wildcard is at the end of a url pattern (#333)
+- middleware: deprecate use of http.CloseNotifier (#347)
+- middleware: fix RedirectSlashes to include query params on redirect (#334)
+- History of changes: see https://github.com/go-chi/chi/compare/v3.3.4...v4.0.0
+
+
+## v3.3.4 (2019-01-07)
+
+- Minor middleware improvements. No changes to core library/router. Moving v3 into its
+  own branch as a version of chi for Go 1.7, 1.8, 1.9, 1.10, 1.11
+- History of changes: see https://github.com/go-chi/chi/compare/v3.3.3...v3.3.4
+
+
+## v3.3.3 (2018-08-27)
+
+- Minor release
+- See https://github.com/go-chi/chi/compare/v3.3.2...v3.3.3
+
+
+## v3.3.2 (2017-12-22)
+
+- Support to route trailing slashes on mounted sub-routers (#281)
+- middleware: new `ContentCharset` to check matching charsets. Thank you
+ @csucu for your community contribution!
+
+
+## v3.3.1 (2017-11-20)
+
+- middleware: new `AllowContentType` handler for explicit whitelist of accepted request Content-Types
+- middleware: new `SetHeader` handler for short-hand middleware to set a response header key/value
+- Minor bug fixes
+
+
+## v3.3.0 (2017-10-10)
+
+- New chi.RegisterMethod(method) to add support for custom HTTP methods, see _examples/custom-method for usage
+- Deprecated LINK and UNLINK methods from the default list, please use `chi.RegisterMethod("LINK")` and `chi.RegisterMethod("UNLINK")` in an `init()` function
+
+
+## v3.2.1 (2017-08-31)
+
+- Add new `Match(rctx *Context, method, path string) bool` method to `Routes` interface
+ and `Mux`. Match searches the mux's routing tree for a handler that matches the method/path
+- Add new `RouteMethod` to `*Context`
+- Add new `Routes` pointer to `*Context`
+- Add new `middleware.GetHead` to route missing HEAD requests to GET handler
+- Updated benchmarks (see README)
+
+
+## v3.1.5 (2017-08-02)
+
+- Setup golint and go vet for the project
+- As per golint, we've redefined `func ServerBaseContext(h http.Handler, baseCtx context.Context) http.Handler`
+ to `func ServerBaseContext(baseCtx context.Context, h http.Handler) http.Handler`
+
+
+## v3.1.0 (2017-07-10)
+
+- Fix a few minor issues after v3 release
+- Move `docgen` sub-pkg to https://github.com/go-chi/docgen
+- Move `render` sub-pkg to https://github.com/go-chi/render
+- Add new `URLFormat` handler to chi/middleware sub-pkg to make working with url mime
+ suffixes easier, ie. parsing `/articles/1.json` and `/articles/1.xml`. See comments in
+ https://github.com/go-chi/chi/blob/master/middleware/url_format.go for example usage.
+
+
+## v3.0.0 (2017-06-21)
+
+- Major update to chi library with many exciting updates, but also some *breaking changes*
+- URL parameter syntax changed from `/:id` to `/{id}` for even more flexible routing, such as
+ `/articles/{month}-{day}-{year}-{slug}`, `/articles/{id}`, and `/articles/{id}.{ext}` on the
+ same router
+- Support for regexp for routing patterns, in the form of `/{paramKey:regExp}` for example:
+ `r.Get("/articles/{name:[a-z]+}", h)` and `chi.URLParam(r, "name")`
+- Add `Method` and `MethodFunc` to `chi.Router` to allow routing definitions such as
+ `r.Method("GET", "/", h)` which provides a cleaner interface for custom handlers like
+ in `_examples/custom-handler`
+- Deprecating `mux#FileServer` helper function. Instead, we encourage users to create their
+  own file handler using the stdlib; see `_examples/fileserver` for an example
+- Add support for LINK/UNLINK http methods via `r.Method()` and `r.MethodFunc()`
+- Moved the chi project to its own organization, to allow chi-related community packages to
+ be easily discovered and supported, at: https://github.com/go-chi
+- *NOTE:* please update your import paths to `"github.com/go-chi/chi"`
+- *NOTE:* chi v2 is still available at https://github.com/go-chi/chi/tree/v2
+
+
+## v2.1.0 (2017-03-30)
+
+- Minor improvements and update to the chi core library
+- Introduced a brand new `chi/render` sub-package to complete the story of building
+ APIs to offer a pattern for managing well-defined request / response payloads. Please
+ check out the updated `_examples/rest` example for how it works.
+- Added `MethodNotAllowed(h http.HandlerFunc)` to chi.Router interface
+
+
+## v2.0.0 (2017-01-06)
+
+- After many months of v2 being in an RC state, with many companies and users running it in
+  production, and with the inclusion of some improvements to the middlewares, we are very
+  pleased to announce v2.0.0 of chi.
+
+
+## v2.0.0-rc1 (2016-07-26)
+
+- Huge update! chi v2 is a large refactor targeting Go 1.7+. As of Go 1.7, the popular
+  community `"net/context"` package has been included in the standard library as `"context"` and
+  is utilized by `"net/http"` and `http.Request` to manage deadlines, cancelation signals and other
+  request-scoped values. We're very excited about the new context addition and are proud to
+  introduce chi v2, a minimal and powerful routing package for building large HTTP services,
+  with zero external dependencies. Chi focuses on idiomatic design and encourages the use of
+  stdlib HTTP handlers and middlewares.
+- chi v2 deprecates its `chi.Handler` interface and requires `http.Handler` or `http.HandlerFunc`
+- chi v2 stores URL routing parameters and patterns in the standard request context: `r.Context()`
+- chi v2 lower-level routing context is accessible by `chi.RouteContext(r.Context()) *chi.Context`,
+ which provides direct access to URL routing parameters, the routing path and the matching
+ routing patterns.
+- Users upgrading from chi v1 to v2 need to:
+ 1. Update the old chi.Handler signature, `func(ctx context.Context, w http.ResponseWriter, r *http.Request)` to
+ the standard http.Handler: `func(w http.ResponseWriter, r *http.Request)`
+ 2. Use `chi.URLParam(r *http.Request, paramKey string) string`
+ or `URLParamFromCtx(ctx context.Context, paramKey string) string` to access a url parameter value
+
+
+## v1.0.0 (2016-07-01)
+
+- Released chi v1 stable https://github.com/go-chi/chi/tree/v1.0.0 for Go 1.6 and older.
+
+
+## v0.9.0 (2016-03-31)
+
+- Reuse context objects via sync.Pool for zero-allocation routing [#33](https://github.com/go-chi/chi/pull/33)
+- BREAKING NOTE: due to subtle API changes, `chi.URLParams(ctx)["id"]`, previously used to access
+  url parameters, has changed to `chi.URLParam(ctx, "id")`
diff --git a/vendor/github.com/go-chi/chi/CONTRIBUTING.md b/vendor/github.com/go-chi/chi/CONTRIBUTING.md
new file mode 100644
index 00000000..c0ac2dfe
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+# Contributing
+
+## Prerequisites
+
+1. [Install Go][go-install].
+2. Download the sources and switch the working directory:
+
+ ```bash
+ go get -u -d github.com/go-chi/chi
+ cd $GOPATH/src/github.com/go-chi/chi
+ ```
+
+## Submitting a Pull Request
+
+A typical workflow is:
+
+1. [Fork the repository.][fork] [This tip may also be helpful.][go-fork-tip]
+2. [Create a topic branch.][branch]
+3. Add tests for your change.
+4. Run `go test`. If your tests pass, return to step 3.
+5. Implement the change and ensure the tests from the previous step pass.
+6. Run `goimports -w .` to ensure the new code conforms to the Go formatting guidelines.
+7. [Add, commit and push your changes.][git-help]
+8. [Submit a pull request.][pull-req]
+
+[go-install]: https://golang.org/doc/install
+[go-fork-tip]: http://blog.campoy.cat/2014/03/github-and-go-forking-pull-requests-and.html
+[fork]: https://help.github.com/articles/fork-a-repo
+[branch]: http://learn.github.com/p/branching.html
+[git-help]: https://guides.github.com
+[pull-req]: https://help.github.com/articles/using-pull-requests
diff --git a/vendor/github.com/go-chi/chi/LICENSE b/vendor/github.com/go-chi/chi/LICENSE
new file mode 100644
index 00000000..d99f02ff
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2015-present Peter Kieltyka (https://github.com/pkieltyka), Google Inc.
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/go-chi/chi/README.md b/vendor/github.com/go-chi/chi/README.md
new file mode 100644
index 00000000..5a8fc9d0
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/README.md
@@ -0,0 +1,441 @@
+# chi
+
+
+[![GoDoc Widget]][GoDoc] [![Travis Widget]][Travis]
+
+`chi` is a lightweight, idiomatic and composable router for building Go HTTP services. It's
+especially good at helping you write large REST API services that are kept maintainable as your
+project grows and changes. `chi` is built on the new `context` package introduced in Go 1.7 to
+handle signaling, cancelation and request-scoped values across a handler chain.
+
+The focus of the project has been to seek out an elegant and comfortable design for writing
+REST API servers. It was written during the development of the Pressly API service, which powers
+our public API and, in turn, all of our client-side applications.
+
+The key considerations of chi's design are: project structure, maintainability, standard http
+handlers (stdlib-only), developer productivity, and deconstructing a large system into many small
+parts. The core router `github.com/go-chi/chi` is quite small (less than 1000 LOC), but we've also
+included some useful/optional subpackages: [middleware](/middleware), [render](https://github.com/go-chi/render) and [docgen](https://github.com/go-chi/docgen). We hope you enjoy it too!
+
+## Install
+
+`go get -u github.com/go-chi/chi`
+
+
+## Features
+
+* **Lightweight** - cloc'd in ~1000 LOC for the chi router
+* **Fast** - yes, see [benchmarks](#benchmarks)
+* **100% compatible with net/http** - use any http or middleware pkg in the ecosystem that is also compatible with `net/http`
+* **Designed for modular/composable APIs** - middlewares, inline middlewares, route groups and subrouter mounting
+* **Context control** - built on new `context` package, providing value chaining, cancellations and timeouts
+* **Robust** - in production at Pressly, CloudFlare, Heroku, 99Designs, and many others (see [discussion](https://github.com/go-chi/chi/issues/91))
+* **Doc generation** - `docgen` auto-generates routing documentation from your source to JSON or Markdown
+* **No external dependencies** - plain ol' Go stdlib + net/http
+
+
+## Examples
+
+See [_examples/](https://github.com/go-chi/chi/blob/master/_examples/) for a variety of examples.
+
+
+**As easy as:**
+
+```go
+package main
+
+import (
+ "net/http"
+
+ "github.com/go-chi/chi"
+ "github.com/go-chi/chi/middleware"
+)
+
+func main() {
+ r := chi.NewRouter()
+ r.Use(middleware.Logger)
+ r.Get("/", func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte("welcome"))
+ })
+ http.ListenAndServe(":3000", r)
+}
+```
+
+**REST Preview:**
+
+Here is a little preview of what routing looks like with chi. Also take a look at the generated routing docs
+in JSON ([routes.json](https://github.com/go-chi/chi/blob/master/_examples/rest/routes.json)) and in
+Markdown ([routes.md](https://github.com/go-chi/chi/blob/master/_examples/rest/routes.md)).
+
+I highly recommend reading the source of the [examples](https://github.com/go-chi/chi/blob/master/_examples/) listed
+above; they will show you all the features of chi and serve as a good form of documentation.
+
+```go
+import (
+ //...
+ "context"
+ "github.com/go-chi/chi"
+ "github.com/go-chi/chi/middleware"
+)
+
+func main() {
+ r := chi.NewRouter()
+
+ // A good base middleware stack
+ r.Use(middleware.RequestID)
+ r.Use(middleware.RealIP)
+ r.Use(middleware.Logger)
+ r.Use(middleware.Recoverer)
+
+ // Set a timeout value on the request context (ctx), that will signal
+ // through ctx.Done() that the request has timed out and further
+ // processing should be stopped.
+ r.Use(middleware.Timeout(60 * time.Second))
+
+ r.Get("/", func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte("hi"))
+ })
+
+ // RESTy routes for "articles" resource
+ r.Route("/articles", func(r chi.Router) {
+ r.With(paginate).Get("/", listArticles) // GET /articles
+ r.With(paginate).Get("/{month}-{day}-{year}", listArticlesByDate) // GET /articles/01-16-2017
+
+ r.Post("/", createArticle) // POST /articles
+ r.Get("/search", searchArticles) // GET /articles/search
+
+ // Regexp url parameters:
+ r.Get("/{articleSlug:[a-z-]+}", getArticleBySlug) // GET /articles/home-is-toronto
+
+ // Subrouters:
+ r.Route("/{articleID}", func(r chi.Router) {
+ r.Use(ArticleCtx)
+ r.Get("/", getArticle) // GET /articles/123
+ r.Put("/", updateArticle) // PUT /articles/123
+ r.Delete("/", deleteArticle) // DELETE /articles/123
+ })
+ })
+
+ // Mount the admin sub-router
+ r.Mount("/admin", adminRouter())
+
+ http.ListenAndServe(":3333", r)
+}
+
+func ArticleCtx(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ articleID := chi.URLParam(r, "articleID")
+ article, err := dbGetArticle(articleID)
+ if err != nil {
+ http.Error(w, http.StatusText(404), 404)
+ return
+ }
+ ctx := context.WithValue(r.Context(), "article", article)
+ next.ServeHTTP(w, r.WithContext(ctx))
+ })
+}
+
+func getArticle(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ article, ok := ctx.Value("article").(*Article)
+ if !ok {
+ http.Error(w, http.StatusText(422), 422)
+ return
+ }
+ w.Write([]byte(fmt.Sprintf("title:%s", article.Title)))
+}
+
+// A completely separate router for administrator routes
+func adminRouter() http.Handler {
+ r := chi.NewRouter()
+ r.Use(AdminOnly)
+ r.Get("/", adminIndex)
+ r.Get("/accounts", adminListAccounts)
+ return r
+}
+
+func AdminOnly(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ perm, ok := ctx.Value("acl.permission").(YourPermissionType)
+ if !ok || !perm.IsAdmin() {
+ http.Error(w, http.StatusText(403), 403)
+ return
+ }
+ next.ServeHTTP(w, r)
+ })
+}
+```
+
+
+## Router design
+
+chi's router is based on a kind of [Patricia Radix trie](https://en.wikipedia.org/wiki/Radix_tree).
+The router is fully compatible with `net/http`.
+
+Built on top of the tree is the `Router` interface:
+
+```go
+// Router consisting of the core routing methods used by chi's Mux,
+// using only the standard net/http.
+type Router interface {
+ http.Handler
+ Routes
+
+ // Use appends one or more middlewares onto the Router stack.
+ Use(middlewares ...func(http.Handler) http.Handler)
+
+ // With adds inline middlewares for an endpoint handler.
+ With(middlewares ...func(http.Handler) http.Handler) Router
+
+ // Group adds a new inline-Router along the current routing
+ // path, with a fresh middleware stack for the inline-Router.
+ Group(fn func(r Router)) Router
+
+ // Route mounts a sub-Router along a `pattern` string.
+ Route(pattern string, fn func(r Router)) Router
+
+ // Mount attaches another http.Handler along ./pattern/*
+ Mount(pattern string, h http.Handler)
+
+ // Handle and HandleFunc adds routes for `pattern` that matches
+ // all HTTP methods.
+ Handle(pattern string, h http.Handler)
+ HandleFunc(pattern string, h http.HandlerFunc)
+
+ // Method and MethodFunc adds routes for `pattern` that matches
+ // the `method` HTTP method.
+ Method(method, pattern string, h http.Handler)
+ MethodFunc(method, pattern string, h http.HandlerFunc)
+
+ // HTTP-method routing along `pattern`
+ Connect(pattern string, h http.HandlerFunc)
+ Delete(pattern string, h http.HandlerFunc)
+ Get(pattern string, h http.HandlerFunc)
+ Head(pattern string, h http.HandlerFunc)
+ Options(pattern string, h http.HandlerFunc)
+ Patch(pattern string, h http.HandlerFunc)
+ Post(pattern string, h http.HandlerFunc)
+ Put(pattern string, h http.HandlerFunc)
+ Trace(pattern string, h http.HandlerFunc)
+
+ // NotFound defines a handler to respond whenever a route could
+ // not be found.
+ NotFound(h http.HandlerFunc)
+
+ // MethodNotAllowed defines a handler to respond whenever a method is
+ // not allowed.
+ MethodNotAllowed(h http.HandlerFunc)
+}
+
+// The Routes interface adds methods for router traversal, which are also
+// used by the github.com/go-chi/docgen package to generate documentation for Routers.
+type Routes interface {
+ // Routes returns the routing tree in an easily traversable structure.
+ Routes() []Route
+
+ // Middlewares returns the list of middlewares in use by the router.
+ Middlewares() Middlewares
+
+ // Match searches the routing tree for a handler that matches
+ // the method/path - similar to routing a http request, but without
+ // executing the handler thereafter.
+ Match(rctx *Context, method, path string) bool
+}
+```
+
+Each routing method accepts a URL `pattern` and a chain of `handlers`. The URL pattern
+supports named params (e.g. `/users/{userID}`) and wildcards (e.g. `/admin/*`). URL parameters
+can be fetched at runtime by calling `chi.URLParam(r, "userID")` for named parameters
+and `chi.URLParam(r, "*")` for a wildcard parameter.
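+
+For example, a route combining a named param and a wildcard, and reading both back in the handler (a sketch; the path and handler are made up for illustration):
+
+```go
+r.Get("/users/{userID}/files/*", func(w http.ResponseWriter, r *http.Request) {
+ userID := chi.URLParam(r, "userID") // e.g. "123" for /users/123/files/avatar.png
+ rest := chi.URLParam(r, "*") // e.g. "avatar.png"
+ w.Write([]byte(fmt.Sprintf("user:%s file:%s", userID, rest)))
+})
+```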
+
+
+### Middleware handlers
+
+chi's middlewares are just stdlib net/http middleware handlers. There is nothing special
+about them, which means the router and all the tooling are designed to be compatible and
+friendly with any middleware in the community. This offers much better extensibility and reuse
+of packages and is at the heart of chi's purpose.
+
+Here is an example of a standard net/http middleware handler using the new request context
+available in Go. This middleware sets a hypothetical user identifier on the request
+context and calls the next handler in the chain.
+
+```go
+// HTTP middleware setting a value on the request context
+func MyMiddleware(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ctx := context.WithValue(r.Context(), "user", "123")
+ next.ServeHTTP(w, r.WithContext(ctx))
+ })
+}
+```
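+
+To attach the middleware, pass it to `Use` on a router (a minimal usage sketch):
+
+```go
+r := chi.NewRouter()
+r.Use(MyMiddleware)
+```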
+
+
+### Request handlers
+
+chi uses standard net/http request handlers. This little snippet is an example of an http.Handler
+func that reads a user identifier from the request context - hypothetically, identifying
+the user sending an authenticated request, validated and set by a previous middleware handler.
+
+```go
+// HTTP handler accessing data from the request context.
+func MyRequestHandler(w http.ResponseWriter, r *http.Request) {
+ user := r.Context().Value("user").(string)
+ w.Write([]byte(fmt.Sprintf("hi %s", user)))
+}
+```
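+
+Registering the handler is ordinary chi routing (a sketch; the `/me` path and the pairing with `MyMiddleware` are just for illustration):
+
+```go
+r.With(MyMiddleware).Get("/me", MyRequestHandler)
+```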
+
+
+### URL parameters
+
+chi's router parses and stores URL parameters directly on the request context. Here is
+an example of how to access URL params in your net/http handlers. And of course, middlewares
+are able to access the same information.
+
+```go
+// HTTP handler accessing the url routing parameters.
+func MyRequestHandler(w http.ResponseWriter, r *http.Request) {
+ userID := chi.URLParam(r, "userID") // from a route like /users/{userID}
+
+ ctx := r.Context()
+ key := ctx.Value("key").(string)
+
+ w.Write([]byte(fmt.Sprintf("hi %v, %v", userID, key)))
+}
+```
+
+
+## Middlewares
+
+chi comes equipped with an optional `middleware` package, providing a suite of standard
+`net/http` middlewares. Note that any middleware in the ecosystem that is compatible
+with `net/http` can be used with chi's mux.
+
+### Core middlewares
+
+| chi/middleware Handler | description                                                              |
+|:-----------------------|:-------------------------------------------------------------------------|
+| AllowContentType        | Explicit whitelist of accepted request Content-Types                      |
+| BasicAuth               | Basic HTTP authentication                                                 |
+| Compress                | Gzip compression for clients that accept compressed responses             |
+| GetHead                 | Automatically route undefined HEAD requests to GET handlers               |
+| Heartbeat               | Monitoring endpoint to check the server's pulse                           |
+| Logger                  | Logs the start and end of each request with the elapsed processing time   |
+| NoCache                 | Sets response headers to prevent clients from caching                     |
+| Profiler                | Easily attach net/http/pprof to your routers                              |
+| RealIP                  | Sets an http.Request's RemoteAddr to either X-Forwarded-For or X-Real-IP  |
+| Recoverer               | Gracefully absorbs panics and prints the stack trace                      |
+| RequestID               | Injects a request ID into the context of each request                     |
+| RedirectSlashes         | Redirects slashes on routing paths                                        |
+| SetHeader               | Short-hand middleware to set a response header key/value                  |
+| StripSlashes            | Strips slashes on routing paths                                           |
+| Throttle                | Puts a ceiling on the number of concurrent requests                       |
+| Timeout                 | Signals to the request context when the timeout deadline is reached       |
+| URLFormat               | Parses the extension from the URL and puts it on the request context      |
+| WithValue               | Short-hand middleware to set a key/value on the request context           |
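+
+Applying the core middlewares is just a matter of `Use` (a brief sketch built from the handlers listed above; the ping path and timeout value are arbitrary):
+
+```go
+r := chi.NewRouter()
+r.Use(middleware.RequestID)
+r.Use(middleware.RealIP)
+r.Use(middleware.Logger)
+r.Use(middleware.Recoverer)
+r.Use(middleware.Heartbeat("/ping"))
+r.Use(middleware.Timeout(30 * time.Second))
+```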
+
+### Extra middlewares & packages
+
+Please see https://github.com/go-chi for additional packages.
+
+| package                                             | description                                        |
+|:----------------------------------------------------|:---------------------------------------------------|
+| [cors](https://github.com/go-chi/cors)              | Cross-origin resource sharing (CORS)               |
+| [docgen](https://github.com/go-chi/docgen)          | Print chi.Router routes at runtime                 |
+| [jwtauth](https://github.com/go-chi/jwtauth)        | JWT authentication                                 |
+| [hostrouter](https://github.com/go-chi/hostrouter)  | Domain/host based request routing                  |
+| [httplog](https://github.com/go-chi/httplog)        | Small but powerful structured HTTP request logging |
+| [httprate](https://github.com/go-chi/httprate)      | HTTP request rate limiter                          |
+| [httptracer](https://github.com/go-chi/httptracer)  | HTTP request performance tracing library           |
+| [httpvcr](https://github.com/go-chi/httpvcr)        | Write deterministic tests for external sources     |
+| [stampede](https://github.com/go-chi/stampede)      | HTTP request coalescer                             |
+
+Please [submit a PR](./CONTRIBUTING.md) if you'd like to include a link to a chi-compatible middleware.
+
+
+## context?
+
+`context` is a tiny package that provides a simple interface to signal context across call stacks
+and goroutines. It was originally written by [Sameer Ajmani](https://github.com/Sajmani)
+and has been available in the standard library since Go 1.7.
+
+Learn more at https://blog.golang.org/context
+
+and:
+* Docs: https://golang.org/pkg/context
+* Source: https://github.com/golang/go/tree/master/src/context
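+
+Not chi-specific, but as a minimal sketch of that signalling idea, a context can tell a goroutine to stop:
+
+```go
+ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+defer cancel()
+
+go func() {
+ <-ctx.Done() // fires when the timeout elapses or cancel() is called
+ // ...stop any in-flight work here
+}()
+```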
+
+
+## Benchmarks
+
+The benchmark suite: https://github.com/pkieltyka/go-http-routing-benchmark
+
+Results as of Jan 9, 2019 with Go 1.11.4 on a Linux X1 Carbon laptop:
+
+```shell
+BenchmarkChi_Param 3000000 475 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_Param5 2000000 696 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_Param20 1000000 1275 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_ParamWrite 3000000 505 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_GithubStatic 3000000 508 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_GithubParam 2000000 669 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_GithubAll 10000 134627 ns/op 87699 B/op 609 allocs/op
+BenchmarkChi_GPlusStatic 3000000 402 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_GPlusParam 3000000 500 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_GPlus2Params 3000000 586 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_GPlusAll 200000 7237 ns/op 5616 B/op 39 allocs/op
+BenchmarkChi_ParseStatic 3000000 408 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_ParseParam 3000000 488 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_Parse2Params 3000000 551 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_ParseAll 100000 13508 ns/op 11232 B/op 78 allocs/op
+BenchmarkChi_StaticAll 20000 81933 ns/op 67826 B/op 471 allocs/op
+```
+
+Comparison with other routers: https://gist.github.com/pkieltyka/123032f12052520aaccab752bd3e78cc
+
+NOTE: the allocs in the benchmark above are from the calls to http.Request's
+`WithContext(context.Context)` method that clones the http.Request, sets the `Context()`
+on the duplicated (alloc'd) request and returns the new request object. This is just
+how setting context on a request in Go works.
+
+
+## Credits
+
+* Carl Jackson for https://github.com/zenazn/goji
+ * Parts of chi's thinking come from goji, and chi's middleware package
+ sources from goji.
+* Armon Dadgar for https://github.com/armon/go-radix
+* Contributions: [@VojtechVitek](https://github.com/VojtechVitek)
+
+We'll be more than happy to see [your contributions](./CONTRIBUTING.md)!
+
+
+## Beyond REST
+
+chi is just an http router that lets you decompose request handling into many smaller layers.
+Many companies use chi to write REST services for their public APIs. But REST is just a convention
+for managing state via HTTP, and there are a lot of other pieces required to write a complete client-server
+system or network of microservices.
+
+Looking beyond REST, I also recommend some newer works in the field:
+* [webrpc](https://github.com/webrpc/webrpc) - Web-focused RPC client+server framework with code-gen
+* [gRPC](https://github.com/grpc/grpc-go) - Google's RPC framework via protobufs
+* [graphql](https://github.com/99designs/gqlgen) - Declarative query language
+* [NATS](https://nats.io) - lightweight pub-sub
+
+
+## License
+
+Copyright (c) 2015-present [Peter Kieltyka](https://github.com/pkieltyka)
+
+Licensed under [MIT License](./LICENSE)
+
+[GoDoc]: https://godoc.org/github.com/go-chi/chi
+[GoDoc Widget]: https://godoc.org/github.com/go-chi/chi?status.svg
+[Travis]: https://travis-ci.org/go-chi/chi
+[Travis Widget]: https://travis-ci.org/go-chi/chi.svg?branch=master
diff --git a/vendor/github.com/go-chi/chi/chain.go b/vendor/github.com/go-chi/chi/chain.go
new file mode 100644
index 00000000..88e68461
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/chain.go
@@ -0,0 +1,49 @@
+package chi
+
+import "net/http"
+
+// Chain returns a Middlewares type from a slice of middleware handlers.
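+//
+// Usage sketch (middleware and handler names are placeholders):
+//
+//  mws := Chain(middleware.Logger, middleware.Recoverer)
+//  h := mws.Handler(myFinalHandler)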
+func Chain(middlewares ...func(http.Handler) http.Handler) Middlewares {
+ return Middlewares(middlewares)
+}
+
+// Handler builds and returns a http.Handler from the chain of middlewares,
+// with `h http.Handler` as the final handler.
+func (mws Middlewares) Handler(h http.Handler) http.Handler {
+ return &ChainHandler{mws, h, chain(mws, h)}
+}
+
+// HandlerFunc builds and returns a http.Handler from the chain of middlewares,
+// with `h http.Handler` as the final handler.
+func (mws Middlewares) HandlerFunc(h http.HandlerFunc) http.Handler {
+ return &ChainHandler{mws, h, chain(mws, h)}
+}
+
+// ChainHandler is a http.Handler with support for handler composition and
+// execution.
+type ChainHandler struct {
+ Middlewares Middlewares
+ Endpoint http.Handler
+ chain http.Handler
+}
+
+func (c *ChainHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ c.chain.ServeHTTP(w, r)
+}
+
+// chain builds a http.Handler composed of an inline middleware stack and endpoint
+// handler in the order they are passed.
+func chain(middlewares []func(http.Handler) http.Handler, endpoint http.Handler) http.Handler {
+ // Return ahead of time if there aren't any middlewares for the chain
+ if len(middlewares) == 0 {
+ return endpoint
+ }
+
+ // Wrap the end handler with the middleware chain
+ h := middlewares[len(middlewares)-1](endpoint)
+ for i := len(middlewares) - 2; i >= 0; i-- {
+ h = middlewares[i](h)
+ }
+
+ return h
+}
diff --git a/vendor/github.com/go-chi/chi/chi.go b/vendor/github.com/go-chi/chi/chi.go
new file mode 100644
index 00000000..b7063dc2
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/chi.go
@@ -0,0 +1,134 @@
+//
+// Package chi is a small, idiomatic and composable router for building HTTP services.
+//
+// chi requires Go 1.10 or newer.
+//
+// Example:
+// package main
+//
+// import (
+// "net/http"
+//
+// "github.com/go-chi/chi"
+// "github.com/go-chi/chi/middleware"
+// )
+//
+// func main() {
+// r := chi.NewRouter()
+// r.Use(middleware.Logger)
+// r.Use(middleware.Recoverer)
+//
+// r.Get("/", func(w http.ResponseWriter, r *http.Request) {
+// w.Write([]byte("root."))
+// })
+//
+// http.ListenAndServe(":3333", r)
+// }
+//
+// See github.com/go-chi/chi/_examples/ for more in-depth examples.
+//
+// URL patterns allow for easy matching of path components in HTTP
+// requests. The matching components can then be accessed using
+// chi.URLParam(). All patterns must begin with a slash.
+//
+// A simple named placeholder {name} matches any sequence of characters
+// up to the next / or the end of the URL. Trailing slashes on paths must
+// be handled explicitly.
+//
+// A placeholder with a name followed by a colon allows a regular
+// expression match, for example {number:\\d+}. The regular expression
+// syntax is Go's normal regexp RE2 syntax, except that regular expressions
+// including { or } are not supported, and / will never be
+// matched. An anonymous regexp pattern is allowed, using an empty string
+// before the colon in the placeholder, such as {:\\d+}
+//
+// The special placeholder of asterisk matches the rest of the requested
+// URL. Any trailing characters in the pattern are ignored. This is the only
+// placeholder which will match / characters.
+//
+// Examples:
+// "/user/{name}" matches "/user/jsmith" but not "/user/jsmith/info" or "/user/jsmith/"
+// "/user/{name}/info" matches "/user/jsmith/info"
+// "/page/*" matches "/page/intro/latest"
+// "/page/*/index" also matches "/page/intro/latest"
+// "/date/{yyyy:\\d\\d\\d\\d}/{mm:\\d\\d}/{dd:\\d\\d}" matches "/date/2017/04/01"
+//
+package chi
+
+import "net/http"
+
+// NewRouter returns a new Mux object that implements the Router interface.
+func NewRouter() *Mux {
+ return NewMux()
+}
+
+// Router consisting of the core routing methods used by chi's Mux,
+// using only the standard net/http.
+type Router interface {
+ http.Handler
+ Routes
+
+ // Use appends one or more middlewares onto the Router stack.
+ Use(middlewares ...func(http.Handler) http.Handler)
+
+ // With adds inline middlewares for an endpoint handler.
+ With(middlewares ...func(http.Handler) http.Handler) Router
+
+ // Group adds a new inline-Router along the current routing
+ // path, with a fresh middleware stack for the inline-Router.
+ Group(fn func(r Router)) Router
+
+ // Route mounts a sub-Router along a `pattern` string.
+ Route(pattern string, fn func(r Router)) Router
+
+ // Mount attaches another http.Handler along ./pattern/*
+ Mount(pattern string, h http.Handler)
+
+ // Handle and HandleFunc add routes for `pattern` that match
+ // all HTTP methods.
+ Handle(pattern string, h http.Handler)
+ HandleFunc(pattern string, h http.HandlerFunc)
+
+ // Method and MethodFunc add routes for `pattern` that match
+ // the `method` HTTP method.
+ Method(method, pattern string, h http.Handler)
+ MethodFunc(method, pattern string, h http.HandlerFunc)
+
+ // HTTP-method routing along `pattern`
+ Connect(pattern string, h http.HandlerFunc)
+ Delete(pattern string, h http.HandlerFunc)
+ Get(pattern string, h http.HandlerFunc)
+ Head(pattern string, h http.HandlerFunc)
+ Options(pattern string, h http.HandlerFunc)
+ Patch(pattern string, h http.HandlerFunc)
+ Post(pattern string, h http.HandlerFunc)
+ Put(pattern string, h http.HandlerFunc)
+ Trace(pattern string, h http.HandlerFunc)
+
+ // NotFound defines a handler to respond whenever a route could
+ // not be found.
+ NotFound(h http.HandlerFunc)
+
+ // MethodNotAllowed defines a handler to respond whenever a method is
+ // not allowed.
+ MethodNotAllowed(h http.HandlerFunc)
+}
+
+// The Routes interface adds methods for router traversal, which are also
+// used by the `docgen` subpackage to generate documentation for Routers.
+type Routes interface {
+ // Routes returns the routing tree in an easily traversable structure.
+ Routes() []Route
+
+ // Middlewares returns the list of middlewares in use by the router.
+ Middlewares() Middlewares
+
+ // Match searches the routing tree for a handler that matches
+ // the method/path - similar to routing a http request, but without
+ // executing the handler thereafter.
+ Match(rctx *Context, method, path string) bool
+}
+
+// Middlewares type is a slice of standard middleware handlers with methods
+// to compose middleware chains and http.Handlers.
+type Middlewares []func(http.Handler) http.Handler
diff --git a/vendor/github.com/go-chi/chi/context.go b/vendor/github.com/go-chi/chi/context.go
new file mode 100644
index 00000000..26c609ea
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/context.go
@@ -0,0 +1,172 @@
+package chi
+
+import (
+ "context"
+ "net"
+ "net/http"
+ "strings"
+)
+
+// URLParam returns the url parameter from a http.Request object.
+func URLParam(r *http.Request, key string) string {
+ if rctx := RouteContext(r.Context()); rctx != nil {
+ return rctx.URLParam(key)
+ }
+ return ""
+}
+
+// URLParamFromCtx returns the url parameter from a http.Request Context.
+func URLParamFromCtx(ctx context.Context, key string) string {
+ if rctx := RouteContext(ctx); rctx != nil {
+ return rctx.URLParam(key)
+ }
+ return ""
+}
+
+// RouteContext returns chi's routing Context object from a
+// http.Request Context.
+func RouteContext(ctx context.Context) *Context {
+ val, _ := ctx.Value(RouteCtxKey).(*Context)
+ return val
+}
+
+// ServerBaseContext wraps an http.Handler to set the request context to the
+// `baseCtx`.
+func ServerBaseContext(baseCtx context.Context, h http.Handler) http.Handler {
+ fn := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ baseCtx := baseCtx
+
+ // Copy over default net/http server context keys
+ if v, ok := ctx.Value(http.ServerContextKey).(*http.Server); ok {
+ baseCtx = context.WithValue(baseCtx, http.ServerContextKey, v)
+ }
+ if v, ok := ctx.Value(http.LocalAddrContextKey).(net.Addr); ok {
+ baseCtx = context.WithValue(baseCtx, http.LocalAddrContextKey, v)
+ }
+
+ h.ServeHTTP(w, r.WithContext(baseCtx))
+ })
+ return fn
+}
+
+// NewRouteContext returns a new routing Context object.
+func NewRouteContext() *Context {
+ return &Context{}
+}
+
+var (
+ // RouteCtxKey is the context.Context key to store the request context.
+ RouteCtxKey = &contextKey{"RouteContext"}
+)
+
+// Context is the default routing context set on the root node of a
+// request context to track route patterns, URL parameters and
+// an optional routing path.
+type Context struct {
+ Routes Routes
+
+ // Routing path/method override used during the route search.
+ // See Mux#routeHTTP method.
+ RoutePath string
+ RouteMethod string
+
+ // Routing pattern stack throughout the lifecycle of the request,
+ // across all connected routers. It is a record of all matching
+ // patterns across a stack of sub-routers.
+ RoutePatterns []string
+
+ // URLParams are the stack of routeParams captured during the
+ // routing lifecycle across a stack of sub-routers.
+ URLParams RouteParams
+
+ // The endpoint routing pattern that matched the request URI path
+ // or `RoutePath` of the current sub-router. This value will update
+ // during the lifecycle of a request passing through a stack of
+ // sub-routers.
+ routePattern string
+
+ // Route parameters matched for the current sub-router. It is
+ // intentionally unexported so it can't be tampered with.
+ routeParams RouteParams
+
+ // methodNotAllowed hint
+ methodNotAllowed bool
+}
+
+// Reset a routing context to its initial state.
+func (x *Context) Reset() {
+ x.Routes = nil
+ x.RoutePath = ""
+ x.RouteMethod = ""
+ x.RoutePatterns = x.RoutePatterns[:0]
+ x.URLParams.Keys = x.URLParams.Keys[:0]
+ x.URLParams.Values = x.URLParams.Values[:0]
+
+ x.routePattern = ""
+ x.routeParams.Keys = x.routeParams.Keys[:0]
+ x.routeParams.Values = x.routeParams.Values[:0]
+ x.methodNotAllowed = false
+}
+
+// URLParam returns the corresponding URL parameter value from the request
+// routing context.
+func (x *Context) URLParam(key string) string {
+ for k := len(x.URLParams.Keys) - 1; k >= 0; k-- {
+ if x.URLParams.Keys[k] == key {
+ return x.URLParams.Values[k]
+ }
+ }
+ return ""
+}
+
+// RoutePattern builds the routing pattern string for the particular
+// request, at the particular point during routing. This means the value
+// will change throughout the execution of a request in a router. That is
+// why it's advised to only use this value after calling the next handler.
+//
+// For example,
+//
+// func Instrument(next http.Handler) http.Handler {
+// return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+// next.ServeHTTP(w, r)
+// routePattern := chi.RouteContext(r.Context()).RoutePattern()
+// measure(w, r, routePattern)
+// })
+// }
+func (x *Context) RoutePattern() string {
+ routePattern := strings.Join(x.RoutePatterns, "")
+ return replaceWildcards(routePattern)
+}
+
+// replaceWildcards takes a route pattern and recursively replaces all
+// occurrences of "/*/" to "/".
+func replaceWildcards(p string) string {
+ if strings.Contains(p, "/*/") {
+ return replaceWildcards(strings.Replace(p, "/*/", "/", -1))
+ }
+
+ return p
+}
+
+// RouteParams is a structure to track URL routing parameters efficiently.
+type RouteParams struct {
+ Keys, Values []string
+}
+
+// Add will append a URL parameter to the end of the route params.
+func (s *RouteParams) Add(key, value string) {
+ s.Keys = append(s.Keys, key)
+ s.Values = append(s.Values, value)
+}
+
+// contextKey is a value for use with context.WithValue. It's used as
+// a pointer so it fits in an interface{} without allocation. This technique
+// for defining context keys was copied from Go 1.7's new use of context in net/http.
+type contextKey struct {
+ name string
+}
+
+func (k *contextKey) String() string {
+ return "chi context value " + k.name
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/basic_auth.go b/vendor/github.com/go-chi/chi/middleware/basic_auth.go
new file mode 100644
index 00000000..87b2641a
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/basic_auth.go
@@ -0,0 +1,32 @@
+package middleware
+
+import (
+ "fmt"
+ "net/http"
+)
+
+// BasicAuth implements a simple middleware handler for adding basic http auth to a route.
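+//
+// Usage sketch (the realm and credentials here are placeholders):
+//
+//  r.Use(middleware.BasicAuth("restricted", map[string]string{"admin": "secret"}))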
+func BasicAuth(realm string, creds map[string]string) func(next http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ user, pass, ok := r.BasicAuth()
+ if !ok {
+ basicAuthFailed(w, realm)
+ return
+ }
+
+ credPass, credUserOk := creds[user]
+ if !credUserOk || pass != credPass {
+ basicAuthFailed(w, realm)
+ return
+ }
+
+ next.ServeHTTP(w, r)
+ })
+ }
+}
+
+func basicAuthFailed(w http.ResponseWriter, realm string) {
+ w.Header().Add("WWW-Authenticate", fmt.Sprintf(`Basic realm="%s"`, realm))
+ w.WriteHeader(http.StatusUnauthorized)
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/compress.go b/vendor/github.com/go-chi/chi/middleware/compress.go
new file mode 100644
index 00000000..2f40cc15
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/compress.go
@@ -0,0 +1,399 @@
+package middleware
+
+import (
+ "bufio"
+ "compress/flate"
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "strings"
+ "sync"
+)
+
+var defaultCompressibleContentTypes = []string{
+ "text/html",
+ "text/css",
+ "text/plain",
+ "text/javascript",
+ "application/javascript",
+ "application/x-javascript",
+ "application/json",
+ "application/atom+xml",
+ "application/rss+xml",
+ "image/svg+xml",
+}
+
+// Compress is a middleware that compresses the response
+// body of the given content types to a data format based
+// on the Accept-Encoding request header. It uses the given
+// compression level.
+//
+// NOTE: make sure to set the Content-Type header on your response,
+// otherwise this middleware will not compress the response body. For example, in
+// your handler you should set w.Header().Set("Content-Type", http.DetectContentType(yourBody))
+// or set it manually.
+//
+// Passing a compression level of 5 is a sensible value.
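+//
+// Usage sketch (the level and content types here are placeholders):
+//
+//  r.Use(middleware.Compress(5, "text/html", "application/json"))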
+func Compress(level int, types ...string) func(next http.Handler) http.Handler {
+ compressor := NewCompressor(level, types...)
+ return compressor.Handler
+}
+
+// Compressor represents a set of encoding configurations.
+type Compressor struct {
+ level int // The compression level.
+ // The mapping of encoder names to encoder functions.
+ encoders map[string]EncoderFunc
+ // The mapping of pooled encoders to pools.
+ pooledEncoders map[string]*sync.Pool
+ // The set of content types allowed to be compressed.
+ allowedTypes map[string]struct{}
+ allowedWildcards map[string]struct{}
+ // The list of encoders in order of decreasing precedence.
+ encodingPrecedence []string
+}
+
+// NewCompressor creates a new Compressor that will handle encoding responses.
+//
+// The level should be one of the ones defined in the flate package.
+// The types are the content types that are allowed to be compressed.
+func NewCompressor(level int, types ...string) *Compressor {
+ // If types are provided, set those as the allowed types. If none are
+ // provided, use the default list.
+ allowedTypes := make(map[string]struct{})
+ allowedWildcards := make(map[string]struct{})
+ if len(types) > 0 {
+ for _, t := range types {
+ if strings.Contains(strings.TrimSuffix(t, "/*"), "*") {
+ panic(fmt.Sprintf("middleware/compress: Unsupported content-type wildcard pattern '%s'. Only '/*' supported", t))
+ }
+ if strings.HasSuffix(t, "/*") {
+ allowedWildcards[strings.TrimSuffix(t, "/*")] = struct{}{}
+ } else {
+ allowedTypes[t] = struct{}{}
+ }
+ }
+ } else {
+ for _, t := range defaultCompressibleContentTypes {
+ allowedTypes[t] = struct{}{}
+ }
+ }
+
+ c := &Compressor{
+ level: level,
+ encoders: make(map[string]EncoderFunc),
+ pooledEncoders: make(map[string]*sync.Pool),
+ allowedTypes: allowedTypes,
+ allowedWildcards: allowedWildcards,
+ }
+
+ // Set the default encoders. The precedence order uses the reverse
+ // ordering that the encoders were added. This means adding new encoders
+ // will move them to the front of the order.
+ //
+ // TODO:
+ // lzma: Opera.
+ // sdch: Chrome, Android. Gzip output + dictionary header.
+ // br: Brotli, see https://github.com/go-chi/chi/pull/326
+
+ // HTTP 1.1 "deflate" (RFC 2616) stands for DEFLATE data (RFC 1951)
+ // wrapped with zlib (RFC 1950). The zlib wrapper uses Adler-32
+ // checksum compared to CRC-32 used in "gzip" and thus is faster.
+ //
+ // But.. some old browsers (MSIE, Safari 5.1) incorrectly expect
+ // raw DEFLATE data only, without the mentioned zlib wrapper.
+ // Because of this major confusion, most modern browsers try it
+ // both ways, first looking for zlib headers.
+ // Quote by Mark Adler: http://stackoverflow.com/a/9186091/385548
+ //
+ // The list of browsers having problems is quite big, see:
+ // http://zoompf.com/blog/2012/02/lose-the-wait-http-compression
+ // https://web.archive.org/web/20120321182910/http://www.vervestudios.co/projects/compression-tests/results
+ //
+ // That's why we prefer gzip over deflate. It's just more reliable
+ // and not significantly slower than deflate.
+ c.SetEncoder("deflate", encoderDeflate)
+
+ // TODO: Exception for old MSIE browsers that can't handle non-HTML?
+ // https://zoompf.com/blog/2012/02/lose-the-wait-http-compression
+ c.SetEncoder("gzip", encoderGzip)
+
+ // NOTE: Not implemented, intentionally:
+ // case "compress": // LZW. Deprecated.
+ // case "bzip2": // Too slow on-the-fly.
+ // case "zopfli": // Too slow on-the-fly.
+ // case "xz": // Too slow on-the-fly.
+ return c
+}
+
+// SetEncoder can be used to set the implementation of a compression algorithm.
+//
+// The encoding should be a standardised identifier. See:
+// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Encoding
+//
+// For example, add the Brotli algorithm:
+//
+// import brotli_enc "gopkg.in/kothar/brotli-go.v0/enc"
+//
+// compressor := middleware.NewCompressor(5, "text/html")
+// compressor.SetEncoder("br", func(w http.ResponseWriter, level int) io.Writer {
+// params := brotli_enc.NewBrotliParams()
+// params.SetQuality(level)
+// return brotli_enc.NewBrotliWriter(params, w)
+// })
+func (c *Compressor) SetEncoder(encoding string, fn EncoderFunc) {
+ encoding = strings.ToLower(encoding)
+ if encoding == "" {
+ panic("the encoding can not be empty")
+ }
+ if fn == nil {
+ panic("attempted to set a nil encoder function")
+ }
+
+ // If we are adding a new encoder that is already registered, we have to
+ // clear that one out first.
+ if _, ok := c.pooledEncoders[encoding]; ok {
+ delete(c.pooledEncoders, encoding)
+ }
+ if _, ok := c.encoders[encoding]; ok {
+ delete(c.encoders, encoding)
+ }
+
+ // If the encoder supports resetting (ioResetterWriter), then it can be pooled.
+ encoder := fn(ioutil.Discard, c.level)
+ if encoder != nil {
+ if _, ok := encoder.(ioResetterWriter); ok {
+ pool := &sync.Pool{
+ New: func() interface{} {
+ return fn(ioutil.Discard, c.level)
+ },
+ }
+ c.pooledEncoders[encoding] = pool
+ }
+ }
+ // If the encoder is not in the pooledEncoders, add it to the normal encoders.
+ if _, ok := c.pooledEncoders[encoding]; !ok {
+ c.encoders[encoding] = fn
+ }
+
+ for i, v := range c.encodingPrecedence {
+ if v == encoding {
+ c.encodingPrecedence = append(c.encodingPrecedence[:i], c.encodingPrecedence[i+1:]...)
+ }
+ }
+
+ c.encodingPrecedence = append([]string{encoding}, c.encodingPrecedence...)
+}
+
+// Handler returns a new middleware that will compress the response based on the
+// current Compressor.
+func (c *Compressor) Handler(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ encoder, encoding, cleanup := c.selectEncoder(r.Header, w)
+
+ cw := &compressResponseWriter{
+ ResponseWriter: w,
+ w: w,
+ contentTypes: c.allowedTypes,
+ contentWildcards: c.allowedWildcards,
+ encoding: encoding,
+ compressable: false, // determined in post-handler
+ }
+ if encoder != nil {
+ cw.w = encoder
+ }
+ // Re-add the encoder to the pool if applicable.
+ defer cleanup()
+ defer cw.Close()
+
+ next.ServeHTTP(cw, r)
+ })
+}
+
+// selectEncoder returns the encoder, the name of the encoder, and a closer function.
+func (c *Compressor) selectEncoder(h http.Header, w io.Writer) (io.Writer, string, func()) {
+ header := h.Get("Accept-Encoding")
+
+ // Parse the names of all accepted algorithms from the header.
+ accepted := strings.Split(strings.ToLower(header), ",")
+
+ // Find supported encoder by accepted list by precedence
+ for _, name := range c.encodingPrecedence {
+ if matchAcceptEncoding(accepted, name) {
+ if pool, ok := c.pooledEncoders[name]; ok {
+ encoder := pool.Get().(ioResetterWriter)
+ cleanup := func() {
+ pool.Put(encoder)
+ }
+ encoder.Reset(w)
+ return encoder, name, cleanup
+
+ }
+ if fn, ok := c.encoders[name]; ok {
+ return fn(w, c.level), name, func() {}
+ }
+ }
+
+ }
+
+ // No encoder found to match the accepted encoding
+ return nil, "", func() {}
+}
+
+func matchAcceptEncoding(accepted []string, encoding string) bool {
+ for _, v := range accepted {
+ if strings.Contains(v, encoding) {
+ return true
+ }
+ }
+ return false
+}
+
+// An EncoderFunc is a function that wraps the provided io.Writer with a
+// streaming compression algorithm and returns it.
+//
+// In case of failure, the function should return nil.
+type EncoderFunc func(w io.Writer, level int) io.Writer
+
+// Interface for types that allow resetting io.Writers.
+type ioResetterWriter interface {
+ io.Writer
+ Reset(w io.Writer)
+}
+
+type compressResponseWriter struct {
+ http.ResponseWriter
+
+ // The streaming encoder writer to be used if there is one. Otherwise,
+ // this is just the normal writer.
+ w io.Writer
+ encoding string
+ contentTypes map[string]struct{}
+ contentWildcards map[string]struct{}
+ wroteHeader bool
+ compressable bool
+}
+
+func (cw *compressResponseWriter) isCompressable() bool {
+ // Parse the first part of the Content-Type response header.
+ contentType := cw.Header().Get("Content-Type")
+ if idx := strings.Index(contentType, ";"); idx >= 0 {
+ contentType = contentType[0:idx]
+ }
+
+ // Is the content type compressable?
+ if _, ok := cw.contentTypes[contentType]; ok {
+ return true
+ }
+ if idx := strings.Index(contentType, "/"); idx > 0 {
+ contentType = contentType[0:idx]
+ _, ok := cw.contentWildcards[contentType]
+ return ok
+ }
+ return false
+}
+
+func (cw *compressResponseWriter) WriteHeader(code int) {
+ if cw.wroteHeader {
+ cw.ResponseWriter.WriteHeader(code) // Allow multiple calls to propagate.
+ return
+ }
+ cw.wroteHeader = true
+ defer cw.ResponseWriter.WriteHeader(code)
+
+ // Already compressed data?
+ if cw.Header().Get("Content-Encoding") != "" {
+ return
+ }
+
+ if !cw.isCompressable() {
+ cw.compressable = false
+ return
+ }
+
+ if cw.encoding != "" {
+ cw.compressable = true
+ cw.Header().Set("Content-Encoding", cw.encoding)
+ cw.Header().Set("Vary", "Accept-Encoding")
+
+ // The content-length after compression is unknown
+ cw.Header().Del("Content-Length")
+ }
+}
+
+func (cw *compressResponseWriter) Write(p []byte) (int, error) {
+ if !cw.wroteHeader {
+ cw.WriteHeader(http.StatusOK)
+ }
+
+ return cw.writer().Write(p)
+}
+
+func (cw *compressResponseWriter) writer() io.Writer {
+ if cw.compressable {
+ return cw.w
+ } else {
+ return cw.ResponseWriter
+ }
+}
+
+type compressFlusher interface {
+ Flush() error
+}
+
+func (cw *compressResponseWriter) Flush() {
+ if f, ok := cw.writer().(http.Flusher); ok {
+ f.Flush()
+ }
+ // If the underlying writer has a compression flush signature,
+ // call this Flush() method instead
+ if f, ok := cw.writer().(compressFlusher); ok {
+ f.Flush()
+
+ // Also flush the underlying response writer
+ if f, ok := cw.ResponseWriter.(http.Flusher); ok {
+ f.Flush()
+ }
+ }
+}
+
+func (cw *compressResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ if hj, ok := cw.writer().(http.Hijacker); ok {
+ return hj.Hijack()
+ }
+ return nil, nil, errors.New("chi/middleware: http.Hijacker is unavailable on the writer")
+}
+
+func (cw *compressResponseWriter) Push(target string, opts *http.PushOptions) error {
+ if ps, ok := cw.writer().(http.Pusher); ok {
+ return ps.Push(target, opts)
+ }
+ return errors.New("chi/middleware: http.Pusher is unavailable on the writer")
+}
+
+func (cw *compressResponseWriter) Close() error {
+ if c, ok := cw.writer().(io.WriteCloser); ok {
+ return c.Close()
+ }
+ return errors.New("chi/middleware: io.WriteCloser is unavailable on the writer")
+}
+
+func encoderGzip(w io.Writer, level int) io.Writer {
+ gw, err := gzip.NewWriterLevel(w, level)
+ if err != nil {
+ return nil
+ }
+ return gw
+}
+
+func encoderDeflate(w io.Writer, level int) io.Writer {
+ dw, err := flate.NewWriter(w, level)
+ if err != nil {
+ return nil
+ }
+ return dw
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/content_charset.go b/vendor/github.com/go-chi/chi/middleware/content_charset.go
new file mode 100644
index 00000000..07b5ce6f
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/content_charset.go
@@ -0,0 +1,51 @@
+package middleware
+
+import (
+ "net/http"
+ "strings"
+)
+
+// ContentCharset generates a handler that writes a 415 Unsupported Media Type response if none of the charsets match.
+// An empty charset will allow requests with no Content-Type header or no specified charset.
+func ContentCharset(charsets ...string) func(next http.Handler) http.Handler {
+ for i, c := range charsets {
+ charsets[i] = strings.ToLower(c)
+ }
+
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if !contentEncoding(r.Header.Get("Content-Type"), charsets...) {
+ w.WriteHeader(http.StatusUnsupportedMediaType)
+ return
+ }
+
+ next.ServeHTTP(w, r)
+ })
+ }
+}
+
+// Check the content encoding against a list of acceptable values.
+func contentEncoding(ce string, charsets ...string) bool {
+ _, ce = split(strings.ToLower(ce), ";")
+ _, ce = split(ce, "charset=")
+ ce, _ = split(ce, ";")
+ for _, c := range charsets {
+ if ce == c {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Split a string in two parts, cleaning any whitespace.
+func split(str, sep string) (string, string) {
+ var a, b string
+ var parts = strings.SplitN(str, sep, 2)
+ a = strings.TrimSpace(parts[0])
+ if len(parts) == 2 {
+ b = strings.TrimSpace(parts[1])
+ }
+
+ return a, b
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/content_encoding.go b/vendor/github.com/go-chi/chi/middleware/content_encoding.go
new file mode 100644
index 00000000..e0b9ccc0
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/content_encoding.go
@@ -0,0 +1,34 @@
+package middleware
+
+import (
+ "net/http"
+ "strings"
+)
+
+// AllowContentEncoding enforces a whitelist of request Content-Encoding values,
+// otherwise responding with a 415 Unsupported Media Type status.
+func AllowContentEncoding(contentEncoding ...string) func(next http.Handler) http.Handler {
+ allowedEncodings := make(map[string]struct{}, len(contentEncoding))
+ for _, encoding := range contentEncoding {
+ allowedEncodings[strings.TrimSpace(strings.ToLower(encoding))] = struct{}{}
+ }
+ return func(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ requestEncodings := r.Header["Content-Encoding"]
+ // skip check for empty content body or no Content-Encoding
+ if r.ContentLength == 0 {
+ next.ServeHTTP(w, r)
+ return
+ }
+ // All encodings in the request must be allowed
+ for _, encoding := range requestEncodings {
+ if _, ok := allowedEncodings[strings.TrimSpace(strings.ToLower(encoding))]; !ok {
+ w.WriteHeader(http.StatusUnsupportedMediaType)
+ return
+ }
+ }
+ next.ServeHTTP(w, r)
+ }
+ return http.HandlerFunc(fn)
+ }
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/content_type.go b/vendor/github.com/go-chi/chi/middleware/content_type.go
new file mode 100644
index 00000000..ee495787
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/content_type.go
@@ -0,0 +1,51 @@
+package middleware
+
+import (
+ "net/http"
+ "strings"
+)
+
+// SetHeader is a convenience handler to set a response header key/value
+func SetHeader(key, value string) func(next http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set(key, value)
+ next.ServeHTTP(w, r)
+ }
+ return http.HandlerFunc(fn)
+ }
+}
+
+// AllowContentType enforces a whitelist of request Content-Types, otherwise responding
+// with a 415 Unsupported Media Type status.
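+//
+// Usage sketch:
+//
+//  r.Use(middleware.AllowContentType("application/json"))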
+func AllowContentType(contentTypes ...string) func(next http.Handler) http.Handler {
+ cT := []string{}
+ for _, t := range contentTypes {
+ cT = append(cT, strings.ToLower(t))
+ }
+
+ return func(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ if r.ContentLength == 0 {
+ // skip check for empty content body
+ next.ServeHTTP(w, r)
+ return
+ }
+
+ s := strings.ToLower(strings.TrimSpace(r.Header.Get("Content-Type")))
+ if i := strings.Index(s, ";"); i > -1 {
+ s = s[0:i]
+ }
+
+ for _, t := range cT {
+ if t == s {
+ next.ServeHTTP(w, r)
+ return
+ }
+ }
+
+ w.WriteHeader(http.StatusUnsupportedMediaType)
+ }
+ return http.HandlerFunc(fn)
+ }
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/get_head.go b/vendor/github.com/go-chi/chi/middleware/get_head.go
new file mode 100644
index 00000000..86068a96
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/get_head.go
@@ -0,0 +1,39 @@
+package middleware
+
+import (
+ "net/http"
+
+ "github.com/go-chi/chi"
+)
+
+// GetHead automatically routes undefined HEAD requests to GET handlers.
+func GetHead(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.Method == "HEAD" {
+ rctx := chi.RouteContext(r.Context())
+ routePath := rctx.RoutePath
+ if routePath == "" {
+ if r.URL.RawPath != "" {
+ routePath = r.URL.RawPath
+ } else {
+ routePath = r.URL.Path
+ }
+ }
+
+ // Temporary routing context to look-ahead before routing the request
+ tctx := chi.NewRouteContext()
+
+ // Attempt to find a HEAD handler for the routing path; if not found, traverse
+ // the router as though it's a GET route, but proceed with the request
+ // using the HEAD method.
+ if !rctx.Routes.Match(tctx, "HEAD", routePath) {
+ rctx.RouteMethod = "GET"
+ rctx.RoutePath = routePath
+ next.ServeHTTP(w, r)
+ return
+ }
+ }
+
+ next.ServeHTTP(w, r)
+ })
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/heartbeat.go b/vendor/github.com/go-chi/chi/middleware/heartbeat.go
new file mode 100644
index 00000000..fe822fb5
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/heartbeat.go
@@ -0,0 +1,26 @@
+package middleware
+
+import (
+ "net/http"
+ "strings"
+)
+
+// Heartbeat is endpoint middleware useful for setting up a path like
+// `/ping` that load balancers or external uptime-testing services
+// can request before hitting any routes. It's also convenient
+// to place this above ACL middlewares.
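+//
+// Usage sketch:
+//
+//  r.Use(middleware.Heartbeat("/ping"))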
+func Heartbeat(endpoint string) func(http.Handler) http.Handler {
+ f := func(h http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ if r.Method == "GET" && strings.EqualFold(r.URL.Path, endpoint) {
+ w.Header().Set("Content-Type", "text/plain")
+ w.WriteHeader(http.StatusOK)
+ w.Write([]byte("."))
+ return
+ }
+ h.ServeHTTP(w, r)
+ }
+ return http.HandlerFunc(fn)
+ }
+ return f
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/logger.go b/vendor/github.com/go-chi/chi/middleware/logger.go
new file mode 100644
index 00000000..158a6a39
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/logger.go
@@ -0,0 +1,155 @@
+package middleware
+
+import (
+ "bytes"
+ "context"
+ "log"
+ "net/http"
+ "os"
+ "time"
+)
+
+var (
+ // LogEntryCtxKey is the context.Context key to store the request log entry.
+ LogEntryCtxKey = &contextKey{"LogEntry"}
+
+ // DefaultLogger is called by the Logger middleware handler to log each request.
+ // It's made a package-level variable so that it can be reconfigured for custom
+ // logging configurations.
+ DefaultLogger = RequestLogger(&DefaultLogFormatter{Logger: log.New(os.Stdout, "", log.LstdFlags), NoColor: false})
+)
+
+// Logger is a middleware that logs the start and end of each request, along
+// with some useful data about what was requested, what the response status was,
+// and how long it took to return. When standard output is a TTY, Logger will
+// print in color, otherwise it will print in black and white. Logger prints a
+// request ID if one is provided.
+//
+// Alternatively, look at https://github.com/goware/httplog for a more in-depth
+// http logger with structured logging support.
+func Logger(next http.Handler) http.Handler {
+ return DefaultLogger(next)
+}
+
+// RequestLogger returns a logger handler using a custom LogFormatter.
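+//
+// Usage sketch (a DefaultLogFormatter writing to stdout with color disabled):
+//
+//  r.Use(middleware.RequestLogger(&middleware.DefaultLogFormatter{
+//      Logger:  log.New(os.Stdout, "", log.LstdFlags),
+//      NoColor: true,
+//  }))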
+func RequestLogger(f LogFormatter) func(next http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ entry := f.NewLogEntry(r)
+ ww := NewWrapResponseWriter(w, r.ProtoMajor)
+
+ t1 := time.Now()
+ defer func() {
+ entry.Write(ww.Status(), ww.BytesWritten(), ww.Header(), time.Since(t1), nil)
+ }()
+
+ next.ServeHTTP(ww, WithLogEntry(r, entry))
+ }
+ return http.HandlerFunc(fn)
+ }
+}
+
+// LogFormatter initiates the beginning of a new LogEntry per request.
+// See DefaultLogFormatter for an example implementation.
+type LogFormatter interface {
+ NewLogEntry(r *http.Request) LogEntry
+}
+
+// LogEntry records the final log when a request completes.
+// See defaultLogEntry for an example implementation.
+type LogEntry interface {
+ Write(status, bytes int, header http.Header, elapsed time.Duration, extra interface{})
+ Panic(v interface{}, stack []byte)
+}
+
+// GetLogEntry returns the in-context LogEntry for a request.
+func GetLogEntry(r *http.Request) LogEntry {
+ entry, _ := r.Context().Value(LogEntryCtxKey).(LogEntry)
+ return entry
+}
+
+// WithLogEntry sets the in-context LogEntry for a request.
+func WithLogEntry(r *http.Request, entry LogEntry) *http.Request {
+ r = r.WithContext(context.WithValue(r.Context(), LogEntryCtxKey, entry))
+ return r
+}
+
+// LoggerInterface accepts printing to stdlib logger or compatible logger.
+type LoggerInterface interface {
+ Print(v ...interface{})
+}
+
+// DefaultLogFormatter is a simple logger that implements a LogFormatter.
+type DefaultLogFormatter struct {
+ Logger LoggerInterface
+ NoColor bool
+}
+
+// NewLogEntry creates a new LogEntry for the request.
+func (l *DefaultLogFormatter) NewLogEntry(r *http.Request) LogEntry {
+ useColor := !l.NoColor
+ entry := &defaultLogEntry{
+ DefaultLogFormatter: l,
+ request: r,
+ buf: &bytes.Buffer{},
+ useColor: useColor,
+ }
+
+ reqID := GetReqID(r.Context())
+ if reqID != "" {
+ cW(entry.buf, useColor, nYellow, "[%s] ", reqID)
+ }
+ cW(entry.buf, useColor, nCyan, "\"")
+ cW(entry.buf, useColor, bMagenta, "%s ", r.Method)
+
+ scheme := "http"
+ if r.TLS != nil {
+ scheme = "https"
+ }
+ cW(entry.buf, useColor, nCyan, "%s://%s%s %s\" ", scheme, r.Host, r.RequestURI, r.Proto)
+
+ entry.buf.WriteString("from ")
+ entry.buf.WriteString(r.RemoteAddr)
+ entry.buf.WriteString(" - ")
+
+ return entry
+}
+
+type defaultLogEntry struct {
+ *DefaultLogFormatter
+ request *http.Request
+ buf *bytes.Buffer
+ useColor bool
+}
+
+func (l *defaultLogEntry) Write(status, bytes int, header http.Header, elapsed time.Duration, extra interface{}) {
+ switch {
+ case status < 200:
+ cW(l.buf, l.useColor, bBlue, "%03d", status)
+ case status < 300:
+ cW(l.buf, l.useColor, bGreen, "%03d", status)
+ case status < 400:
+ cW(l.buf, l.useColor, bCyan, "%03d", status)
+ case status < 500:
+ cW(l.buf, l.useColor, bYellow, "%03d", status)
+ default:
+ cW(l.buf, l.useColor, bRed, "%03d", status)
+ }
+
+ cW(l.buf, l.useColor, bBlue, " %dB", bytes)
+
+ l.buf.WriteString(" in ")
+ if elapsed < 500*time.Millisecond {
+ cW(l.buf, l.useColor, nGreen, "%s", elapsed)
+ } else if elapsed < 5*time.Second {
+ cW(l.buf, l.useColor, nYellow, "%s", elapsed)
+ } else {
+ cW(l.buf, l.useColor, nRed, "%s", elapsed)
+ }
+
+ l.Logger.Print(l.buf.String())
+}
+
+func (l *defaultLogEntry) Panic(v interface{}, stack []byte) {
+ PrintPrettyStack(v)
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/middleware.go b/vendor/github.com/go-chi/chi/middleware/middleware.go
new file mode 100644
index 00000000..cc371e00
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/middleware.go
@@ -0,0 +1,23 @@
+package middleware
+
+import "net/http"
+
+// New will create a new middleware handler from a http.Handler.
+func New(h http.Handler) func(next http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ h.ServeHTTP(w, r)
+ })
+ }
+}
+
+// contextKey is a value for use with context.WithValue. It's used as
+// a pointer so it fits in an interface{} without allocation. This technique
+// for defining context keys was copied from Go 1.7's new use of context in net/http.
+type contextKey struct {
+ name string
+}
+
+func (k *contextKey) String() string {
+ return "chi/middleware context value " + k.name
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/nocache.go b/vendor/github.com/go-chi/chi/middleware/nocache.go
new file mode 100644
index 00000000..2412829e
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/nocache.go
@@ -0,0 +1,58 @@
+package middleware
+
+// Ported from Goji's middleware, source:
+// https://github.com/zenazn/goji/tree/master/web/middleware
+
+import (
+ "net/http"
+ "time"
+)
+
+// Unix epoch time
+var epoch = time.Unix(0, 0).Format(time.RFC1123)
+
+// Taken from https://github.com/mytrile/nocache
+var noCacheHeaders = map[string]string{
+ "Expires": epoch,
+ "Cache-Control": "no-cache, no-store, no-transform, must-revalidate, private, max-age=0",
+ "Pragma": "no-cache",
+ "X-Accel-Expires": "0",
+}
+
+var etagHeaders = []string{
+ "ETag",
+ "If-Modified-Since",
+ "If-Match",
+ "If-None-Match",
+ "If-Range",
+ "If-Unmodified-Since",
+}
+
+// NoCache is a simple piece of middleware that sets a number of HTTP headers to prevent
+// a router (or subrouter) from being cached by an upstream proxy and/or client.
+//
+// As per http://wiki.nginx.org/HttpProxyModule - NoCache sets:
+// Expires: Thu, 01 Jan 1970 00:00:00 UTC
+// Cache-Control: no-cache, private, max-age=0
+// X-Accel-Expires: 0
+// Pragma: no-cache (for HTTP/1.0 proxies/clients)
+func NoCache(h http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+
+ // Delete any ETag headers that may have been set
+ for _, v := range etagHeaders {
+ if r.Header.Get(v) != "" {
+ r.Header.Del(v)
+ }
+ }
+
+ // Set our NoCache headers
+ for k, v := range noCacheHeaders {
+ w.Header().Set(k, v)
+ }
+
+ h.ServeHTTP(w, r)
+ }
+
+ return http.HandlerFunc(fn)
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/profiler.go b/vendor/github.com/go-chi/chi/middleware/profiler.go
new file mode 100644
index 00000000..1d44b825
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/profiler.go
@@ -0,0 +1,55 @@
+package middleware
+
+import (
+ "expvar"
+ "fmt"
+ "net/http"
+ "net/http/pprof"
+
+ "github.com/go-chi/chi"
+)
+
+// Profiler is a convenient subrouter used for mounting net/http/pprof. ie.
+//
+// func MyService() http.Handler {
+// r := chi.NewRouter()
+// // ..middlewares
+// r.Mount("/debug", middleware.Profiler())
+// // ..routes
+// return r
+// }
+func Profiler() http.Handler {
+ r := chi.NewRouter()
+ r.Use(NoCache)
+
+ r.Get("/", func(w http.ResponseWriter, r *http.Request) {
+ http.Redirect(w, r, r.RequestURI+"/pprof/", 301)
+ })
+ r.HandleFunc("/pprof", func(w http.ResponseWriter, r *http.Request) {
+ http.Redirect(w, r, r.RequestURI+"/", 301)
+ })
+
+ r.HandleFunc("/pprof/*", pprof.Index)
+ r.HandleFunc("/pprof/cmdline", pprof.Cmdline)
+ r.HandleFunc("/pprof/profile", pprof.Profile)
+ r.HandleFunc("/pprof/symbol", pprof.Symbol)
+ r.HandleFunc("/pprof/trace", pprof.Trace)
+ r.HandleFunc("/vars", expVars)
+
+ return r
+}
+
+// Replicated from expvar.go as not public.
+func expVars(w http.ResponseWriter, r *http.Request) {
+ first := true
+ w.Header().Set("Content-Type", "application/json")
+ fmt.Fprintf(w, "{\n")
+ expvar.Do(func(kv expvar.KeyValue) {
+ if !first {
+ fmt.Fprintf(w, ",\n")
+ }
+ first = false
+ fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
+ })
+ fmt.Fprintf(w, "\n}\n")
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/realip.go b/vendor/github.com/go-chi/chi/middleware/realip.go
new file mode 100644
index 00000000..72db6ca9
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/realip.go
@@ -0,0 +1,54 @@
+package middleware
+
+// Ported from Goji's middleware, source:
+// https://github.com/zenazn/goji/tree/master/web/middleware
+
+import (
+ "net/http"
+ "strings"
+)
+
+var xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For")
+var xRealIP = http.CanonicalHeaderKey("X-Real-IP")
+
+// RealIP is a middleware that sets a http.Request's RemoteAddr to the results
+// of parsing either the X-Forwarded-For header or the X-Real-IP header (in that
+// order).
+//
+// This middleware should be inserted fairly early in the middleware stack to
+// ensure that subsequent layers (e.g., request loggers) which examine the
+// RemoteAddr will see the intended value.
+//
+// You should only use this middleware if you can trust the headers passed to
+// you (in particular, the two headers this middleware uses), for example
+// because you have placed a reverse proxy like HAProxy or nginx in front of
+// chi. If your reverse proxies are configured to pass along arbitrary header
+// values from the client, or if you use this middleware without a reverse
+// proxy, malicious clients will be able to make you very sad (or, depending on
+// how you're using RemoteAddr, vulnerable to an attack of some sort).
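+//
+// Usage sketch (place it near the top of the middleware stack):
+//
+//  r.Use(middleware.RealIP)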
+func RealIP(h http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ if rip := realIP(r); rip != "" {
+ r.RemoteAddr = rip
+ }
+ h.ServeHTTP(w, r)
+ }
+
+ return http.HandlerFunc(fn)
+}
+
+func realIP(r *http.Request) string {
+ var ip string
+
+ if xrip := r.Header.Get(xRealIP); xrip != "" {
+ ip = xrip
+ } else if xff := r.Header.Get(xForwardedFor); xff != "" {
+ i := strings.Index(xff, ", ")
+ if i == -1 {
+ i = len(xff)
+ }
+ ip = xff[:i]
+ }
+
+ return ip
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/recoverer.go b/vendor/github.com/go-chi/chi/middleware/recoverer.go
new file mode 100644
index 00000000..785b18c5
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/recoverer.go
@@ -0,0 +1,192 @@
+package middleware
+
+// The original work was derived from Goji's middleware, source:
+// https://github.com/zenazn/goji/tree/master/web/middleware
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net/http"
+ "os"
+ "runtime/debug"
+ "strings"
+)
+
+// Recoverer is a middleware that recovers from panics, logs the panic (and a
+// backtrace), and returns an HTTP 500 (Internal Server Error) status if
+// possible. Recoverer prints a request ID if one is provided.
+//
+// Alternatively, look at https://github.com/pressly/lg middleware pkgs.
+func Recoverer(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ defer func() {
+ if rvr := recover(); rvr != nil && rvr != http.ErrAbortHandler {
+
+ logEntry := GetLogEntry(r)
+ if logEntry != nil {
+ logEntry.Panic(rvr, debug.Stack())
+ } else {
+ PrintPrettyStack(rvr)
+ }
+
+ w.WriteHeader(http.StatusInternalServerError)
+ }
+ }()
+
+ next.ServeHTTP(w, r)
+ }
+
+ return http.HandlerFunc(fn)
+}
+
+func PrintPrettyStack(rvr interface{}) {
+ debugStack := debug.Stack()
+ s := prettyStack{}
+ out, err := s.parse(debugStack, rvr)
+ if err == nil {
+ os.Stderr.Write(out)
+ } else {
+ // print stdlib output as a fallback
+ os.Stderr.Write(debugStack)
+ }
+}
+
+type prettyStack struct {
+}
+
+func (s prettyStack) parse(debugStack []byte, rvr interface{}) ([]byte, error) {
+ var err error
+ useColor := true
+ buf := &bytes.Buffer{}
+
+ cW(buf, false, bRed, "\n")
+ cW(buf, useColor, bCyan, " panic: ")
+ cW(buf, useColor, bBlue, "%v", rvr)
+ cW(buf, false, bWhite, "\n \n")
+
+ // process debug stack info
+ stack := strings.Split(string(debugStack), "\n")
+ lines := []string{}
+
+ // locate panic line, as we may have nested panics
+ for i := len(stack) - 1; i > 0; i-- {
+ lines = append(lines, stack[i])
+ if strings.HasPrefix(stack[i], "panic(0x") {
+ lines = lines[0 : len(lines)-2] // remove boilerplate
+ break
+ }
+ }
+
+ // reverse
+ for i := len(lines)/2 - 1; i >= 0; i-- {
+ opp := len(lines) - 1 - i
+ lines[i], lines[opp] = lines[opp], lines[i]
+ }
+
+ // decorate
+ for i, line := range lines {
+ lines[i], err = s.decorateLine(line, useColor, i)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ for _, l := range lines {
+ fmt.Fprintf(buf, "%s", l)
+ }
+ return buf.Bytes(), nil
+}
+
+func (s prettyStack) decorateLine(line string, useColor bool, num int) (string, error) {
+ line = strings.TrimSpace(line)
+ if strings.HasPrefix(line, "\t") || strings.Contains(line, ".go:") {
+ return s.decorateSourceLine(line, useColor, num)
+ } else if strings.HasSuffix(line, ")") {
+ return s.decorateFuncCallLine(line, useColor, num)
+ } else {
+ if strings.HasPrefix(line, "\t") {
+ return strings.Replace(line, "\t", " ", 1), nil
+ } else {
+ return fmt.Sprintf(" %s\n", line), nil
+ }
+ }
+}
+
+func (s prettyStack) decorateFuncCallLine(line string, useColor bool, num int) (string, error) {
+ idx := strings.LastIndex(line, "(")
+ if idx < 0 {
+ return "", errors.New("not a func call line")
+ }
+
+ buf := &bytes.Buffer{}
+ pkg := line[0:idx]
+ // addr := line[idx:]
+ method := ""
+
+ idx = strings.LastIndex(pkg, string(os.PathSeparator))
+ if idx < 0 {
+ idx = strings.Index(pkg, ".")
+ method = pkg[idx:]
+ pkg = pkg[0:idx]
+ } else {
+ method = pkg[idx+1:]
+ pkg = pkg[0 : idx+1]
+ idx = strings.Index(method, ".")
+ pkg += method[0:idx]
+ method = method[idx:]
+ }
+ pkgColor := nYellow
+ methodColor := bGreen
+
+ if num == 0 {
+ cW(buf, useColor, bRed, " -> ")
+ pkgColor = bMagenta
+ methodColor = bRed
+ } else {
+ cW(buf, useColor, bWhite, " ")
+ }
+ cW(buf, useColor, pkgColor, "%s", pkg)
+ cW(buf, useColor, methodColor, "%s\n", method)
+ // cW(buf, useColor, nBlack, "%s", addr)
+ return buf.String(), nil
+}
+
+func (s prettyStack) decorateSourceLine(line string, useColor bool, num int) (string, error) {
+ idx := strings.LastIndex(line, ".go:")
+ if idx < 0 {
+ return "", errors.New("not a source line")
+ }
+
+ buf := &bytes.Buffer{}
+ path := line[0 : idx+3]
+ lineno := line[idx+3:]
+
+ idx = strings.LastIndex(path, string(os.PathSeparator))
+ dir := path[0 : idx+1]
+ file := path[idx+1:]
+
+ idx = strings.Index(lineno, " ")
+ if idx > 0 {
+ lineno = lineno[0:idx]
+ }
+ fileColor := bCyan
+ lineColor := bGreen
+
+ if num == 1 {
+ cW(buf, useColor, bRed, " -> ")
+ fileColor = bRed
+ lineColor = bMagenta
+ } else {
+ cW(buf, false, bWhite, " ")
+ }
+ cW(buf, useColor, bWhite, "%s", dir)
+ cW(buf, useColor, fileColor, "%s", file)
+ cW(buf, useColor, lineColor, "%s", lineno)
+ if num == 1 {
+ cW(buf, false, bWhite, "\n")
+ }
+ cW(buf, false, bWhite, "\n")
+
+ return buf.String(), nil
+}
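As a quick orientation for reviewers, here is a minimal sketch of how Recoverer is typically mounted on a chi router (the route, handler, and port are made up for illustration; error handling around ListenAndServe is omitted):

```go
package main

import (
	"net/http"

	"github.com/go-chi/chi"
	"github.com/go-chi/chi/middleware"
)

func main() {
	r := chi.NewRouter()

	// Mount Recoverer near the top of the stack so it catches panics from
	// every handler and middleware registered after it.
	r.Use(middleware.Recoverer)

	// This handler panics; the client still receives a 500 response and the
	// pretty-printed stack trace goes to stderr.
	r.Get("/boom", func(w http.ResponseWriter, r *http.Request) {
		panic("something went wrong")
	})

	http.ListenAndServe(":3000", r)
}
```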
diff --git a/vendor/github.com/go-chi/chi/middleware/request_id.go b/vendor/github.com/go-chi/chi/middleware/request_id.go
new file mode 100644
index 00000000..4903ecc2
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/request_id.go
@@ -0,0 +1,96 @@
+package middleware
+
+// Ported from Goji's middleware, source:
+// https://github.com/zenazn/goji/tree/master/web/middleware
+
+import (
+ "context"
+ "crypto/rand"
+ "encoding/base64"
+ "fmt"
+ "net/http"
+ "os"
+ "strings"
+ "sync/atomic"
+)
+
+// Key to use when setting the request ID.
+type ctxKeyRequestID int
+
+// RequestIDKey is the key that holds the unique request ID in a request context.
+const RequestIDKey ctxKeyRequestID = 0
+
+// RequestIDHeader is the name of the HTTP Header which contains the request id.
+// Exported so that it can be changed by developers.
+var RequestIDHeader = "X-Request-Id"
+
+var prefix string
+var reqid uint64
+
+// A quick note on the statistics here: we're trying to calculate the chance that
+// two randomly generated base62 prefixes will collide. We use the formula from
+// http://en.wikipedia.org/wiki/Birthday_problem
+//
+// P[m, n] \approx 1 - e^{-m^2/2n}
+//
+// We ballpark an upper bound for $m$ by imagining (for whatever reason) a server
+// that restarts every second over 10 years, for $m = 86400 * 365 * 10 = 315360000$
+//
+// For a $k$ character base-62 identifier, we have $n(k) = 62^k$
+//
+// Plugging this in, we find $P[m, n(10)] \approx 5.75%$, which is good enough for
+// our purposes, and is surely more than anyone would ever need in practice -- a
+// process that is rebooted a handful of times a day for a hundred years has less
+// than a millionth of a percent chance of generating two colliding IDs.
+
+func init() {
+ hostname, err := os.Hostname()
+ if hostname == "" || err != nil {
+ hostname = "localhost"
+ }
+ var buf [12]byte
+ var b64 string
+ for len(b64) < 10 {
+ rand.Read(buf[:])
+ b64 = base64.StdEncoding.EncodeToString(buf[:])
+ b64 = strings.NewReplacer("+", "", "/", "").Replace(b64)
+ }
+
+ prefix = fmt.Sprintf("%s/%s", hostname, b64[0:10])
+}
+
+// RequestID is a middleware that injects a request ID into the context of each
+// request. A request ID is a string of the form "host.example.com/random-0001",
+// where "random" is a base62 random string that uniquely identifies this go
+// process, and where the last number is an atomically incremented request
+// counter.
+func RequestID(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ requestID := r.Header.Get(RequestIDHeader)
+ if requestID == "" {
+ myid := atomic.AddUint64(&reqid, 1)
+ requestID = fmt.Sprintf("%s-%06d", prefix, myid)
+ }
+ ctx = context.WithValue(ctx, RequestIDKey, requestID)
+ next.ServeHTTP(w, r.WithContext(ctx))
+ }
+ return http.HandlerFunc(fn)
+}
+
+// GetReqID returns a request ID from the given context if one is present.
+// Returns the empty string if a request ID cannot be found.
+func GetReqID(ctx context.Context) string {
+ if ctx == nil {
+ return ""
+ }
+ if reqID, ok := ctx.Value(RequestIDKey).(string); ok {
+ return reqID
+ }
+ return ""
+}
+
+// NextRequestID generates the next request ID in the sequence.
+func NextRequestID() uint64 {
+ return atomic.AddUint64(&reqid, 1)
+}
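A small usage sketch for the two exported pieces above, RequestID and GetReqID (the handler and port are illustrative):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/go-chi/chi"
	"github.com/go-chi/chi/middleware"
)

func main() {
	r := chi.NewRouter()
	r.Use(middleware.RequestID)

	r.Get("/", func(w http.ResponseWriter, r *http.Request) {
		// The ID was injected into the context by the middleware above. If the
		// client supplied an X-Request-Id header, that value is reused verbatim.
		reqID := middleware.GetReqID(r.Context())
		fmt.Fprintf(w, "handled request %s\n", reqID)
	})

	http.ListenAndServe(":3000", r)
}
```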
diff --git a/vendor/github.com/go-chi/chi/middleware/route_headers.go b/vendor/github.com/go-chi/chi/middleware/route_headers.go
new file mode 100644
index 00000000..7ee30c87
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/route_headers.go
@@ -0,0 +1,160 @@
+package middleware
+
+import (
+ "net/http"
+ "strings"
+)
+
+// RouteHeaders is a neat little header-based router that allows you to direct
+// the flow of a request through a middleware stack based on a request header.
+//
+// For example, let's say you'd like to set up multiple routers depending on
+// the request Host header; you could then do something like so:
+//
+// r := chi.NewRouter()
+// rSubdomain := chi.NewRouter()
+//
+// r.Use(middleware.RouteHeaders().
+// Route("Host", "example.com", middleware.New(r)).
+// Route("Host", "*.example.com", middleware.New(rSubdomain)).
+// Handler)
+//
+// r.Get("/", h)
+// rSubdomain.Get("/", h2)
+//
+//
+// As another example, imagine you want to set up multiple CORS handlers, where
+// for your origin servers you allow authorized requests, but for third-party
+// public requests, authorization is disabled.
+//
+// r := chi.NewRouter()
+//
+// r.Use(middleware.RouteHeaders().
+// Route("Origin", "https://app.skyweaver.net", cors.Handler(cors.Options{
+// AllowedOrigins: []string{"https://api.skyweaver.net"},
+// AllowedMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"},
+// AllowedHeaders: []string{"Accept", "Authorization", "Content-Type"},
+// AllowCredentials: true, // <----------<<< allow credentials
+// })).
+// Route("Origin", "*", cors.Handler(cors.Options{
+// AllowedOrigins: []string{"*"},
+// AllowedMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"},
+// AllowedHeaders: []string{"Accept", "Content-Type"},
+// AllowCredentials: false, // <----------<<< do not allow credentials
+// })).
+// Handler)
+//
+func RouteHeaders() HeaderRouter {
+ return HeaderRouter{}
+}
+
+type HeaderRouter map[string][]HeaderRoute
+
+func (hr HeaderRouter) Route(header string, match string, middlewareHandler func(next http.Handler) http.Handler) HeaderRouter {
+ header = strings.ToLower(header)
+ k := hr[header]
+ if k == nil {
+ hr[header] = []HeaderRoute{}
+ }
+ hr[header] = append(hr[header], HeaderRoute{MatchOne: NewPattern(match), Middleware: middlewareHandler})
+ return hr
+}
+
+func (hr HeaderRouter) RouteAny(header string, match []string, middlewareHandler func(next http.Handler) http.Handler) HeaderRouter {
+ header = strings.ToLower(header)
+ k := hr[header]
+ if k == nil {
+ hr[header] = []HeaderRoute{}
+ }
+ patterns := []Pattern{}
+ for _, m := range match {
+ patterns = append(patterns, NewPattern(m))
+ }
+ hr[header] = append(hr[header], HeaderRoute{MatchAny: patterns, Middleware: middlewareHandler})
+ return hr
+}
+
+func (hr HeaderRouter) RouteDefault(handler func(next http.Handler) http.Handler) HeaderRouter {
+ hr["*"] = []HeaderRoute{{Middleware: handler}}
+ return hr
+}
+
+func (hr HeaderRouter) Handler(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if len(hr) == 0 {
+ next.ServeHTTP(w, r) // skip if no routes set
+ return
+ }
+
+ // find first matching header route, and continue
+ for header, matchers := range hr {
+ headerValue := r.Header.Get(header)
+ if headerValue == "" {
+ continue
+ }
+ headerValue = strings.ToLower(headerValue)
+ for _, matcher := range matchers {
+ if matcher.IsMatch(headerValue) {
+ matcher.Middleware(next).ServeHTTP(w, r)
+ return
+ }
+ }
+ }
+
+ // if no match, check for "*" default route
+ matcher, ok := hr["*"]
+ if !ok || matcher[0].Middleware == nil {
+ next.ServeHTTP(w, r)
+ return
+ }
+ matcher[0].Middleware(next).ServeHTTP(w, r)
+ })
+}
+
+type HeaderRoute struct {
+ MatchAny []Pattern
+ MatchOne Pattern
+ Middleware func(next http.Handler) http.Handler
+}
+
+func (r HeaderRoute) IsMatch(value string) bool {
+ if len(r.MatchAny) > 0 {
+ for _, m := range r.MatchAny {
+ if m.Match(value) {
+ return true
+ }
+ }
+ } else if r.MatchOne.Match(value) {
+ return true
+ }
+ return false
+}
+
+type Pattern struct {
+ prefix string
+ suffix string
+ wildcard bool
+}
+
+func NewPattern(value string) Pattern {
+ p := Pattern{}
+ if i := strings.IndexByte(value, '*'); i >= 0 {
+ p.wildcard = true
+ p.prefix = value[0:i]
+ p.suffix = value[i+1:]
+ } else {
+ p.prefix = value
+ }
+ return p
+}
+
+func (p Pattern) Match(v string) bool {
+ if !p.wildcard {
+ if p.prefix == v {
+ return true
+ } else {
+ return false
+ }
+ }
+ return len(v) >= len(p.prefix+p.suffix) && strings.HasPrefix(v, p.prefix) && strings.HasSuffix(v, p.suffix)
+}
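The Pattern type above is what gives RouteHeaders its lightweight wildcard matching: everything before the single `*` must match as a prefix, everything after it as a suffix. A short sketch of its behaviour (values are illustrative; note that Handler lower-cases incoming header values before matching, so patterns are best written in lowercase):

```go
package main

import (
	"fmt"

	"github.com/go-chi/chi/middleware"
)

func main() {
	p := middleware.NewPattern("*.example.com")

	fmt.Println(p.Match("api.example.com")) // true: suffix ".example.com" matches
	fmt.Println(p.Match("example.com"))     // false: too short to satisfy "*."
	fmt.Println(p.Match("api.example.org")) // false: suffix differs

	// Without a '*', matching is an exact string comparison.
	fmt.Println(middleware.NewPattern("example.com").Match("example.com")) // true
}
```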
diff --git a/vendor/github.com/go-chi/chi/middleware/strip.go b/vendor/github.com/go-chi/chi/middleware/strip.go
new file mode 100644
index 00000000..2b8b1842
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/strip.go
@@ -0,0 +1,56 @@
+package middleware
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/go-chi/chi"
+)
+
+// StripSlashes is a middleware that will match request paths with a trailing
+// slash, strip it from the path, and continue routing through the mux. If a
+// route matches, it will then serve the handler.
+func StripSlashes(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ var path string
+ rctx := chi.RouteContext(r.Context())
+ if rctx.RoutePath != "" {
+ path = rctx.RoutePath
+ } else {
+ path = r.URL.Path
+ }
+ if len(path) > 1 && path[len(path)-1] == '/' {
+ rctx.RoutePath = path[:len(path)-1]
+ }
+ next.ServeHTTP(w, r)
+ }
+ return http.HandlerFunc(fn)
+}
+
+// RedirectSlashes is a middleware that will match request paths with a trailing
+// slash and redirect to the same path, less the trailing slash.
+//
+// NOTE: RedirectSlashes middleware is *incompatible* with http.FileServer,
+// see https://github.com/go-chi/chi/issues/343
+func RedirectSlashes(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ var path string
+ rctx := chi.RouteContext(r.Context())
+ if rctx.RoutePath != "" {
+ path = rctx.RoutePath
+ } else {
+ path = r.URL.Path
+ }
+ if len(path) > 1 && path[len(path)-1] == '/' {
+ if r.URL.RawQuery != "" {
+ path = fmt.Sprintf("%s?%s", path[:len(path)-1], r.URL.RawQuery)
+ } else {
+ path = path[:len(path)-1]
+ }
+ http.Redirect(w, r, path, 301)
+ return
+ }
+ next.ServeHTTP(w, r)
+ }
+ return http.HandlerFunc(fn)
+}
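A minimal sketch of mounting these middlewares (routes are illustrative). StripSlashes rewrites only the routing path, so `/accounts/` and `/accounts` hit the same handler, whereas RedirectSlashes answers with a 301 to the slash-less URL and, per the note above, should not be combined with http.FileServer:

```go
package main

import (
	"net/http"

	"github.com/go-chi/chi"
	"github.com/go-chi/chi/middleware"
)

func main() {
	r := chi.NewRouter()

	// Either StripSlashes or RedirectSlashes would normally be used, not both;
	// StripSlashes is shown here.
	r.Use(middleware.StripSlashes)

	// Both GET /accounts and GET /accounts/ now resolve to this route.
	r.Get("/accounts", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("accounts"))
	})

	http.ListenAndServe(":3000", r)
}
```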
diff --git a/vendor/github.com/go-chi/chi/middleware/terminal.go b/vendor/github.com/go-chi/chi/middleware/terminal.go
new file mode 100644
index 00000000..5ead7b92
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/terminal.go
@@ -0,0 +1,63 @@
+package middleware
+
+// Ported from Goji's middleware, source:
+// https://github.com/zenazn/goji/tree/master/web/middleware
+
+import (
+ "fmt"
+ "io"
+ "os"
+)
+
+var (
+ // Normal colors
+ nBlack = []byte{'\033', '[', '3', '0', 'm'}
+ nRed = []byte{'\033', '[', '3', '1', 'm'}
+ nGreen = []byte{'\033', '[', '3', '2', 'm'}
+ nYellow = []byte{'\033', '[', '3', '3', 'm'}
+ nBlue = []byte{'\033', '[', '3', '4', 'm'}
+ nMagenta = []byte{'\033', '[', '3', '5', 'm'}
+ nCyan = []byte{'\033', '[', '3', '6', 'm'}
+ nWhite = []byte{'\033', '[', '3', '7', 'm'}
+ // Bright colors
+ bBlack = []byte{'\033', '[', '3', '0', ';', '1', 'm'}
+ bRed = []byte{'\033', '[', '3', '1', ';', '1', 'm'}
+ bGreen = []byte{'\033', '[', '3', '2', ';', '1', 'm'}
+ bYellow = []byte{'\033', '[', '3', '3', ';', '1', 'm'}
+ bBlue = []byte{'\033', '[', '3', '4', ';', '1', 'm'}
+ bMagenta = []byte{'\033', '[', '3', '5', ';', '1', 'm'}
+ bCyan = []byte{'\033', '[', '3', '6', ';', '1', 'm'}
+ bWhite = []byte{'\033', '[', '3', '7', ';', '1', 'm'}
+
+ reset = []byte{'\033', '[', '0', 'm'}
+)
+
+var IsTTY bool
+
+func init() {
+ // This is sort of cheating: if stdout is a character device, we assume
+ // that means it's a TTY. Unfortunately, there are many non-TTY
+ // character devices, but fortunately stdout is rarely set to any of
+ // them.
+ //
+ // We could solve this properly by pulling in a dependency on
+ // code.google.com/p/go.crypto/ssh/terminal, for instance, but as a
+ // heuristic for whether to print in color or in black-and-white, I'd
+ // really rather not.
+ fi, err := os.Stdout.Stat()
+ if err == nil {
+ m := os.ModeDevice | os.ModeCharDevice
+ IsTTY = fi.Mode()&m == m
+ }
+}
+
+// colorWrite
+func cW(w io.Writer, useColor bool, color []byte, s string, args ...interface{}) {
+ if IsTTY && useColor {
+ w.Write(color)
+ }
+ fmt.Fprintf(w, s, args...)
+ if IsTTY && useColor {
+ w.Write(reset)
+ }
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/throttle.go b/vendor/github.com/go-chi/chi/middleware/throttle.go
new file mode 100644
index 00000000..fdedd3c1
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/throttle.go
@@ -0,0 +1,132 @@
+package middleware
+
+import (
+ "net/http"
+ "strconv"
+ "time"
+)
+
+const (
+ errCapacityExceeded = "Server capacity exceeded."
+ errTimedOut = "Timed out while waiting for a pending request to complete."
+ errContextCanceled = "Context was canceled."
+)
+
+var (
+ defaultBacklogTimeout = time.Second * 60
+)
+
+// ThrottleOpts represents a set of throttling options.
+type ThrottleOpts struct {
+ Limit int
+ BacklogLimit int
+ BacklogTimeout time.Duration
+ RetryAfterFn func(ctxDone bool) time.Duration
+}
+
+// Throttle is a middleware that limits the number of currently processed
+// requests at a time across all users. Note: Throttle is not a per-user
+// rate-limiter; instead it just puts a ceiling on the number of currently
+// in-flight requests processed from the point where the middleware is mounted.
+func Throttle(limit int) func(http.Handler) http.Handler {
+ return ThrottleWithOpts(ThrottleOpts{Limit: limit, BacklogTimeout: defaultBacklogTimeout})
+}
+
+// ThrottleBacklog is a middleware that limits the number of currently
+// processed requests at a time and provides a backlog for holding a finite
+// number of pending requests.
+func ThrottleBacklog(limit int, backlogLimit int, backlogTimeout time.Duration) func(http.Handler) http.Handler {
+ return ThrottleWithOpts(ThrottleOpts{Limit: limit, BacklogLimit: backlogLimit, BacklogTimeout: backlogTimeout})
+}
+
+// ThrottleWithOpts is a middleware that limits the number of currently processed requests using the passed ThrottleOpts.
+func ThrottleWithOpts(opts ThrottleOpts) func(http.Handler) http.Handler {
+ if opts.Limit < 1 {
+ panic("chi/middleware: Throttle expects limit > 0")
+ }
+
+ if opts.BacklogLimit < 0 {
+ panic("chi/middleware: Throttle expects backlogLimit to be positive")
+ }
+
+ t := throttler{
+ tokens: make(chan token, opts.Limit),
+ backlogTokens: make(chan token, opts.Limit+opts.BacklogLimit),
+ backlogTimeout: opts.BacklogTimeout,
+ retryAfterFn: opts.RetryAfterFn,
+ }
+
+ // Filling tokens.
+ for i := 0; i < opts.Limit+opts.BacklogLimit; i++ {
+ if i < opts.Limit {
+ t.tokens <- token{}
+ }
+ t.backlogTokens <- token{}
+ }
+
+ return func(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ select {
+
+ case <-ctx.Done():
+ t.setRetryAfterHeaderIfNeeded(w, true)
+ http.Error(w, errContextCanceled, http.StatusServiceUnavailable)
+ return
+
+ case btok := <-t.backlogTokens:
+ timer := time.NewTimer(t.backlogTimeout)
+
+ defer func() {
+ t.backlogTokens <- btok
+ }()
+
+ select {
+ case <-timer.C:
+ t.setRetryAfterHeaderIfNeeded(w, false)
+ http.Error(w, errTimedOut, http.StatusServiceUnavailable)
+ return
+ case <-ctx.Done():
+ timer.Stop()
+ t.setRetryAfterHeaderIfNeeded(w, true)
+ http.Error(w, errContextCanceled, http.StatusServiceUnavailable)
+ return
+ case tok := <-t.tokens:
+ defer func() {
+ timer.Stop()
+ t.tokens <- tok
+ }()
+ next.ServeHTTP(w, r)
+ }
+ return
+
+ default:
+ t.setRetryAfterHeaderIfNeeded(w, false)
+ http.Error(w, errCapacityExceeded, http.StatusServiceUnavailable)
+ return
+ }
+ }
+
+ return http.HandlerFunc(fn)
+ }
+}
+
+// token represents a request that is being processed.
+type token struct{}
+
+// throttler limits number of currently processed requests at a time.
+type throttler struct {
+ tokens chan token
+ backlogTokens chan token
+ backlogTimeout time.Duration
+ retryAfterFn func(ctxDone bool) time.Duration
+}
+
+// setRetryAfterHeaderIfNeeded sets Retry-After HTTP header if corresponding retryAfterFn option of throttler is initialized.
+func (t throttler) setRetryAfterHeaderIfNeeded(w http.ResponseWriter, ctxDone bool) {
+ if t.retryAfterFn == nil {
+ return
+ }
+ w.Header().Set("Retry-After", strconv.Itoa(int(t.retryAfterFn(ctxDone).Seconds())))
+}
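A sketch of the options-based variant, showing how RetryAfterFn feeds the Retry-After header when requests are shed (all numbers below are illustrative):

```go
package main

import (
	"net/http"
	"time"

	"github.com/go-chi/chi"
	"github.com/go-chi/chi/middleware"
)

func main() {
	r := chi.NewRouter()

	// At most 5 requests are processed concurrently; up to 50 more wait in the
	// backlog for at most 30 seconds. Rejected requests get a Retry-After header.
	r.Use(middleware.ThrottleWithOpts(middleware.ThrottleOpts{
		Limit:          5,
		BacklogLimit:   50,
		BacklogTimeout: 30 * time.Second,
		RetryAfterFn:   func(ctxDone bool) time.Duration { return 10 * time.Second },
	}))

	r.Get("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	http.ListenAndServe(":3000", r)
}
```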
diff --git a/vendor/github.com/go-chi/chi/middleware/timeout.go b/vendor/github.com/go-chi/chi/middleware/timeout.go
new file mode 100644
index 00000000..8e373536
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/timeout.go
@@ -0,0 +1,49 @@
+package middleware
+
+import (
+ "context"
+ "net/http"
+ "time"
+)
+
+// Timeout is a middleware that cancels ctx after a given timeout and returns
+// a 504 Gateway Timeout error to the client.
+//
+// You must select on the ctx.Done() channel to check whether the context has
+// reached its deadline and return if so; otherwise the timeout signal will
+// simply be ignored.
+//
+// For example, a route/handler may look like:
+//
+// r.Get("/long", func(w http.ResponseWriter, r *http.Request) {
+// ctx := r.Context()
+// processTime := time.Duration(rand.Intn(4)+1) * time.Second
+//
+// select {
+// case <-ctx.Done():
+// return
+//
+// case <-time.After(processTime):
+// // The above channel simulates some hard work.
+// }
+//
+// w.Write([]byte("done"))
+// })
+//
+func Timeout(timeout time.Duration) func(next http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ ctx, cancel := context.WithTimeout(r.Context(), timeout)
+ defer func() {
+ cancel()
+ if ctx.Err() == context.DeadlineExceeded {
+ w.WriteHeader(http.StatusGatewayTimeout)
+ }
+ }()
+
+ r = r.WithContext(ctx)
+ next.ServeHTTP(w, r)
+ }
+ return http.HandlerFunc(fn)
+ }
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/url_format.go b/vendor/github.com/go-chi/chi/middleware/url_format.go
new file mode 100644
index 00000000..5749e4f3
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/url_format.go
@@ -0,0 +1,72 @@
+package middleware
+
+import (
+ "context"
+ "net/http"
+ "strings"
+
+ "github.com/go-chi/chi"
+)
+
+var (
+ // URLFormatCtxKey is the context.Context key to store the URL format data
+ // for a request.
+ URLFormatCtxKey = &contextKey{"URLFormat"}
+)
+
+// URLFormat is a middleware that parses the url extension from a request path and stores it
+// on the context as a string under the key `middleware.URLFormatCtxKey`. The middleware will
+// trim the suffix from the routing path and continue routing.
+//
+// Routers should not include a url parameter for the suffix when using this middleware.
+//
+// Sample usage, for URL paths `/articles/1`, `/articles/1.json` and `/articles/1.xml`:
+//
+// func routes() http.Handler {
+// r := chi.NewRouter()
+// r.Use(middleware.URLFormat)
+//
+// r.Get("/articles/{id}", ListArticles)
+//
+// return r
+// }
+//
+// func ListArticles(w http.ResponseWriter, r *http.Request) {
+// urlFormat, _ := r.Context().Value(middleware.URLFormatCtxKey).(string)
+//
+// switch urlFormat {
+// case "json":
+// render.JSON(w, r, articles)
+// case "xml:"
+// render.XML(w, r, articles)
+// default:
+// render.JSON(w, r, articles)
+// }
+// }
+//
+func URLFormat(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ var format string
+ path := r.URL.Path
+
+ if strings.Index(path, ".") > 0 {
+ base := strings.LastIndex(path, "/")
+ idx := strings.Index(path[base:], ".")
+
+ if idx > 0 {
+ idx += base
+ format = path[idx+1:]
+
+ rctx := chi.RouteContext(r.Context())
+ rctx.RoutePath = path[:idx]
+ }
+ }
+
+ r = r.WithContext(context.WithValue(ctx, URLFormatCtxKey, format))
+
+ next.ServeHTTP(w, r)
+ }
+ return http.HandlerFunc(fn)
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/value.go b/vendor/github.com/go-chi/chi/middleware/value.go
new file mode 100644
index 00000000..fbbd0393
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/value.go
@@ -0,0 +1,17 @@
+package middleware
+
+import (
+ "context"
+ "net/http"
+)
+
+// WithValue is a middleware that sets a given key/value in a context chain.
+func WithValue(key interface{}, val interface{}) func(next http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ r = r.WithContext(context.WithValue(r.Context(), key, val))
+ next.ServeHTTP(w, r)
+ }
+ return http.HandlerFunc(fn)
+ }
+}
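WithValue is mostly useful for injecting static, request-scoped configuration into every request. A small sketch (the key type and value here are hypothetical):

```go
package main

import (
	"net/http"

	"github.com/go-chi/chi"
	"github.com/go-chi/chi/middleware"
)

// ctxKey is a private key type so the value cannot collide with other packages.
type ctxKey string

func main() {
	r := chi.NewRouter()
	r.Use(middleware.WithValue(ctxKey("version"), "v1.2.3"))

	r.Get("/version", func(w http.ResponseWriter, r *http.Request) {
		v, _ := r.Context().Value(ctxKey("version")).(string)
		w.Write([]byte(v))
	})

	http.ListenAndServe(":3000", r)
}
```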
diff --git a/vendor/github.com/go-chi/chi/middleware/wrap_writer.go b/vendor/github.com/go-chi/chi/middleware/wrap_writer.go
new file mode 100644
index 00000000..382a523e
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/wrap_writer.go
@@ -0,0 +1,180 @@
+package middleware
+
+// The original work was derived from Goji's middleware, source:
+// https://github.com/zenazn/goji/tree/master/web/middleware
+
+import (
+ "bufio"
+ "io"
+ "net"
+ "net/http"
+)
+
+// NewWrapResponseWriter wraps an http.ResponseWriter, returning a proxy that allows you to
+// hook into various parts of the response process.
+func NewWrapResponseWriter(w http.ResponseWriter, protoMajor int) WrapResponseWriter {
+ _, fl := w.(http.Flusher)
+
+ bw := basicWriter{ResponseWriter: w}
+
+ if protoMajor == 2 {
+ _, ps := w.(http.Pusher)
+ if fl && ps {
+ return &http2FancyWriter{bw}
+ }
+ } else {
+ _, hj := w.(http.Hijacker)
+ _, rf := w.(io.ReaderFrom)
+ if fl && hj && rf {
+ return &httpFancyWriter{bw}
+ }
+ }
+ if fl {
+ return &flushWriter{bw}
+ }
+
+ return &bw
+}
+
+// WrapResponseWriter is a proxy around an http.ResponseWriter that allows you to hook
+// into various parts of the response process.
+type WrapResponseWriter interface {
+ http.ResponseWriter
+ // Status returns the HTTP status of the request, or 0 if one has not
+ // yet been sent.
+ Status() int
+ // BytesWritten returns the total number of bytes sent to the client.
+ BytesWritten() int
+ // Tee causes the response body to be written to the given io.Writer in
+ // addition to proxying the writes through. Only one io.Writer can be
+ // tee'd to at once: setting a second one will overwrite the first.
+ // Writes will be sent to the proxy before being written to this
+ // io.Writer. It is illegal for the tee'd writer to be modified
+ // concurrently with writes.
+ Tee(io.Writer)
+ // Unwrap returns the original proxied target.
+ Unwrap() http.ResponseWriter
+}
+
+// basicWriter wraps an http.ResponseWriter, implementing the minimal
+// http.ResponseWriter interface while tracking the status code and bytes written.
+type basicWriter struct {
+ http.ResponseWriter
+ wroteHeader bool
+ code int
+ bytes int
+ tee io.Writer
+}
+
+func (b *basicWriter) WriteHeader(code int) {
+ if !b.wroteHeader {
+ b.code = code
+ b.wroteHeader = true
+ b.ResponseWriter.WriteHeader(code)
+ }
+}
+
+func (b *basicWriter) Write(buf []byte) (int, error) {
+ b.maybeWriteHeader()
+ n, err := b.ResponseWriter.Write(buf)
+ if b.tee != nil {
+ _, err2 := b.tee.Write(buf[:n])
+ // Prefer errors generated by the proxied writer.
+ if err == nil {
+ err = err2
+ }
+ }
+ b.bytes += n
+ return n, err
+}
+
+func (b *basicWriter) maybeWriteHeader() {
+ if !b.wroteHeader {
+ b.WriteHeader(http.StatusOK)
+ }
+}
+
+func (b *basicWriter) Status() int {
+ return b.code
+}
+
+func (b *basicWriter) BytesWritten() int {
+ return b.bytes
+}
+
+func (b *basicWriter) Tee(w io.Writer) {
+ b.tee = w
+}
+
+func (b *basicWriter) Unwrap() http.ResponseWriter {
+ return b.ResponseWriter
+}
+
+type flushWriter struct {
+ basicWriter
+}
+
+func (f *flushWriter) Flush() {
+ f.wroteHeader = true
+ fl := f.basicWriter.ResponseWriter.(http.Flusher)
+ fl.Flush()
+}
+
+var _ http.Flusher = &flushWriter{}
+
+// httpFancyWriter is an HTTP writer that additionally satisfies
+// http.Flusher, http.Hijacker, and io.ReaderFrom. It exists for the common case
+// of wrapping the http.ResponseWriter that package http gives you, in order to
+// make the wrapper support the full method set of the proxied object.
+type httpFancyWriter struct {
+ basicWriter
+}
+
+func (f *httpFancyWriter) Flush() {
+ f.wroteHeader = true
+ fl := f.basicWriter.ResponseWriter.(http.Flusher)
+ fl.Flush()
+}
+
+func (f *httpFancyWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ hj := f.basicWriter.ResponseWriter.(http.Hijacker)
+ return hj.Hijack()
+}
+
+func (f *http2FancyWriter) Push(target string, opts *http.PushOptions) error {
+ return f.basicWriter.ResponseWriter.(http.Pusher).Push(target, opts)
+}
+
+func (f *httpFancyWriter) ReadFrom(r io.Reader) (int64, error) {
+ if f.basicWriter.tee != nil {
+ n, err := io.Copy(&f.basicWriter, r)
+ f.basicWriter.bytes += int(n)
+ return n, err
+ }
+ rf := f.basicWriter.ResponseWriter.(io.ReaderFrom)
+ f.basicWriter.maybeWriteHeader()
+ n, err := rf.ReadFrom(r)
+ f.basicWriter.bytes += int(n)
+ return n, err
+}
+
+var _ http.Flusher = &httpFancyWriter{}
+var _ http.Hijacker = &httpFancyWriter{}
+var _ http.Pusher = &http2FancyWriter{}
+var _ io.ReaderFrom = &httpFancyWriter{}
+
+// http2FancyWriter is an HTTP/2 writer that additionally satisfies
+// http.Flusher and http.Pusher. It exists for the common case
+// of wrapping the http.ResponseWriter that package http gives you, in order to
+// make the wrapper support the full method set of the proxied object.
+type http2FancyWriter struct {
+ basicWriter
+}
+
+func (f *http2FancyWriter) Flush() {
+ f.wroteHeader = true
+ fl := f.basicWriter.ResponseWriter.(http.Flusher)
+ fl.Flush()
+}
+
+var _ http.Flusher = &http2FancyWriter{}
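The typical consumer of NewWrapResponseWriter is a logging or metrics middleware that needs the status code and byte count after the handler has run. A hypothetical example (statusLogger is not part of this package):

```go
package main

import (
	"log"
	"net/http"

	"github.com/go-chi/chi"
	"github.com/go-chi/chi/middleware"
)

// statusLogger wraps the ResponseWriter so the status and size written by the
// downstream handler can be read back afterwards.
func statusLogger(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ww := middleware.NewWrapResponseWriter(w, r.ProtoMajor)
		next.ServeHTTP(ww, r)
		log.Printf("%s %s -> %d (%d bytes)", r.Method, r.URL.Path, ww.Status(), ww.BytesWritten())
	})
}

func main() {
	r := chi.NewRouter()
	r.Use(statusLogger)
	r.Get("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})
	http.ListenAndServe(":3000", r)
}
```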
diff --git a/vendor/github.com/go-chi/chi/mux.go b/vendor/github.com/go-chi/chi/mux.go
new file mode 100644
index 00000000..52950e97
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/mux.go
@@ -0,0 +1,466 @@
+package chi
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "strings"
+ "sync"
+)
+
+var _ Router = &Mux{}
+
+// Mux is a simple HTTP route multiplexer that parses a request path,
+// records any URL params, and executes an end handler. It implements
+// the http.Handler interface and is friendly with the standard library.
+//
+// Mux is designed to be fast, minimal and offer a powerful API for building
+// modular and composable HTTP services with a large set of handlers. It's
+// particularly useful for writing large REST API services that break a handler
+// into many smaller parts composed of middlewares and end handlers.
+type Mux struct {
+ // The radix trie router
+ tree *node
+
+ // The middleware stack
+ middlewares []func(http.Handler) http.Handler
+
+ // Controls the behaviour of middleware chain generation when a mux
+ // is registered as an inline group inside another mux.
+ inline bool
+ parent *Mux
+
+ // The computed mux handler made of the chained middleware stack and
+ // the tree router
+ handler http.Handler
+
+ // Routing context pool
+ pool *sync.Pool
+
+ // Custom route not found handler
+ notFoundHandler http.HandlerFunc
+
+ // Custom method not allowed handler
+ methodNotAllowedHandler http.HandlerFunc
+}
+
+// NewMux returns a newly initialized Mux object that implements the Router
+// interface.
+func NewMux() *Mux {
+ mux := &Mux{tree: &node{}, pool: &sync.Pool{}}
+ mux.pool.New = func() interface{} {
+ return NewRouteContext()
+ }
+ return mux
+}
+
+// ServeHTTP is the single method of the http.Handler interface that makes
+// Mux interoperable with the standard library. It uses a sync.Pool to get and
+// reuse routing contexts for each request.
+func (mx *Mux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ // Ensure the mux has some routes defined on the mux
+ if mx.handler == nil {
+ mx.NotFoundHandler().ServeHTTP(w, r)
+ return
+ }
+
+ // Check if a routing context already exists from a parent router.
+ rctx, _ := r.Context().Value(RouteCtxKey).(*Context)
+ if rctx != nil {
+ mx.handler.ServeHTTP(w, r)
+ return
+ }
+
+ // Fetch a RouteContext object from the sync pool, and call the computed
+ // mx.handler that is comprised of mx.middlewares + mx.routeHTTP.
+ // Once the request is finished, reset the routing context and put it back
+ // into the pool for reuse from another request.
+ rctx = mx.pool.Get().(*Context)
+ rctx.Reset()
+ rctx.Routes = mx
+
+ // NOTE: r.WithContext() causes 2 allocations and context.WithValue() causes 1 allocation
+ r = r.WithContext(context.WithValue(r.Context(), RouteCtxKey, rctx))
+
+ // Serve the request and once its done, put the request context back in the sync pool
+ mx.handler.ServeHTTP(w, r)
+ mx.pool.Put(rctx)
+}
+
+// Use appends a middleware handler to the Mux middleware stack.
+//
+// The middleware stack for any Mux will execute before searching for a matching
+// route to a specific handler, which provides opportunity to respond early,
+// change the course of the request execution, or set request-scoped values for
+// the next http.Handler.
+func (mx *Mux) Use(middlewares ...func(http.Handler) http.Handler) {
+ if mx.handler != nil {
+ panic("chi: all middlewares must be defined before routes on a mux")
+ }
+ mx.middlewares = append(mx.middlewares, middlewares...)
+}
+
+// Handle adds the route `pattern` that matches any http method to
+// execute the `handler` http.Handler.
+func (mx *Mux) Handle(pattern string, handler http.Handler) {
+ mx.handle(mALL, pattern, handler)
+}
+
+// HandleFunc adds the route `pattern` that matches any http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) HandleFunc(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mALL, pattern, handlerFn)
+}
+
+// Method adds the route `pattern` that matches `method` http method to
+// execute the `handler` http.Handler.
+func (mx *Mux) Method(method, pattern string, handler http.Handler) {
+ m, ok := methodMap[strings.ToUpper(method)]
+ if !ok {
+ panic(fmt.Sprintf("chi: '%s' http method is not supported.", method))
+ }
+ mx.handle(m, pattern, handler)
+}
+
+// MethodFunc adds the route `pattern` that matches `method` http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) MethodFunc(method, pattern string, handlerFn http.HandlerFunc) {
+ mx.Method(method, pattern, handlerFn)
+}
+
+// Connect adds the route `pattern` that matches a CONNECT http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Connect(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mCONNECT, pattern, handlerFn)
+}
+
+// Delete adds the route `pattern` that matches a DELETE http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Delete(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mDELETE, pattern, handlerFn)
+}
+
+// Get adds the route `pattern` that matches a GET http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Get(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mGET, pattern, handlerFn)
+}
+
+// Head adds the route `pattern` that matches a HEAD http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Head(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mHEAD, pattern, handlerFn)
+}
+
+// Options adds the route `pattern` that matches an OPTIONS http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Options(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mOPTIONS, pattern, handlerFn)
+}
+
+// Patch adds the route `pattern` that matches a PATCH http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Patch(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mPATCH, pattern, handlerFn)
+}
+
+// Post adds the route `pattern` that matches a POST http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Post(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mPOST, pattern, handlerFn)
+}
+
+// Put adds the route `pattern` that matches a PUT http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Put(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mPUT, pattern, handlerFn)
+}
+
+// Trace adds the route `pattern` that matches a TRACE http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Trace(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mTRACE, pattern, handlerFn)
+}
+
+// NotFound sets a custom http.HandlerFunc for routing paths that could
+// not be found. The default 404 handler is `http.NotFound`.
+func (mx *Mux) NotFound(handlerFn http.HandlerFunc) {
+ // Build NotFound handler chain
+ m := mx
+ hFn := handlerFn
+ if mx.inline && mx.parent != nil {
+ m = mx.parent
+ hFn = Chain(mx.middlewares...).HandlerFunc(hFn).ServeHTTP
+ }
+
+ // Update the notFoundHandler from this point forward
+ m.notFoundHandler = hFn
+ m.updateSubRoutes(func(subMux *Mux) {
+ if subMux.notFoundHandler == nil {
+ subMux.NotFound(hFn)
+ }
+ })
+}
+
+// MethodNotAllowed sets a custom http.HandlerFunc for routing paths where the
+// method is unresolved. The default handler returns a 405 with an empty body.
+func (mx *Mux) MethodNotAllowed(handlerFn http.HandlerFunc) {
+ // Build MethodNotAllowed handler chain
+ m := mx
+ hFn := handlerFn
+ if mx.inline && mx.parent != nil {
+ m = mx.parent
+ hFn = Chain(mx.middlewares...).HandlerFunc(hFn).ServeHTTP
+ }
+
+ // Update the methodNotAllowedHandler from this point forward
+ m.methodNotAllowedHandler = hFn
+ m.updateSubRoutes(func(subMux *Mux) {
+ if subMux.methodNotAllowedHandler == nil {
+ subMux.MethodNotAllowed(hFn)
+ }
+ })
+}
+
+// With adds inline middlewares for an endpoint handler.
+func (mx *Mux) With(middlewares ...func(http.Handler) http.Handler) Router {
+ // As in handle(), we must build the mux handler once additional middleware
+ // registration is no longer allowed for this stack, which is the case now.
+ if !mx.inline && mx.handler == nil {
+ mx.buildRouteHandler()
+ }
+
+ // Copy middlewares from parent inline muxes
+ var mws Middlewares
+ if mx.inline {
+ mws = make(Middlewares, len(mx.middlewares))
+ copy(mws, mx.middlewares)
+ }
+ mws = append(mws, middlewares...)
+
+ im := &Mux{
+ pool: mx.pool, inline: true, parent: mx, tree: mx.tree, middlewares: mws,
+ notFoundHandler: mx.notFoundHandler, methodNotAllowedHandler: mx.methodNotAllowedHandler,
+ }
+
+ return im
+}
+
+// Group creates a new inline-Mux with a fresh middleware stack. It's useful
+// for a group of handlers along the same routing path that use an additional
+// set of middlewares. See _examples/.
+func (mx *Mux) Group(fn func(r Router)) Router {
+ im := mx.With().(*Mux)
+ if fn != nil {
+ fn(im)
+ }
+ return im
+}
+
+// Route creates a new Mux with a fresh middleware stack and mounts it
+// along the `pattern` as a subrouter. Effectively, this is a short-hand
+// call to Mount. See _examples/.
+func (mx *Mux) Route(pattern string, fn func(r Router)) Router {
+ subRouter := NewRouter()
+ if fn != nil {
+ fn(subRouter)
+ }
+ mx.Mount(pattern, subRouter)
+ return subRouter
+}
+
+// Mount attaches another http.Handler or chi Router as a subrouter along a routing
+// path. It's very useful for splitting up a large API into many independent routers
+// and composing them as a single service using Mount. See _examples/.
+//
+// Note that Mount() simply sets a wildcard along the `pattern` that will continue
+// routing at the `handler`, which in most cases is another chi.Router. As a result,
+// if you define two Mount() routes on the exact same pattern the mount will panic.
+func (mx *Mux) Mount(pattern string, handler http.Handler) {
+ // Provide runtime safety for ensuring a pattern isn't mounted on an existing
+ // routing pattern.
+ if mx.tree.findPattern(pattern+"*") || mx.tree.findPattern(pattern+"/*") {
+ panic(fmt.Sprintf("chi: attempting to Mount() a handler on an existing path, '%s'", pattern))
+ }
+
+ // Assign the sub-Router the parent's notFound and methodNotAllowed handlers if not specified.
+ subr, ok := handler.(*Mux)
+ if ok && subr.notFoundHandler == nil && mx.notFoundHandler != nil {
+ subr.NotFound(mx.notFoundHandler)
+ }
+ if ok && subr.methodNotAllowedHandler == nil && mx.methodNotAllowedHandler != nil {
+ subr.MethodNotAllowed(mx.methodNotAllowedHandler)
+ }
+
+ mountHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ rctx := RouteContext(r.Context())
+ rctx.RoutePath = mx.nextRoutePath(rctx)
+ handler.ServeHTTP(w, r)
+ })
+
+ if pattern == "" || pattern[len(pattern)-1] != '/' {
+ mx.handle(mALL|mSTUB, pattern, mountHandler)
+ mx.handle(mALL|mSTUB, pattern+"/", mountHandler)
+ pattern += "/"
+ }
+
+ method := mALL
+ subroutes, _ := handler.(Routes)
+ if subroutes != nil {
+ method |= mSTUB
+ }
+ n := mx.handle(method, pattern+"*", mountHandler)
+
+ if subroutes != nil {
+ n.subroutes = subroutes
+ }
+}
+
+// Routes returns a slice of routing information from the tree,
+// useful for traversing available routes of a router.
+func (mx *Mux) Routes() []Route {
+ return mx.tree.routes()
+}
+
+// Middlewares returns a slice of middleware handler functions.
+func (mx *Mux) Middlewares() Middlewares {
+ return mx.middlewares
+}
+
+// Match searches the routing tree for a handler that matches the method/path.
+// It's similar to routing a http request, but without executing the handler
+// thereafter.
+//
+// Note: the *Context state is updated during execution, so manage
+// the state carefully or make a NewRouteContext().
+func (mx *Mux) Match(rctx *Context, method, path string) bool {
+ m, ok := methodMap[method]
+ if !ok {
+ return false
+ }
+
+ node, _, h := mx.tree.FindRoute(rctx, m, path)
+
+ if node != nil && node.subroutes != nil {
+ rctx.RoutePath = mx.nextRoutePath(rctx)
+ return node.subroutes.Match(rctx, method, rctx.RoutePath)
+ }
+
+ return h != nil
+}
+
+// NotFoundHandler returns the default Mux 404 responder whenever a route
+// cannot be found.
+func (mx *Mux) NotFoundHandler() http.HandlerFunc {
+ if mx.notFoundHandler != nil {
+ return mx.notFoundHandler
+ }
+ return http.NotFound
+}
+
+// MethodNotAllowedHandler returns the default Mux 405 responder whenever
+// a method cannot be resolved for a route.
+func (mx *Mux) MethodNotAllowedHandler() http.HandlerFunc {
+ if mx.methodNotAllowedHandler != nil {
+ return mx.methodNotAllowedHandler
+ }
+ return methodNotAllowedHandler
+}
+
+// buildRouteHandler builds the single mux handler that is a chain of the middleware
+// stack, as defined by calls to Use(), and the tree router (Mux) itself. After this
+// point, no other middlewares can be registered on this Mux's stack. But you can still
+// compose additional middlewares via Group()'s or using a chained middleware handler.
+func (mx *Mux) buildRouteHandler() {
+ mx.handler = chain(mx.middlewares, http.HandlerFunc(mx.routeHTTP))
+}
+
+// handle registers a http.Handler in the routing tree for a particular http method
+// and routing pattern.
+func (mx *Mux) handle(method methodTyp, pattern string, handler http.Handler) *node {
+ if len(pattern) == 0 || pattern[0] != '/' {
+ panic(fmt.Sprintf("chi: routing pattern must begin with '/' in '%s'", pattern))
+ }
+
+ // Build the computed routing handler for this routing pattern.
+ if !mx.inline && mx.handler == nil {
+ mx.buildRouteHandler()
+ }
+
+ // Build endpoint handler with inline middlewares for the route
+ var h http.Handler
+ if mx.inline {
+ mx.handler = http.HandlerFunc(mx.routeHTTP)
+ h = Chain(mx.middlewares...).Handler(handler)
+ } else {
+ h = handler
+ }
+
+ // Add the endpoint to the tree and return the node
+ return mx.tree.InsertRoute(method, pattern, h)
+}
+
+// routeHTTP routes a http.Request through the Mux routing tree to serve
+// the matching handler for a particular http method.
+func (mx *Mux) routeHTTP(w http.ResponseWriter, r *http.Request) {
+ // Grab the route context object
+ rctx := r.Context().Value(RouteCtxKey).(*Context)
+
+ // The request routing path
+ routePath := rctx.RoutePath
+ if routePath == "" {
+ if r.URL.RawPath != "" {
+ routePath = r.URL.RawPath
+ } else {
+ routePath = r.URL.Path
+ }
+ }
+
+ // Check if method is supported by chi
+ if rctx.RouteMethod == "" {
+ rctx.RouteMethod = r.Method
+ }
+ method, ok := methodMap[rctx.RouteMethod]
+ if !ok {
+ mx.MethodNotAllowedHandler().ServeHTTP(w, r)
+ return
+ }
+
+ // Find the route
+ if _, _, h := mx.tree.FindRoute(rctx, method, routePath); h != nil {
+ h.ServeHTTP(w, r)
+ return
+ }
+ if rctx.methodNotAllowed {
+ mx.MethodNotAllowedHandler().ServeHTTP(w, r)
+ } else {
+ mx.NotFoundHandler().ServeHTTP(w, r)
+ }
+}
+
+func (mx *Mux) nextRoutePath(rctx *Context) string {
+ routePath := "/"
+ nx := len(rctx.routeParams.Keys) - 1 // index of last param in list
+ if nx >= 0 && rctx.routeParams.Keys[nx] == "*" && len(rctx.routeParams.Values) > nx {
+ routePath = "/" + rctx.routeParams.Values[nx]
+ }
+ return routePath
+}
+
+// Recursively update data on child routers.
+func (mx *Mux) updateSubRoutes(fn func(subMux *Mux)) {
+ for _, r := range mx.tree.routes() {
+ subMux, ok := r.SubRoutes.(*Mux)
+ if !ok {
+ continue
+ }
+ fn(subMux)
+ }
+}
+
+// methodNotAllowedHandler is a helper function to respond with a 405,
+// method not allowed.
+func methodNotAllowedHandler(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(405)
+ w.Write(nil)
+}
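To tie the Mux API together, here is a hedged sketch of how Route, Mount, and With compose (handlers and paths are made up; see the upstream _examples/ directory for fuller versions):

```go
package main

import (
	"net/http"

	"github.com/go-chi/chi"
	"github.com/go-chi/chi/middleware"
)

func main() {
	r := chi.NewRouter()
	r.Use(middleware.RequestID, middleware.Recoverer)

	// Route creates a fresh sub-Mux and mounts it under /articles.
	r.Route("/articles", func(r chi.Router) {
		r.Get("/", func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("article index"))
		})
		// With adds an inline middleware for just this endpoint.
		r.With(middleware.Throttle(10)).Get("/{id}", func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("article " + chi.URLParam(r, "id")))
		})
	})

	// Mount attaches an independently built router; mounting two handlers on
	// the same pattern panics, as documented above.
	admin := chi.NewRouter()
	admin.Get("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("admin"))
	})
	r.Mount("/admin", admin)

	http.ListenAndServe(":3000", r)
}
```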
diff --git a/vendor/github.com/go-chi/chi/tree.go b/vendor/github.com/go-chi/chi/tree.go
new file mode 100644
index 00000000..59b5b5f7
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/tree.go
@@ -0,0 +1,865 @@
+package chi
+
+// Radix tree implementation below is based on the original work by
+// Armon Dadgar in https://github.com/armon/go-radix/blob/master/radix.go
+// (MIT licensed). It's been heavily modified for use as an HTTP routing tree.
+
+import (
+ "fmt"
+ "math"
+ "net/http"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+type methodTyp int
+
+const (
+ mSTUB methodTyp = 1 << iota
+ mCONNECT
+ mDELETE
+ mGET
+ mHEAD
+ mOPTIONS
+ mPATCH
+ mPOST
+ mPUT
+ mTRACE
+)
+
+var mALL = mCONNECT | mDELETE | mGET | mHEAD |
+ mOPTIONS | mPATCH | mPOST | mPUT | mTRACE
+
+var methodMap = map[string]methodTyp{
+ http.MethodConnect: mCONNECT,
+ http.MethodDelete: mDELETE,
+ http.MethodGet: mGET,
+ http.MethodHead: mHEAD,
+ http.MethodOptions: mOPTIONS,
+ http.MethodPatch: mPATCH,
+ http.MethodPost: mPOST,
+ http.MethodPut: mPUT,
+ http.MethodTrace: mTRACE,
+}
+
+// RegisterMethod adds support for custom HTTP method handlers, available
+// via Router#Method and Router#MethodFunc
+func RegisterMethod(method string) {
+ if method == "" {
+ return
+ }
+ method = strings.ToUpper(method)
+ if _, ok := methodMap[method]; ok {
+ return
+ }
+ n := len(methodMap)
+ if n > strconv.IntSize {
+ panic(fmt.Sprintf("chi: max number of methods reached (%d)", strconv.IntSize))
+ }
+ mt := methodTyp(math.Exp2(float64(n)))
+ methodMap[method] = mt
+ mALL |= mt
+}
+
+type nodeTyp uint8
+
+const (
+ ntStatic nodeTyp = iota // /home
+ ntRegexp // /{id:[0-9]+}
+ ntParam // /{user}
+ ntCatchAll // /api/v1/*
+)
+
+type node struct {
+ // node type: static, regexp, param, catchAll
+ typ nodeTyp
+
+ // first byte of the prefix
+ label byte
+
+ // first byte of the child prefix
+ tail byte
+
+ // prefix is the common prefix we ignore
+ prefix string
+
+ // regexp matcher for regexp nodes
+ rex *regexp.Regexp
+
+ // HTTP handler endpoints on the leaf node
+ endpoints endpoints
+
+ // subroutes on the leaf node
+ subroutes Routes
+
+ // child nodes should be stored in-order for iteration,
+ // in groups of the node type.
+ children [ntCatchAll + 1]nodes
+}
+
+// endpoints is a mapping of http method constants to handlers
+// for a given route.
+type endpoints map[methodTyp]*endpoint
+
+type endpoint struct {
+ // endpoint handler
+ handler http.Handler
+
+ // pattern is the routing pattern for handler nodes
+ pattern string
+
+ // parameter keys recorded on handler nodes
+ paramKeys []string
+}
+
+func (s endpoints) Value(method methodTyp) *endpoint {
+ mh, ok := s[method]
+ if !ok {
+ mh = &endpoint{}
+ s[method] = mh
+ }
+ return mh
+}
+
+func (n *node) InsertRoute(method methodTyp, pattern string, handler http.Handler) *node {
+ var parent *node
+ search := pattern
+
+ for {
+ // Handle key exhaustion
+ if len(search) == 0 {
+ // Insert or update the node's leaf handler
+ n.setEndpoint(method, handler, pattern)
+ return n
+ }
+
+ // We're going to be searching for a wild node next,
+ // in this case, we need to get the tail
+ var label = search[0]
+ var segTail byte
+ var segEndIdx int
+ var segTyp nodeTyp
+ var segRexpat string
+ if label == '{' || label == '*' {
+ segTyp, _, segRexpat, segTail, _, segEndIdx = patNextSegment(search)
+ }
+
+ var prefix string
+ if segTyp == ntRegexp {
+ prefix = segRexpat
+ }
+
+ // Look for the edge to attach to
+ parent = n
+ n = n.getEdge(segTyp, label, segTail, prefix)
+
+ // No edge, create one
+ if n == nil {
+ child := &node{label: label, tail: segTail, prefix: search}
+ hn := parent.addChild(child, search)
+ hn.setEndpoint(method, handler, pattern)
+
+ return hn
+ }
+
+ // Found an edge to match the pattern
+
+ if n.typ > ntStatic {
+ // We found a param node, trim the param from the search path and continue.
+ // This param/wild pattern segment would already be on the tree from a previous
+ // call to addChild when creating a new node.
+ search = search[segEndIdx:]
+ continue
+ }
+
+ // Static nodes fall below here.
+ // Determine longest prefix of the search key on match.
+ commonPrefix := longestPrefix(search, n.prefix)
+ if commonPrefix == len(n.prefix) {
+ // the common prefix is as long as the current node's prefix we're attempting to insert.
+ // keep the search going.
+ search = search[commonPrefix:]
+ continue
+ }
+
+ // Split the node
+ child := &node{
+ typ: ntStatic,
+ prefix: search[:commonPrefix],
+ }
+ parent.replaceChild(search[0], segTail, child)
+
+ // Restore the existing node
+ n.label = n.prefix[commonPrefix]
+ n.prefix = n.prefix[commonPrefix:]
+ child.addChild(n, n.prefix)
+
+ // If the new key is a subset, set the method/handler on this node and finish.
+ search = search[commonPrefix:]
+ if len(search) == 0 {
+ child.setEndpoint(method, handler, pattern)
+ return child
+ }
+
+ // Create a new edge for the node
+ subchild := &node{
+ typ: ntStatic,
+ label: search[0],
+ prefix: search,
+ }
+ hn := child.addChild(subchild, search)
+ hn.setEndpoint(method, handler, pattern)
+ return hn
+ }
+}
+
+// addChild appends the new `child` node to the tree using the `pattern` as the trie key.
+// For a URL router like chi's, we split the static, param, regexp and wildcard segments
+// into different nodes. In addition, addChild will recursively call itself until every
+// pattern segment is added to the url pattern tree as individual nodes, depending on type.
+func (n *node) addChild(child *node, prefix string) *node {
+ search := prefix
+
+ // handler leaf node added to the tree is the child.
+ // this may be overridden later down the flow
+ hn := child
+
+ // Parse next segment
+ segTyp, _, segRexpat, segTail, segStartIdx, segEndIdx := patNextSegment(search)
+
+ // Add child depending on next up segment
+ switch segTyp {
+
+ case ntStatic:
+ // Search prefix is all static (that is, has no params in path)
+ // noop
+
+ default:
+ // Search prefix contains a param, regexp or wildcard
+
+ if segTyp == ntRegexp {
+ rex, err := regexp.Compile(segRexpat)
+ if err != nil {
+ panic(fmt.Sprintf("chi: invalid regexp pattern '%s' in route param", segRexpat))
+ }
+ child.prefix = segRexpat
+ child.rex = rex
+ }
+
+ if segStartIdx == 0 {
+ // Route starts with a param
+ child.typ = segTyp
+
+ if segTyp == ntCatchAll {
+ segStartIdx = -1
+ } else {
+ segStartIdx = segEndIdx
+ }
+ if segStartIdx < 0 {
+ segStartIdx = len(search)
+ }
+ child.tail = segTail // for params, we set the tail
+
+ if segStartIdx != len(search) {
+ // add static edge for the remaining part, split the end.
+ // it's not possible to have adjacent param nodes, so it's certainly
+ // going to be a static node next.
+
+ search = search[segStartIdx:] // advance search position
+
+ nn := &node{
+ typ: ntStatic,
+ label: search[0],
+ prefix: search,
+ }
+ hn = child.addChild(nn, search)
+ }
+
+ } else if segStartIdx > 0 {
+ // Route has some param
+
+ // starts with a static segment
+ child.typ = ntStatic
+ child.prefix = search[:segStartIdx]
+ child.rex = nil
+
+ // add the param edge node
+ search = search[segStartIdx:]
+
+ nn := &node{
+ typ: segTyp,
+ label: search[0],
+ tail: segTail,
+ }
+ hn = child.addChild(nn, search)
+
+ }
+ }
+
+ n.children[child.typ] = append(n.children[child.typ], child)
+ n.children[child.typ].Sort()
+ return hn
+}
+
+func (n *node) replaceChild(label, tail byte, child *node) {
+ for i := 0; i < len(n.children[child.typ]); i++ {
+ if n.children[child.typ][i].label == label && n.children[child.typ][i].tail == tail {
+ n.children[child.typ][i] = child
+ n.children[child.typ][i].label = label
+ n.children[child.typ][i].tail = tail
+ return
+ }
+ }
+ panic("chi: replacing missing child")
+}
+
+func (n *node) getEdge(ntyp nodeTyp, label, tail byte, prefix string) *node {
+ nds := n.children[ntyp]
+ for i := 0; i < len(nds); i++ {
+ if nds[i].label == label && nds[i].tail == tail {
+ if ntyp == ntRegexp && nds[i].prefix != prefix {
+ continue
+ }
+ return nds[i]
+ }
+ }
+ return nil
+}
+
+func (n *node) setEndpoint(method methodTyp, handler http.Handler, pattern string) {
+ // Set the handler for the method type on the node
+ if n.endpoints == nil {
+ n.endpoints = make(endpoints)
+ }
+
+ paramKeys := patParamKeys(pattern)
+
+ if method&mSTUB == mSTUB {
+ n.endpoints.Value(mSTUB).handler = handler
+ }
+ if method&mALL == mALL {
+ h := n.endpoints.Value(mALL)
+ h.handler = handler
+ h.pattern = pattern
+ h.paramKeys = paramKeys
+ for _, m := range methodMap {
+ h := n.endpoints.Value(m)
+ h.handler = handler
+ h.pattern = pattern
+ h.paramKeys = paramKeys
+ }
+ } else {
+ h := n.endpoints.Value(method)
+ h.handler = handler
+ h.pattern = pattern
+ h.paramKeys = paramKeys
+ }
+}
+
+func (n *node) FindRoute(rctx *Context, method methodTyp, path string) (*node, endpoints, http.Handler) {
+ // Reset the context routing pattern and params
+ rctx.routePattern = ""
+ rctx.routeParams.Keys = rctx.routeParams.Keys[:0]
+ rctx.routeParams.Values = rctx.routeParams.Values[:0]
+
+ // Find the routing handlers for the path
+ rn := n.findRoute(rctx, method, path)
+ if rn == nil {
+ return nil, nil, nil
+ }
+
+ // Record the routing params in the request lifecycle
+ rctx.URLParams.Keys = append(rctx.URLParams.Keys, rctx.routeParams.Keys...)
+ rctx.URLParams.Values = append(rctx.URLParams.Values, rctx.routeParams.Values...)
+
+ // Record the routing pattern in the request lifecycle
+ if rn.endpoints[method].pattern != "" {
+ rctx.routePattern = rn.endpoints[method].pattern
+ rctx.RoutePatterns = append(rctx.RoutePatterns, rctx.routePattern)
+ }
+
+ return rn, rn.endpoints, rn.endpoints[method].handler
+}
+
+// Recursive edge traversal by checking all nodeTyp groups along the way.
+// It's like searching through a multi-dimensional radix trie.
+func (n *node) findRoute(rctx *Context, method methodTyp, path string) *node {
+ nn := n
+ search := path
+
+ for t, nds := range nn.children {
+ ntyp := nodeTyp(t)
+ if len(nds) == 0 {
+ continue
+ }
+
+ var xn *node
+ xsearch := search
+
+ var label byte
+ if search != "" {
+ label = search[0]
+ }
+
+ switch ntyp {
+ case ntStatic:
+ xn = nds.findEdge(label)
+ if xn == nil || !strings.HasPrefix(xsearch, xn.prefix) {
+ continue
+ }
+ xsearch = xsearch[len(xn.prefix):]
+
+ case ntParam, ntRegexp:
+ // short-circuit and return no matching route for empty param values
+ if xsearch == "" {
+ continue
+ }
+
+ // serially loop through each node grouped by the tail delimiter
+ for idx := 0; idx < len(nds); idx++ {
+ xn = nds[idx]
+
+ // label for param nodes is the delimiter byte
+ p := strings.IndexByte(xsearch, xn.tail)
+
+ if p < 0 {
+ if xn.tail == '/' {
+ p = len(xsearch)
+ } else {
+ continue
+ }
+ }
+
+ if ntyp == ntRegexp && xn.rex != nil {
+ if !xn.rex.Match([]byte(xsearch[:p])) {
+ continue
+ }
+ } else if strings.IndexByte(xsearch[:p], '/') != -1 {
+ // avoid a match across path segments
+ continue
+ }
+
+ prevlen := len(rctx.routeParams.Values)
+ rctx.routeParams.Values = append(rctx.routeParams.Values, xsearch[:p])
+ xsearch = xsearch[p:]
+
+ if len(xsearch) == 0 {
+ if xn.isLeaf() {
+ h := xn.endpoints[method]
+ if h != nil && h.handler != nil {
+ rctx.routeParams.Keys = append(rctx.routeParams.Keys, h.paramKeys...)
+ return xn
+ }
+
+ // flag that the routing context found a route, but not a corresponding
+ // supported method
+ rctx.methodNotAllowed = true
+ }
+ }
+
+ // recursively find the next node on this branch
+ fin := xn.findRoute(rctx, method, xsearch)
+ if fin != nil {
+ return fin
+ }
+
+ // not found on this branch, reset vars
+ rctx.routeParams.Values = rctx.routeParams.Values[:prevlen]
+ xsearch = search
+ }
+
+ rctx.routeParams.Values = append(rctx.routeParams.Values, "")
+
+ default:
+ // catch-all nodes
+ rctx.routeParams.Values = append(rctx.routeParams.Values, search)
+ xn = nds[0]
+ xsearch = ""
+ }
+
+ if xn == nil {
+ continue
+ }
+
+ // did we find it yet?
+ if len(xsearch) == 0 {
+ if xn.isLeaf() {
+ h := xn.endpoints[method]
+ if h != nil && h.handler != nil {
+ rctx.routeParams.Keys = append(rctx.routeParams.Keys, h.paramKeys...)
+ return xn
+ }
+
+ // flag that the routing context found a route, but not a corresponding
+ // supported method
+ rctx.methodNotAllowed = true
+ }
+ }
+
+ // recursively find the next node..
+ fin := xn.findRoute(rctx, method, xsearch)
+ if fin != nil {
+ return fin
+ }
+
+ // Did not find final handler, let's remove the param here if it was set
+ if xn.typ > ntStatic {
+ if len(rctx.routeParams.Values) > 0 {
+ rctx.routeParams.Values = rctx.routeParams.Values[:len(rctx.routeParams.Values)-1]
+ }
+ }
+
+ }
+
+ return nil
+}
+
+func (n *node) findEdge(ntyp nodeTyp, label byte) *node {
+ nds := n.children[ntyp]
+ num := len(nds)
+ idx := 0
+
+ switch ntyp {
+ case ntStatic, ntParam, ntRegexp:
+ i, j := 0, num-1
+ for i <= j {
+ idx = i + (j-i)/2
+ if label > nds[idx].label {
+ i = idx + 1
+ } else if label < nds[idx].label {
+ j = idx - 1
+ } else {
+ i = num // breaks cond
+ }
+ }
+ if nds[idx].label != label {
+ return nil
+ }
+ return nds[idx]
+
+ default: // catch all
+ return nds[idx]
+ }
+}
+
+func (n *node) isLeaf() bool {
+ return n.endpoints != nil
+}
+
+func (n *node) findPattern(pattern string) bool {
+ nn := n
+ for _, nds := range nn.children {
+ if len(nds) == 0 {
+ continue
+ }
+
+ n = nn.findEdge(nds[0].typ, pattern[0])
+ if n == nil {
+ continue
+ }
+
+ var idx int
+ var xpattern string
+
+ switch n.typ {
+ case ntStatic:
+ idx = longestPrefix(pattern, n.prefix)
+ if idx < len(n.prefix) {
+ continue
+ }
+
+ case ntParam, ntRegexp:
+ idx = strings.IndexByte(pattern, '}') + 1
+
+ case ntCatchAll:
+ idx = longestPrefix(pattern, "*")
+
+ default:
+ panic("chi: unknown node type")
+ }
+
+ xpattern = pattern[idx:]
+ if len(xpattern) == 0 {
+ return true
+ }
+
+ return n.findPattern(xpattern)
+ }
+ return false
+}
+
+func (n *node) routes() []Route {
+ rts := []Route{}
+
+ n.walk(func(eps endpoints, subroutes Routes) bool {
+ if eps[mSTUB] != nil && eps[mSTUB].handler != nil && subroutes == nil {
+ return false
+ }
+
+ // Group methodHandlers by unique patterns
+ pats := make(map[string]endpoints)
+
+ for mt, h := range eps {
+ if h.pattern == "" {
+ continue
+ }
+ p, ok := pats[h.pattern]
+ if !ok {
+ p = endpoints{}
+ pats[h.pattern] = p
+ }
+ p[mt] = h
+ }
+
+ for p, mh := range pats {
+ hs := make(map[string]http.Handler)
+ if mh[mALL] != nil && mh[mALL].handler != nil {
+ hs["*"] = mh[mALL].handler
+ }
+
+ for mt, h := range mh {
+ if h.handler == nil {
+ continue
+ }
+ m := methodTypString(mt)
+ if m == "" {
+ continue
+ }
+ hs[m] = h.handler
+ }
+
+ rt := Route{p, hs, subroutes}
+ rts = append(rts, rt)
+ }
+
+ return false
+ })
+
+ return rts
+}
+
+func (n *node) walk(fn func(eps endpoints, subroutes Routes) bool) bool {
+ // Visit the leaf values if any
+ if (n.endpoints != nil || n.subroutes != nil) && fn(n.endpoints, n.subroutes) {
+ return true
+ }
+
+ // Recurse on the children
+ for _, ns := range n.children {
+ for _, cn := range ns {
+ if cn.walk(fn) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// patNextSegment returns the next segment details from a pattern:
+// node type, param key, regexp string, param tail byte, param starting index, param ending index
+func patNextSegment(pattern string) (nodeTyp, string, string, byte, int, int) {
+ ps := strings.Index(pattern, "{")
+ ws := strings.Index(pattern, "*")
+
+ if ps < 0 && ws < 0 {
+ return ntStatic, "", "", 0, 0, len(pattern) // we return the entire thing
+ }
+
+ // Sanity check
+ if ps >= 0 && ws >= 0 && ws < ps {
+ panic("chi: wildcard '*' must be the last pattern in a route, otherwise use a '{param}'")
+ }
+
+ var tail byte = '/' // Default endpoint tail to / byte
+
+ if ps >= 0 {
+ // Param/Regexp pattern is next
+ nt := ntParam
+
+ // Read to the closing '}', taking opens and closes into account via a curly-brace count (cc)
+ cc := 0
+ pe := ps
+ for i, c := range pattern[ps:] {
+ if c == '{' {
+ cc++
+ } else if c == '}' {
+ cc--
+ if cc == 0 {
+ pe = ps + i
+ break
+ }
+ }
+ }
+ if pe == ps {
+ panic("chi: route param closing delimiter '}' is missing")
+ }
+
+ key := pattern[ps+1 : pe]
+ pe++ // set end to next position
+
+ if pe < len(pattern) {
+ tail = pattern[pe]
+ }
+
+ var rexpat string
+ if idx := strings.Index(key, ":"); idx >= 0 {
+ nt = ntRegexp
+ rexpat = key[idx+1:]
+ key = key[:idx]
+ }
+
+ if len(rexpat) > 0 {
+ if rexpat[0] != '^' {
+ rexpat = "^" + rexpat
+ }
+ if rexpat[len(rexpat)-1] != '$' {
+ rexpat += "$"
+ }
+ }
+
+ return nt, key, rexpat, tail, ps, pe
+ }
+
+ // Wildcard pattern as finale
+ if ws < len(pattern)-1 {
+ panic("chi: wildcard '*' must be the last value in a route. trim trailing text or use a '{param}' instead")
+ }
+ return ntCatchAll, "*", "", 0, ws, len(pattern)
+}
+
+func patParamKeys(pattern string) []string {
+ pat := pattern
+ paramKeys := []string{}
+ for {
+ ptyp, paramKey, _, _, _, e := patNextSegment(pat)
+ if ptyp == ntStatic {
+ return paramKeys
+ }
+ for i := 0; i < len(paramKeys); i++ {
+ if paramKeys[i] == paramKey {
+ panic(fmt.Sprintf("chi: routing pattern '%s' contains duplicate param key, '%s'", pattern, paramKey))
+ }
+ }
+ paramKeys = append(paramKeys, paramKey)
+ pat = pat[e:]
+ }
+}
+
+// longestPrefix finds the length of the shared prefix
+// of two strings
+func longestPrefix(k1, k2 string) int {
+ max := len(k1)
+ if l := len(k2); l < max {
+ max = l
+ }
+ var i int
+ for i = 0; i < max; i++ {
+ if k1[i] != k2[i] {
+ break
+ }
+ }
+ return i
+}
+
+func methodTypString(method methodTyp) string {
+ for s, t := range methodMap {
+ if method == t {
+ return s
+ }
+ }
+ return ""
+}
+
+type nodes []*node
+
+// Sort the list of nodes by label
+func (ns nodes) Sort() { sort.Sort(ns); ns.tailSort() }
+func (ns nodes) Len() int { return len(ns) }
+func (ns nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] }
+func (ns nodes) Less(i, j int) bool { return ns[i].label < ns[j].label }
+
+// tailSort pushes nodes with '/' as the tail to the end of the list for param nodes.
+// The list order determines the traversal order.
+func (ns nodes) tailSort() {
+ for i := len(ns) - 1; i >= 0; i-- {
+ if ns[i].typ > ntStatic && ns[i].tail == '/' {
+ ns.Swap(i, len(ns)-1)
+ return
+ }
+ }
+}
+
+func (ns nodes) findEdge(label byte) *node {
+ num := len(ns)
+ idx := 0
+ i, j := 0, num-1
+ for i <= j {
+ idx = i + (j-i)/2
+ if label > ns[idx].label {
+ i = idx + 1
+ } else if label < ns[idx].label {
+ j = idx - 1
+ } else {
+ i = num // breaks cond
+ }
+ }
+ if ns[idx].label != label {
+ return nil
+ }
+ return ns[idx]
+}
+
+// Route describes the details of a routing handler.
+// The Handlers map is keyed by HTTP method.
+type Route struct {
+ Pattern string
+ Handlers map[string]http.Handler
+ SubRoutes Routes
+}
+
+// WalkFunc is the type of the function called for each method and route visited by Walk.
+type WalkFunc func(method string, route string, handler http.Handler, middlewares ...func(http.Handler) http.Handler) error
+
+// Walk walks any router tree that implements Routes interface.
+func Walk(r Routes, walkFn WalkFunc) error {
+ return walk(r, walkFn, "")
+}
+
+func walk(r Routes, walkFn WalkFunc, parentRoute string, parentMw ...func(http.Handler) http.Handler) error {
+ for _, route := range r.Routes() {
+ mws := make([]func(http.Handler) http.Handler, len(parentMw))
+ copy(mws, parentMw)
+ mws = append(mws, r.Middlewares()...)
+
+ if route.SubRoutes != nil {
+ if err := walk(route.SubRoutes, walkFn, parentRoute+route.Pattern, mws...); err != nil {
+ return err
+ }
+ continue
+ }
+
+ for method, handler := range route.Handlers {
+ if method == "*" {
+ // Ignore a "catchAll" method, since we pass down all the specific methods for each route.
+ continue
+ }
+
+ fullRoute := parentRoute + route.Pattern
+ fullRoute = strings.Replace(fullRoute, "/*/", "/", -1)
+
+ if chain, ok := handler.(*ChainHandler); ok {
+ if err := walkFn(method, fullRoute, chain.Endpoint, append(mws, chain.Middlewares...)...); err != nil {
+ return err
+ }
+ } else {
+ if err := walkFn(method, fullRoute, handler, mws...); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ return nil
+}
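+
+// Illustrative sketch (not part of chi itself): the Walk helper above can be
+// used to enumerate every method/route pair registered on a router. The
+// router value r and the use of fmt are assumptions for the example; any
+// value satisfying the Routes interface works.
+//
+//	err := Walk(r, func(method, route string, handler http.Handler, middlewares ...func(http.Handler) http.Handler) error {
+//		fmt.Printf("%s %s (%d middlewares)\n", method, route, len(middlewares))
+//		return nil
+//	})
+//	if err != nil {
+//		fmt.Printf("walk error: %v\n", err)
+//	}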
diff --git a/vendor/github.com/go-jose/go-jose/v4/.gitignore b/vendor/github.com/go-jose/go-jose/v4/.gitignore
new file mode 100644
index 00000000..eb29ebae
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/.gitignore
@@ -0,0 +1,2 @@
+jose-util/jose-util
+jose-util.t.err
\ No newline at end of file
diff --git a/vendor/github.com/go-jose/go-jose/v4/.golangci.yml b/vendor/github.com/go-jose/go-jose/v4/.golangci.yml
new file mode 100644
index 00000000..2a577a8f
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/.golangci.yml
@@ -0,0 +1,53 @@
+# https://github.com/golangci/golangci-lint
+
+run:
+ skip-files:
+ - doc_test.go
+ modules-download-mode: readonly
+
+linters:
+ enable-all: true
+ disable:
+ - gochecknoglobals
+ - goconst
+ - lll
+ - maligned
+ - nakedret
+ - scopelint
+ - unparam
+ - funlen # added in 1.18 (requires go-jose changes before it can be enabled)
+
+linters-settings:
+ gocyclo:
+ min-complexity: 35
+
+issues:
+ exclude-rules:
+ - text: "don't use ALL_CAPS in Go names"
+ linters:
+ - golint
+ - text: "hardcoded credentials"
+ linters:
+ - gosec
+ - text: "weak cryptographic primitive"
+ linters:
+ - gosec
+ - path: json/
+ linters:
+ - dupl
+ - errcheck
+ - gocritic
+ - gocyclo
+ - golint
+ - govet
+ - ineffassign
+ - staticcheck
+ - structcheck
+ - stylecheck
+ - unused
+ - path: _test\.go
+ linters:
+ - scopelint
+ - path: jwk.go
+ linters:
+ - gocyclo
diff --git a/vendor/github.com/go-jose/go-jose/v4/.travis.yml b/vendor/github.com/go-jose/go-jose/v4/.travis.yml
new file mode 100644
index 00000000..48de631b
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/.travis.yml
@@ -0,0 +1,33 @@
+language: go
+
+matrix:
+ fast_finish: true
+ allow_failures:
+ - go: tip
+
+go:
+ - "1.13.x"
+ - "1.14.x"
+ - tip
+
+before_script:
+ - export PATH=$HOME/.local/bin:$PATH
+
+before_install:
+ - go get -u github.com/mattn/goveralls github.com/wadey/gocovmerge
+ - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.18.0
+ - pip install cram --user
+
+script:
+ - go test -v -covermode=count -coverprofile=profile.cov .
+ - go test -v -covermode=count -coverprofile=cryptosigner/profile.cov ./cryptosigner
+ - go test -v -covermode=count -coverprofile=cipher/profile.cov ./cipher
+ - go test -v -covermode=count -coverprofile=jwt/profile.cov ./jwt
+ - go test -v ./json # no coverage for forked encoding/json package
+ - golangci-lint run
+ - cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t # cram tests jose-util
+ - cd ..
+
+after_success:
+ - gocovmerge *.cov */*.cov > merged.coverprofile
+ - goveralls -coverprofile merged.coverprofile -service=travis-ci
diff --git a/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md b/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md
new file mode 100644
index 00000000..28bdd2fc
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md
@@ -0,0 +1,72 @@
+# v4.0.1
+
+## Fixed
+
+ - An attacker could send a JWE containing compressed data that used large
+ amounts of memory and CPU when decompressed by `Decrypt` or `DecryptMulti`.
+ Those functions now return an error if the decompressed data would exceed
+ 250kB or 10x the compressed size (whichever is larger). Thanks to
+ Enze Wang@Alioth and Jianjun Chen@Zhongguancun Lab (@zer0yu and @chenjj)
+ for reporting.
+
+# v4.0.0
+
+This release makes some breaking changes in order to more thoroughly
+address the vulnerabilities discussed in [Three New Attacks Against JSON Web
+Tokens][1], "Sign/encrypt confusion", "Billion hash attack", and "Polyglot
+token".
+
+## Changed
+
+ - Limit JWT encryption types (exclude password or public key types) (#78)
+ - Enforce minimum length for HMAC keys (#85)
+ - jwt: match any audience in a list, rather than requiring all audiences (#81)
+ - jwt: accept only Compact Serialization (#75)
+ - jws: Add expected algorithms for signatures (#74)
+ - Require specifying expected algorithms for ParseEncrypted,
+ ParseSigned, ParseDetached, jwt.ParseEncrypted, jwt.ParseSigned,
+ jwt.ParseSignedAndEncrypted (#69, #74)
+ - Usually there is a small, known set of appropriate algorithms for a program
+ to use and it's a mistake to allow unexpected algorithms. For instance the
+ "billion hash attack" relies in part on programs accepting the PBES2
+ encryption algorithm and doing the necessary work even if they weren't
+ specifically configured to allow PBES2.
+ - Revert "Strip padding off base64 strings" (#82)
+ - The specs require base64url encoding without padding.
+ - Minimum supported Go version is now 1.21
+
+## Added
+
+ - ParseSignedCompact, ParseSignedJSON, ParseEncryptedCompact, ParseEncryptedJSON.
+ - These allow parsing a specific serialization, as opposed to ParseSigned and
+ ParseEncrypted, which try to automatically detect which serialization was
+ provided. It's common to require a specific serialization for a specific
+ protocol - for instance JWT requires Compact serialization.
+
+[1]: https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf
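+
+As a rough sketch of the parse-time change described under "Changed" above
+(the ES256 restriction and function name are assumptions for illustration,
+not part of the release notes):
+
+    package example
+
+    import "github.com/go-jose/go-jose/v4"
+
+    // parseToken accepts only ES256-signed compact JWS input.
+    func parseToken(serialized string) (*jose.JSONWebSignature, error) {
+        return jose.ParseSigned(serialized, []jose.SignatureAlgorithm{jose.ES256})
+    }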
+
+# v3.0.2
+
+## Fixed
+
+ - DecryptMulti: handle decompression error (#19)
+
+## Changed
+
+ - jwe/CompactSerialize: improve performance (#67)
+ - Increase the default number of PBKDF2 iterations to 600k (#48)
+ - Return the proper algorithm for ECDSA keys (#45)
+
+## Added
+
+ - Add Thumbprint support for opaque signers (#38)
+
+# v3.0.1
+
+## Fixed
+
+ - Security issue: an attacker specifying a large "p2c" value can cause
+ JSONWebEncryption.Decrypt and JSONWebEncryption.DecryptMulti to consume large
+ amounts of CPU, causing a DoS. Thanks to Matt Schwager (@mschwager) for the
+ disclosure and to Tom Tervoort for originally publishing the category of attack.
+ https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf
diff --git a/vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md b/vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md
new file mode 100644
index 00000000..b63e1f8f
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md
@@ -0,0 +1,15 @@
+# Contributing
+
+If you would like to contribute code to go-jose you can do so through GitHub by
+forking the repository and sending a pull request.
+
+When submitting code, please make every effort to follow existing conventions
+and style in order to keep the code as readable as possible. Please also make
+sure all tests pass by running `go test`, and format your code with `go fmt`.
+We also recommend using `golint` and `errcheck`.
+
+Before your code can be accepted into the project you must also sign the
+Individual Contributor License Agreement. We use [cla-assistant.io][1] and you
+will be prompted to sign once a pull request is opened.
+
+[1]: https://cla-assistant.io/
diff --git a/vendor/github.com/go-jose/go-jose/v4/LICENSE b/vendor/github.com/go-jose/go-jose/v4/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/go-jose/go-jose/v4/README.md b/vendor/github.com/go-jose/go-jose/v4/README.md
new file mode 100644
index 00000000..79a7c5ec
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/README.md
@@ -0,0 +1,114 @@
+# Go JOSE
+
+[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v4.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v4)
+[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v4/jwt.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v4/jwt)
+[![license](https://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE)
+[![test](https://img.shields.io/github/checks-status/go-jose/go-jose/v4)](https://github.com/go-jose/go-jose/actions)
+
+Package jose aims to provide an implementation of the Javascript Object Signing
+and Encryption set of standards. This includes support for JSON Web Encryption,
+JSON Web Signature, and JSON Web Token standards.
+
+**Disclaimer**: This library contains encryption software that is subject to
+the U.S. Export Administration Regulations. You may not export, re-export,
+transfer or download this code or any part of it in violation of any United
+States law, directive or regulation. In particular this software may not be
+exported or re-exported in any form or on any media to Iran, North Sudan,
+Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any
+US maintained blocked list.
+
+## Overview
+
+The implementation follows the
+[JSON Web Encryption](https://dx.doi.org/10.17487/RFC7516) (RFC 7516),
+[JSON Web Signature](https://dx.doi.org/10.17487/RFC7515) (RFC 7515), and
+[JSON Web Token](https://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications.
+Tables of supported algorithms are shown below. The library supports both
+the compact and JWS/JWE JSON Serialization formats, and has optional support for
+multiple recipients. It also comes with a small command-line utility
+([`jose-util`](https://pkg.go.dev/github.com/go-jose/go-jose/jose-util))
+for dealing with JOSE messages in a shell.
+
+**Note**: We use a forked version of the `encoding/json` package from the Go
+standard library which uses case-sensitive matching for member names (instead
+of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)).
+This is to avoid differences in interpretation of messages between go-jose and
+libraries in other languages.
+
+### Versions
+
+[Version 4](https://github.com/go-jose/go-jose)
+([branch](https://github.com/go-jose/go-jose/tree/main),
+[doc](https://pkg.go.dev/github.com/go-jose/go-jose/v4), [releases](https://github.com/go-jose/go-jose/releases)) is the current stable version:
+
+ import "github.com/go-jose/go-jose/v4"
+
+The old [square/go-jose](https://github.com/square/go-jose) repo contains the prior v1 and v2 versions, which
+are still usable but no longer actively developed.
+
+Version 3, in this repo, is still receiving security fixes but not functionality
+updates.
+
+### Supported algorithms
+
+See below for a table of supported algorithms. Algorithm identifiers match
+the names in the [JSON Web Algorithms](https://dx.doi.org/10.17487/RFC7518)
+standard where possible. The Godoc reference has a list of constants.
+
+ Key encryption | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ RSA-PKCS#1v1.5 | RSA1_5
+ RSA-OAEP | RSA-OAEP, RSA-OAEP-256
+ AES key wrap | A128KW, A192KW, A256KW
+ AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW
+ ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW
+ ECDH-ES (direct)           | ECDH-ES<sup>1</sup>
+ Direct encryption          | dir<sup>1</sup>
+
+1. Not supported in multi-recipient mode
+
+ Signing / MAC | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ RSASSA-PKCS#1v1.5 | RS256, RS384, RS512
+ RSASSA-PSS | PS256, PS384, PS512
+ HMAC | HS256, HS384, HS512
+ ECDSA | ES256, ES384, ES512
+ Ed25519                    | EdDSA<sup>2</sup>
+
+2. Only available in version 2 or later of the package
+
+ Content encryption | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512
+ AES-GCM | A128GCM, A192GCM, A256GCM
+
+ Compression                | Algorithm identifier(s)
+ :------------------------- | -------------------------------
+ DEFLATE (RFC 1951) | DEF
+
+### Supported key types
+
+See below for a table of supported key types. These are understood by the
+library, and can be passed to corresponding functions such as `NewEncrypter` or
+`NewSigner`. Each of these keys can also be wrapped in a JWK if desired, which
+allows attaching a key id.
+
+ Algorithm(s) | Corresponding types
+ :------------------------- | -------------------------------
+ RSA | *[rsa.PublicKey](https://pkg.go.dev/crypto/rsa/#PublicKey), *[rsa.PrivateKey](https://pkg.go.dev/crypto/rsa/#PrivateKey)
+ ECDH, ECDSA | *[ecdsa.PublicKey](https://pkg.go.dev/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](https://pkg.go.dev/crypto/ecdsa/#PrivateKey)
+ EdDSA<sup>1</sup>            | [ed25519.PublicKey](https://pkg.go.dev/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://pkg.go.dev/crypto/ed25519#PrivateKey)
+ AES, HMAC | []byte
+
+1. Only available in version 2 or later of the package
+
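+A minimal sketch of wrapping a raw key in a JWK to attach a key id (the key id
+and algorithm values below are placeholders, not taken from this document):
+
+    package main
+
+    import (
+        "crypto/ecdsa"
+        "crypto/elliptic"
+        "crypto/rand"
+        "fmt"
+
+        "github.com/go-jose/go-jose/v4"
+    )
+
+    func main() {
+        key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+        if err != nil {
+            panic(err)
+        }
+        // Wrap the raw private key so a key id travels with it.
+        jwk := jose.JSONWebKey{Key: key, KeyID: "example-key-1", Algorithm: "ES256", Use: "sig"}
+        fmt.Println(jwk.Valid())
+    }
+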
+## Examples
+
+[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v4.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v4)
+[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v4/jwt.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v4/jwt)
+
+Examples can be found in the Godoc
+reference for this package. The
+[`jose-util`](https://github.com/go-jose/go-jose/tree/v4/jose-util)
+subdirectory also contains a small command-line utility which might be useful
+as an example as well.
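+
+As a further hedged sketch (the algorithm choice and payload are assumptions,
+not prescribed by this package), signing and verifying a payload end to end
+looks roughly like this:
+
+    package main
+
+    import (
+        "crypto/ecdsa"
+        "crypto/elliptic"
+        "crypto/rand"
+        "fmt"
+
+        "github.com/go-jose/go-jose/v4"
+    )
+
+    func main() {
+        key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+        if err != nil {
+            panic(err)
+        }
+
+        // Sign a payload with ES256.
+        signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.ES256, Key: key}, nil)
+        if err != nil {
+            panic(err)
+        }
+        obj, err := signer.Sign([]byte("hello JOSE"))
+        if err != nil {
+            panic(err)
+        }
+        serialized, err := obj.CompactSerialize()
+        if err != nil {
+            panic(err)
+        }
+
+        // Parse, allowing only the expected algorithm, then verify.
+        parsed, err := jose.ParseSigned(serialized, []jose.SignatureAlgorithm{jose.ES256})
+        if err != nil {
+            panic(err)
+        }
+        payload, err := parsed.Verify(&key.PublicKey)
+        if err != nil {
+            panic(err)
+        }
+        fmt.Println(string(payload))
+    }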
diff --git a/vendor/github.com/go-jose/go-jose/v4/SECURITY.md b/vendor/github.com/go-jose/go-jose/v4/SECURITY.md
new file mode 100644
index 00000000..2f18a75a
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/SECURITY.md
@@ -0,0 +1,13 @@
+# Security Policy
+This document explains how to contact the Let's Encrypt security team to report security vulnerabilities.
+
+## Supported Versions
+| Version | Supported |
+| ------- | ----------|
+| >= v3 | ✓ |
+| v2 | ✗ |
+| v1 | ✗ |
+
+## Reporting a vulnerability
+
+Please see [https://letsencrypt.org/contact/#security](https://letsencrypt.org/contact/#security) for the email address to report a vulnerability. Ensure that the subject line for your report contains the word `vulnerability` and is descriptive. Your email should be acknowledged within 24 hours. If you do not receive a response within 24 hours, please follow up with another email.
diff --git a/vendor/github.com/go-jose/go-jose/v4/asymmetric.go b/vendor/github.com/go-jose/go-jose/v4/asymmetric.go
new file mode 100644
index 00000000..f8d5774e
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/asymmetric.go
@@ -0,0 +1,595 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "crypto"
+ "crypto/aes"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "math/big"
+
+ josecipher "github.com/go-jose/go-jose/v4/cipher"
+ "github.com/go-jose/go-jose/v4/json"
+)
+
+// A generic RSA-based encrypter/verifier
+type rsaEncrypterVerifier struct {
+ publicKey *rsa.PublicKey
+}
+
+// A generic RSA-based decrypter/signer
+type rsaDecrypterSigner struct {
+ privateKey *rsa.PrivateKey
+}
+
+// A generic EC-based encrypter/verifier
+type ecEncrypterVerifier struct {
+ publicKey *ecdsa.PublicKey
+}
+
+type edEncrypterVerifier struct {
+ publicKey ed25519.PublicKey
+}
+
+// A key generator for ECDH-ES
+type ecKeyGenerator struct {
+ size int
+ algID string
+ publicKey *ecdsa.PublicKey
+}
+
+// A generic EC-based decrypter/signer
+type ecDecrypterSigner struct {
+ privateKey *ecdsa.PrivateKey
+}
+
+type edDecrypterSigner struct {
+ privateKey ed25519.PrivateKey
+}
+
+// newRSARecipient creates recipientKeyInfo based on the given key.
+func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) {
+ // Verify that key management algorithm is supported by this encrypter
+ switch keyAlg {
+ case RSA1_5, RSA_OAEP, RSA_OAEP_256:
+ default:
+ return recipientKeyInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if publicKey == nil {
+ return recipientKeyInfo{}, errors.New("invalid public key")
+ }
+
+ return recipientKeyInfo{
+ keyAlg: keyAlg,
+ keyEncrypter: &rsaEncrypterVerifier{
+ publicKey: publicKey,
+ },
+ }, nil
+}
+
+// newRSASigner creates a recipientSigInfo based on the given key.
+func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) {
+ // Verify that key management algorithm is supported by this encrypter
+ switch sigAlg {
+ case RS256, RS384, RS512, PS256, PS384, PS512:
+ default:
+ return recipientSigInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if privateKey == nil {
+ return recipientSigInfo{}, errors.New("invalid private key")
+ }
+
+ return recipientSigInfo{
+ sigAlg: sigAlg,
+ publicKey: staticPublicKey(&JSONWebKey{
+ Key: privateKey.Public(),
+ }),
+ signer: &rsaDecrypterSigner{
+ privateKey: privateKey,
+ },
+ }, nil
+}
+
+func newEd25519Signer(sigAlg SignatureAlgorithm, privateKey ed25519.PrivateKey) (recipientSigInfo, error) {
+ if sigAlg != EdDSA {
+ return recipientSigInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if privateKey == nil {
+ return recipientSigInfo{}, errors.New("invalid private key")
+ }
+ return recipientSigInfo{
+ sigAlg: sigAlg,
+ publicKey: staticPublicKey(&JSONWebKey{
+ Key: privateKey.Public(),
+ }),
+ signer: &edDecrypterSigner{
+ privateKey: privateKey,
+ },
+ }, nil
+}
+
+// newECDHRecipient creates recipientKeyInfo based on the given key.
+func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) {
+ // Verify that key management algorithm is supported by this encrypter
+ switch keyAlg {
+ case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
+ default:
+ return recipientKeyInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if publicKey == nil || !publicKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) {
+ return recipientKeyInfo{}, errors.New("invalid public key")
+ }
+
+ return recipientKeyInfo{
+ keyAlg: keyAlg,
+ keyEncrypter: &ecEncrypterVerifier{
+ publicKey: publicKey,
+ },
+ }, nil
+}
+
+// newECDSASigner creates a recipientSigInfo based on the given key.
+func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) {
+ // Verify that key management algorithm is supported by this encrypter
+ switch sigAlg {
+ case ES256, ES384, ES512:
+ default:
+ return recipientSigInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if privateKey == nil {
+ return recipientSigInfo{}, errors.New("invalid private key")
+ }
+
+ return recipientSigInfo{
+ sigAlg: sigAlg,
+ publicKey: staticPublicKey(&JSONWebKey{
+ Key: privateKey.Public(),
+ }),
+ signer: &ecDecrypterSigner{
+ privateKey: privateKey,
+ },
+ }, nil
+}
+
+// Encrypt the given payload and update the object.
+func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
+ encryptedKey, err := ctx.encrypt(cek, alg)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ return recipientInfo{
+ encryptedKey: encryptedKey,
+ header: &rawHeader{},
+ }, nil
+}
+
+// Encrypt the given payload. Based on the key encryption algorithm,
+// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
+func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) {
+ switch alg {
+ case RSA1_5:
+ return rsa.EncryptPKCS1v15(RandReader, ctx.publicKey, cek)
+ case RSA_OAEP:
+ return rsa.EncryptOAEP(sha1.New(), RandReader, ctx.publicKey, cek, []byte{})
+ case RSA_OAEP_256:
+ return rsa.EncryptOAEP(sha256.New(), RandReader, ctx.publicKey, cek, []byte{})
+ }
+
+ return nil, ErrUnsupportedAlgorithm
+}
+
+// Decrypt the given payload and return the content encryption key.
+func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
+ return ctx.decrypt(recipient.encryptedKey, headers.getAlgorithm(), generator)
+}
+
+// Decrypt the given payload. Based on the key encryption algorithm,
+// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
+func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) {
+ // Note: The random reader on decrypt operations is only used for blinding,
+ // so stubbing it is meaningless (hence the direct use of rand.Reader).
+ switch alg {
+ case RSA1_5:
+ defer func() {
+ // DecryptPKCS1v15SessionKey sometimes panics on an invalid payload
+ // because of an index out of bounds error, which we want to ignore.
+ // This has been fixed in Go 1.3.1 (released 2014/08/13), the recover()
+ // only exists for preventing crashes with unpatched versions.
+ // See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k
+ // See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33
+ _ = recover()
+ }()
+
+ // Perform some input validation.
+ keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8
+ if keyBytes != len(jek) {
+ // Input size is incorrect, the encrypted payload should always match
+ // the size of the public modulus (e.g. using a 2048 bit key will
+ // produce 256 bytes of output). Reject this since it's invalid input.
+ return nil, ErrCryptoFailure
+ }
+
+ cek, _, err := generator.genKey()
+ if err != nil {
+ return nil, ErrCryptoFailure
+ }
+
+ // When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to
+ // prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing
+ // the Million Message Attack on Cryptographic Message Syntax". We are
+ // therefore deliberately ignoring errors here.
+ _ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek)
+
+ return cek, nil
+ case RSA_OAEP:
+ // Use rand.Reader for RSA blinding
+ return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{})
+ case RSA_OAEP_256:
+ // Use rand.Reader for RSA blinding
+ return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{})
+ }
+
+ return nil, ErrUnsupportedAlgorithm
+}
+
+// Sign the given payload
+func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
+ var hash crypto.Hash
+
+ switch alg {
+ case RS256, PS256:
+ hash = crypto.SHA256
+ case RS384, PS384:
+ hash = crypto.SHA384
+ case RS512, PS512:
+ hash = crypto.SHA512
+ default:
+ return Signature{}, ErrUnsupportedAlgorithm
+ }
+
+ hasher := hash.New()
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hasher.Write(payload)
+ hashed := hasher.Sum(nil)
+
+ var out []byte
+ var err error
+
+ switch alg {
+ case RS256, RS384, RS512:
+ // TODO(https://github.com/go-jose/go-jose/issues/40): As of go1.20, the
+ // random parameter is legacy and ignored, and it can be nil.
+ // https://cs.opensource.google/go/go/+/refs/tags/go1.20:src/crypto/rsa/pkcs1v15.go;l=263;bpv=0;bpt=1
+ out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed)
+ case PS256, PS384, PS512:
+ out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthEqualsHash,
+ })
+ }
+
+ if err != nil {
+ return Signature{}, err
+ }
+
+ return Signature{
+ Signature: out,
+ protected: &rawHeader{},
+ }, nil
+}
+
+// Verify the given payload
+func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
+ var hash crypto.Hash
+
+ switch alg {
+ case RS256, PS256:
+ hash = crypto.SHA256
+ case RS384, PS384:
+ hash = crypto.SHA384
+ case RS512, PS512:
+ hash = crypto.SHA512
+ default:
+ return ErrUnsupportedAlgorithm
+ }
+
+ hasher := hash.New()
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hasher.Write(payload)
+ hashed := hasher.Sum(nil)
+
+ switch alg {
+ case RS256, RS384, RS512:
+ return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature)
+ case PS256, PS384, PS512:
+ return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil)
+ }
+
+ return ErrUnsupportedAlgorithm
+}
+
+// Encrypt the given payload and update the object.
+func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
+ switch alg {
+ case ECDH_ES:
+ // ECDH-ES mode doesn't wrap a key, the shared secret is used directly as the key.
+ return recipientInfo{
+ header: &rawHeader{},
+ }, nil
+ case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
+ default:
+ return recipientInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ generator := ecKeyGenerator{
+ algID: string(alg),
+ publicKey: ctx.publicKey,
+ }
+
+ switch alg {
+ case ECDH_ES_A128KW:
+ generator.size = 16
+ case ECDH_ES_A192KW:
+ generator.size = 24
+ case ECDH_ES_A256KW:
+ generator.size = 32
+ }
+
+ kek, header, err := generator.genKey()
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ block, err := aes.NewCipher(kek)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ jek, err := josecipher.KeyWrap(block, cek)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ return recipientInfo{
+ encryptedKey: jek,
+ header: &header,
+ }, nil
+}
+
+// Get key size for EC key generator
+func (ctx ecKeyGenerator) keySize() int {
+ return ctx.size
+}
+
+// Get a content encryption key for ECDH-ES
+func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) {
+ priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, RandReader)
+ if err != nil {
+ return nil, rawHeader{}, err
+ }
+
+ out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size)
+
+ b, err := json.Marshal(&JSONWebKey{
+ Key: &priv.PublicKey,
+ })
+ if err != nil {
+ return nil, nil, err
+ }
+
+ headers := rawHeader{
+ headerEPK: makeRawMessage(b),
+ }
+
+ return out, headers, nil
+}
+
+// Decrypt the given payload and return the content encryption key.
+func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
+ epk, err := headers.getEPK()
+ if err != nil {
+ return nil, errors.New("go-jose/go-jose: invalid epk header")
+ }
+ if epk == nil {
+ return nil, errors.New("go-jose/go-jose: missing epk header")
+ }
+
+ publicKey, ok := epk.Key.(*ecdsa.PublicKey)
+ if publicKey == nil || !ok {
+ return nil, errors.New("go-jose/go-jose: invalid epk header")
+ }
+
+ if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) {
+ return nil, errors.New("go-jose/go-jose: invalid public key in epk header")
+ }
+
+ apuData, err := headers.getAPU()
+ if err != nil {
+ return nil, errors.New("go-jose/go-jose: invalid apu header")
+ }
+ apvData, err := headers.getAPV()
+ if err != nil {
+ return nil, errors.New("go-jose/go-jose: invalid apv header")
+ }
+
+ deriveKey := func(algID string, size int) []byte {
+ return josecipher.DeriveECDHES(algID, apuData.bytes(), apvData.bytes(), ctx.privateKey, publicKey, size)
+ }
+
+ var keySize int
+
+ algorithm := headers.getAlgorithm()
+ switch algorithm {
+ case ECDH_ES:
+ // ECDH-ES uses direct key agreement, no key unwrapping necessary.
+ return deriveKey(string(headers.getEncryption()), generator.keySize()), nil
+ case ECDH_ES_A128KW:
+ keySize = 16
+ case ECDH_ES_A192KW:
+ keySize = 24
+ case ECDH_ES_A256KW:
+ keySize = 32
+ default:
+ return nil, ErrUnsupportedAlgorithm
+ }
+
+ key := deriveKey(string(algorithm), keySize)
+ block, err := aes.NewCipher(key)
+ if err != nil {
+ return nil, err
+ }
+
+ return josecipher.KeyUnwrap(block, recipient.encryptedKey)
+}
+
+func (ctx edDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
+ if alg != EdDSA {
+ return Signature{}, ErrUnsupportedAlgorithm
+ }
+
+ sig, err := ctx.privateKey.Sign(RandReader, payload, crypto.Hash(0))
+ if err != nil {
+ return Signature{}, err
+ }
+
+ return Signature{
+ Signature: sig,
+ protected: &rawHeader{},
+ }, nil
+}
+
+func (ctx edEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
+ if alg != EdDSA {
+ return ErrUnsupportedAlgorithm
+ }
+ ok := ed25519.Verify(ctx.publicKey, payload, signature)
+ if !ok {
+ return errors.New("go-jose/go-jose: ed25519 signature failed to verify")
+ }
+ return nil
+}
+
+// Sign the given payload
+func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
+ var expectedBitSize int
+ var hash crypto.Hash
+
+ switch alg {
+ case ES256:
+ expectedBitSize = 256
+ hash = crypto.SHA256
+ case ES384:
+ expectedBitSize = 384
+ hash = crypto.SHA384
+ case ES512:
+ expectedBitSize = 521
+ hash = crypto.SHA512
+ }
+
+ curveBits := ctx.privateKey.Curve.Params().BitSize
+ if expectedBitSize != curveBits {
+ return Signature{}, fmt.Errorf("go-jose/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits)
+ }
+
+ hasher := hash.New()
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hasher.Write(payload)
+ hashed := hasher.Sum(nil)
+
+ r, s, err := ecdsa.Sign(RandReader, ctx.privateKey, hashed)
+ if err != nil {
+ return Signature{}, err
+ }
+
+ keyBytes := curveBits / 8
+ if curveBits%8 > 0 {
+ keyBytes++
+ }
+
+ // We serialize the outputs (r and s) into big-endian byte arrays and pad
+ // them with zeros on the left to make sure the sizes work out. Both arrays
+ // must be keyBytes long, and the output must be 2*keyBytes long.
+ rBytes := r.Bytes()
+ rBytesPadded := make([]byte, keyBytes)
+ copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
+
+ sBytes := s.Bytes()
+ sBytesPadded := make([]byte, keyBytes)
+ copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
+
+ out := append(rBytesPadded, sBytesPadded...)
+
+ return Signature{
+ Signature: out,
+ protected: &rawHeader{},
+ }, nil
+}
+
+// Verify the given payload
+func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
+ var keySize int
+ var hash crypto.Hash
+
+ switch alg {
+ case ES256:
+ keySize = 32
+ hash = crypto.SHA256
+ case ES384:
+ keySize = 48
+ hash = crypto.SHA384
+ case ES512:
+ keySize = 66
+ hash = crypto.SHA512
+ default:
+ return ErrUnsupportedAlgorithm
+ }
+
+ if len(signature) != 2*keySize {
+ return fmt.Errorf("go-jose/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize)
+ }
+
+ hasher := hash.New()
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hasher.Write(payload)
+ hashed := hasher.Sum(nil)
+
+ r := big.NewInt(0).SetBytes(signature[:keySize])
+ s := big.NewInt(0).SetBytes(signature[keySize:])
+
+ match := ecdsa.Verify(ctx.publicKey, hashed, r, s)
+ if !match {
+ return errors.New("go-jose/go-jose: ecdsa signature failed to verify")
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/go-jose/go-jose/v4/cipher/cbc_hmac.go b/vendor/github.com/go-jose/go-jose/v4/cipher/cbc_hmac.go
new file mode 100644
index 00000000..af029cec
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/cipher/cbc_hmac.go
@@ -0,0 +1,196 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "bytes"
+ "crypto/cipher"
+ "crypto/hmac"
+ "crypto/sha256"
+ "crypto/sha512"
+ "crypto/subtle"
+ "encoding/binary"
+ "errors"
+ "hash"
+)
+
+const (
+ nonceBytes = 16
+)
+
+// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC.
+func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) {
+ keySize := len(key) / 2
+ integrityKey := key[:keySize]
+ encryptionKey := key[keySize:]
+
+ blockCipher, err := newBlockCipher(encryptionKey)
+ if err != nil {
+ return nil, err
+ }
+
+ var hash func() hash.Hash
+ switch keySize {
+ case 16:
+ hash = sha256.New
+ case 24:
+ hash = sha512.New384
+ case 32:
+ hash = sha512.New
+ }
+
+ return &cbcAEAD{
+ hash: hash,
+ blockCipher: blockCipher,
+ authtagBytes: keySize,
+ integrityKey: integrityKey,
+ }, nil
+}
+
+// An AEAD based on CBC+HMAC
+type cbcAEAD struct {
+ hash func() hash.Hash
+ authtagBytes int
+ integrityKey []byte
+ blockCipher cipher.Block
+}
+
+func (ctx *cbcAEAD) NonceSize() int {
+ return nonceBytes
+}
+
+func (ctx *cbcAEAD) Overhead() int {
+ // Maximum overhead is block size (for padding) plus auth tag length, where
+ // the length of the auth tag is equivalent to the key size.
+ return ctx.blockCipher.BlockSize() + ctx.authtagBytes
+}
+
+// Seal encrypts and authenticates the plaintext.
+func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte {
+ // Output buffer -- must take care not to mangle plaintext input.
+ ciphertext := make([]byte, uint64(len(plaintext))+uint64(ctx.Overhead()))[:len(plaintext)]
+ copy(ciphertext, plaintext)
+ ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize())
+
+ cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce)
+
+ cbc.CryptBlocks(ciphertext, ciphertext)
+ authtag := ctx.computeAuthTag(data, nonce, ciphertext)
+
+ ret, out := resize(dst, uint64(len(dst))+uint64(len(ciphertext))+uint64(len(authtag)))
+ copy(out, ciphertext)
+ copy(out[len(ciphertext):], authtag)
+
+ return ret
+}
+
+// Open decrypts and authenticates the ciphertext.
+func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
+ if len(ciphertext) < ctx.authtagBytes {
+ return nil, errors.New("go-jose/go-jose: invalid ciphertext (too short)")
+ }
+
+ offset := len(ciphertext) - ctx.authtagBytes
+ expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset])
+ match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:])
+ if match != 1 {
+ return nil, errors.New("go-jose/go-jose: invalid ciphertext (auth tag mismatch)")
+ }
+
+ cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce)
+
+ // Make copy of ciphertext buffer, don't want to modify in place
+ buffer := append([]byte{}, ciphertext[:offset]...)
+
+ if len(buffer)%ctx.blockCipher.BlockSize() > 0 {
+ return nil, errors.New("go-jose/go-jose: invalid ciphertext (invalid length)")
+ }
+
+ cbc.CryptBlocks(buffer, buffer)
+
+ // Remove padding
+ plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize())
+ if err != nil {
+ return nil, err
+ }
+
+ ret, out := resize(dst, uint64(len(dst))+uint64(len(plaintext)))
+ copy(out, plaintext)
+
+ return ret, nil
+}
+
+// Compute an authentication tag
+func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte {
+ buffer := make([]byte, uint64(len(aad))+uint64(len(nonce))+uint64(len(ciphertext))+8)
+ n := 0
+ n += copy(buffer, aad)
+ n += copy(buffer[n:], nonce)
+ n += copy(buffer[n:], ciphertext)
+ binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad))*8)
+
+ // According to documentation, Write() on hash.Hash never fails.
+ hmac := hmac.New(ctx.hash, ctx.integrityKey)
+ _, _ = hmac.Write(buffer)
+
+ return hmac.Sum(nil)[:ctx.authtagBytes]
+}
+
+// resize ensures that the given slice has a capacity of at least n bytes.
+// If the capacity of the slice is less than n, a new slice is allocated
+// and the existing data will be copied.
+func resize(in []byte, n uint64) (head, tail []byte) {
+ if uint64(cap(in)) >= n {
+ head = in[:n]
+ } else {
+ head = make([]byte, n)
+ copy(head, in)
+ }
+
+ tail = head[len(in):]
+ return
+}
+
+// Apply padding
+func padBuffer(buffer []byte, blockSize int) []byte {
+ missing := blockSize - (len(buffer) % blockSize)
+ ret, out := resize(buffer, uint64(len(buffer))+uint64(missing))
+ padding := bytes.Repeat([]byte{byte(missing)}, missing)
+ copy(out, padding)
+ return ret
+}
+
+// Remove padding
+func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) {
+ if len(buffer)%blockSize != 0 {
+ return nil, errors.New("go-jose/go-jose: invalid padding")
+ }
+
+ last := buffer[len(buffer)-1]
+ count := int(last)
+
+ if count == 0 || count > blockSize || count > len(buffer) {
+ return nil, errors.New("go-jose/go-jose: invalid padding")
+ }
+
+ padding := bytes.Repeat([]byte{last}, count)
+ if !bytes.HasSuffix(buffer, padding) {
+ return nil, errors.New("go-jose/go-jose: invalid padding")
+ }
+
+ return buffer[:len(buffer)-count], nil
+}
diff --git a/vendor/github.com/go-jose/go-jose/v4/cipher/concat_kdf.go b/vendor/github.com/go-jose/go-jose/v4/cipher/concat_kdf.go
new file mode 100644
index 00000000..f62c3bdb
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/cipher/concat_kdf.go
@@ -0,0 +1,75 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "crypto"
+ "encoding/binary"
+ "hash"
+ "io"
+)
+
+type concatKDF struct {
+ z, info []byte
+ i uint32
+ cache []byte
+ hasher hash.Hash
+}
+
+// NewConcatKDF builds a KDF reader based on the given inputs.
+func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader {
+ buffer := make([]byte, uint64(len(algID))+uint64(len(ptyUInfo))+uint64(len(ptyVInfo))+uint64(len(supPubInfo))+uint64(len(supPrivInfo)))
+ n := 0
+ n += copy(buffer, algID)
+ n += copy(buffer[n:], ptyUInfo)
+ n += copy(buffer[n:], ptyVInfo)
+ n += copy(buffer[n:], supPubInfo)
+ copy(buffer[n:], supPrivInfo)
+
+ hasher := hash.New()
+
+ return &concatKDF{
+ z: z,
+ info: buffer,
+ hasher: hasher,
+ cache: []byte{},
+ i: 1,
+ }
+}
+
+func (ctx *concatKDF) Read(out []byte) (int, error) {
+ copied := copy(out, ctx.cache)
+ ctx.cache = ctx.cache[copied:]
+
+ for copied < len(out) {
+ ctx.hasher.Reset()
+
+ // Write on a hash.Hash never fails
+ _ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i)
+ _, _ = ctx.hasher.Write(ctx.z)
+ _, _ = ctx.hasher.Write(ctx.info)
+
+ hash := ctx.hasher.Sum(nil)
+ chunkCopied := copy(out[copied:], hash)
+ copied += chunkCopied
+ ctx.cache = hash[chunkCopied:]
+
+ ctx.i++
+ }
+
+ return copied, nil
+}
diff --git a/vendor/github.com/go-jose/go-jose/v4/cipher/ecdh_es.go b/vendor/github.com/go-jose/go-jose/v4/cipher/ecdh_es.go
new file mode 100644
index 00000000..093c6467
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/cipher/ecdh_es.go
@@ -0,0 +1,86 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "encoding/binary"
+)
+
+// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA.
+// It is an error to call this function with a private/public key that are not on the same
+// curve. Callers must ensure that the keys are valid before calling this function. Output
+// size may be at most 1<<16 bytes (64 KiB).
+func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte {
+ if size > 1<<16 {
+ panic("ECDH-ES output size too large, must be less than or equal to 1<<16")
+ }
+
+ // algId, partyUInfo, partyVInfo inputs must be prefixed with the length
+ algID := lengthPrefixed([]byte(alg))
+ ptyUInfo := lengthPrefixed(apuData)
+ ptyVInfo := lengthPrefixed(apvData)
+
+ // suppPubInfo is the encoded length of the output size in bits
+ supPubInfo := make([]byte, 4)
+ binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8)
+
+ if !priv.PublicKey.Curve.IsOnCurve(pub.X, pub.Y) {
+ panic("public key not on same curve as private key")
+ }
+
+ z, _ := priv.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes())
+ zBytes := z.Bytes()
+
+ // Note that calling z.Bytes() on a big.Int may strip leading zero bytes from
+ // the returned byte array. This can lead to a problem where zBytes will be
+ // shorter than expected which breaks the key derivation. Therefore we must pad
+ // to the full length of the expected coordinate here before calling the KDF.
+ octSize := dSize(priv.Curve)
+ if len(zBytes) != octSize {
+ zBytes = append(bytes.Repeat([]byte{0}, octSize-len(zBytes)), zBytes...)
+ }
+
+ reader := NewConcatKDF(crypto.SHA256, zBytes, algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{})
+ key := make([]byte, size)
+
+ // Read on the KDF will never fail
+ _, _ = reader.Read(key)
+
+ return key
+}
+
+// dSize returns the size in octets for a coordinate on an elliptic curve.
+func dSize(curve elliptic.Curve) int {
+ order := curve.Params().P
+ bitLen := order.BitLen()
+ size := bitLen / 8
+ if bitLen%8 != 0 {
+ size++
+ }
+ return size
+}
+
+func lengthPrefixed(data []byte) []byte {
+ out := make([]byte, len(data)+4)
+ binary.BigEndian.PutUint32(out, uint32(len(data)))
+ copy(out[4:], data)
+ return out
+}
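
Because ECDH is symmetric, both parties derive the same key when each combines its own private key with the other's public key. A short sketch of that round trip, assuming two freshly generated P-256 key pairs and using only the exported DeriveECDHES from the file above:

```go
package main

import (
	"bytes"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	josecipher "github.com/go-jose/go-jose/v4/cipher"
)

func main() {
	alice, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	bob, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)

	// Each side derives a 16-byte CEK from its private key and the peer's public key.
	k1 := josecipher.DeriveECDHES("A128GCM", nil, nil, alice, &bob.PublicKey, 16)
	k2 := josecipher.DeriveECDHES("A128GCM", nil, nil, bob, &alice.PublicKey, 16)

	fmt.Println(bytes.Equal(k1, k2)) // true: both sides agree on the key
}
```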
diff --git a/vendor/github.com/go-jose/go-jose/v4/cipher/key_wrap.go b/vendor/github.com/go-jose/go-jose/v4/cipher/key_wrap.go
new file mode 100644
index 00000000..b9effbca
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/cipher/key_wrap.go
@@ -0,0 +1,109 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "crypto/cipher"
+ "crypto/subtle"
+ "encoding/binary"
+ "errors"
+)
+
+var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6}
+
+// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher.
+func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) {
+ if len(cek)%8 != 0 {
+ return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks")
+ }
+
+ n := len(cek) / 8
+ r := make([][]byte, n)
+
+ for i := range r {
+ r[i] = make([]byte, 8)
+ copy(r[i], cek[i*8:])
+ }
+
+ buffer := make([]byte, 16)
+ tBytes := make([]byte, 8)
+ copy(buffer, defaultIV)
+
+ for t := 0; t < 6*n; t++ {
+ copy(buffer[8:], r[t%n])
+
+ block.Encrypt(buffer, buffer)
+
+ binary.BigEndian.PutUint64(tBytes, uint64(t+1))
+
+ for i := 0; i < 8; i++ {
+ buffer[i] ^= tBytes[i]
+ }
+ copy(r[t%n], buffer[8:])
+ }
+
+ out := make([]byte, (n+1)*8)
+ copy(out, buffer[:8])
+ for i := range r {
+ copy(out[(i+1)*8:], r[i])
+ }
+
+ return out, nil
+}
+
+// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher.
+func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) {
+ if len(ciphertext)%8 != 0 {
+ return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks")
+ }
+
+ n := (len(ciphertext) / 8) - 1
+ r := make([][]byte, n)
+
+ for i := range r {
+ r[i] = make([]byte, 8)
+ copy(r[i], ciphertext[(i+1)*8:])
+ }
+
+ buffer := make([]byte, 16)
+ tBytes := make([]byte, 8)
+ copy(buffer[:8], ciphertext[:8])
+
+ for t := 6*n - 1; t >= 0; t-- {
+ binary.BigEndian.PutUint64(tBytes, uint64(t+1))
+
+ for i := 0; i < 8; i++ {
+ buffer[i] ^= tBytes[i]
+ }
+ copy(buffer[8:], r[t%n])
+
+ block.Decrypt(buffer, buffer)
+
+ copy(r[t%n], buffer[8:])
+ }
+
+ if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 {
+ return nil, errors.New("go-jose/go-jose: failed to unwrap key")
+ }
+
+ out := make([]byte, n*8)
+ for i := range r {
+ copy(out[i*8:], r[i])
+ }
+
+ return out, nil
+}
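
A round trip through the two functions above, using an AES-128 block cipher as the key-encryption key. As the `(n+1)*8` output allocation shows, wrapping adds one 8-byte integrity block, so a 16-byte CEK wraps to 24 bytes. The all-zero KEK here is purely for the sketch.

```go
package main

import (
	"bytes"
	"crypto/aes"
	"fmt"

	josecipher "github.com/go-jose/go-jose/v4/cipher"
)

func main() {
	kek, err := aes.NewCipher(make([]byte, 16)) // all-zero 128-bit KEK, for illustration only
	if err != nil {
		panic(err)
	}
	cek := []byte("0123456789abcdef") // 16-byte content-encryption key

	wrapped, err := josecipher.KeyWrap(kek, cek)
	if err != nil {
		panic(err)
	}
	unwrapped, err := josecipher.KeyUnwrap(kek, wrapped)
	if err != nil {
		panic(err)
	}

	fmt.Println(len(wrapped), bytes.Equal(cek, unwrapped)) // 24 true
}
```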
diff --git a/vendor/github.com/go-jose/go-jose/v4/crypter.go b/vendor/github.com/go-jose/go-jose/v4/crypter.go
new file mode 100644
index 00000000..aba08424
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/crypter.go
@@ -0,0 +1,593 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "errors"
+ "fmt"
+
+ "github.com/go-jose/go-jose/v4/json"
+)
+
+// Encrypter represents an encrypter which produces an encrypted JWE object.
+type Encrypter interface {
+ Encrypt(plaintext []byte) (*JSONWebEncryption, error)
+ EncryptWithAuthData(plaintext []byte, aad []byte) (*JSONWebEncryption, error)
+ Options() EncrypterOptions
+}
+
+// A generic content cipher
+type contentCipher interface {
+ keySize() int
+ encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error)
+ decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error)
+}
+
+// A key generator (for generating/getting a CEK)
+type keyGenerator interface {
+ keySize() int
+ genKey() ([]byte, rawHeader, error)
+}
+
+// A generic key encrypter
+type keyEncrypter interface {
+ encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key
+}
+
+// A generic key decrypter
+type keyDecrypter interface {
+ decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key
+}
+
+// A generic encrypter based on the given key encrypter and content cipher.
+type genericEncrypter struct {
+ contentAlg ContentEncryption
+ compressionAlg CompressionAlgorithm
+ cipher contentCipher
+ recipients []recipientKeyInfo
+ keyGenerator keyGenerator
+ extraHeaders map[HeaderKey]interface{}
+}
+
+type recipientKeyInfo struct {
+ keyID string
+ keyAlg KeyAlgorithm
+ keyEncrypter keyEncrypter
+}
+
+// EncrypterOptions represents options that can be set on new encrypters.
+type EncrypterOptions struct {
+ Compression CompressionAlgorithm
+
+	// Optional map of name/value pairs to be inserted into the protected
+	// header of a JWE object. Some specifications which make use of
+	// JWE require additional values here.
+ //
+ // Values will be serialized by [json.Marshal] and must be valid inputs to
+ // that function.
+ //
+ // [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal
+ ExtraHeaders map[HeaderKey]interface{}
+}
+
+// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it
+// if necessary, and returns the updated EncrypterOptions.
+//
+// The v parameter will be serialized by [json.Marshal] and must be a valid
+// input to that function.
+//
+// [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal
+func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions {
+ if eo.ExtraHeaders == nil {
+ eo.ExtraHeaders = map[HeaderKey]interface{}{}
+ }
+ eo.ExtraHeaders[k] = v
+ return eo
+}
+
+// WithContentType adds a content type ("cty") header and returns the updated
+// EncrypterOptions.
+func (eo *EncrypterOptions) WithContentType(contentType ContentType) *EncrypterOptions {
+ return eo.WithHeader(HeaderContentType, contentType)
+}
+
+// WithType adds a type ("typ") header and returns the updated EncrypterOptions.
+func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions {
+ return eo.WithHeader(HeaderType, typ)
+}
+
+// Recipient represents an algorithm/key to encrypt messages to.
+//
+// PBES2Count and PBES2Salt correspond with the "p2c" and "p2s" headers used
+// on the password-based encryption algorithms PBES2-HS256+A128KW,
+// PBES2-HS384+A192KW, and PBES2-HS512+A256KW. If they are not provided a safe
+// default of 100000 will be used for the count and a 128-bit random salt will
+// be generated.
+type Recipient struct {
+ Algorithm KeyAlgorithm
+ // Key must have one of these types:
+ // - ed25519.PublicKey
+ // - *ecdsa.PublicKey
+ // - *rsa.PublicKey
+ // - *JSONWebKey
+ // - JSONWebKey
+ // - []byte (a symmetric key)
+ // - Any type that satisfies the OpaqueKeyEncrypter interface
+ //
+ // The type of Key must match the value of Algorithm.
+ Key interface{}
+ KeyID string
+ PBES2Count int
+ PBES2Salt []byte
+}
+
+// NewEncrypter creates an appropriate encrypter based on the key type
+func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) (Encrypter, error) {
+ encrypter := &genericEncrypter{
+ contentAlg: enc,
+ recipients: []recipientKeyInfo{},
+ cipher: getContentCipher(enc),
+ }
+ if opts != nil {
+ encrypter.compressionAlg = opts.Compression
+ encrypter.extraHeaders = opts.ExtraHeaders
+ }
+
+ if encrypter.cipher == nil {
+ return nil, ErrUnsupportedAlgorithm
+ }
+
+ var keyID string
+ var rawKey interface{}
+ switch encryptionKey := rcpt.Key.(type) {
+ case JSONWebKey:
+ keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key
+ case *JSONWebKey:
+ keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key
+ case OpaqueKeyEncrypter:
+ keyID, rawKey = encryptionKey.KeyID(), encryptionKey
+ default:
+ rawKey = encryptionKey
+ }
+
+ switch rcpt.Algorithm {
+ case DIRECT:
+ // Direct encryption mode must be treated differently
+ keyBytes, ok := rawKey.([]byte)
+ if !ok {
+ return nil, ErrUnsupportedKeyType
+ }
+ if encrypter.cipher.keySize() != len(keyBytes) {
+ return nil, ErrInvalidKeySize
+ }
+ encrypter.keyGenerator = staticKeyGenerator{
+ key: keyBytes,
+ }
+ recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, keyBytes)
+ recipientInfo.keyID = keyID
+ if rcpt.KeyID != "" {
+ recipientInfo.keyID = rcpt.KeyID
+ }
+ encrypter.recipients = []recipientKeyInfo{recipientInfo}
+ return encrypter, nil
+ case ECDH_ES:
+ // ECDH-ES (w/o key wrapping) is similar to DIRECT mode
+ keyDSA, ok := rawKey.(*ecdsa.PublicKey)
+ if !ok {
+ return nil, ErrUnsupportedKeyType
+ }
+ encrypter.keyGenerator = ecKeyGenerator{
+ size: encrypter.cipher.keySize(),
+ algID: string(enc),
+ publicKey: keyDSA,
+ }
+ recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, keyDSA)
+ recipientInfo.keyID = keyID
+ if rcpt.KeyID != "" {
+ recipientInfo.keyID = rcpt.KeyID
+ }
+ encrypter.recipients = []recipientKeyInfo{recipientInfo}
+ return encrypter, nil
+ default:
+ // Can just add a standard recipient
+ encrypter.keyGenerator = randomKeyGenerator{
+ size: encrypter.cipher.keySize(),
+ }
+ err := encrypter.addRecipient(rcpt)
+ return encrypter, err
+ }
+}
+
+// NewMultiEncrypter creates a multi-encrypter based on the given parameters
+func NewMultiEncrypter(enc ContentEncryption, rcpts []Recipient, opts *EncrypterOptions) (Encrypter, error) {
+ cipher := getContentCipher(enc)
+
+ if cipher == nil {
+ return nil, ErrUnsupportedAlgorithm
+ }
+ if len(rcpts) == 0 {
+ return nil, fmt.Errorf("go-jose/go-jose: recipients is nil or empty")
+ }
+
+ encrypter := &genericEncrypter{
+ contentAlg: enc,
+ recipients: []recipientKeyInfo{},
+ cipher: cipher,
+ keyGenerator: randomKeyGenerator{
+ size: cipher.keySize(),
+ },
+ }
+
+ if opts != nil {
+ encrypter.compressionAlg = opts.Compression
+ encrypter.extraHeaders = opts.ExtraHeaders
+ }
+
+ for _, recipient := range rcpts {
+ err := encrypter.addRecipient(recipient)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return encrypter, nil
+}
+
+func (ctx *genericEncrypter) addRecipient(recipient Recipient) (err error) {
+ var recipientInfo recipientKeyInfo
+
+ switch recipient.Algorithm {
+ case DIRECT, ECDH_ES:
+ return fmt.Errorf("go-jose/go-jose: key algorithm '%s' not supported in multi-recipient mode", recipient.Algorithm)
+ }
+
+ recipientInfo, err = makeJWERecipient(recipient.Algorithm, recipient.Key)
+ if recipient.KeyID != "" {
+ recipientInfo.keyID = recipient.KeyID
+ }
+
+ switch recipient.Algorithm {
+ case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW:
+ if sr, ok := recipientInfo.keyEncrypter.(*symmetricKeyCipher); ok {
+ sr.p2c = recipient.PBES2Count
+ sr.p2s = recipient.PBES2Salt
+ }
+ }
+
+ if err == nil {
+ ctx.recipients = append(ctx.recipients, recipientInfo)
+ }
+ return err
+}
+
+func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) {
+ switch encryptionKey := encryptionKey.(type) {
+ case *rsa.PublicKey:
+ return newRSARecipient(alg, encryptionKey)
+ case *ecdsa.PublicKey:
+ return newECDHRecipient(alg, encryptionKey)
+ case []byte:
+ return newSymmetricRecipient(alg, encryptionKey)
+ case string:
+ return newSymmetricRecipient(alg, []byte(encryptionKey))
+ case *JSONWebKey:
+ recipient, err := makeJWERecipient(alg, encryptionKey.Key)
+ recipient.keyID = encryptionKey.KeyID
+ return recipient, err
+ case OpaqueKeyEncrypter:
+ return newOpaqueKeyEncrypter(alg, encryptionKey)
+ }
+ return recipientKeyInfo{}, ErrUnsupportedKeyType
+}
+
+// newDecrypter creates an appropriate decrypter based on the key type
+func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) {
+ switch decryptionKey := decryptionKey.(type) {
+ case *rsa.PrivateKey:
+ return &rsaDecrypterSigner{
+ privateKey: decryptionKey,
+ }, nil
+ case *ecdsa.PrivateKey:
+ return &ecDecrypterSigner{
+ privateKey: decryptionKey,
+ }, nil
+ case []byte:
+ return &symmetricKeyCipher{
+ key: decryptionKey,
+ }, nil
+ case string:
+ return &symmetricKeyCipher{
+ key: []byte(decryptionKey),
+ }, nil
+ case JSONWebKey:
+ return newDecrypter(decryptionKey.Key)
+ case *JSONWebKey:
+ return newDecrypter(decryptionKey.Key)
+ case OpaqueKeyDecrypter:
+ return &opaqueKeyDecrypter{decrypter: decryptionKey}, nil
+ default:
+ return nil, ErrUnsupportedKeyType
+ }
+}
+
+// Implementation of encrypt method producing a JWE object.
+func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JSONWebEncryption, error) {
+ return ctx.EncryptWithAuthData(plaintext, nil)
+}
+
+// Implementation of encrypt method producing a JWE object.
+func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JSONWebEncryption, error) {
+ obj := &JSONWebEncryption{}
+ obj.aad = aad
+
+ obj.protected = &rawHeader{}
+ err := obj.protected.set(headerEncryption, ctx.contentAlg)
+ if err != nil {
+ return nil, err
+ }
+
+ obj.recipients = make([]recipientInfo, len(ctx.recipients))
+
+ if len(ctx.recipients) == 0 {
+ return nil, fmt.Errorf("go-jose/go-jose: no recipients to encrypt to")
+ }
+
+ cek, headers, err := ctx.keyGenerator.genKey()
+ if err != nil {
+ return nil, err
+ }
+
+ obj.protected.merge(&headers)
+
+ for i, info := range ctx.recipients {
+ recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg)
+ if err != nil {
+ return nil, err
+ }
+
+ err = recipient.header.set(headerAlgorithm, info.keyAlg)
+ if err != nil {
+ return nil, err
+ }
+
+ if info.keyID != "" {
+ err = recipient.header.set(headerKeyID, info.keyID)
+ if err != nil {
+ return nil, err
+ }
+ }
+ obj.recipients[i] = recipient
+ }
+
+ if len(ctx.recipients) == 1 {
+ // Move per-recipient headers into main protected header if there's
+ // only a single recipient.
+ obj.protected.merge(obj.recipients[0].header)
+ obj.recipients[0].header = nil
+ }
+
+ if ctx.compressionAlg != NONE {
+ plaintext, err = compress(ctx.compressionAlg, plaintext)
+ if err != nil {
+ return nil, err
+ }
+
+ err = obj.protected.set(headerCompression, ctx.compressionAlg)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ for k, v := range ctx.extraHeaders {
+ b, err := json.Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ (*obj.protected)[k] = makeRawMessage(b)
+ }
+
+ authData := obj.computeAuthData()
+ parts, err := ctx.cipher.encrypt(cek, authData, plaintext)
+ if err != nil {
+ return nil, err
+ }
+
+ obj.iv = parts.iv
+ obj.ciphertext = parts.ciphertext
+ obj.tag = parts.tag
+
+ return obj, nil
+}
+
+func (ctx *genericEncrypter) Options() EncrypterOptions {
+ return EncrypterOptions{
+ Compression: ctx.compressionAlg,
+ ExtraHeaders: ctx.extraHeaders,
+ }
+}
+
+// Decrypt and validate the object and return the plaintext. This
+// function does not support multi-recipient. If you desire multi-recipient
+// decryption use DecryptMulti instead.
+//
+// The decryptionKey argument must contain a private or symmetric key
+// and must have one of these types:
+// - *ecdsa.PrivateKey
+// - *rsa.PrivateKey
+// - *JSONWebKey
+// - JSONWebKey
+// - *JSONWebKeySet
+// - JSONWebKeySet
+// - []byte (a symmetric key)
+// - string (a symmetric key)
+// - Any type that satisfies the OpaqueKeyDecrypter interface.
+//
+// Note that ed25519 is only available for signatures, not encryption, so is
+// not an option here.
+//
+// Automatically decompresses plaintext, but returns an error if the decompressed
+// data would be >250kB or >10x the size of the compressed data, whichever is larger.
+func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) {
+ headers := obj.mergedHeaders(nil)
+
+ if len(obj.recipients) > 1 {
+ return nil, errors.New("go-jose/go-jose: too many recipients in payload; expecting only one")
+ }
+
+ critical, err := headers.getCritical()
+ if err != nil {
+ return nil, fmt.Errorf("go-jose/go-jose: invalid crit header")
+ }
+
+ if len(critical) > 0 {
+ return nil, fmt.Errorf("go-jose/go-jose: unsupported crit header")
+ }
+
+ key := tryJWKS(decryptionKey, obj.Header)
+ decrypter, err := newDecrypter(key)
+ if err != nil {
+ return nil, err
+ }
+
+ cipher := getContentCipher(headers.getEncryption())
+ if cipher == nil {
+ return nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(headers.getEncryption()))
+ }
+
+ generator := randomKeyGenerator{
+ size: cipher.keySize(),
+ }
+
+ parts := &aeadParts{
+ iv: obj.iv,
+ ciphertext: obj.ciphertext,
+ tag: obj.tag,
+ }
+
+ authData := obj.computeAuthData()
+
+ var plaintext []byte
+ recipient := obj.recipients[0]
+ recipientHeaders := obj.mergedHeaders(&recipient)
+
+ cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator)
+ if err == nil {
+ // Found a valid CEK -- let's try to decrypt.
+ plaintext, err = cipher.decrypt(cek, authData, parts)
+ }
+
+ if plaintext == nil {
+ return nil, ErrCryptoFailure
+ }
+
+ // The "zip" header parameter may only be present in the protected header.
+ if comp := obj.protected.getCompression(); comp != "" {
+ plaintext, err = decompress(comp, plaintext)
+ if err != nil {
+ return nil, fmt.Errorf("go-jose/go-jose: failed to decompress plaintext: %v", err)
+ }
+ }
+
+ return plaintext, nil
+}
+
+// DecryptMulti decrypts and validates the object and returns the plaintext,
+// with support for multiple recipients. It returns the index of the recipient
+// for which the decryption was successful, the merged headers for that recipient,
+// and the plaintext.
+//
+// The decryptionKey argument must have one of the types allowed for the
+// decryptionKey argument of Decrypt().
+//
+// Automatically decompresses plaintext, but returns an error if the decompressed
+// data would be >250kB or >10x the size of the compressed data, whichever is larger.
+func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) {
+ globalHeaders := obj.mergedHeaders(nil)
+
+ critical, err := globalHeaders.getCritical()
+ if err != nil {
+ return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: invalid crit header")
+ }
+
+ if len(critical) > 0 {
+ return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported crit header")
+ }
+
+ key := tryJWKS(decryptionKey, obj.Header)
+ decrypter, err := newDecrypter(key)
+ if err != nil {
+ return -1, Header{}, nil, err
+ }
+
+ encryption := globalHeaders.getEncryption()
+ cipher := getContentCipher(encryption)
+ if cipher == nil {
+ return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(encryption))
+ }
+
+ generator := randomKeyGenerator{
+ size: cipher.keySize(),
+ }
+
+ parts := &aeadParts{
+ iv: obj.iv,
+ ciphertext: obj.ciphertext,
+ tag: obj.tag,
+ }
+
+ authData := obj.computeAuthData()
+
+ index := -1
+ var plaintext []byte
+ var headers rawHeader
+
+ for i, recipient := range obj.recipients {
+ recipientHeaders := obj.mergedHeaders(&recipient)
+
+ cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator)
+ if err == nil {
+ // Found a valid CEK -- let's try to decrypt.
+ plaintext, err = cipher.decrypt(cek, authData, parts)
+ if err == nil {
+ index = i
+ headers = recipientHeaders
+ break
+ }
+ }
+ }
+
+ if plaintext == nil {
+ return -1, Header{}, nil, ErrCryptoFailure
+ }
+
+ // The "zip" header parameter may only be present in the protected header.
+ if comp := obj.protected.getCompression(); comp != "" {
+ plaintext, err = decompress(comp, plaintext)
+ if err != nil {
+ return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to decompress plaintext: %v", err)
+ }
+ }
+
+ sanitized, err := headers.sanitized()
+ if err != nil {
+ return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to sanitize header: %v", err)
+ }
+
+ return index, sanitized, plaintext, err
+}
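
Putting the Encrypter and Decrypt pieces above together, a single-recipient round trip looks roughly like the sketch below. The algorithm constants (jose.A128GCM, jose.RSA_OAEP) are part of go-jose's public API but are defined outside this file, so treat their exact names as an assumption of the sketch; everything else uses only the types shown in this diff.

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	"github.com/go-jose/go-jose/v4"
)

func main() {
	priv, _ := rsa.GenerateKey(rand.Reader, 2048)

	// RSA-OAEP key encryption, A128GCM content encryption, no extra options.
	enc, err := jose.NewEncrypter(
		jose.A128GCM,
		jose.Recipient{Algorithm: jose.RSA_OAEP, Key: &priv.PublicKey},
		nil,
	)
	if err != nil {
		panic(err)
	}

	obj, err := enc.Encrypt([]byte("hello JWE"))
	if err != nil {
		panic(err)
	}

	// Decrypt the in-memory object with the matching private key.
	plaintext, err := obj.Decrypt(priv)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plaintext)) // hello JWE
}
```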
diff --git a/vendor/github.com/go-jose/go-jose/v4/doc.go b/vendor/github.com/go-jose/go-jose/v4/doc.go
new file mode 100644
index 00000000..0ad40ca0
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/doc.go
@@ -0,0 +1,25 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+Package jose aims to provide an implementation of the Javascript Object Signing
+and Encryption set of standards. It implements encryption and signing based on
+the JSON Web Encryption and JSON Web Signature standards, with optional JSON Web
+Token support available in a sub-package. The library supports both the compact
+and JWS/JWE JSON Serialization formats, and has optional support for multiple
+recipients.
+*/
+package jose
diff --git a/vendor/github.com/go-jose/go-jose/v4/encoding.go b/vendor/github.com/go-jose/go-jose/v4/encoding.go
new file mode 100644
index 00000000..4f6e0d4a
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/encoding.go
@@ -0,0 +1,228 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "bytes"
+ "compress/flate"
+ "encoding/base64"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math/big"
+ "strings"
+ "unicode"
+
+ "github.com/go-jose/go-jose/v4/json"
+)
+
+// Helper function to serialize known-good objects.
+// Precondition: value is not a nil pointer.
+func mustSerializeJSON(value interface{}) []byte {
+ out, err := json.Marshal(value)
+ if err != nil {
+ panic(err)
+ }
+ // We never want to serialize the top-level value "null," since it's not a
+ // valid JOSE message. But if a caller passes in a nil pointer to this method,
+ // MarshalJSON will happily serialize it as the top-level value "null". If
+ // that value is then embedded in another operation, for instance by being
+ // base64-encoded and fed as input to a signing algorithm
+ // (https://github.com/go-jose/go-jose/issues/22), the result will be
+ // incorrect. Because this method is intended for known-good objects, and a nil
+ // pointer is not a known-good object, we are free to panic in this case.
+ // Note: It's not possible to directly check whether the data pointed at by an
+ // interface is a nil pointer, so we do this hacky workaround.
+ // https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I
+ if string(out) == "null" {
+ panic("Tried to serialize a nil pointer.")
+ }
+ return out
+}
+
+// Strip all newlines and whitespace
+func stripWhitespace(data string) string {
+ buf := strings.Builder{}
+ buf.Grow(len(data))
+ for _, r := range data {
+ if !unicode.IsSpace(r) {
+ buf.WriteRune(r)
+ }
+ }
+ return buf.String()
+}
+
+// Perform compression based on algorithm
+func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) {
+ switch algorithm {
+ case DEFLATE:
+ return deflate(input)
+ default:
+ return nil, ErrUnsupportedAlgorithm
+ }
+}
+
+// Perform decompression based on algorithm
+func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) {
+ switch algorithm {
+ case DEFLATE:
+ return inflate(input)
+ default:
+ return nil, ErrUnsupportedAlgorithm
+ }
+}
+
+// deflate compresses the input.
+func deflate(input []byte) ([]byte, error) {
+ output := new(bytes.Buffer)
+
+ // Writing to byte buffer, err is always nil
+ writer, _ := flate.NewWriter(output, 1)
+ _, _ = io.Copy(writer, bytes.NewBuffer(input))
+
+ err := writer.Close()
+ return output.Bytes(), err
+}
+
+// inflate decompresses the input.
+//
+// Errors if the decompressed data would be >250kB or >10x the size of the
+// compressed data, whichever is larger.
+func inflate(input []byte) ([]byte, error) {
+ output := new(bytes.Buffer)
+ reader := flate.NewReader(bytes.NewBuffer(input))
+
+ maxCompressedSize := max(250_000, 10*int64(len(input)))
+
+ limit := maxCompressedSize + 1
+ n, err := io.CopyN(output, reader, limit)
+ if err != nil && err != io.EOF {
+ return nil, err
+ }
+ if n == limit {
+ return nil, fmt.Errorf("uncompressed data would be too large (>%d bytes)", maxCompressedSize)
+ }
+
+ err = reader.Close()
+ return output.Bytes(), err
+}
+
+// byteBuffer represents a slice of bytes that can be serialized to url-safe base64.
+type byteBuffer struct {
+ data []byte
+}
+
+func newBuffer(data []byte) *byteBuffer {
+ if data == nil {
+ return nil
+ }
+ return &byteBuffer{
+ data: data,
+ }
+}
+
+func newFixedSizeBuffer(data []byte, length int) *byteBuffer {
+ if len(data) > length {
+ panic("go-jose/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)")
+ }
+ pad := make([]byte, length-len(data))
+ return newBuffer(append(pad, data...))
+}
+
+func newBufferFromInt(num uint64) *byteBuffer {
+ data := make([]byte, 8)
+ binary.BigEndian.PutUint64(data, num)
+ return newBuffer(bytes.TrimLeft(data, "\x00"))
+}
+
+func (b *byteBuffer) MarshalJSON() ([]byte, error) {
+ return json.Marshal(b.base64())
+}
+
+func (b *byteBuffer) UnmarshalJSON(data []byte) error {
+ var encoded string
+ err := json.Unmarshal(data, &encoded)
+ if err != nil {
+ return err
+ }
+
+ if encoded == "" {
+ return nil
+ }
+
+ decoded, err := base64.RawURLEncoding.DecodeString(encoded)
+ if err != nil {
+ return err
+ }
+
+ *b = *newBuffer(decoded)
+
+ return nil
+}
+
+func (b *byteBuffer) base64() string {
+ return base64.RawURLEncoding.EncodeToString(b.data)
+}
+
+func (b *byteBuffer) bytes() []byte {
+ // Handling nil here allows us to transparently handle nil slices when serializing.
+ if b == nil {
+ return nil
+ }
+ return b.data
+}
+
+func (b byteBuffer) bigInt() *big.Int {
+ return new(big.Int).SetBytes(b.data)
+}
+
+func (b byteBuffer) toInt() int {
+ return int(b.bigInt().Int64())
+}
+
+func base64EncodeLen(sl []byte) int {
+ return base64.RawURLEncoding.EncodedLen(len(sl))
+}
+
+func base64JoinWithDots(inputs ...[]byte) string {
+ if len(inputs) == 0 {
+ return ""
+ }
+
+ // Count of dots.
+ totalCount := len(inputs) - 1
+
+ for _, input := range inputs {
+ totalCount += base64EncodeLen(input)
+ }
+
+ out := make([]byte, totalCount)
+ startEncode := 0
+ for i, input := range inputs {
+ base64.RawURLEncoding.Encode(out[startEncode:], input)
+
+ if i == len(inputs)-1 {
+ continue
+ }
+
+ startEncode += base64EncodeLen(input)
+ out[startEncode] = '.'
+ startEncode++
+ }
+
+ return string(out)
+}
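
base64JoinWithDots is what ultimately produces the compact serialization layout: raw-URL base64 segments joined by dots. A standalone sketch of the same idea using only the standard library; `joinWithDots` here is a hypothetical stand-in for the unexported helper above, not part of go-jose's API.

```go
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

// joinWithDots base64url-encodes each part and joins them with '.' separators.
func joinWithDots(parts ...[]byte) string {
	encoded := make([]string, len(parts))
	for i, p := range parts {
		encoded[i] = base64.RawURLEncoding.EncodeToString(p)
	}
	return strings.Join(encoded, ".")
}

func main() {
	out := joinWithDots([]byte(`{"alg":"dir"}`), []byte("payload"), []byte("tag"))
	fmt.Println(out) // eyJhbGciOiJkaXIifQ.cGF5bG9hZA.dGFn
}
```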
diff --git a/vendor/github.com/go-jose/go-jose/v4/json/LICENSE b/vendor/github.com/go-jose/go-jose/v4/json/LICENSE
new file mode 100644
index 00000000..74487567
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/json/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/go-jose/go-jose/v4/json/README.md b/vendor/github.com/go-jose/go-jose/v4/json/README.md
new file mode 100644
index 00000000..86de5e55
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/json/README.md
@@ -0,0 +1,13 @@
+# Safe JSON
+
+This repository contains a fork of the `encoding/json` package from Go 1.6.
+
+The following changes were made:
+
+* Object deserialization uses case-sensitive member name matching instead of
+ [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html).
+ This is to avoid differences in the interpretation of JOSE messages between
+ go-jose and libraries written in other languages.
+* When deserializing a JSON object, we check for duplicate keys and reject the
+ input whenever we detect a duplicate. Rather than trying to work with malformed
+ data, we prefer to reject it right away.
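
The duplicate-key behaviour described above is directly observable: where encoding/json silently keeps the last value, this fork returns an error. A minimal sketch using the forked package's Unmarshal (the error text matches the check added in decode.go below):

```go
package main

import (
	"fmt"

	"github.com/go-jose/go-jose/v4/json"
)

func main() {
	var out map[string]interface{}
	// Duplicate "alg" member: the forked decoder rejects it outright.
	err := json.Unmarshal([]byte(`{"alg":"RS256","alg":"none"}`), &out)
	fmt.Println(err) // json: duplicate key 'alg' in object
}
```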
diff --git a/vendor/github.com/go-jose/go-jose/v4/json/decode.go b/vendor/github.com/go-jose/go-jose/v4/json/decode.go
new file mode 100644
index 00000000..50634dd8
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/json/decode.go
@@ -0,0 +1,1216 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Represents JSON data structure using native Go types: booleans, floats,
+// strings, arrays, and maps.
+
+package json
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "runtime"
+ "strconv"
+ "unicode"
+ "unicode/utf16"
+ "unicode/utf8"
+)
+
+// Unmarshal parses the JSON-encoded data and stores the result
+// in the value pointed to by v.
+//
+// Unmarshal uses the inverse of the encodings that
+// Marshal uses, allocating maps, slices, and pointers as necessary,
+// with the following additional rules:
+//
+// To unmarshal JSON into a pointer, Unmarshal first handles the case of
+// the JSON being the JSON literal null. In that case, Unmarshal sets
+// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into
+// the value pointed at by the pointer. If the pointer is nil, Unmarshal
+// allocates a new value for it to point to.
+//
+// To unmarshal JSON into a struct, Unmarshal matches incoming object
+// keys to the keys used by Marshal (either the struct field name or its tag),
+// using an exact, case-sensitive match (this fork does not fall back to case-insensitive matching).
+// Unmarshal will only set exported fields of the struct.
+//
+// To unmarshal JSON into an interface value,
+// Unmarshal stores one of these in the interface value:
+//
+// bool, for JSON booleans
+// float64, for JSON numbers
+// string, for JSON strings
+// []interface{}, for JSON arrays
+// map[string]interface{}, for JSON objects
+// nil for JSON null
+//
+// To unmarshal a JSON array into a slice, Unmarshal resets the slice length
+// to zero and then appends each element to the slice.
+// As a special case, to unmarshal an empty JSON array into a slice,
+// Unmarshal replaces the slice with a new empty slice.
+//
+// To unmarshal a JSON array into a Go array, Unmarshal decodes
+// JSON array elements into corresponding Go array elements.
+// If the Go array is smaller than the JSON array,
+// the additional JSON array elements are discarded.
+// If the JSON array is smaller than the Go array,
+// the additional Go array elements are set to zero values.
+//
+// To unmarshal a JSON object into a string-keyed map, Unmarshal first
+// establishes a map to use. If the map is nil, Unmarshal allocates a new map.
+// Otherwise Unmarshal reuses the existing map, keeping existing entries.
+// Unmarshal then stores key-value pairs from the JSON object into the map.
+//
+// If a JSON value is not appropriate for a given target type,
+// or if a JSON number overflows the target type, Unmarshal
+// skips that field and completes the unmarshaling as best it can.
+// If no more serious errors are encountered, Unmarshal returns
+// an UnmarshalTypeError describing the earliest such error.
+//
+// The JSON null value unmarshals into an interface, map, pointer, or slice
+// by setting that Go value to nil. Because null is often used in JSON to mean
+// "not present," unmarshaling a JSON null into any other Go type has no effect
+// on the value and produces no error.
+//
+// When unmarshaling quoted strings, invalid UTF-8 or
+// invalid UTF-16 surrogate pairs are not treated as an error.
+// Instead, they are replaced by the Unicode replacement
+// character U+FFFD.
+func Unmarshal(data []byte, v interface{}) error {
+ // Check for well-formedness.
+ // Avoids filling out half a data structure
+ // before discovering a JSON syntax error.
+ var d decodeState
+ err := checkValid(data, &d.scan)
+ if err != nil {
+ return err
+ }
+
+ d.init(data)
+ return d.unmarshal(v)
+}
+
+// Unmarshaler is the interface implemented by objects
+// that can unmarshal a JSON description of themselves.
+// The input can be assumed to be a valid encoding of
+// a JSON value. UnmarshalJSON must copy the JSON data
+// if it wishes to retain the data after returning.
+type Unmarshaler interface {
+ UnmarshalJSON([]byte) error
+}
+
+// An UnmarshalTypeError describes a JSON value that was
+// not appropriate for a value of a specific Go type.
+type UnmarshalTypeError struct {
+ Value string // description of JSON value - "bool", "array", "number -5"
+ Type reflect.Type // type of Go value it could not be assigned to
+ Offset int64 // error occurred after reading Offset bytes
+}
+
+func (e *UnmarshalTypeError) Error() string {
+ return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
+}
+
+// An UnmarshalFieldError describes a JSON object key that
+// led to an unexported (and therefore unwritable) struct field.
+// (No longer used; kept for compatibility.)
+type UnmarshalFieldError struct {
+ Key string
+ Type reflect.Type
+ Field reflect.StructField
+}
+
+func (e *UnmarshalFieldError) Error() string {
+ return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
+}
+
+// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
+// (The argument to Unmarshal must be a non-nil pointer.)
+type InvalidUnmarshalError struct {
+ Type reflect.Type
+}
+
+func (e *InvalidUnmarshalError) Error() string {
+ if e.Type == nil {
+ return "json: Unmarshal(nil)"
+ }
+
+ if e.Type.Kind() != reflect.Ptr {
+ return "json: Unmarshal(non-pointer " + e.Type.String() + ")"
+ }
+ return "json: Unmarshal(nil " + e.Type.String() + ")"
+}
+
+func (d *decodeState) unmarshal(v interface{}) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ err = r.(error)
+ }
+ }()
+
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ return &InvalidUnmarshalError{reflect.TypeOf(v)}
+ }
+
+ d.scan.reset()
+ // We decode rv not rv.Elem because the Unmarshaler interface
+ // test must be applied at the top level of the value.
+ d.value(rv)
+ return d.savedError
+}
+
+// A Number represents a JSON number literal.
+type Number string
+
+// String returns the literal text of the number.
+func (n Number) String() string { return string(n) }
+
+// Float64 returns the number as a float64.
+func (n Number) Float64() (float64, error) {
+ return strconv.ParseFloat(string(n), 64)
+}
+
+// Int64 returns the number as an int64.
+func (n Number) Int64() (int64, error) {
+ return strconv.ParseInt(string(n), 10, 64)
+}
+
+// isValidNumber reports whether s is a valid JSON number literal.
+func isValidNumber(s string) bool {
+ // This function implements the JSON numbers grammar.
+ // See https://tools.ietf.org/html/rfc7159#section-6
+ // and http://json.org/number.gif
+
+ if s == "" {
+ return false
+ }
+
+ // Optional -
+ if s[0] == '-' {
+ s = s[1:]
+ if s == "" {
+ return false
+ }
+ }
+
+ // Digits
+ switch {
+ default:
+ return false
+
+ case s[0] == '0':
+ s = s[1:]
+
+ case '1' <= s[0] && s[0] <= '9':
+ s = s[1:]
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ }
+ }
+
+ // . followed by 1 or more digits.
+ if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
+ s = s[2:]
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ }
+ }
+
+ // e or E followed by an optional - or + and
+ // 1 or more digits.
+ if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
+ s = s[1:]
+ if s[0] == '+' || s[0] == '-' {
+ s = s[1:]
+ if s == "" {
+ return false
+ }
+ }
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ }
+ }
+
+ // Make sure we are at the end.
+ return s == ""
+}
+
+type NumberUnmarshalType int
+
+const (
+ // unmarshal a JSON number into an interface{} as a float64
+ UnmarshalFloat NumberUnmarshalType = iota
+ // unmarshal a JSON number into an interface{} as a `json.Number`
+ UnmarshalJSONNumber
+	// unmarshal a JSON number into an interface{} as an int64
+	// if the value is an integer, otherwise as a float64
+ UnmarshalIntOrFloat
+)
+
+// decodeState represents the state while decoding a JSON value.
+type decodeState struct {
+ data []byte
+ off int // read offset in data
+ scan scanner
+ nextscan scanner // for calls to nextValue
+ savedError error
+ numberType NumberUnmarshalType
+}
+
+// errPhase is used for errors that should not happen unless
+// there is a bug in the JSON decoder or something is editing
+// the data slice while the decoder executes.
+var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?")
+
+func (d *decodeState) init(data []byte) *decodeState {
+ d.data = data
+ d.off = 0
+ d.savedError = nil
+ return d
+}
+
+// error aborts the decoding by panicking with err.
+func (d *decodeState) error(err error) {
+ panic(err)
+}
+
+// saveError saves the first err it is called with,
+// for reporting at the end of the unmarshal.
+func (d *decodeState) saveError(err error) {
+ if d.savedError == nil {
+ d.savedError = err
+ }
+}
+
+// next cuts off and returns the next full JSON value in d.data[d.off:].
+// The next value is known to be an object or array, not a literal.
+func (d *decodeState) next() []byte {
+ c := d.data[d.off]
+ item, rest, err := nextValue(d.data[d.off:], &d.nextscan)
+ if err != nil {
+ d.error(err)
+ }
+ d.off = len(d.data) - len(rest)
+
+ // Our scanner has seen the opening brace/bracket
+ // and thinks we're still in the middle of the object.
+ // invent a closing brace/bracket to get it out.
+ if c == '{' {
+ d.scan.step(&d.scan, '}')
+ } else {
+ d.scan.step(&d.scan, ']')
+ }
+
+ return item
+}
+
+// scanWhile processes bytes in d.data[d.off:] until it
+// receives a scan code not equal to op.
+// It updates d.off and returns the new scan code.
+func (d *decodeState) scanWhile(op int) int {
+ var newOp int
+ for {
+ if d.off >= len(d.data) {
+ newOp = d.scan.eof()
+ d.off = len(d.data) + 1 // mark processed EOF with len+1
+ } else {
+ c := d.data[d.off]
+ d.off++
+ newOp = d.scan.step(&d.scan, c)
+ }
+ if newOp != op {
+ break
+ }
+ }
+ return newOp
+}
+
+// value decodes a JSON value from d.data[d.off:] into the value.
+// it updates d.off to point past the decoded value.
+func (d *decodeState) value(v reflect.Value) {
+ if !v.IsValid() {
+ _, rest, err := nextValue(d.data[d.off:], &d.nextscan)
+ if err != nil {
+ d.error(err)
+ }
+ d.off = len(d.data) - len(rest)
+
+ // d.scan thinks we're still at the beginning of the item.
+ // Feed in an empty string - the shortest, simplest value -
+ // so that it knows we got to the end of the value.
+ if d.scan.redo {
+ // rewind.
+ d.scan.redo = false
+ d.scan.step = stateBeginValue
+ }
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '"')
+
+ n := len(d.scan.parseState)
+ if n > 0 && d.scan.parseState[n-1] == parseObjectKey {
+ // d.scan thinks we just read an object key; finish the object
+ d.scan.step(&d.scan, ':')
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '}')
+ }
+
+ return
+ }
+
+ switch op := d.scanWhile(scanSkipSpace); op {
+ default:
+ d.error(errPhase)
+
+ case scanBeginArray:
+ d.array(v)
+
+ case scanBeginObject:
+ d.object(v)
+
+ case scanBeginLiteral:
+ d.literal(v)
+ }
+}
+
+type unquotedValue struct{}
+
+// valueQuoted is like value but decodes a
+// quoted string literal or literal null into an interface value.
+// If it finds anything other than a quoted string literal or null,
+// valueQuoted returns unquotedValue{}.
+func (d *decodeState) valueQuoted() interface{} {
+ switch op := d.scanWhile(scanSkipSpace); op {
+ default:
+ d.error(errPhase)
+
+ case scanBeginArray:
+ d.array(reflect.Value{})
+
+ case scanBeginObject:
+ d.object(reflect.Value{})
+
+ case scanBeginLiteral:
+ switch v := d.literalInterface().(type) {
+ case nil, string:
+ return v
+ }
+ }
+ return unquotedValue{}
+}
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// if it encounters an Unmarshaler, indirect stops and returns that.
+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
+func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+ // If v is a named type and is addressable,
+ // start with its address, so that if the type has pointer methods,
+ // we find them.
+ if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+ v = v.Addr()
+ }
+ for {
+ // Load value from interface, but only if the result will be
+ // usefully addressable.
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ e := v.Elem()
+ if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+ v = e
+ continue
+ }
+ }
+
+ if v.Kind() != reflect.Ptr {
+ break
+ }
+
+ if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+ break
+ }
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ if v.Type().NumMethod() > 0 {
+ if u, ok := v.Interface().(Unmarshaler); ok {
+ return u, nil, reflect.Value{}
+ }
+ if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+ return nil, u, reflect.Value{}
+ }
+ }
+ v = v.Elem()
+ }
+ return nil, nil, v
+}
+
+// array consumes an array from d.data[d.off-1:], decoding into the value v.
+// the first byte of the array ('[') has been read already.
+func (d *decodeState) array(v reflect.Value) {
+ // Check for unmarshaler.
+ u, ut, pv := d.indirect(v, false)
+ if u != nil {
+ d.off--
+ err := u.UnmarshalJSON(d.next())
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)})
+ d.off--
+ d.next()
+ return
+ }
+
+ v = pv
+
+ // Check type of target.
+ switch v.Kind() {
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ // Decoding into nil interface? Switch to non-reflect code.
+ v.Set(reflect.ValueOf(d.arrayInterface()))
+ return
+ }
+ // Otherwise it's invalid.
+ fallthrough
+ default:
+ d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)})
+ d.off--
+ d.next()
+ return
+ case reflect.Array:
+ case reflect.Slice:
+ break
+ }
+
+ i := 0
+ for {
+ // Look ahead for ] - can only happen on first iteration.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+
+ // Back up so d.value can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ // Get element of array, growing if necessary.
+ if v.Kind() == reflect.Slice {
+ // Grow slice if necessary
+ if i >= v.Cap() {
+ newcap := v.Cap() + v.Cap()/2
+ if newcap < 4 {
+ newcap = 4
+ }
+ newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
+ reflect.Copy(newv, v)
+ v.Set(newv)
+ }
+ if i >= v.Len() {
+ v.SetLen(i + 1)
+ }
+ }
+
+ if i < v.Len() {
+ // Decode into element.
+ d.value(v.Index(i))
+ } else {
+ // Ran out of fixed array: skip.
+ d.value(reflect.Value{})
+ }
+ i++
+
+ // Next token must be , or ].
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+ if op != scanArrayValue {
+ d.error(errPhase)
+ }
+ }
+
+ if i < v.Len() {
+ if v.Kind() == reflect.Array {
+ // Array. Zero the rest.
+ z := reflect.Zero(v.Type().Elem())
+ for ; i < v.Len(); i++ {
+ v.Index(i).Set(z)
+ }
+ } else {
+ v.SetLen(i)
+ }
+ }
+ if i == 0 && v.Kind() == reflect.Slice {
+ v.Set(reflect.MakeSlice(v.Type(), 0, 0))
+ }
+}
+
+var nullLiteral = []byte("null")
+
+// object consumes an object from d.data[d.off-1:], decoding into the value v.
+// the first byte ('{') of the object has been read already.
+func (d *decodeState) object(v reflect.Value) {
+ // Check for unmarshaler.
+ u, ut, pv := d.indirect(v, false)
+ if u != nil {
+ d.off--
+ err := u.UnmarshalJSON(d.next())
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+ v = pv
+
+ // Decoding into nil interface? Switch to non-reflect code.
+ if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(d.objectInterface()))
+ return
+ }
+
+ // Check type of target: struct or map[string]T
+ switch v.Kind() {
+ case reflect.Map:
+ // map must have string kind
+ t := v.Type()
+ if t.Key().Kind() != reflect.String {
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+ if v.IsNil() {
+ v.Set(reflect.MakeMap(t))
+ }
+ case reflect.Struct:
+
+ default:
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+
+ var mapElem reflect.Value
+ keys := map[string]bool{}
+
+ for {
+ // Read opening " of string key or closing }.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ // closing } - can only happen on first iteration.
+ break
+ }
+ if op != scanBeginLiteral {
+ d.error(errPhase)
+ }
+
+ // Read key.
+ start := d.off - 1
+ op = d.scanWhile(scanContinue)
+ item := d.data[start : d.off-1]
+ key, ok := unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+
+ // Check for duplicate keys.
+ _, ok = keys[key]
+ if !ok {
+ keys[key] = true
+ } else {
+ d.error(fmt.Errorf("json: duplicate key '%s' in object", key))
+ }
+
+ // Figure out field corresponding to key.
+ var subv reflect.Value
+ destring := false // whether the value is wrapped in a string to be decoded first
+
+ if v.Kind() == reflect.Map {
+ elemType := v.Type().Elem()
+ if !mapElem.IsValid() {
+ mapElem = reflect.New(elemType).Elem()
+ } else {
+ mapElem.Set(reflect.Zero(elemType))
+ }
+ subv = mapElem
+ } else {
+ var f *field
+ fields := cachedTypeFields(v.Type())
+ for i := range fields {
+ ff := &fields[i]
+ if bytes.Equal(ff.nameBytes, []byte(key)) {
+ f = ff
+ break
+ }
+ }
+ if f != nil {
+ subv = v
+ destring = f.quoted
+ for _, i := range f.index {
+ if subv.Kind() == reflect.Ptr {
+ if subv.IsNil() {
+ subv.Set(reflect.New(subv.Type().Elem()))
+ }
+ subv = subv.Elem()
+ }
+ subv = subv.Field(i)
+ }
+ }
+ }
+
+ // Read : before value.
+ if op == scanSkipSpace {
+ op = d.scanWhile(scanSkipSpace)
+ }
+ if op != scanObjectKey {
+ d.error(errPhase)
+ }
+
+ // Read value.
+ if destring {
+ switch qv := d.valueQuoted().(type) {
+ case nil:
+ d.literalStore(nullLiteral, subv, false)
+ case string:
+ d.literalStore([]byte(qv), subv, true)
+ default:
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type()))
+ }
+ } else {
+ d.value(subv)
+ }
+
+ // Write value back to map;
+ // if using struct, subv points into struct already.
+ if v.Kind() == reflect.Map {
+ kv := reflect.ValueOf(key).Convert(v.Type().Key())
+ v.SetMapIndex(kv, subv)
+ }
+
+ // Next token must be , or }.
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ break
+ }
+ if op != scanObjectValue {
+ d.error(errPhase)
+ }
+ }
+}
+
+// literal consumes a literal from d.data[d.off-1:], decoding into the value v.
+// The first byte of the literal has been read already
+// (that's how the caller knows it's a literal).
+func (d *decodeState) literal(v reflect.Value) {
+ // All bytes inside literal return scanContinue op code.
+ start := d.off - 1
+ op := d.scanWhile(scanContinue)
+
+ // Scan read one byte too far; back up.
+ d.off--
+ d.scan.undo(op)
+
+ d.literalStore(d.data[start:d.off], v, false)
+}
+
+// convertNumber converts the number literal s to a float64, int64 or a Number
+// depending on d.numberType.
+func (d *decodeState) convertNumber(s string) (interface{}, error) {
+ switch d.numberType {
+
+ case UnmarshalJSONNumber:
+ return Number(s), nil
+ case UnmarshalIntOrFloat:
+ v, err := strconv.ParseInt(s, 10, 64)
+ if err == nil {
+ return v, nil
+ }
+
+		// fall back to float parsing, which also handles integers written in scientific notation
+ f, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)}
+ }
+
+		// if the parsed value has no fractional part, return it as an int64
+ if fi, fd := math.Modf(f); fd == 0.0 {
+ return int64(fi), nil
+ }
+ return f, nil
+ default:
+ f, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)}
+ }
+ return f, nil
+ }
+
+}
+
+var numberType = reflect.TypeOf(Number(""))
+
+// literalStore decodes a literal stored in item into v.
+//
+// fromQuoted indicates whether this literal came from unwrapping a
+// string from the ",string" struct tag option. This is used only to
+// produce more helpful error messages.
+func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) {
+ // Check for unmarshaler.
+ if len(item) == 0 {
+		// Empty string given
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ return
+ }
+ wantptr := item[0] == 'n' // null
+ u, ut, pv := d.indirect(v, wantptr)
+ if u != nil {
+ err := u.UnmarshalJSON(item)
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ if item[0] != '"' {
+ if fromQuoted {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ }
+ return
+ }
+ s, ok := unquoteBytes(item)
+ if !ok {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ err := ut.UnmarshalText(s)
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+
+ v = pv
+
+ switch c := item[0]; c {
+ case 'n': // null
+ switch v.Kind() {
+ case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+ v.Set(reflect.Zero(v.Type()))
+ // otherwise, ignore null for primitives/string
+ }
+ case 't', 'f': // true, false
+ value := c == 't'
+ switch v.Kind() {
+ default:
+ if fromQuoted {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
+ }
+ case reflect.Bool:
+ v.SetBool(value)
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(value))
+ } else {
+ d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
+ }
+ }
+
+ case '"': // string
+ s, ok := unquoteBytes(item)
+ if !ok {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ switch v.Kind() {
+ default:
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ case reflect.Slice:
+ if v.Type().Elem().Kind() != reflect.Uint8 {
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ break
+ }
+ b := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
+ n, err := base64.StdEncoding.Decode(b, s)
+ if err != nil {
+ d.saveError(err)
+ break
+ }
+ v.SetBytes(b[:n])
+ case reflect.String:
+ v.SetString(string(s))
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(string(s)))
+ } else {
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ }
+ }
+
+ default: // number
+ if c != '-' && (c < '0' || c > '9') {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ s := string(item)
+ switch v.Kind() {
+ default:
+ if v.Kind() == reflect.String && v.Type() == numberType {
+ v.SetString(s)
+ if !isValidNumber(s) {
+ d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item))
+ }
+ break
+ }
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
+ }
+ case reflect.Interface:
+ n, err := d.convertNumber(s)
+ if err != nil {
+ d.saveError(err)
+ break
+ }
+ if v.NumMethod() != 0 {
+ d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
+ break
+ }
+ v.Set(reflect.ValueOf(n))
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ n, err := strconv.ParseInt(s, 10, 64)
+ if err != nil || v.OverflowInt(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+ break
+ }
+ v.SetInt(n)
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil || v.OverflowUint(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+ break
+ }
+ v.SetUint(n)
+
+ case reflect.Float32, reflect.Float64:
+ n, err := strconv.ParseFloat(s, v.Type().Bits())
+ if err != nil || v.OverflowFloat(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+ break
+ }
+ v.SetFloat(n)
+ }
+ }
+}
+
+// The xxxInterface routines build up a value to be stored
+// in an empty interface. They are not strictly necessary,
+// but they avoid the weight of reflection in this common case.
+
+// valueInterface is like value but returns interface{}
+func (d *decodeState) valueInterface() interface{} {
+ switch d.scanWhile(scanSkipSpace) {
+ default:
+ d.error(errPhase)
+ panic("unreachable")
+ case scanBeginArray:
+ return d.arrayInterface()
+ case scanBeginObject:
+ return d.objectInterface()
+ case scanBeginLiteral:
+ return d.literalInterface()
+ }
+}
+
+// arrayInterface is like array but returns []interface{}.
+func (d *decodeState) arrayInterface() []interface{} {
+ var v = make([]interface{}, 0)
+ for {
+ // Look ahead for ] - can only happen on first iteration.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+
+ // Back up so d.value can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ v = append(v, d.valueInterface())
+
+ // Next token must be , or ].
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+ if op != scanArrayValue {
+ d.error(errPhase)
+ }
+ }
+ return v
+}
+
+// objectInterface is like object but returns map[string]interface{}.
+func (d *decodeState) objectInterface() map[string]interface{} {
+ m := make(map[string]interface{})
+ keys := map[string]bool{}
+
+ for {
+ // Read opening " of string key or closing }.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ // closing } - can only happen on first iteration.
+ break
+ }
+ if op != scanBeginLiteral {
+ d.error(errPhase)
+ }
+
+ // Read string key.
+ start := d.off - 1
+ op = d.scanWhile(scanContinue)
+ item := d.data[start : d.off-1]
+ key, ok := unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+
+ // Check for duplicate keys.
+ _, ok = keys[key]
+ if !ok {
+ keys[key] = true
+ } else {
+ d.error(fmt.Errorf("json: duplicate key '%s' in object", key))
+ }
+
+ // Read : before value.
+ if op == scanSkipSpace {
+ op = d.scanWhile(scanSkipSpace)
+ }
+ if op != scanObjectKey {
+ d.error(errPhase)
+ }
+
+ // Read value.
+ m[key] = d.valueInterface()
+
+ // Next token must be , or }.
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ break
+ }
+ if op != scanObjectValue {
+ d.error(errPhase)
+ }
+ }
+ return m
+}
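+
+// Editorial note, not upstream commentary: unlike the standard library
+// decoder, which keeps the last value for a repeated key, objectInterface
+// rejects duplicates outright. A minimal sketch, assuming the package's
+// Unmarshal entry point:
+//
+//    var v interface{}
+//    err := Unmarshal([]byte(`{"a":1,"a":2}`), &v)
+//    // err != nil: json: duplicate key 'a' in object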
+
+// literalInterface is like literal but returns an interface value.
+func (d *decodeState) literalInterface() interface{} {
+ // All bytes inside literal return scanContinue op code.
+ start := d.off - 1
+ op := d.scanWhile(scanContinue)
+
+ // Scan read one byte too far; back up.
+ d.off--
+ d.scan.undo(op)
+ item := d.data[start:d.off]
+
+ switch c := item[0]; c {
+ case 'n': // null
+ return nil
+
+ case 't', 'f': // true, false
+ return c == 't'
+
+ case '"': // string
+ s, ok := unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+ return s
+
+ default: // number
+ if c != '-' && (c < '0' || c > '9') {
+ d.error(errPhase)
+ }
+ n, err := d.convertNumber(string(item))
+ if err != nil {
+ d.saveError(err)
+ }
+ return n
+ }
+}
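+
+// Editorial sketch, not upstream commentary: when decoding into an empty
+// interface, literals map onto Go values as shown below. float64 is assumed
+// to be the default number type (as in the standard library) unless a
+// Decoder has UseNumber set.
+//
+//    var v interface{}
+//    _ = Unmarshal([]byte("null"), &v)  // v == nil
+//    _ = Unmarshal([]byte("true"), &v)  // v == true
+//    _ = Unmarshal([]byte(`"hi"`), &v)  // v == "hi"
+//    _ = Unmarshal([]byte("1.5"), &v)   // v == float64(1.5)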
+
+// getu4 decodes the \uXXXX escape at the beginning of s, returning the hex
+// value, or -1 if s does not start with a well-formed \uXXXX escape.
+func getu4(s []byte) rune {
+ if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
+ return -1
+ }
+ r, err := strconv.ParseUint(string(s[2:6]), 16, 64)
+ if err != nil {
+ return -1
+ }
+ return rune(r)
+}
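+
+// Editorial sketch, not upstream commentary: getu4 inspects only the first six
+// bytes and does not advance the caller's position, e.g.
+//
+//    getu4([]byte(`\u00e9`))  // 0x00e9 ('é')
+//    getu4([]byte(`\u00`))    // -1 (too short)
+//    getu4([]byte(`x0041`))   // -1 (no \u prefix)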
+
+// unquote converts a quoted JSON string literal s into an actual string t.
+// The rules differ from Go's, so strconv.Unquote cannot be used.
+func unquote(s []byte) (t string, ok bool) {
+ s, ok = unquoteBytes(s)
+ t = string(s)
+ return
+}
+
+func unquoteBytes(s []byte) (t []byte, ok bool) {
+ if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
+ return
+ }
+ s = s[1 : len(s)-1]
+
+ // Check for unusual characters. If there are none,
+ // then no unquoting is needed, so return a slice of the
+ // original bytes.
+ r := 0
+ for r < len(s) {
+ c := s[r]
+ if c == '\\' || c == '"' || c < ' ' {
+ break
+ }
+ if c < utf8.RuneSelf {
+ r++
+ continue
+ }
+ rr, size := utf8.DecodeRune(s[r:])
+ if rr == utf8.RuneError && size == 1 {
+ break
+ }
+ r += size
+ }
+ if r == len(s) {
+ return s, true
+ }
+
+ b := make([]byte, len(s)+2*utf8.UTFMax)
+ w := copy(b, s[0:r])
+ for r < len(s) {
+ // Out of room? Can only happen if s is full of
+ // malformed UTF-8 and we're replacing each
+ // byte with RuneError.
+ if w >= len(b)-2*utf8.UTFMax {
+ nb := make([]byte, (len(b)+utf8.UTFMax)*2)
+ copy(nb, b[0:w])
+ b = nb
+ }
+ switch c := s[r]; {
+ case c == '\\':
+ r++
+ if r >= len(s) {
+ return
+ }
+ switch s[r] {
+ default:
+ return
+ case '"', '\\', '/', '\'':
+ b[w] = s[r]
+ r++
+ w++
+ case 'b':
+ b[w] = '\b'
+ r++
+ w++
+ case 'f':
+ b[w] = '\f'
+ r++
+ w++
+ case 'n':
+ b[w] = '\n'
+ r++
+ w++
+ case 'r':
+ b[w] = '\r'
+ r++
+ w++
+ case 't':
+ b[w] = '\t'
+ r++
+ w++
+ case 'u':
+ r--
+ rr := getu4(s[r:])
+ if rr < 0 {
+ return
+ }
+ r += 6
+ if utf16.IsSurrogate(rr) {
+ rr1 := getu4(s[r:])
+ if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
+ // A valid pair; consume.
+ r += 6
+ w += utf8.EncodeRune(b[w:], dec)
+ break
+ }
+ // Invalid surrogate; fall back to replacement rune.
+ rr = unicode.ReplacementChar
+ }
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+
+ // Quotes and control characters are invalid.
+ case c == '"', c < ' ':
+ return
+
+ // ASCII
+ case c < utf8.RuneSelf:
+ b[w] = c
+ r++
+ w++
+
+ // Coerce to well-formed UTF-8.
+ default:
+ rr, size := utf8.DecodeRune(s[r:])
+ r += size
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+ }
+ return b[0:w], true
+}
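+
+// Editorial sketch, not upstream commentary: unquoteBytes returns a slice of
+// the original bytes (minus the surrounding quotes) on the fast path, and
+// rewrites escapes, including surrogate pairs, on the slow path:
+//
+//    unquote([]byte(`"plain"`))         // "plain", true
+//    unquote([]byte(`"a\tb"`))          // "a<TAB>b", true
+//    unquote([]byte(`"\ud83d\ude00"`))  // "😀" (U+1F600), true
+//    unquote([]byte(`plain`))           // "", false (missing quotes)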
diff --git a/vendor/github.com/go-jose/go-jose/v4/json/encode.go b/vendor/github.com/go-jose/go-jose/v4/json/encode.go
new file mode 100644
index 00000000..98de68ce
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/json/encode.go
@@ -0,0 +1,1197 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package json implements encoding and decoding of JSON objects as defined in
+// RFC 4627. The mapping between JSON objects and Go values is described
+// in the documentation for the Marshal and Unmarshal functions.
+//
+// See "JSON and Go" for an introduction to this package:
+// https://golang.org/doc/articles/json_and_go.html
+package json
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "math"
+ "reflect"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Marshal returns the JSON encoding of v.
+//
+// Marshal traverses the value v recursively.
+// If an encountered value implements the Marshaler interface
+// and is not a nil pointer, Marshal calls its MarshalJSON method
+// to produce JSON. If no MarshalJSON method is present but the
+// value implements encoding.TextMarshaler instead, Marshal calls
+// its MarshalText method.
+// The nil pointer exception is not strictly necessary
+// but mimics a similar, necessary exception in the behavior of
+// UnmarshalJSON.
+//
+// Otherwise, Marshal uses the following type-dependent default encodings:
+//
+// Boolean values encode as JSON booleans.
+//
+// Floating point, integer, and Number values encode as JSON numbers.
+//
+// String values encode as JSON strings coerced to valid UTF-8,
+// replacing invalid bytes with the Unicode replacement rune.
+// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e"
+// to keep some browsers from misinterpreting JSON output as HTML.
+// Ampersand "&" is also escaped to "\u0026" for the same reason.
+//
+// Array and slice values encode as JSON arrays, except that
+// []byte encodes as a base64-encoded string, and a nil slice
+// encodes as the null JSON value.
+//
+// Struct values encode as JSON objects. Each exported struct field
+// becomes a member of the object unless
+// - the field's tag is "-", or
+// - the field is empty and its tag specifies the "omitempty" option.
+//
+// The empty values are false, 0, any
+// nil pointer or interface value, and any array, slice, map, or string of
+// length zero. The object's default key string is the struct field name
+// but can be specified in the struct field's tag value. The "json" key in
+// the struct field's tag value is the key name, followed by an optional comma
+// and options. Examples:
+//
+// // Field is ignored by this package.
+// Field int `json:"-"`
+//
+// // Field appears in JSON as key "myName".
+// Field int `json:"myName"`
+//
+// // Field appears in JSON as key "myName" and
+// // the field is omitted from the object if its value is empty,
+// // as defined above.
+// Field int `json:"myName,omitempty"`
+//
+// // Field appears in JSON as key "Field" (the default), but
+// // the field is skipped if empty.
+// // Note the leading comma.
+// Field int `json:",omitempty"`
+//
+// The "string" option signals that a field is stored as JSON inside a
+// JSON-encoded string. It applies only to fields of string, floating point,
+// integer, or boolean types. This extra level of encoding is sometimes used
+// when communicating with JavaScript programs:
+//
+// Int64String int64 `json:",string"`
+//
+// The key name will be used if it's a non-empty string consisting of
+// only Unicode letters, digits, dollar signs, percent signs, hyphens,
+// underscores and slashes.
+//
+// Anonymous struct fields are usually marshaled as if their inner exported fields
+// were fields in the outer struct, subject to the usual Go visibility rules amended
+// as described in the next paragraph.
+// An anonymous struct field with a name given in its JSON tag is treated as
+// having that name, rather than being anonymous.
+// An anonymous struct field of interface type is treated the same as having
+// that type as its name, rather than being anonymous.
+//
+// The Go visibility rules for struct fields are amended for JSON when
+// deciding which field to marshal or unmarshal. If there are
+// multiple fields at the same level, and that level is the least
+// nested (and would therefore be the nesting level selected by the
+// usual Go rules), the following extra rules apply:
+//
+// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered,
+// even if there are multiple untagged fields that would otherwise conflict.
+// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
+// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
+//
+// Handling of anonymous struct fields is new in Go 1.1.
+// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of
+// an anonymous struct field in both current and earlier versions, give the field
+// a JSON tag of "-".
+//
+// Map values encode as JSON objects.
+// The map's key type must be string; the map keys are used as JSON object
+// keys, subject to the UTF-8 coercion described for string values above.
+//
+// Pointer values encode as the value pointed to.
+// A nil pointer encodes as the null JSON value.
+//
+// Interface values encode as the value contained in the interface.
+// A nil interface value encodes as the null JSON value.
+//
+// Channel, complex, and function values cannot be encoded in JSON.
+// Attempting to encode such a value causes Marshal to return
+// an UnsupportedTypeError.
+//
+// JSON cannot represent cyclic data structures and Marshal does not
+// handle them. Passing cyclic structures to Marshal will result in
+// an infinite recursion.
+func Marshal(v interface{}) ([]byte, error) {
+ e := &encodeState{}
+ err := e.marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ return e.Bytes(), nil
+}
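+
+// Editorial sketch, not upstream documentation: the struct tag rules described
+// above in practice. The Account type is purely illustrative:
+//
+//    type Account struct {
+//        Name    string `json:"name"`
+//        Email   string `json:"email,omitempty"`
+//        Secret  string `json:"-"`
+//        Balance int64  `json:"balance,string"`
+//    }
+//
+//    b, _ := Marshal(Account{Name: "ada", Balance: 42})
+//    // string(b) == `{"name":"ada","balance":"42"}`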
+
+// MarshalIndent is like Marshal but applies Indent to format the output.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+ b, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ var buf bytes.Buffer
+ err = Indent(&buf, b, prefix, indent)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
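+
+// Editorial sketch, not upstream documentation: MarshalIndent is equivalent to
+// calling Marshal and then Indent, so
+//
+//    b, _ := MarshalIndent(map[string]int{"n": 1}, "", "  ")
+//
+// should produce:
+//
+//    {
+//      "n": 1
+//    }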
+
+// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
+// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
+// so that the JSON will be safe to embed inside HTML