vendor: update buildkit to v0.19.0-rc1
Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go (generated, vendored, 9 changed lines)
@@ -8,7 +8,6 @@ import (
 	"time"
 
 	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
 	"go.opentelemetry.io/otel/sdk/metric/metricdata"
 )
 
@@ -38,8 +37,8 @@ type Builder[N int64 | float64] struct {
 	// create new exemplar reservoirs for a new seen attribute set.
 	//
 	// If this is not provided a default factory function that returns an
-	// exemplar.Drop reservoir will be used.
-	ReservoirFunc func() exemplar.FilteredReservoir[N]
+	// DropReservoir reservoir will be used.
+	ReservoirFunc func() FilteredExemplarReservoir[N]
 	// AggregationLimit is the cardinality limit of measurement attributes. Any
 	// measurement for new attributes once the limit has been reached will be
 	// aggregated into a single aggregate for the "otel.metric.overflow"
@@ -50,12 +49,12 @@ type Builder[N int64 | float64] struct {
 	AggregationLimit int
 }
 
-func (b Builder[N]) resFunc() func() exemplar.FilteredReservoir[N] {
+func (b Builder[N]) resFunc() func() FilteredExemplarReservoir[N] {
 	if b.ReservoirFunc != nil {
 		return b.ReservoirFunc
 	}
 
-	return exemplar.Drop
+	return DropReservoir
 }
 
 type fltrMeasure[N int64 | float64] func(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue)
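The change above is the core of this vendor bump: the reservoir abstraction moves out of the removed internal/exemplar package, and the default factory becomes DropReservoir in the aggregate package itself. A minimal standalone sketch of that optional-factory-with-fallback pattern (hypothetical names, not the vendored code):

package main

import "fmt"

// reservoir stands in for FilteredExemplarReservoir; dropRes mirrors the
// vendored no-op reservoir.
type reservoir interface{ Offer(v float64) }

type dropRes struct{}

// Offer drops every measurement it is offered.
func (dropRes) Offer(float64) {}

// builder mirrors Builder's optional ReservoirFunc field.
type builder struct {
	ReservoirFunc func() reservoir
}

// resFunc falls back to a drop factory when none is configured, just as
// Builder.resFunc now falls back to DropReservoir.
func (b builder) resFunc() func() reservoir {
	if b.ReservoirFunc != nil {
		return b.ReservoirFunc
	}
	return func() reservoir { return dropRes{} }
}

func main() {
	r := builder{}.resFunc()()
	r.Offer(1.5)          // silently dropped
	fmt.Printf("%T\n", r) // main.dropRes
}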
vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/drop.go (generated, vendored, new file, 24 lines)
@@ -0,0 +1,24 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/metric/exemplar"
+)
+
+// DropReservoir returns a [FilteredReservoir] that drops all measurements it is offered.
+func DropReservoir[N int64 | float64]() FilteredExemplarReservoir[N] { return &dropRes[N]{} }
+
+type dropRes[N int64 | float64] struct{}
+
+// Offer does nothing, all measurements offered will be dropped.
+func (r *dropRes[N]) Offer(context.Context, N, []attribute.KeyValue) {}
+
+// Collect resets dest. No exemplars will ever be returned.
+func (r *dropRes[N]) Collect(dest *[]exemplar.Exemplar) {
+	*dest = (*dest)[:0]
+}
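Note that dropRes.Collect truncates the caller's slice in place rather than allocating a new one. A small sketch of that reset idiom (hypothetical helper, not the vendored code):

package main

import "fmt"

// resetExemplars mirrors dropRes.Collect: truncate the caller's slice so no
// stale entries remain, while keeping its backing array for reuse.
func resetExemplars(dest *[]int) { *dest = (*dest)[:0] }

func main() {
	exemplars := []int{1, 2, 3}
	resetExemplars(&exemplars)
	fmt.Println(len(exemplars), cap(exemplars)) // 0 3
}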
vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go (generated, vendored, 2 changed lines)
@@ -6,7 +6,7 @@ package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
 import (
 	"sync"
 
-	"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+	"go.opentelemetry.io/otel/sdk/metric/exemplar"
 	"go.opentelemetry.io/otel/sdk/metric/metricdata"
 )
vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go (generated, vendored, 85 changed lines)
@@ -12,7 +12,6 @@ import (
 
 	"go.opentelemetry.io/otel"
 	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
 	"go.opentelemetry.io/otel/sdk/metric/metricdata"
 )
 
@@ -31,7 +30,7 @@ const (
 // expoHistogramDataPoint is a single data point in an exponential histogram.
 type expoHistogramDataPoint[N int64 | float64] struct {
 	attrs attribute.Set
-	res   exemplar.FilteredReservoir[N]
+	res   FilteredExemplarReservoir[N]
 
 	count uint64
 	min   N
@@ -42,14 +41,14 @@ type expoHistogramDataPoint[N int64 | float64] struct {
 	noMinMax bool
 	noSum    bool
 
-	scale int
+	scale int32
 
 	posBuckets expoBuckets
 	negBuckets expoBuckets
 	zeroCount  uint64
 }
 
-func newExpoHistogramDataPoint[N int64 | float64](attrs attribute.Set, maxSize, maxScale int, noMinMax, noSum bool) *expoHistogramDataPoint[N] {
+func newExpoHistogramDataPoint[N int64 | float64](attrs attribute.Set, maxSize int, maxScale int32, noMinMax, noSum bool) *expoHistogramDataPoint[N] {
 	f := math.MaxFloat64
 	max := N(f) // if N is int64, max will overflow to -9223372036854775808
 	min := N(-f)
@@ -119,11 +118,13 @@ func (p *expoHistogramDataPoint[N]) record(v N) {
 }
 
 // getBin returns the bin v should be recorded into.
-func (p *expoHistogramDataPoint[N]) getBin(v float64) int {
-	frac, exp := math.Frexp(v)
+func (p *expoHistogramDataPoint[N]) getBin(v float64) int32 {
+	frac, expInt := math.Frexp(v)
+	// 11-bit exponential.
+	exp := int32(expInt) // nolint: gosec
 	if p.scale <= 0 {
 		// Because of the choice of fraction is always 1 power of two higher than we want.
-		correction := 1
+		var correction int32 = 1
 		if frac == .5 {
 			// If v is an exact power of two the frac will be .5 and the exp
 			// will be one higher than we want.
@@ -131,7 +132,7 @@ func (p *expoHistogramDataPoint[N]) getBin(v float64) int {
 		}
 		return (exp - correction) >> (-p.scale)
 	}
-	return exp<<p.scale + int(math.Log(frac)*scaleFactors[p.scale]) - 1
+	return exp<<p.scale + int32(math.Log(frac)*scaleFactors[p.scale]) - 1
 }
 
 // scaleFactors are constants used in calculating the logarithm index. They are
@@ -162,20 +163,20 @@ var scaleFactors = [21]float64{
 
 // scaleChange returns the magnitude of the scale change needed to fit bin in
 // the bucket. If no scale change is needed 0 is returned.
-func (p *expoHistogramDataPoint[N]) scaleChange(bin, startBin, length int) int {
+func (p *expoHistogramDataPoint[N]) scaleChange(bin, startBin int32, length int) int32 {
 	if length == 0 {
 		// No need to rescale if there are no buckets.
 		return 0
 	}
 
-	low := startBin
-	high := bin
+	low := int(startBin)
+	high := int(bin)
 	if startBin >= bin {
-		low = bin
-		high = startBin + length - 1
+		low = int(bin)
+		high = int(startBin) + length - 1
 	}
 
-	count := 0
+	var count int32
 	for high-low >= p.maxSize {
 		low = low >> 1
 		high = high >> 1
@@ -189,39 +190,39 @@ func (p *expoHistogramDataPoint[N]) scaleChange(bin, startBin, length int) int {
 
 // expoBuckets is a set of buckets in an exponential histogram.
 type expoBuckets struct {
-	startBin int
+	startBin int32
 	counts   []uint64
 }
 
 // record increments the count for the given bin, and expands the buckets if needed.
 // Size changes must be done before calling this function.
-func (b *expoBuckets) record(bin int) {
+func (b *expoBuckets) record(bin int32) {
 	if len(b.counts) == 0 {
 		b.counts = []uint64{1}
 		b.startBin = bin
 		return
 	}
 
-	endBin := b.startBin + len(b.counts) - 1
+	endBin := int(b.startBin) + len(b.counts) - 1
 
 	// if the new bin is inside the current range
-	if bin >= b.startBin && bin <= endBin {
+	if bin >= b.startBin && int(bin) <= endBin {
 		b.counts[bin-b.startBin]++
 		return
 	}
 	// if the new bin is before the current start add spaces to the counts
 	if bin < b.startBin {
 		origLen := len(b.counts)
-		newLength := endBin - bin + 1
+		newLength := endBin - int(bin) + 1
 		shift := b.startBin - bin
 
 		if newLength > cap(b.counts) {
 			b.counts = append(b.counts, make([]uint64, newLength-len(b.counts))...)
 		}
 
-		copy(b.counts[shift:origLen+shift], b.counts[:])
+		copy(b.counts[shift:origLen+int(shift)], b.counts[:])
 		b.counts = b.counts[:newLength]
-		for i := 1; i < shift; i++ {
+		for i := 1; i < int(shift); i++ {
 			b.counts[i] = 0
 		}
 		b.startBin = bin
@@ -229,17 +230,17 @@ func (b *expoBuckets) record(bin int) {
 		return
 	}
 	// if the new is after the end add spaces to the end
-	if bin > endBin {
-		if bin-b.startBin < cap(b.counts) {
+	if int(bin) > endBin {
+		if int(bin-b.startBin) < cap(b.counts) {
 			b.counts = b.counts[:bin-b.startBin+1]
-			for i := endBin + 1 - b.startBin; i < len(b.counts); i++ {
+			for i := endBin + 1 - int(b.startBin); i < len(b.counts); i++ {
 				b.counts[i] = 0
 			}
 			b.counts[bin-b.startBin] = 1
 			return
 		}
 
-		end := make([]uint64, bin-b.startBin-len(b.counts)+1)
+		end := make([]uint64, int(bin-b.startBin)-len(b.counts)+1)
 		b.counts = append(b.counts, end...)
 		b.counts[bin-b.startBin] = 1
 	}
@@ -247,7 +248,7 @@ func (b *expoBuckets) record(bin int) {
 
 // downscale shrinks a bucket by a factor of 2*s. It will sum counts into the
 // correct lower resolution bucket.
-func (b *expoBuckets) downscale(delta int) {
+func (b *expoBuckets) downscale(delta int32) {
 	// Example
 	// delta = 2
 	// Original offset: -6
@@ -262,19 +263,19 @@ func (b *expoBuckets) downscale(delta int) {
 		return
 	}
 
-	steps := 1 << delta
+	steps := int32(1) << delta
 	offset := b.startBin % steps
 	offset = (offset + steps) % steps // to make offset positive
 	for i := 1; i < len(b.counts); i++ {
-		idx := i + offset
-		if idx%steps == 0 {
-			b.counts[idx/steps] = b.counts[i]
+		idx := i + int(offset)
+		if idx%int(steps) == 0 {
+			b.counts[idx/int(steps)] = b.counts[i]
 			continue
 		}
-		b.counts[idx/steps] += b.counts[i]
+		b.counts[idx/int(steps)] += b.counts[i]
 	}
 
-	lastIdx := (len(b.counts) - 1 + offset) / steps
+	lastIdx := (len(b.counts) - 1 + int(offset)) / int(steps)
 	b.counts = b.counts[:lastIdx+1]
 	b.startBin = b.startBin >> delta
 }
@@ -282,12 +283,12 @@ func (b *expoBuckets) downscale(delta int) {
 // newExponentialHistogram returns an Aggregator that summarizes a set of
 // measurements as an exponential histogram. Each histogram is scoped by attributes
 // and the aggregation cycle the measurements were made in.
-func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *expoHistogram[N] {
+func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func() FilteredExemplarReservoir[N]) *expoHistogram[N] {
 	return &expoHistogram[N]{
 		noSum:    noSum,
 		noMinMax: noMinMax,
 		maxSize:  int(maxSize),
-		maxScale: int(maxScale),
+		maxScale: maxScale,
 
 		newRes: r,
 		limit:  newLimiter[*expoHistogramDataPoint[N]](limit),
@@ -303,9 +304,9 @@ type expoHistogram[N int64 | float64] struct {
 	noSum    bool
 	noMinMax bool
 	maxSize  int
-	maxScale int
+	maxScale int32
 
-	newRes   func() exemplar.FilteredReservoir[N]
+	newRes   func() FilteredExemplarReservoir[N]
 	limit    limiter[*expoHistogramDataPoint[N]]
 	values   map[attribute.Distinct]*expoHistogramDataPoint[N]
 	valuesMu sync.Mutex
@@ -354,15 +355,15 @@ func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int {
 		hDPts[i].StartTime = e.start
 		hDPts[i].Time = t
 		hDPts[i].Count = val.count
-		hDPts[i].Scale = int32(val.scale)
+		hDPts[i].Scale = val.scale
 		hDPts[i].ZeroCount = val.zeroCount
 		hDPts[i].ZeroThreshold = 0.0
 
-		hDPts[i].PositiveBucket.Offset = int32(val.posBuckets.startBin)
+		hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin
 		hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts))
 		copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts)
 
-		hDPts[i].NegativeBucket.Offset = int32(val.negBuckets.startBin)
+		hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin
 		hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts))
 		copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts)
 
@@ -407,15 +408,15 @@ func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int {
 		hDPts[i].StartTime = e.start
 		hDPts[i].Time = t
 		hDPts[i].Count = val.count
-		hDPts[i].Scale = int32(val.scale)
+		hDPts[i].Scale = val.scale
 		hDPts[i].ZeroCount = val.zeroCount
 		hDPts[i].ZeroThreshold = 0.0
 
-		hDPts[i].PositiveBucket.Offset = int32(val.posBuckets.startBin)
+		hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin
 		hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts))
 		copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts)
 
-		hDPts[i].NegativeBucket.Offset = int32(val.negBuckets.startBin)
+		hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin
 		hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts))
 		copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts)
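Most of this file's churn is the int to int32 migration for bucket indexes and scale, so values can be assigned to metricdata's int32 Scale and Offset fields without per-collection conversions. A self-contained sketch of the bin computation under that typing (simplification assumed: the scale factor is computed inline instead of the vendored scaleFactors lookup table):

package main

import (
	"fmt"
	"math"
)

// getBin mirrors expoHistogramDataPoint.getBin after the int32 change.
func getBin(v float64, scale int32) int32 {
	frac, expInt := math.Frexp(v)
	exp := int32(expInt)
	if scale <= 0 {
		var correction int32 = 1
		if frac == .5 {
			// v is an exact power of two: Frexp reports one exponent higher.
			correction = 2
		}
		return (exp - correction) >> (-scale)
	}
	scaleFactor := math.Ldexp(math.Log2E, int(scale)) // 2^scale / ln 2
	return exp<<scale + int32(math.Log(frac)*scaleFactor) - 1
}

func main() {
	fmt.Println(getBin(4, 0))   // 1: scale-0 buckets are (2^i, 2^(i+1)], 4 falls in (2, 4]
	fmt.Println(getBin(0.5, 0)) // -2: 0.5 falls in (0.25, 0.5]
	fmt.Println(getBin(3, 1))   // 3: scale-1 bucket boundaries are powers of sqrt(2)
}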
vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/filtered_reservoir.go (generated, vendored, new file, 50 lines)
@@ -0,0 +1,50 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import (
+	"context"
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/metric/exemplar"
+)
+
+// FilteredExemplarReservoir wraps a [exemplar.Reservoir] with a filter.
+type FilteredExemplarReservoir[N int64 | float64] interface {
+	// Offer accepts the parameters associated with a measurement. The
+	// parameters will be stored as an exemplar if the filter decides to
+	// sample the measurement.
+	//
+	// The passed ctx needs to contain any baggage or span that were active
+	// when the measurement was made. This information may be used by the
+	// Reservoir in making a sampling decision.
+	Offer(ctx context.Context, val N, attr []attribute.KeyValue)
+	// Collect returns all the held exemplars in the reservoir.
+	Collect(dest *[]exemplar.Exemplar)
+}
+
+// filteredExemplarReservoir handles the pre-sampled exemplar of measurements made.
+type filteredExemplarReservoir[N int64 | float64] struct {
+	filter    exemplar.Filter
+	reservoir exemplar.Reservoir
+}
+
+// NewFilteredExemplarReservoir creates a [FilteredExemplarReservoir] which only offers values
+// that are allowed by the filter.
+func NewFilteredExemplarReservoir[N int64 | float64](f exemplar.Filter, r exemplar.Reservoir) FilteredExemplarReservoir[N] {
+	return &filteredExemplarReservoir[N]{
+		filter:    f,
+		reservoir: r,
+	}
+}
+
+func (f *filteredExemplarReservoir[N]) Offer(ctx context.Context, val N, attr []attribute.KeyValue) {
+	if f.filter(ctx) {
+		// only record the current time if we are sampling this measurement.
+		f.reservoir.Offer(ctx, time.Now(), exemplar.NewValue(val), attr)
+	}
+}
+
+func (f *filteredExemplarReservoir[N]) Collect(dest *[]exemplar.Exemplar) { f.reservoir.Collect(dest) }
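FilteredExemplarReservoir now composes the public go.opentelemetry.io/otel/sdk/metric/exemplar package instead of the removed internal one. Since the aggregate package is internal, a sketch of the same filter-then-offer behavior can only be written against the public API; this assumes exemplar.Filter, exemplar.AlwaysOnFilter, NewFixedSizeReservoir, and NewValue exist as in this vendored version:

package main

import (
	"context"
	"fmt"
	"time"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/metric/exemplar"
)

func main() {
	ctx := context.Background()

	// A fixed-size reservoir from the public exemplar package.
	res := exemplar.NewFixedSizeReservoir(4)

	// The filter gates the offer, as in filteredExemplarReservoir.Offer.
	var filter exemplar.Filter = exemplar.AlwaysOnFilter
	if filter(ctx) {
		// The timestamp is only taken when the measurement is sampled.
		res.Offer(ctx, time.Now(), exemplar.NewValue(42.0),
			[]attribute.KeyValue{attribute.String("k", "v")})
	}

	var dest []exemplar.Exemplar
	res.Collect(&dest)
	fmt.Println(len(dest)) // 1: the first offers always fit in the reservoir
}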
vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go (generated, vendored, 9 changed lines)
@@ -11,13 +11,12 @@ import (
 	"time"
 
 	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
 	"go.opentelemetry.io/otel/sdk/metric/metricdata"
 )
 
 type buckets[N int64 | float64] struct {
 	attrs attribute.Set
-	res   exemplar.FilteredReservoir[N]
+	res   FilteredExemplarReservoir[N]
 
 	counts   []uint64
 	count    uint64
@@ -48,13 +47,13 @@ type histValues[N int64 | float64] struct {
 	noSum  bool
 	bounds []float64
 
-	newRes   func() exemplar.FilteredReservoir[N]
+	newRes   func() FilteredExemplarReservoir[N]
 	limit    limiter[*buckets[N]]
 	values   map[attribute.Distinct]*buckets[N]
 	valuesMu sync.Mutex
 }
 
-func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *histValues[N] {
+func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func() FilteredExemplarReservoir[N]) *histValues[N] {
 	// The responsibility of keeping all buckets correctly associated with the
 	// passed boundaries is ultimately this type's responsibility. Make a copy
 	// here so we can always guarantee this. Or, in the case of failure, have
@@ -109,7 +108,7 @@ func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
 
 // newHistogram returns an Aggregator that summarizes a set of measurements as
 // an histogram.
-func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *histogram[N] {
+func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func() FilteredExemplarReservoir[N]) *histogram[N] {
 	return &histogram[N]{
 		histValues: newHistValues[N](boundaries, noSum, limit, r),
 		noMinMax:   noMinMax,
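The comment in the newHistValues hunk refers to the defensive copy the constructor makes of its bucket boundaries. A sketch of that idiom (hypothetical newBounds helper, not the vendored code):

package main

import (
	"fmt"
	"slices"
)

// newBounds mirrors the copy newHistValues makes of its histogram
// boundaries: clone, then sort, so later mutation of the caller's slice
// cannot corrupt bucketing.
func newBounds(bounds []float64) []float64 {
	b := slices.Clone(bounds)
	slices.Sort(b)
	return b
}

func main() {
	user := []float64{10, 1, 100}
	own := newBounds(user)
	user[0] = -5     // caller mutates its slice afterwards...
	fmt.Println(own) // [1 10 100]: the private copy is unaffected
}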
vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go (generated, vendored, 9 changed lines)
@@ -9,7 +9,6 @@ import (
 	"time"
 
 	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
 	"go.opentelemetry.io/otel/sdk/metric/metricdata"
 )
 
@@ -17,10 +16,10 @@ import (
 type datapoint[N int64 | float64] struct {
 	attrs attribute.Set
 	value N
-	res   exemplar.FilteredReservoir[N]
+	res   FilteredExemplarReservoir[N]
 }
 
-func newLastValue[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *lastValue[N] {
+func newLastValue[N int64 | float64](limit int, r func() FilteredExemplarReservoir[N]) *lastValue[N] {
 	return &lastValue[N]{
 		newRes: r,
 		limit:  newLimiter[datapoint[N]](limit),
@@ -33,7 +32,7 @@ func newLastValue[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *lastValue[N] {
 type lastValue[N int64 | float64] struct {
 	sync.Mutex
 
-	newRes func() exemplar.FilteredReservoir[N]
+	newRes func() FilteredExemplarReservoir[N]
 	limit  limiter[datapoint[N]]
 	values map[attribute.Distinct]datapoint[N]
 	start  time.Time
@@ -115,7 +114,7 @@ func (s *lastValue[N]) copyDpts(dest *[]metricdata.DataPoint[N], t time.Time) int {
 
 // newPrecomputedLastValue returns an aggregator that summarizes a set of
 // observations as the last one made.
-func newPrecomputedLastValue[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *precomputedLastValue[N] {
+func newPrecomputedLastValue[N int64 | float64](limit int, r func() FilteredExemplarReservoir[N]) *precomputedLastValue[N] {
 	return &precomputedLastValue[N]{lastValue: newLastValue[N](limit, r)}
}
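As the hunks show, lastValue stores one datapoint per distinct attribute set, keyed by attribute.Distinct. A sketch of that storage pattern (hypothetical record closure, not the vendored code):

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// One value per distinct attribute set, as in lastValue.values.
	values := map[attribute.Distinct]float64{}

	record := func(v float64, kvs ...attribute.KeyValue) {
		s := attribute.NewSet(kvs...)
		values[s.Equivalent()] = v // a later measurement overwrites the earlier one
	}

	record(1, attribute.String("host", "a"))
	record(2, attribute.String("host", "a")) // same set: overwritten
	record(7, attribute.String("host", "b"))

	fmt.Println(len(values)) // 2
}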
vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go (generated, vendored, 15 changed lines)
@@ -9,25 +9,24 @@ import (
 	"time"
 
 	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
 	"go.opentelemetry.io/otel/sdk/metric/metricdata"
 )
 
 type sumValue[N int64 | float64] struct {
 	n     N
-	res   exemplar.FilteredReservoir[N]
+	res   FilteredExemplarReservoir[N]
 	attrs attribute.Set
 }
 
 // valueMap is the storage for sums.
 type valueMap[N int64 | float64] struct {
 	sync.Mutex
-	newRes func() exemplar.FilteredReservoir[N]
+	newRes func() FilteredExemplarReservoir[N]
 	limit  limiter[sumValue[N]]
 	values map[attribute.Distinct]sumValue[N]
 }
 
-func newValueMap[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *valueMap[N] {
+func newValueMap[N int64 | float64](limit int, r func() FilteredExemplarReservoir[N]) *valueMap[N] {
 	return &valueMap[N]{
 		newRes: r,
 		limit:  newLimiter[sumValue[N]](limit),
@@ -55,7 +54,7 @@ func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
 // newSum returns an aggregator that summarizes a set of measurements as their
 // arithmetic sum. Each sum is scoped by attributes and the aggregation cycle
 // the measurements were made in.
-func newSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.FilteredReservoir[N]) *sum[N] {
+func newSum[N int64 | float64](monotonic bool, limit int, r func() FilteredExemplarReservoir[N]) *sum[N] {
 	return &sum[N]{
 		valueMap:  newValueMap[N](limit, r),
 		monotonic: monotonic,
@@ -142,9 +141,9 @@ func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int {
 }
 
 // newPrecomputedSum returns an aggregator that summarizes a set of
-// observatrions as their arithmetic sum. Each sum is scoped by attributes and
+// observations as their arithmetic sum. Each sum is scoped by attributes and
 // the aggregation cycle the measurements were made in.
-func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.FilteredReservoir[N]) *precomputedSum[N] {
+func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() FilteredExemplarReservoir[N]) *precomputedSum[N] {
 	return &precomputedSum[N]{
 		valueMap:  newValueMap[N](limit, r),
 		monotonic: monotonic,
@@ -152,7 +151,7 @@ func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.FilteredReservoir[N]) *precomputedSum[N] {
 	}
 }
 
-// precomputedSum summarizes a set of observatrions as their arithmetic sum.
+// precomputedSum summarizes a set of observations as their arithmetic sum.
 type precomputedSum[N int64 | float64] struct {
 	*valueMap[N]
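For context on the precomputed variant touched above: precomputedSum emits deltas by remembering the last cumulative value it reported per attribute set. A sketch of that bookkeeping (hypothetical string keys for brevity, where the vendored code keys by attribute.Distinct):

package main

import "fmt"

// precomputedSum reports the difference between the currently observed
// cumulative value and what was last reported.
type precomputedSum struct {
	reported map[string]float64
}

func (p *precomputedSum) delta(key string, observed float64) float64 {
	d := observed - p.reported[key]
	p.reported[key] = observed // remember for the next collection cycle
	return d
}

func main() {
	p := &precomputedSum{reported: map[string]float64{}}
	fmt.Println(p.delta("cpu", 10)) // 10
	fmt.Println(p.delta("cpu", 25)) // 15
}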