Mirror of https://gitea.com/Lydanne/buildx.git (synced 2025-07-10 13:37:08 +08:00)
vendor: update buildkit to v0.18.0-rc1
Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
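Note on the vendored change below: the updated OpenTelemetry SDK keys each histogram series by attribute.Distinct (obtained via Set.Equivalent()) instead of using attribute.Set itself as the map key, stores the attribute set and an exemplar reservoir on each buckets value, threads an attribute-cardinality limit through the constructors, and replaces the make/copy/sort.Float64s idiom with slices.Clone and slices.Sort. The following is a minimal, self-contained sketch of the new keying pattern using only the public attribute package; it is illustrative, not code from buildx or the SDK.

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

// bucket stands in for the aggregator's per-series state.
type bucket struct{ count uint64 }

func main() {
	// Sets built from the same key/value pairs yield the same
	// attribute.Distinct, a comparable value suited for map keys.
	series := map[attribute.Distinct]*bucket{}

	a := attribute.NewSet(attribute.String("host", "a"))
	b := attribute.NewSet(attribute.String("host", "a"))

	for _, s := range []attribute.Set{a, b} {
		k := s.Equivalent() // the new map key, as in the diff below
		if series[k] == nil {
			series[k] = &bucket{}
		}
		series[k].count++
	}

	// Both measurements land in the same series entry.
	fmt.Println(len(series), series[a.Equivalent()].count) // prints: 1 2
}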
vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go (generated, vendored): 106 changed lines
@@ -1,30 +1,24 @@
 // Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
 
 import (
 	"context"
+	"slices"
 	"sort"
 	"sync"
 	"time"
 
 	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
 	"go.opentelemetry.io/otel/sdk/metric/metricdata"
 )
 
 type buckets[N int64 | float64] struct {
+	attrs attribute.Set
+	res   exemplar.FilteredReservoir[N]
+
 	counts []uint64
 	count  uint64
 	total  N
@@ -32,8 +26,8 @@ type buckets[N int64 | float64] struct {
 }
 
 // newBuckets returns buckets with n bins.
-func newBuckets[N int64 | float64](n int) *buckets[N] {
-	return &buckets[N]{counts: make([]uint64, n)}
+func newBuckets[N int64 | float64](attrs attribute.Set, n int) *buckets[N] {
+	return &buckets[N]{attrs: attrs, counts: make([]uint64, n)}
 }
 
 func (b *buckets[N]) sum(value N) { b.total += value }
@@ -54,28 +48,31 @@ type histValues[N int64 | float64] struct {
 	noSum  bool
 	bounds []float64
 
-	values   map[attribute.Set]*buckets[N]
+	newRes   func() exemplar.FilteredReservoir[N]
+	limit    limiter[*buckets[N]]
+	values   map[attribute.Distinct]*buckets[N]
 	valuesMu sync.Mutex
 }
 
-func newHistValues[N int64 | float64](bounds []float64, noSum bool) *histValues[N] {
+func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *histValues[N] {
 	// The responsibility of keeping all buckets correctly associated with the
 	// passed boundaries is ultimately this type's responsibility. Make a copy
 	// here so we can always guarantee this. Or, in the case of failure, have
 	// complete control over the fix.
-	b := make([]float64, len(bounds))
-	copy(b, bounds)
-	sort.Float64s(b)
+	b := slices.Clone(bounds)
+	slices.Sort(b)
 	return &histValues[N]{
 		noSum:  noSum,
 		bounds: b,
-		values: make(map[attribute.Set]*buckets[N]),
+		newRes: r,
+		limit:  newLimiter[*buckets[N]](limit),
+		values: make(map[attribute.Distinct]*buckets[N]),
 	}
 }
 
 // Aggregate records the measurement value, scoped by attr, and aggregates it
 // into a histogram.
-func (s *histValues[N]) measure(_ context.Context, value N, attr attribute.Set) {
+func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
 	// This search will return an index in the range [0, len(s.bounds)], where
 	// it will return len(s.bounds) if value is greater than the last element
 	// of s.bounds. This aligns with the buckets in that the length of buckets
@@ -86,7 +83,8 @@ func (s *histValues[N]) measure(_ context.Context, value N, attr attribute.Set)
 	s.valuesMu.Lock()
 	defer s.valuesMu.Unlock()
 
-	b, ok := s.values[attr]
+	attr := s.limit.Attributes(fltrAttr, s.values)
+	b, ok := s.values[attr.Equivalent()]
 	if !ok {
 		// N+1 buckets. For example:
 		//
@@ -95,22 +93,25 @@ func (s *histValues[N]) measure(_ context.Context, value N, attr attribute.Set)
 		// Then,
 		//
 		//   buckets = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞)
-		b = newBuckets[N](len(s.bounds) + 1)
+		b = newBuckets[N](attr, len(s.bounds)+1)
+		b.res = s.newRes()
+
 		// Ensure min and max are recorded values (not zero), for new buckets.
 		b.min, b.max = value, value
-		s.values[attr] = b
+		s.values[attr.Equivalent()] = b
 	}
 	b.bin(idx, value)
 	if !s.noSum {
 		b.sum(value)
 	}
+	b.res.Offer(ctx, value, droppedAttr)
 }
 
 // newHistogram returns an Aggregator that summarizes a set of measurements as
 // an histogram.
-func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool) *histogram[N] {
+func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *histogram[N] {
 	return &histogram[N]{
-		histValues: newHistValues[N](boundaries, noSum),
+		histValues: newHistValues[N](boundaries, noSum, limit, r),
 		noMinMax:   noMinMax,
 		start:      now(),
 	}
@@ -137,34 +138,35 @@ func (s *histogram[N]) delta(dest *metricdata.Aggregation) int {
 	defer s.valuesMu.Unlock()
 
 	// Do not allow modification of our copy of bounds.
-	bounds := make([]float64, len(s.bounds))
-	copy(bounds, s.bounds)
+	bounds := slices.Clone(s.bounds)
 
 	n := len(s.values)
 	hDPts := reset(h.DataPoints, n, n)
 
 	var i int
-	for a, b := range s.values {
-		hDPts[i].Attributes = a
+	for _, val := range s.values {
+		hDPts[i].Attributes = val.attrs
 		hDPts[i].StartTime = s.start
 		hDPts[i].Time = t
-		hDPts[i].Count = b.count
+		hDPts[i].Count = val.count
 		hDPts[i].Bounds = bounds
-		hDPts[i].BucketCounts = b.counts
+		hDPts[i].BucketCounts = val.counts
 
 		if !s.noSum {
-			hDPts[i].Sum = b.total
+			hDPts[i].Sum = val.total
 		}
 
 		if !s.noMinMax {
-			hDPts[i].Min = metricdata.NewExtrema(b.min)
-			hDPts[i].Max = metricdata.NewExtrema(b.max)
+			hDPts[i].Min = metricdata.NewExtrema(val.min)
+			hDPts[i].Max = metricdata.NewExtrema(val.max)
 		}
 
-		// Unused attribute sets do not report.
-		delete(s.values, a)
+		collectExemplars(&hDPts[i].Exemplars, val.res.Collect)
+
 		i++
 	}
+	// Unused attribute sets do not report.
+	clear(s.values)
 	// The delta collection cycle resets.
 	s.start = t
 
@@ -186,37 +188,37 @@ func (s *histogram[N]) cumulative(dest *metricdata.Aggregation) int {
 	defer s.valuesMu.Unlock()
 
 	// Do not allow modification of our copy of bounds.
-	bounds := make([]float64, len(s.bounds))
-	copy(bounds, s.bounds)
+	bounds := slices.Clone(s.bounds)
 
 	n := len(s.values)
 	hDPts := reset(h.DataPoints, n, n)
 
 	var i int
-	for a, b := range s.values {
+	for _, val := range s.values {
+		hDPts[i].Attributes = val.attrs
+		hDPts[i].StartTime = s.start
+		hDPts[i].Time = t
+		hDPts[i].Count = val.count
+		hDPts[i].Bounds = bounds
+
 		// The HistogramDataPoint field values returned need to be copies of
 		// the buckets value as we will keep updating them.
 		//
 		// TODO (#3047): Making copies for bounds and counts incurs a large
 		// memory allocation footprint. Alternatives should be explored.
-		counts := make([]uint64, len(b.counts))
-		copy(counts, b.counts)
-
-		hDPts[i].Attributes = a
-		hDPts[i].StartTime = s.start
-		hDPts[i].Time = t
-		hDPts[i].Count = b.count
-		hDPts[i].Bounds = bounds
-		hDPts[i].BucketCounts = counts
+		hDPts[i].BucketCounts = slices.Clone(val.counts)
 
 		if !s.noSum {
-			hDPts[i].Sum = b.total
+			hDPts[i].Sum = val.total
 		}
 
 		if !s.noMinMax {
-			hDPts[i].Min = metricdata.NewExtrema(b.min)
-			hDPts[i].Max = metricdata.NewExtrema(b.max)
+			hDPts[i].Min = metricdata.NewExtrema(val.min)
+			hDPts[i].Max = metricdata.NewExtrema(val.max)
 		}
 
+		collectExemplars(&hDPts[i].Exemplars, val.res.Collect)
+
 		i++
 		// TODO (#3006): This will use an unbounded amount of memory if there
 		// are unbounded number of attribute sets being aggregated. Attribute
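One piece of the new code this page does not show is the SDK-internal limiter type behind the new limit field: it caps how many distinct attribute sets the aggregator will track. As a rough illustration only, the sketch below re-implements the idea, assuming (per OpenTelemetry's documented behavior, not visible in this diff) that once the limit is reached further series are folded into a single otel.metric.overflow series. The type cardinalityLimit and its exact threshold logic are hypothetical stand-ins, not the SDK's actual limiter.

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

// cardinalityLimit is a hypothetical stand-in for the SDK-internal
// limiter[*buckets[N]] referenced in the diff above: once the map already
// holds aggLimit-1 distinct series, any new series is diverted into a
// single overflow series.
type cardinalityLimit[V any] struct{ aggLimit int }

var overflowSet = attribute.NewSet(attribute.Bool("otel.metric.overflow", true))

func (l cardinalityLimit[V]) Attributes(attrs attribute.Set, m map[attribute.Distinct]V) attribute.Set {
	if l.aggLimit > 0 {
		_, exists := m[attrs.Equivalent()]
		if !exists && len(m) >= l.aggLimit-1 {
			return overflowSet // cap reached: divert to the overflow series
		}
	}
	return attrs
}

func main() {
	counts := map[attribute.Distinct]int{}
	lim := cardinalityLimit[int]{aggLimit: 3}

	for _, host := range []string{"a", "b", "c", "d"} {
		attrs := lim.Attributes(attribute.NewSet(attribute.String("host", host)), counts)
		counts[attrs.Equivalent()]++
	}
	// "c" and "d" both land in the overflow series; the map stays at 3 entries.
	fmt.Println(len(counts), counts[overflowSet.Equivalent()]) // prints: 3 2
}

Bounding the values map this way addresses the concern flagged by the TODO (#3006) comment above: without a cap, recording measurements with unbounded attribute cardinality would grow memory without limit.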