vendor: update buildkit to b124b0c3

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
Tonis Tiigi committed 2022-02-23 12:15:16 -08:00
parent 10debb577e
commit 64ce211ba4
27 changed files with 415 additions and 180 deletions

@@ -23,7 +23,7 @@ var (
Package = "github.com/containerd/containerd"
// Version holds the complete version number. Filled in at linking time.
Version = "1.6.0-rc.2+unknown"
Version = "1.6.0+unknown"
// Revision is filled with the VCS (e.g. git) revision being used to build
// the program at linking time.

@@ -17,6 +17,14 @@ This package provides various compression algorithms.
# changelog
* Jan 25, 2022 (v1.14.2)
* zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476)
* zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469)
* zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470)
* zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472)
* flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473)
* zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475)
* Jan 11, 2022 (v1.14.1)
* s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462)
* flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458)

@@ -85,7 +85,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
// TEMPLATE
const hashLog = tableBits
// seems global, but would be nice to tweak.
const kSearchStrength = 7
const kSearchStrength = 6
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := s
@@ -334,7 +334,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
// TEMPLATE
const hashLog = tableBits
// seems global, but would be nice to tweak.
const kSearchStrength = 8
const kSearchStrength = 6
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := s

@@ -44,7 +44,13 @@ func (c *Client) Build(ctx context.Context, opt SolveOpt, product string, buildF
})
}
cb := func(ref string, s *session.Session) error {
cb := func(ref string, s *session.Session, opts map[string]string) error {
for k, v := range opts {
if feOpts == nil {
feOpts = map[string]string{}
}
feOpts[k] = v
}
gwClient := c.gatewayClientForBuild(ref)
g, err := grpcclient.New(ctx, feOpts, s.ID(), product, gwClient, gworkers)
if err != nil {

@@ -75,7 +75,7 @@ func (c *Client) Solve(ctx context.Context, def *llb.Definition, opt SolveOpt, s
return c.solve(ctx, def, nil, opt, statusChan)
}
type runGatewayCB func(ref string, s *session.Session) error
type runGatewayCB func(ref string, s *session.Session, opts map[string]string) error
func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runGatewayCB, opt SolveOpt, statusChan chan *SolveStatus) (*SolveResponse, error) {
if def != nil && runGateway != nil {
@@ -109,7 +109,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
}
}
cacheOpt, err := parseCacheOptions(ctx, opt)
cacheOpt, err := parseCacheOptions(ctx, runGateway != nil, opt)
if err != nil {
return nil, err
}
@@ -171,6 +171,9 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
}
for k, v := range cacheOpt.frontendAttrs {
if opt.FrontendAttrs == nil {
opt.FrontendAttrs = map[string]string{}
}
opt.FrontendAttrs[k] = v
}
@@ -225,7 +228,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
if runGateway != nil {
eg.Go(func() error {
err := runGateway(ref, s)
err := runGateway(ref, s, opt.FrontendAttrs)
if err == nil {
return nil
}
@@ -386,7 +389,7 @@ type cacheOptions struct {
frontendAttrs map[string]string
}
func parseCacheOptions(ctx context.Context, opt SolveOpt) (*cacheOptions, error) {
func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cacheOptions, error) {
var (
cacheExports []*controlapi.CacheOptionsEntry
cacheImports []*controlapi.CacheOptionsEntry
@@ -471,7 +474,7 @@ func parseCacheOptions(ctx context.Context, opt SolveOpt) (*cacheOptions, error)
})
}
}
if opt.Frontend != "" {
if opt.Frontend != "" || isGateway {
// use legacy API for registry importers, because the frontend might not support the new API
if len(legacyImportRefs) > 0 {
frontendAttrs["cache-from"] = strings.Join(legacyImportRefs, ",")
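The net effect of these solver changes is that cache options supplied through SolveOpt now reach the gateway frontend. Below is a minimal, illustrative sketch (not part of this diff; the daemon address and registry ref are placeholders) of how a caller could observe this through the buildkit client API:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/moby/buildkit/client"
	gateway "github.com/moby/buildkit/frontend/gateway/client"
)

func main() {
	ctx := context.Background()

	// Placeholder daemon address.
	c, err := client.New(ctx, "unix:///run/buildkit/buildkitd.sock")
	if err != nil {
		log.Fatal(err)
	}

	opt := client.SolveOpt{
		// Placeholder cache source; parseCacheOptions turns this into the
		// "cache-imports" / legacy "cache-from" frontend attributes.
		CacheImports: []client.CacheOptionsEntry{{
			Type:  "registry",
			Attrs: map[string]string{"ref": "example.com/app:buildcache"},
		}},
	}

	buildFunc := func(ctx context.Context, gc gateway.Client) (*gateway.Result, error) {
		// With this update the computed cache attributes are forwarded to the
		// gateway session, so they show up in the frontend options here.
		opts := gc.BuildOpts().Opts
		fmt.Println(opts["cache-imports"], opts["cache-from"])
		return gateway.NewResult(), nil
	}

	if _, err := c.Build(ctx, opt, "", buildFunc, nil); err != nil {
		log.Fatal(err)
	}
}
```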

@@ -99,6 +99,8 @@ type ContainerdConfig struct {
ApparmorProfile string `toml:"apparmor-profile"`
MaxParallelism int `toml:"max-parallelism"`
Rootless bool `toml:"rootless"`
}
type GCPolicy struct {

@@ -341,6 +341,24 @@ func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (res *
}
}
// these options are added by go client in solve()
if _, ok := creq.FrontendOpt["cache-imports"]; !ok {
if v, ok := c.opts["cache-imports"]; ok {
if creq.FrontendOpt == nil {
creq.FrontendOpt = map[string]string{}
}
creq.FrontendOpt["cache-imports"] = v
}
}
if _, ok := creq.FrontendOpt["cache-from"]; !ok {
if v, ok := c.opts["cache-from"]; ok {
if creq.FrontendOpt == nil {
creq.FrontendOpt = map[string]string{}
}
creq.FrontendOpt["cache-from"] = v
}
}
req := &pb.SolveRequest{
Definition: creq.Definition,
Frontend: creq.Frontend,

@@ -3,10 +3,17 @@ package bklog
import (
"context"
"github.com/containerd/containerd/log"
"github.com/sirupsen/logrus"
"go.opentelemetry.io/otel/trace"
)
func init() {
// overwrites containerd/log
log.G = GetLogger
log.L = L
}
var (
G = GetLogger
L = logrus.NewEntry(logrus.StandardLogger())
@@ -37,6 +44,8 @@ func GetLogger(ctx context.Context) (l *logrus.Entry) {
if logger != nil {
l = logger.(*logrus.Entry)
} else if logger := log.GetLogger(ctx); logger != nil {
l = logger
} else {
l = L
}

@@ -7,6 +7,34 @@ import (
"golang.org/x/sys/unix"
)
// MountedFast is a method of detecting a mount point without reading
// mountinfo from procfs. A caller can only trust the result if no error
// and sure == true are returned. Otherwise, other methods (e.g. parsing
// /proc/mounts) have to be used. If unsure, use Mounted instead (which
// uses MountedFast, but falls back to parsing mountinfo if needed).
//
// If a non-existent path is specified, an appropriate error is returned.
// In case the caller is not interested in this particular error, it should
// be handled separately using e.g. errors.Is(err, os.ErrNotExist).
//
// This function is only available on Linux. When available (since kernel
// v5.6), openat2(2) syscall is used to reliably detect all mounts. Otherwise,
// the implementation falls back to using stat(2), which can reliably detect
// normal (but not bind) mounts.
func MountedFast(path string) (mounted, sure bool, err error) {
// Root is always mounted.
if path == string(os.PathSeparator) {
return true, true, nil
}
path, err = normalizePath(path)
if err != nil {
return false, false, err
}
mounted, sure, err = mountedFast(path)
return
}
// mountedByOpenat2 is a method of detecting a mount that works for all kinds
// of mounts (incl. bind mounts), but requires a recent (v5.6+) linux kernel.
func mountedByOpenat2(path string) (bool, error) {
@@ -34,24 +62,40 @@ func mountedByOpenat2(path string) (bool, error) {
return false, &os.PathError{Op: "openat2", Path: path, Err: err}
}
func mounted(path string) (bool, error) {
path, err := normalizePath(path)
if err != nil {
return false, err
// mountedFast is similar to MountedFast, except it expects a normalized path.
func mountedFast(path string) (mounted, sure bool, err error) {
// Root is always mounted.
if path == string(os.PathSeparator) {
return true, true, nil
}
// Try a fast path, using openat2() with RESOLVE_NO_XDEV.
mounted, err := mountedByOpenat2(path)
mounted, err = mountedByOpenat2(path)
if err == nil {
return mounted, nil
return mounted, true, nil
}
// Another fast path: compare st.st_dev fields.
mounted, err = mountedByStat(path)
// This does not work for bind mounts, so false negative
// is possible, therefore only trust if return is true.
if mounted && err == nil {
return true, true, nil
}
return
}
func mounted(path string) (bool, error) {
path, err := normalizePath(path)
if err != nil {
return false, err
}
mounted, sure, err := mountedFast(path)
if sure && err == nil {
return mounted, nil
}
// Fallback to parsing mountinfo
// Fallback to parsing mountinfo.
return mountedByMountinfo(path)
}
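The moby/sys/mountinfo bump brings in the new MountedFast helper shown above. A minimal, Linux-only usage sketch (the path is just an example), mirroring what Mounted now does internally:

```go
package main

import (
	"fmt"
	"log"

	"github.com/moby/sys/mountinfo"
)

func main() {
	path := "/var/lib/docker" // example path

	// Fast path: openat2(2) on kernels >= 5.6, stat(2) otherwise.
	mounted, sure, err := mountinfo.MountedFast(path)
	if err != nil {
		log.Fatal(err)
	}
	if !sure {
		// The fast path could not decide (e.g. a bind mount on an older
		// kernel); fall back to the full mountinfo parse.
		mounted, err = mountinfo.Mounted(path)
		if err != nil {
			log.Fatal(err)
		}
	}
	fmt.Println("mounted:", mounted)
}
```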

@@ -13,9 +13,9 @@ func GetMounts(f FilterFunc) ([]*Info, error) {
// Mounted determines if a specified path is a mount point. In case of any
// error, false (and an error) is returned.
//
// The non-existent path returns an error. If a caller is not interested
// in this particular error, it should handle it separately using e.g.
// errors.Is(err, os.ErrNotExist).
// If a non-existent path is specified, an appropriate error is returned.
// In case the caller is not interested in this particular error, it should
// be handled separately using e.g. errors.Is(err, os.ErrNotExist).
func Mounted(path string) (bool, error) {
// root is always mounted
if path == string(os.PathSeparator) {

@@ -118,3 +118,11 @@ func (c *selfCollector) Describe(ch chan<- *Desc) {
func (c *selfCollector) Collect(ch chan<- Metric) {
ch <- c.self
}
// collectorMetric is a metric that is also a collector.
// Because of selfCollector, most (if not all) Metrics in
// this package are also collectors.
type collectorMetric interface {
Metric
Collector
}

@@ -20,6 +20,7 @@ import (
"math"
"runtime"
"runtime/metrics"
"strings"
"sync"
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
@@ -31,10 +32,14 @@ import (
type goCollector struct {
base baseGoCollector
// mu protects updates to all fields ensuring a consistent
// snapshot is always produced by Collect.
mu sync.Mutex
// rm... fields all pertain to the runtime/metrics package.
rmSampleBuf []metrics.Sample
rmSampleMap map[string]*metrics.Sample
rmMetrics []Metric
rmMetrics []collectorMetric
// With Go 1.17, the runtime/metrics package was introduced.
// From that point on, metric names produced by the runtime/metrics
@@ -52,13 +57,24 @@ type goCollector struct {
// Deprecated: Use collectors.NewGoCollector instead.
func NewGoCollector() Collector {
descriptions := metrics.All()
descMap := make(map[string]*metrics.Description)
for i := range descriptions {
descMap[descriptions[i].Name] = &descriptions[i]
// Collect all histogram samples so that we can get their buckets.
// The API guarantees that the buckets are always fixed for the lifetime
// of the process.
var histograms []metrics.Sample
for _, d := range descriptions {
if d.Kind == metrics.KindFloat64Histogram {
histograms = append(histograms, metrics.Sample{Name: d.Name})
}
}
metrics.Read(histograms)
bucketsMap := make(map[string][]float64)
for i := range histograms {
bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets
}
// Generate a Desc and ValueType for each runtime/metrics metric.
metricSet := make([]Metric, 0, len(descriptions))
metricSet := make([]collectorMetric, 0, len(descriptions))
sampleBuf := make([]metrics.Sample, 0, len(descriptions))
sampleMap := make(map[string]*metrics.Sample, len(descriptions))
for i := range descriptions {
@@ -76,9 +92,10 @@ func NewGoCollector() Collector {
sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name})
sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1]
var m Metric
var m collectorMetric
if d.Kind == metrics.KindFloat64Histogram {
_, hasSum := rmExactSumMap[d.Name]
unit := d.Name[strings.IndexRune(d.Name, ':')+1:]
m = newBatchHistogram(
NewDesc(
BuildFQName(namespace, subsystem, name),
@@ -86,6 +103,7 @@ func NewGoCollector() Collector {
nil,
nil,
),
internal.RuntimeMetricsBucketsForUnit(bucketsMap[d.Name], unit),
hasSum,
)
} else if d.Cumulative {
@@ -130,9 +148,25 @@ func (c *goCollector) Collect(ch chan<- Metric) {
// Collect base non-memory metrics.
c.base.Collect(ch)
// Collect must be thread-safe, so prevent concurrent use of
// rmSampleBuf. Just read into rmSampleBuf but write all the data
// we get into our Metrics or MemStats.
//
// This lock also ensures that the Metrics we send out are all from
// the same updates, ensuring their mutual consistency insofar as
// is guaranteed by the runtime/metrics package.
//
// N.B. This locking is heavy-handed, but Collect is expected to be called
// relatively infrequently. Also the core operation here, metrics.Read,
// is fast (O(tens of microseconds)) so contention should certainly be
// low, though channel operations and any allocations may add to that.
c.mu.Lock()
defer c.mu.Unlock()
// Populate runtime/metrics sample buffer.
metrics.Read(c.rmSampleBuf)
// Update all our metrics from rmSampleBuf.
for i, sample := range c.rmSampleBuf {
// N.B. switch on concrete type because it's significantly more efficient
// than checking for the Counter and Gauge interface implementations. In
@@ -157,7 +191,6 @@ func (c *goCollector) Collect(ch chan<- Metric) {
panic("unexpected metric type")
}
}
// ms is a dummy MemStats that we populate ourselves so that we can
// populate the old metrics from it.
var ms runtime.MemStats
@@ -280,13 +313,27 @@ type batchHistogram struct {
// but Write calls may operate concurrently with updates.
// Contention between these two sources should be rare.
mu sync.Mutex
buckets []float64 // Inclusive lower bounds.
buckets []float64 // Inclusive lower bounds, like runtime/metrics.
counts []uint64
sum float64 // Used if hasSum is true.
}
func newBatchHistogram(desc *Desc, hasSum bool) *batchHistogram {
h := &batchHistogram{desc: desc, hasSum: hasSum}
// newBatchHistogram creates a new batch histogram value with the given
// Desc, buckets, and whether or not it has an exact sum available.
//
// buckets must always be from the runtime/metrics package, following
// the same conventions.
func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram {
h := &batchHistogram{
desc: desc,
buckets: buckets,
// Because buckets follows runtime/metrics conventions, there's
// 1 more value in the buckets list than there are buckets represented,
// because in runtime/metrics, the bucket values represent *boundaries*,
// and non-Inf boundaries are inclusive lower bounds for that bucket.
counts: make([]uint64, len(buckets)-1),
hasSum: hasSum,
}
h.init(h)
return h
}
@@ -294,28 +341,25 @@ func newBatchHistogram(desc *Desc, hasSum bool) *batchHistogram {
// update updates the batchHistogram from a runtime/metrics histogram.
//
// sum must be provided if the batchHistogram was created to have an exact sum.
// h.buckets must be a strict subset of his.Buckets.
func (h *batchHistogram) update(his *metrics.Float64Histogram, sum float64) {
counts, buckets := his.Counts, his.Buckets
// Skip a -Inf bucket altogether. It's not clear how to represent that.
if math.IsInf(buckets[0], -1) {
buckets = buckets[1:]
counts = counts[1:]
}
h.mu.Lock()
defer h.mu.Unlock()
// Check if we're initialized.
if h.buckets == nil {
// Make copies of counts and buckets. It's really important
// that we don't retain his.Counts or his.Buckets anywhere since
// it's going to get reused.
h.buckets = make([]float64, len(buckets))
copy(h.buckets, buckets)
h.counts = make([]uint64, len(counts))
// Clear buckets.
for i := range h.counts {
h.counts[i] = 0
}
// Copy and reduce buckets.
var j int
for i, count := range counts {
h.counts[j] += count
if buckets[i+1] == h.buckets[j+1] {
j++
}
}
copy(h.counts, counts)
if h.hasSum {
h.sum = sum
}
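The collector now queries runtime/metrics once at construction time, relying on the guarantee that histogram bucket boundaries stay fixed for the lifetime of the process. A standard-library sketch (Go 1.17+) of that initial read:

```go
package main

import (
	"fmt"
	"runtime/metrics"
)

func main() {
	// Collect a sample for every Float64Histogram metric, as NewGoCollector
	// now does to learn the bucket boundaries up front.
	var samples []metrics.Sample
	for _, d := range metrics.All() {
		if d.Kind == metrics.KindFloat64Histogram {
			samples = append(samples, metrics.Sample{Name: d.Name})
		}
	}
	metrics.Read(samples)

	for _, s := range samples {
		h := s.Value.Float64Histogram()
		// Buckets holds one more boundary than Counts has entries; the first
		// boundary may be -Inf and the last +Inf, per runtime/metrics conventions.
		fmt.Printf("%s: %d counts, %d boundaries\n", s.Name, len(h.Counts), len(h.Buckets))
	}
}
```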

@@ -17,6 +17,7 @@
package internal
import (
"math"
"path"
"runtime/metrics"
"strings"
@@ -75,3 +76,67 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool)
}
return namespace, subsystem, name, valid
}
// RuntimeMetricsBucketsForUnit takes a set of buckets obtained for a runtime/metrics histogram
// type (so, lower-bound inclusive) and a unit from a runtime/metrics name, and produces
// a reduced set of buckets. This function always removes any -Inf bucket as it's represented
// as the bottom-most upper-bound inclusive bucket in Prometheus.
func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 {
switch unit {
case "bytes":
// Rebucket as powers of 2.
return rebucketExp(buckets, 2)
case "seconds":
// Rebucket as powers of 10 and then merge all buckets greater
// than 1 second into the +Inf bucket.
b := rebucketExp(buckets, 10)
for i := range b {
if b[i] <= 1 {
continue
}
b[i] = math.Inf(1)
b = b[:i+1]
break
}
return b
}
return buckets
}
// rebucketExp takes a list of bucket boundaries (lower bound inclusive) and
// downsamples the buckets to those a multiple of base apart. The end result
// is a roughly exponential (in many cases, perfectly exponential) bucketing
// scheme.
func rebucketExp(buckets []float64, base float64) []float64 {
bucket := buckets[0]
var newBuckets []float64
// We may see a -Inf here, in which case, add it and skip it
// since we risk producing NaNs otherwise.
//
// We need to preserve -Inf values to maintain runtime/metrics
// conventions. We'll strip it out later.
if bucket == math.Inf(-1) {
newBuckets = append(newBuckets, bucket)
buckets = buckets[1:]
bucket = buckets[0]
}
// From now on, bucket should always have a non-Inf value because
// Infs are only ever at the ends of the bucket lists, so
// arithmetic operations on it are non-NaN.
for i := 1; i < len(buckets); i++ {
if bucket >= 0 && buckets[i] < bucket*base {
// The next bucket we want to include is at least bucket*base.
continue
} else if bucket < 0 && buckets[i] < bucket/base {
// In this case the bucket we're targeting is negative, and since
// we're ascending through buckets here, we need to divide to get
// closer to zero exponentially.
continue
}
// The +Inf bucket will always be the last one, and we'll always
// end up including it here because bucket
newBuckets = append(newBuckets, bucket)
bucket = buckets[i]
}
return append(newBuckets, bucket)
}
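To illustrate the reduction for the "seconds" unit: keep roughly power-of-ten boundaries, then fold everything above one second into +Inf. A simplified, self-contained sketch (it omits the -Inf and negative-boundary handling of the vendored helper, and the input boundaries are made up):

```go
package main

import (
	"fmt"
	"math"
)

// reduceSeconds sketches the "seconds" case: keep boundaries that are at
// least 10x apart, then merge everything above 1s into a single +Inf bucket.
func reduceSeconds(buckets []float64) []float64 {
	out := []float64{buckets[0]}
	cur := buckets[0]
	for _, b := range buckets[1:] {
		if b < cur*10 {
			continue // too close to the previous kept boundary
		}
		out = append(out, b)
		cur = b
	}
	for i, b := range out {
		if b > 1 {
			out[i] = math.Inf(1)
			out = out[:i+1]
			break
		}
	}
	return out
}

func main() {
	// Hypothetical runtime/metrics-style boundaries (inclusive lower bounds).
	in := []float64{1e-9, 1e-8, 5e-8, 1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 10, math.Inf(1)}
	fmt.Println(reduceSeconds(in))
	// -> [1e-09 1e-08 1e-07 1e-06 1e-05 0.0001 0.001 0.01 0.1 1 +Inf]
}
```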

@@ -4,11 +4,26 @@ run:
tests: true #Default
linters:
# Disable everything by default so upgrades to not include new "default
# enabled" linters.
disable-all: true
# Specifically enable linters we want to use.
enable:
- misspell
- goimports
- revive
- deadcode
- errcheck
- gofmt
- goimports
- gosimple
- govet
- ineffassign
- misspell
- revive
- staticcheck
- structcheck
- typecheck
- unused
- varcheck
issues:
exclude-rules:

@@ -8,6 +8,12 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
## [Unreleased]
## [1.4.1] - 2022-02-16
### Fixed
- Fix race condition in reading the dropped spans number for the `BatchSpanProcessor`. (#2615)
## [1.4.0] - 2022-02-11
### Added
@@ -1683,7 +1689,8 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.4.0...HEAD
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.4.1...HEAD
[1.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.1
[1.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.0
[1.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.3.0
[1.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.2.0

@@ -12,13 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
EXAMPLES := $(shell ./get_main_pkgs.sh ./example)
TOOLS_MOD_DIR := ./internal/tools
# All source code and documents. Used in spell check.
ALL_DOCS := $(shell find . -name '*.md' -type f | sort)
# All directories with go.mod files related to opentelemetry library. Used for building, testing and linting.
ALL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(shell find . -type f -name 'go.mod' -exec dirname {} \; | egrep -v '^./example' | sort)) $(shell find ./example -type f -name 'go.mod' -exec dirname {} \; | sort)
ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort)
OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS))
ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | egrep -v '^./example|^$(TOOLS_MOD_DIR)' | sort)
GO = go
@@ -27,8 +25,8 @@ TIMEOUT = 60
.DEFAULT_GOAL := precommit
.PHONY: precommit ci
precommit: dependabot-check license-check lint build examples test-default
ci: precommit check-clean-work-tree test-coverage
precommit: license-check misspell go-mod-tidy golangci-lint-fix test-default
ci: dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage
# Tools
@@ -72,51 +70,47 @@ tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(POR
# Build
.PHONY: examples generate build
examples:
@set -e; for dir in $(EXAMPLES); do \
echo "$(GO) build $${dir}/..."; \
(cd "$${dir}" && \
$(GO) build .); \
done
.PHONY: generate build
generate: $(STRINGER) $(PORTO)
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "$(GO) generate $${dir}/..."; \
(cd "$${dir}" && \
PATH="$(TOOLS):$${PATH}" $(GO) generate ./... && \
$(PORTO) -w .); \
done
generate: $(OTEL_GO_MOD_DIRS:%=generate/%)
generate/%: DIR=$*
generate/%: | $(STRINGER) $(PORTO)
@echo "$(GO) generate $(DIR)/..." \
&& cd $(DIR) \
&& PATH="$(TOOLS):$${PATH}" $(GO) generate ./... && $(PORTO) -w .
build: generate
# Build all package code including testing code.
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "$(GO) build $${dir}/..."; \
(cd "$${dir}" && \
$(GO) build ./... && \
$(GO) list ./... \
| grep -v third_party \
| xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null); \
done
build: generate $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%)
build/%: DIR=$*
build/%:
@echo "$(GO) build $(DIR)/..." \
&& cd $(DIR) \
&& $(GO) build ./...
build-tests/%: DIR=$*
build-tests/%:
@echo "$(GO) build tests $(DIR)/..." \
&& cd $(DIR) \
&& $(GO) list ./... \
| grep -v third_party \
| xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null
# Tests
TEST_TARGETS := test-default test-bench test-short test-verbose test-race
.PHONY: $(TEST_TARGETS) test
test-default: ARGS=-v -race
test-default test-race: ARGS=-race
test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=.
test-short: ARGS=-short
test-verbose: ARGS=-v
test-race: ARGS=-race
test-verbose: ARGS=-v -race
$(TEST_TARGETS): test
test:
@set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $${dir}/..."; \
(cd "$${dir}" && \
$(GO) list ./... \
| grep -v third_party \
| xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS)); \
done
test: $(OTEL_GO_MOD_DIRS:%=test/%)
test/%: DIR=$*
test/%:
@echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $(DIR)/..." \
&& cd $(DIR) \
&& $(GO) list ./... \
| grep -v third_party \
| xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS)
COVERAGE_MODE = atomic
COVERAGE_PROFILE = coverage.out
@@ -134,32 +128,42 @@ test-coverage: | $(GOCOVMERGE)
done; \
$(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt
.PHONY: golangci-lint golangci-lint-fix
golangci-lint-fix: ARGS=--fix
golangci-lint-fix: golangci-lint
golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%)
golangci-lint/%: DIR=$*
golangci-lint/%: | $(GOLANGCI_LINT)
@echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \
&& cd $(DIR) \
&& $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS)
.PHONY: crosslink
crosslink: | $(CROSSLINK)
@echo "cross-linking all go modules" \
&& $(CROSSLINK)
.PHONY: go-mod-tidy
go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%)
go-mod-tidy/%: DIR=$*
go-mod-tidy/%: | crosslink
@echo "$(GO) mod tidy in $(DIR)" \
&& cd $(DIR) \
&& $(GO) mod tidy
.PHONY: lint-modules
lint-modules: go-mod-tidy
.PHONY: lint
lint: misspell lint-modules | $(GOLANGCI_LINT)
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "golangci-lint in $${dir}"; \
(cd "$${dir}" && \
$(GOLANGCI_LINT) run --fix && \
$(GOLANGCI_LINT) run); \
done
lint: misspell lint-modules golangci-lint
.PHONY: vanity-import-check
vanity-import-check: | $(PORTO)
$(PORTO) --include-internal -l .
@$(PORTO) --include-internal -l .
.PHONY: misspell
misspell: | $(MISSPELL)
$(MISSPELL) -w $(ALL_DOCS)
.PHONY: lint-modules
lint-modules: | $(CROSSLINK)
set -e; for dir in $(ALL_GO_MOD_DIRS) $(TOOLS_MOD_DIR); do \
echo "$(GO) mod tidy in $${dir}"; \
(cd "$${dir}" && \
$(GO) mod tidy); \
done
echo "cross-linking all go modules"
$(CROSSLINK)
@$(MISSPELL) -w $(ALL_DOCS)
.PHONY: license-check
license-check:
@@ -171,17 +175,18 @@ license-check:
exit 1; \
fi
DEPENDABOT_PATH=./.github/dependabot.yml
.PHONY: dependabot-check
dependabot-check:
@result=$$( \
for f in $$( find . -type f -name go.mod -exec dirname {} \; | sed 's/^.//' ); \
do grep -q "directory: \+$$f" .github/dependabot.yml \
do grep -q "directory: \+$$f" $(DEPENDABOT_PATH) \
|| echo "$$f"; \
done; \
); \
if [ -n "$$result" ]; then \
echo "missing go.mod dependabot check:"; echo "$$result"; \
echo "new modules need to be added to the .github/dependabot.yml file"; \
echo "missing dependabot entry:"; echo "$$result"; \
echo "new modules need to be added to the $(DEPENDABOT_PATH) file"; \
exit 1; \
fi

@@ -5,10 +5,10 @@ go 1.16
require (
github.com/google/go-cmp v0.5.7
github.com/stretchr/testify v1.7.0
go.opentelemetry.io/otel v1.4.0
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.0
go.opentelemetry.io/otel/sdk v1.4.0
go.opentelemetry.io/otel/trace v1.4.0
go.opentelemetry.io/otel v1.4.1
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1
go.opentelemetry.io/otel/sdk v1.4.1
go.opentelemetry.io/otel/trace v1.4.1
go.opentelemetry.io/proto/otlp v0.12.0
google.golang.org/grpc v1.44.0
google.golang.org/protobuf v1.27.1

@@ -4,10 +4,10 @@ go 1.16
require (
github.com/stretchr/testify v1.7.0
go.opentelemetry.io/otel v1.4.0
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.0
go.opentelemetry.io/otel/sdk v1.4.0
go.opentelemetry.io/otel v1.4.1
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1
go.opentelemetry.io/otel/sdk v1.4.1
go.opentelemetry.io/proto/otlp v0.12.0
go.uber.org/goleak v1.1.12
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013

@@ -4,11 +4,11 @@ go 1.16
require (
github.com/stretchr/testify v1.7.0
go.opentelemetry.io/otel v1.4.0
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.0
go.opentelemetry.io/otel/sdk v1.4.0
go.opentelemetry.io/otel/trace v1.4.0
go.opentelemetry.io/otel v1.4.1
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1
go.opentelemetry.io/otel/sdk v1.4.1
go.opentelemetry.io/otel/trace v1.4.1
go.opentelemetry.io/proto/otlp v0.12.0
google.golang.org/protobuf v1.27.1
)

@@ -7,7 +7,7 @@ require (
github.com/go-logr/stdr v1.2.2
github.com/google/go-cmp v0.5.7
github.com/stretchr/testify v1.7.0
go.opentelemetry.io/otel/trace v1.4.0
go.opentelemetry.io/otel/trace v1.4.1
)
replace go.opentelemetry.io/otel => ./

@@ -250,7 +250,7 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error {
}
if l := len(bsp.batch); l > 0 {
global.Debug("exporting spans", "count", len(bsp.batch), "dropped", bsp.dropped)
global.Debug("exporting spans", "count", len(bsp.batch), "dropped", atomic.LoadUint32(&bsp.dropped))
err := bsp.e.ExportSpans(ctx, bsp.batch)
// A new batch is always created after exporting, even if the batch failed to be exported.

@@ -43,7 +43,7 @@ replace go.opentelemetry.io/otel/trace => ./
require (
github.com/google/go-cmp v0.5.7
github.com/stretchr/testify v1.7.0
go.opentelemetry.io/otel v1.4.0
go.opentelemetry.io/otel v1.4.1
)
replace go.opentelemetry.io/otel/example/passthrough => ../example/passthrough

@@ -16,5 +16,5 @@ package otel // import "go.opentelemetry.io/otel"
// Version is the current release version of OpenTelemetry in use.
func Version() string {
return "1.4.0"
return "1.4.1"
}

@@ -14,7 +14,7 @@
module-sets:
stable-v1:
version: v1.4.0
version: v1.4.1
modules:
- go.opentelemetry.io/otel
- go.opentelemetry.io/otel/bridge/opentracing
@@ -51,7 +51,7 @@ module-sets:
modules:
- go.opentelemetry.io/otel/schema
bridge:
version: v0.27.0
version: v0.27.1
modules:
- go.opentelemetry.io/otel/bridge/opencensus
- go.opentelemetry.io/otel/bridge/opencensus/test

vendor/modules.txt

@@ -45,7 +45,7 @@ github.com/compose-spec/godotenv
# github.com/containerd/console v1.0.3
## explicit
github.com/containerd/console
# github.com/containerd/containerd v1.6.0-rc.2
# github.com/containerd/containerd v1.6.0
## explicit
github.com/containerd/containerd/api/services/content/v1
github.com/containerd/containerd/archive/compression
@@ -276,7 +276,7 @@ github.com/inconshreveable/mousetrap
github.com/json-iterator/go
# github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0
## explicit
# github.com/klauspost/compress v1.14.2
# github.com/klauspost/compress v1.14.3
github.com/klauspost/compress
github.com/klauspost/compress/fse
github.com/klauspost/compress/huff0
@@ -293,7 +293,7 @@ github.com/miekg/pkcs11
github.com/mitchellh/go-wordwrap
# github.com/mitchellh/mapstructure v1.4.2
github.com/mitchellh/mapstructure
# github.com/moby/buildkit v0.10.0-rc1.0.20220215175614-1e6032cec26b
# github.com/moby/buildkit v0.10.0-rc1.0.20220223192213-b124b0c3f19c
## explicit
github.com/moby/buildkit/api/services/control
github.com/moby/buildkit/api/types
@@ -350,7 +350,7 @@ github.com/moby/spdystream
github.com/moby/spdystream/spdy
# github.com/moby/sys/mount v0.3.0
github.com/moby/sys/mount
# github.com/moby/sys/mountinfo v0.5.0
# github.com/moby/sys/mountinfo v0.6.0
github.com/moby/sys/mountinfo
# github.com/moby/sys/signal v0.6.0
github.com/moby/sys/signal
@@ -381,7 +381,7 @@ github.com/pelletier/go-toml
github.com/pkg/errors
# github.com/pmezard/go-difflib v1.0.0
github.com/pmezard/go-difflib/difflib
# github.com/prometheus/client_golang v1.12.0
# github.com/prometheus/client_golang v1.12.1
github.com/prometheus/client_golang/prometheus
github.com/prometheus/client_golang/prometheus/internal
github.com/prometheus/client_golang/prometheus/promhttp
@@ -456,7 +456,7 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/inte
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace
# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
# go.opentelemetry.io/otel v1.4.0
# go.opentelemetry.io/otel v1.4.1
## explicit
go.opentelemetry.io/otel
go.opentelemetry.io/otel/attribute
@@ -467,15 +467,15 @@ go.opentelemetry.io/otel/internal/baggage
go.opentelemetry.io/otel/internal/global
go.opentelemetry.io/otel/propagation
go.opentelemetry.io/otel/semconv/v1.7.0
# go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.0
# go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1
go.opentelemetry.io/otel/exporters/otlp/internal/retry
# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.0
# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1
go.opentelemetry.io/otel/exporters/otlp/otlptrace
go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig
go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform
# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.4.0
# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.4.1
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.4.0
# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.4.1
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp
# go.opentelemetry.io/otel/internal/metric v0.27.0
go.opentelemetry.io/otel/internal/metric/global
@@ -486,13 +486,13 @@ go.opentelemetry.io/otel/metric/global
go.opentelemetry.io/otel/metric/number
go.opentelemetry.io/otel/metric/sdkapi
go.opentelemetry.io/otel/metric/unit
# go.opentelemetry.io/otel/sdk v1.4.0
# go.opentelemetry.io/otel/sdk v1.4.1
go.opentelemetry.io/otel/sdk/instrumentation
go.opentelemetry.io/otel/sdk/internal
go.opentelemetry.io/otel/sdk/internal/env
go.opentelemetry.io/otel/sdk/resource
go.opentelemetry.io/otel/sdk/trace
# go.opentelemetry.io/otel/trace v1.4.0
# go.opentelemetry.io/otel/trace v1.4.1
## explicit
go.opentelemetry.io/otel/trace
# go.opentelemetry.io/proto/otlp v0.12.0
@@ -669,7 +669,7 @@ gopkg.in/inf.v0
gopkg.in/yaml.v2
# gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
gopkg.in/yaml.v3
# k8s.io/api v0.23.3 => k8s.io/api v0.22.4
# k8s.io/api v0.23.4 => k8s.io/api v0.22.4
## explicit
k8s.io/api/admissionregistration/v1
k8s.io/api/admissionregistration/v1beta1
@@ -714,7 +714,7 @@ k8s.io/api/scheduling/v1beta1
k8s.io/api/storage/v1
k8s.io/api/storage/v1alpha1
k8s.io/api/storage/v1beta1
# k8s.io/apimachinery v0.23.3 => k8s.io/apimachinery v0.22.4
# k8s.io/apimachinery v0.23.4 => k8s.io/apimachinery v0.22.4
## explicit
k8s.io/apimachinery/pkg/api/errors
k8s.io/apimachinery/pkg/api/meta
@@ -756,7 +756,7 @@ k8s.io/apimachinery/pkg/version
k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/third_party/forked/golang/netutil
k8s.io/apimachinery/third_party/forked/golang/reflect
# k8s.io/client-go v0.23.3 => k8s.io/client-go v0.22.4
# k8s.io/client-go v0.23.4 => k8s.io/client-go v0.22.4
## explicit
k8s.io/client-go/applyconfigurations/admissionregistration/v1
k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1