vendor: update buildkit to opentelemetry support

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
Author: Tonis Tiigi
Date:   2021-06-15 21:02:39 -07:00
Commit: 334c93fbbe (parent 6ba080d337)
829 changed files with 89541 additions and 24438 deletions

vendor/google.golang.org/grpc/encoding/encoding.go vendored

@@ -19,7 +19,10 @@
 // Package encoding defines the interface for the compressor and codec, and
 // functions to register and retrieve compressors and codecs.
 //
-// This package is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
 package encoding
 
 import (
@@ -46,10 +49,15 @@ type Compressor interface {
 	// coding header. The result must be static; the result cannot change
 	// between calls.
 	Name() string
-	// EXPERIMENTAL: if a Compressor implements
+	// If a Compressor implements
 	// DecompressedSize(compressedBytes []byte) int, gRPC will call it
 	// to determine the size of the buffer allocated for the result of decompression.
 	// Return -1 to indicate unknown size.
+	//
+	// Experimental
+	//
+	// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+	// later release.
 }
 
 var registeredCompressor = make(map[string]Compressor)
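
Aside: the Compressor contract above, including the optional DecompressedSize hook that gRPC discovers by type assertion, can be satisfied by something as small as the following sketch. The passthrough "nop" compressor and its package name are illustrative only and are not part of this change:

package nopcompressor

import (
	"io"

	"google.golang.org/grpc/encoding"
)

// passthrough copies bytes unchanged, but wires up every hook described
// above, including the optional DecompressedSize method that gRPC discovers
// by type assertion.
type passthrough struct{}

func init() {
	// Registration makes the compressor selectable by name on a per-call basis.
	encoding.RegisterCompressor(passthrough{})
}

type nopWriteCloser struct{ io.Writer }

func (nopWriteCloser) Close() error { return nil }

// Compress returns a WriteCloser that forwards writes untouched.
func (passthrough) Compress(w io.Writer) (io.WriteCloser, error) { return nopWriteCloser{w}, nil }

// Decompress hands the stream back as-is.
func (passthrough) Decompress(r io.Reader) (io.Reader, error) { return r, nil }

// DecompressedSize: with no real compression, output size equals input size.
func (passthrough) DecompressedSize(compressedBytes []byte) int { return len(compressedBytes) }

// Name must be static between calls; "nop" is an illustrative placeholder.
func (passthrough) Name() string { return "nop" }

Callers would then opt in per RPC with grpc.UseCompressor("nop"), exactly as they do for the gzip compressor below.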

vendor/google.golang.org/grpc/encoding/gzip/gzip.go generated vendored Normal file, 133 additions

@@ -0,0 +1,133 @@
/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package gzip implements and registers the gzip compressor
// during the initialization.
//
// Experimental
//
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
// later release.
package gzip

import (
	"compress/gzip"
	"encoding/binary"
	"fmt"
	"io"
	"io/ioutil"
	"sync"

	"google.golang.org/grpc/encoding"
)

// Name is the name registered for the gzip compressor.
const Name = "gzip"

func init() {
	c := &compressor{}
	c.poolCompressor.New = func() interface{} {
		return &writer{Writer: gzip.NewWriter(ioutil.Discard), pool: &c.poolCompressor}
	}
	encoding.RegisterCompressor(c)
}

type writer struct {
	*gzip.Writer
	pool *sync.Pool
}

// SetLevel updates the registered gzip compressor to use the compression level specified (gzip.HuffmanOnly is not supported).
// NOTE: this function must only be called during initialization time (i.e. in an init() function),
// and is not thread-safe.
//
// The error returned will be nil if the specified level is valid.
func SetLevel(level int) error {
	if level < gzip.DefaultCompression || level > gzip.BestCompression {
		return fmt.Errorf("grpc: invalid gzip compression level: %d", level)
	}
	c := encoding.GetCompressor(Name).(*compressor)
	c.poolCompressor.New = func() interface{} {
		w, err := gzip.NewWriterLevel(ioutil.Discard, level)
		if err != nil {
			panic(err)
		}
		return &writer{Writer: w, pool: &c.poolCompressor}
	}
	return nil
}

func (c *compressor) Compress(w io.Writer) (io.WriteCloser, error) {
	z := c.poolCompressor.Get().(*writer)
	z.Writer.Reset(w)
	return z, nil
}

func (z *writer) Close() error {
	defer z.pool.Put(z)
	return z.Writer.Close()
}

type reader struct {
	*gzip.Reader
	pool *sync.Pool
}

func (c *compressor) Decompress(r io.Reader) (io.Reader, error) {
	z, inPool := c.poolDecompressor.Get().(*reader)
	if !inPool {
		newZ, err := gzip.NewReader(r)
		if err != nil {
			return nil, err
		}
		return &reader{Reader: newZ, pool: &c.poolDecompressor}, nil
	}
	if err := z.Reset(r); err != nil {
		c.poolDecompressor.Put(z)
		return nil, err
	}
	return z, nil
}

func (z *reader) Read(p []byte) (n int, err error) {
	n, err = z.Reader.Read(p)
	if err == io.EOF {
		z.pool.Put(z)
	}
	return n, err
}

// RFC1952 specifies that the last four bytes "contains the size of
// the original (uncompressed) input data modulo 2^32."
// gRPC has a max message size of 2GB so we don't need to worry about wraparound.
func (c *compressor) DecompressedSize(buf []byte) int {
	last := len(buf)
	if last < 4 {
		return -1
	}
	return int(binary.LittleEndian.Uint32(buf[last-4 : last]))
}

func (c *compressor) Name() string {
	return Name
}

type compressor struct {
	poolCompressor   sync.Pool
	poolDecompressor sync.Pool
}
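
For context, this is roughly how client code would consume the vendored gzip package. The helper name, the package name, and the use of an init function are assumptions for illustration; SetLevel and grpc.UseCompressor are the real entry points:

package example

import (
	"compress/gzip" // standard-library constants such as gzip.BestSpeed

	"google.golang.org/grpc"
	grpcgzip "google.golang.org/grpc/encoding/gzip" // importing registers the "gzip" compressor
)

// init tightens the compression level. Per the SetLevel docs above, this must
// run during initialization, before any RPC uses the compressor.
func init() {
	if err := grpcgzip.SetLevel(gzip.BestSpeed); err != nil {
		panic(err)
	}
}

// withGzip is a placeholder helper: pass the returned option to Invoke or to a
// generated stub method to opt a single RPC into the registered compressor.
func withGzip() grpc.CallOption {
	return grpc.UseCompressor(grpcgzip.Name)
}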

vendor/google.golang.org/grpc/encoding/proto/proto.go vendored

@@ -21,8 +21,7 @@
 package proto
 
 import (
-	"math"
-	"sync"
+	"fmt"
 
 	"github.com/golang/protobuf/proto"
 	"google.golang.org/grpc/encoding"
@@ -38,73 +37,22 @@ func init() {
 // codec is a Codec implementation with protobuf. It is the default codec for gRPC.
 type codec struct{}
 
-type cachedProtoBuffer struct {
-	lastMarshaledSize uint32
-	proto.Buffer
-}
-
-func capToMaxInt32(val int) uint32 {
-	if val > math.MaxInt32 {
-		return uint32(math.MaxInt32)
-	}
-	return uint32(val)
-}
-
-func marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) {
-	protoMsg := v.(proto.Message)
-	newSlice := make([]byte, 0, cb.lastMarshaledSize)
-
-	cb.SetBuf(newSlice)
-	cb.Reset()
-	if err := cb.Marshal(protoMsg); err != nil {
-		return nil, err
-	}
-	out := cb.Bytes()
-	cb.lastMarshaledSize = capToMaxInt32(len(out))
-	return out, nil
-}
-
 func (codec) Marshal(v interface{}) ([]byte, error) {
-	if pm, ok := v.(proto.Marshaler); ok {
-		// object can marshal itself, no need for buffer
-		return pm.Marshal()
+	vv, ok := v.(proto.Message)
+	if !ok {
+		return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v)
 	}
-
-	cb := protoBufferPool.Get().(*cachedProtoBuffer)
-	out, err := marshal(v, cb)
-
-	// put back buffer and lose the ref to the slice
-	cb.SetBuf(nil)
-	protoBufferPool.Put(cb)
-	return out, err
+	return proto.Marshal(vv)
 }
 
 func (codec) Unmarshal(data []byte, v interface{}) error {
-	protoMsg := v.(proto.Message)
-	protoMsg.Reset()
-
-	if pu, ok := protoMsg.(proto.Unmarshaler); ok {
-		// object can unmarshal itself, no need for buffer
-		return pu.Unmarshal(data)
+	vv, ok := v.(proto.Message)
+	if !ok {
+		return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v)
 	}
-
-	cb := protoBufferPool.Get().(*cachedProtoBuffer)
-	cb.SetBuf(data)
-	err := cb.Unmarshal(protoMsg)
-	cb.SetBuf(nil)
-	protoBufferPool.Put(cb)
-	return err
+	return proto.Unmarshal(data, vv)
 }
 
 func (codec) Name() string {
 	return Name
 }
-
-var protoBufferPool = &sync.Pool{
-	New: func() interface{} {
-		return &cachedProtoBuffer{
-			Buffer:            proto.Buffer{},
-			lastMarshaledSize: 16,
-		}
-	},
-}
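
To see the simplified codec end to end, a small round trip through encoding.GetCodec is enough. The function below is illustrative only and not part of the diff; the health-check request is just a convenient proto.Message stand-in:

package example

import (
	"log"

	"github.com/golang/protobuf/proto"
	"google.golang.org/grpc/encoding"
	_ "google.golang.org/grpc/encoding/proto" // registers the default "proto" codec
	"google.golang.org/grpc/health/grpc_health_v1"
)

// roundTrip exercises the registered codec directly. After this change,
// anything that does not implement proto.Message is rejected with an error
// instead of going through the old pooled proto.Buffer path.
func roundTrip() {
	c := encoding.GetCodec("proto")

	in := &grpc_health_v1.HealthCheckRequest{Service: "demo"}
	data, err := c.Marshal(in)
	if err != nil {
		log.Fatalf("marshal: %v", err)
	}

	out := new(grpc_health_v1.HealthCheckRequest)
	if err := c.Unmarshal(data, out); err != nil {
		log.Fatalf("unmarshal: %v", err)
	}
	_ = proto.Equal(in, out) // true for a faithful round trip
}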