Bump buildkit to master and fix versions incompatible with go mod 1.13

Bump github.com/gogo/googleapis to v1.3.2
Bump github.com/docker/cli to master

Signed-off-by: Silvin Lubecki <silvin.lubecki@docker.com>
Silvin Lubecki
2020-03-03 16:46:38 +01:00
parent 54549235da
commit bbc902b4d6
1384 changed files with 186012 additions and 165455 deletions

View File

@@ -21,11 +21,18 @@ import (
"time"
)
// PassiveClock allows for injecting fake or real clocks into code
// that needs to read the current time but does not support scheduling
// activity in the future.
type PassiveClock interface {
Now() time.Time
Since(time.Time) time.Duration
}
// Clock allows for injecting fake or real clocks into code that
// needs to do arbitrary things based on time.
type Clock interface {
Now() time.Time
Since(time.Time) time.Duration
PassiveClock
After(time.Duration) <-chan time.Time
NewTimer(time.Duration) Timer
Sleep(time.Duration)
@@ -66,10 +73,15 @@ func (RealClock) Sleep(d time.Duration) {
time.Sleep(d)
}
// FakeClock implements Clock, but returns an arbitrary time.
type FakeClock struct {
// FakePassiveClock implements PassiveClock, but returns an arbitrary time.
type FakePassiveClock struct {
lock sync.RWMutex
time time.Time
}
// FakeClock implements Clock, but returns an arbitrary time.
type FakeClock struct {
FakePassiveClock
// waiters are waiting for the fake time to pass their specified time
waiters []fakeClockWaiter
@@ -80,29 +92,41 @@ type fakeClockWaiter struct {
stepInterval time.Duration
skipIfBlocked bool
destChan chan time.Time
fired bool
}
func NewFakeClock(t time.Time) *FakeClock {
return &FakeClock{
func NewFakePassiveClock(t time.Time) *FakePassiveClock {
return &FakePassiveClock{
time: t,
}
}
func NewFakeClock(t time.Time) *FakeClock {
return &FakeClock{
FakePassiveClock: *NewFakePassiveClock(t),
}
}
// Now returns f's time.
func (f *FakeClock) Now() time.Time {
func (f *FakePassiveClock) Now() time.Time {
f.lock.RLock()
defer f.lock.RUnlock()
return f.time
}
// Since returns time since the time in f.
func (f *FakeClock) Since(ts time.Time) time.Duration {
func (f *FakePassiveClock) Since(ts time.Time) time.Duration {
f.lock.RLock()
defer f.lock.RUnlock()
return f.time.Sub(ts)
}
// Sets the time.
func (f *FakePassiveClock) SetTime(t time.Time) {
f.lock.Lock()
defer f.lock.Unlock()
f.time = t
}
// Fake version of time.After(d).
func (f *FakeClock) After(d time.Duration) <-chan time.Time {
f.lock.Lock()
@@ -175,12 +199,10 @@ func (f *FakeClock) setTimeLocked(t time.Time) {
if w.skipIfBlocked {
select {
case w.destChan <- t:
w.fired = true
default:
}
} else {
w.destChan <- t
w.fired = true
}
if w.stepInterval > 0 {
@@ -287,36 +309,50 @@ func (f *fakeTimer) C() <-chan time.Time {
return f.waiter.destChan
}
// Stop stops the timer and returns true if the timer has not yet fired, or false otherwise.
// Stop conditionally stops the timer. If the timer has neither fired
// nor been stopped then this call stops the timer and returns true,
// otherwise this call returns false. This is like time.Timer::Stop.
func (f *fakeTimer) Stop() bool {
f.fakeClock.lock.Lock()
defer f.fakeClock.lock.Unlock()
newWaiters := make([]fakeClockWaiter, 0, len(f.fakeClock.waiters))
for i := range f.fakeClock.waiters {
w := &f.fakeClock.waiters[i]
if w != &f.waiter {
newWaiters = append(newWaiters, *w)
// The timer has already fired or been stopped, unless it is found
// among the clock's waiters.
stopped := false
oldWaiters := f.fakeClock.waiters
newWaiters := make([]fakeClockWaiter, 0, len(oldWaiters))
seekChan := f.waiter.destChan
for i := range oldWaiters {
// Identify the timer's fakeClockWaiter by the identity of the
// destination channel, nothing else is necessarily unique and
// constant since the timer's creation.
if oldWaiters[i].destChan == seekChan {
stopped = true
} else {
newWaiters = append(newWaiters, oldWaiters[i])
}
}
f.fakeClock.waiters = newWaiters
return !f.waiter.fired
return stopped
}
// Reset resets the timer to the fake clock's "now" + d. It returns true if the timer has not yet
// fired, or false otherwise.
// Reset conditionally updates the firing time of the timer. If the
// timer has neither fired nor been stopped then this call resets the
// timer to the fake clock's "now" + d and returns true, otherwise
// this call returns false. This is like time.Timer::Reset.
func (f *fakeTimer) Reset(d time.Duration) bool {
f.fakeClock.lock.Lock()
defer f.fakeClock.lock.Unlock()
active := !f.waiter.fired
f.waiter.fired = false
f.waiter.targetTime = f.fakeClock.time.Add(d)
return active
waiters := f.fakeClock.waiters
seekChan := f.waiter.destChan
for i := range waiters {
if waiters[i].destChan == seekChan {
waiters[i].targetTime = f.fakeClock.time.Add(d)
return true
}
}
return false
}
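For illustration (not part of this diff), a minimal sketch of driving the fake clock and timer above from a test; it assumes the vendored import path k8s.io/apimachinery/pkg/util/clock and uses Step, which this hunk does not show but which belongs to the same FakeClock type:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/clock"
)

func main() {
	// Start the fake clock at a fixed instant.
	fc := clock.NewFakeClock(time.Date(2020, 3, 3, 0, 0, 0, 0, time.UTC))

	// NewTimer registers a waiter identified by its destination channel.
	t := fc.NewTimer(5 * time.Second)

	// Advancing the fake time past the target fires the timer.
	fc.Step(10 * time.Second)
	fmt.Println("fired at:", (<-t.C()).UTC())

	// Stop reports false because the timer has already fired.
	fmt.Println("stopped:", t.Stop())
}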
type Ticker interface {

View File

@@ -19,6 +19,8 @@ package errors
import (
"errors"
"fmt"
"k8s.io/apimachinery/pkg/util/sets"
)
// MessageCountMap contains occurrence for each error message.
@@ -67,12 +69,38 @@ func (agg aggregate) Error() string {
if len(agg) == 1 {
return agg[0].Error()
}
result := fmt.Sprintf("[%s", agg[0].Error())
for i := 1; i < len(agg); i++ {
result += fmt.Sprintf(", %s", agg[i].Error())
seenerrs := sets.NewString()
result := ""
agg.visit(func(err error) {
msg := err.Error()
if seenerrs.Has(msg) {
return
}
seenerrs.Insert(msg)
if len(seenerrs) > 1 {
result += ", "
}
result += msg
})
if len(seenerrs) == 1 {
return result
}
return "[" + result + "]"
}
func (agg aggregate) visit(f func(err error)) {
for _, err := range agg {
switch err := err.(type) {
case aggregate:
err.visit(f)
case Aggregate:
for _, nestedErr := range err.Errors() {
f(nestedErr)
}
default:
f(err)
}
}
result += "]"
return result
}
// Errors is part of the Aggregate interface.
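For illustration (not part of this diff), a small sketch of the deduplicated Error() output introduced above; NewAggregate is assumed from the same k8s.io/apimachinery/pkg/util/errors package:

package main

import (
	"errors"
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	agg := utilerrors.NewAggregate([]error{
		errors.New("pod not found"),
		errors.New("pod not found"), // duplicate message, reported once
		errors.New("quota exceeded"),
	})
	// With the visit/seenerrs change, duplicates are collapsed:
	// prints: [pod not found, quota exceeded]
	fmt.Println(agg.Error())
}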

View File

@@ -136,12 +136,12 @@ func Handshake(req *http.Request, w http.ResponseWriter, serverProtocols []strin
negotiatedProtocol := negotiateProtocol(clientProtocols, serverProtocols)
if len(negotiatedProtocol) == 0 {
w.WriteHeader(http.StatusForbidden)
for i := range serverProtocols {
w.Header().Add(HeaderAcceptedProtocolVersions, serverProtocols[i])
}
fmt.Fprintf(w, "unable to upgrade: unable to negotiate protocol: client supports %v, server accepts %v", clientProtocols, serverProtocols)
return "", fmt.Errorf("unable to upgrade: unable to negotiate protocol: client supports %v, server supports %v", clientProtocols, serverProtocols)
err := fmt.Errorf("unable to upgrade: unable to negotiate protocol: client supports %v, server accepts %v", clientProtocols, serverProtocols)
http.Error(w, err.Error(), http.StatusForbidden)
return "", err
}
w.Header().Add(HeaderProtocolVersion, negotiatedProtocol)

View File

@@ -23,8 +23,8 @@ import (
"time"
"github.com/docker/spdystream"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/klog"
)
// connection maintains state about a spdystream.Connection and its associated
@@ -128,7 +128,7 @@ func (c *connection) newSpdyStream(stream *spdystream.Stream) {
err := c.newStreamHandler(stream, replySent)
rejectStream := (err != nil)
if rejectStream {
glog.Warningf("Stream rejected: %v", err)
klog.Warningf("Stream rejected: %v", err)
stream.Reset()
return
}

View File

@@ -67,6 +67,9 @@ type SpdyRoundTripper struct {
// followRedirects indicates if the round tripper should examine responses for redirects and
// follow them.
followRedirects bool
// requireSameHostRedirects restricts redirect following to only follow redirects to the same host
// as the original request.
requireSameHostRedirects bool
}
var _ utilnet.TLSClientConfigHolder = &SpdyRoundTripper{}
@@ -75,14 +78,18 @@ var _ utilnet.Dialer = &SpdyRoundTripper{}
// NewRoundTripper creates a new SpdyRoundTripper that will use
// the specified tlsConfig.
func NewRoundTripper(tlsConfig *tls.Config, followRedirects bool) httpstream.UpgradeRoundTripper {
return NewSpdyRoundTripper(tlsConfig, followRedirects)
func NewRoundTripper(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool) httpstream.UpgradeRoundTripper {
return NewSpdyRoundTripper(tlsConfig, followRedirects, requireSameHostRedirects)
}
// NewSpdyRoundTripper creates a new SpdyRoundTripper that will use
// the specified tlsConfig. This function is mostly meant for unit tests.
func NewSpdyRoundTripper(tlsConfig *tls.Config, followRedirects bool) *SpdyRoundTripper {
return &SpdyRoundTripper{tlsConfig: tlsConfig, followRedirects: followRedirects}
func NewSpdyRoundTripper(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool) *SpdyRoundTripper {
return &SpdyRoundTripper{
tlsConfig: tlsConfig,
followRedirects: followRedirects,
requireSameHostRedirects: requireSameHostRedirects,
}
}
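For illustration (not part of this diff), a minimal sketch of constructing the round tripper with the new requireSameHostRedirects flag; the vendored import path k8s.io/apimachinery/pkg/util/httpstream/spdy is assumed:

package main

import (
	"crypto/tls"
	"net/http"

	"k8s.io/apimachinery/pkg/util/httpstream/spdy"
)

func main() {
	// Follow redirects, but only to the same host as the original request.
	rt := spdy.NewSpdyRoundTripper(&tls.Config{}, true, true)

	// The round tripper plugs into a plain http.Client for the upgrade request.
	client := &http.Client{Transport: rt}
	_ = client
}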
// TLSClientConfig implements pkg/util/net.TLSClientConfigHolder for proper TLS checking during
@@ -257,7 +264,7 @@ func (s *SpdyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error)
)
if s.followRedirects {
conn, rawResponse, err = utilnet.ConnectWithRedirects(req.Method, req.URL, header, req.Body, s)
conn, rawResponse, err = utilnet.ConnectWithRedirects(req.Method, req.URL, header, req.Body, s, s.requireSameHostRedirects)
} else {
clone := utilnet.CloneRequest(req)
clone.Header = header

View File

@@ -74,15 +74,15 @@ func (u responseUpgrader) UpgradeResponse(w http.ResponseWriter, req *http.Reque
connectionHeader := strings.ToLower(req.Header.Get(httpstream.HeaderConnection))
upgradeHeader := strings.ToLower(req.Header.Get(httpstream.HeaderUpgrade))
if !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) {
w.WriteHeader(http.StatusBadRequest)
fmt.Fprintf(w, "unable to upgrade: missing upgrade headers in request: %#v", req.Header)
errorMsg := fmt.Sprintf("unable to upgrade: missing upgrade headers in request: %#v", req.Header)
http.Error(w, errorMsg, http.StatusBadRequest)
return nil
}
hijacker, ok := w.(http.Hijacker)
if !ok {
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "unable to upgrade: unable to hijack response")
errorMsg := fmt.Sprintf("unable to upgrade: unable to hijack response")
http.Error(w, errorMsg, http.StatusInternalServerError)
return nil
}

View File

@@ -14,26 +14,20 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by protoc-gen-gogo.
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto
// DO NOT EDIT!
/*
Package intstr is a generated protocol buffer package.
It is generated from these files:
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto
It has these top-level messages:
IntOrString
*/
package intstr
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import (
fmt "fmt"
import io "io"
io "io"
math "math"
math_bits "math/bits"
proto "github.com/gogo/protobuf/proto"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@@ -46,17 +40,69 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
func (m *IntOrString) Reset() { *m = IntOrString{} }
func (*IntOrString) ProtoMessage() {}
func (*IntOrString) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} }
func (m *IntOrString) Reset() { *m = IntOrString{} }
func (*IntOrString) ProtoMessage() {}
func (*IntOrString) Descriptor() ([]byte, []int) {
return fileDescriptor_94e046ae3ce6121c, []int{0}
}
func (m *IntOrString) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *IntOrString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *IntOrString) XXX_Merge(src proto.Message) {
xxx_messageInfo_IntOrString.Merge(m, src)
}
func (m *IntOrString) XXX_Size() int {
return m.Size()
}
func (m *IntOrString) XXX_DiscardUnknown() {
xxx_messageInfo_IntOrString.DiscardUnknown(m)
}
var xxx_messageInfo_IntOrString proto.InternalMessageInfo
func init() {
proto.RegisterType((*IntOrString)(nil), "k8s.io.apimachinery.pkg.util.intstr.IntOrString")
}
func init() {
proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto", fileDescriptor_94e046ae3ce6121c)
}
var fileDescriptor_94e046ae3ce6121c = []byte{
// 292 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8f, 0x31, 0x4b, 0x33, 0x31,
0x1c, 0xc6, 0x93, 0xb7, 0x7d, 0x8b, 0x9e, 0xe0, 0x50, 0x1c, 0x8a, 0x43, 0x7a, 0x28, 0xc8, 0x0d,
0x9a, 0xac, 0xe2, 0xd8, 0xad, 0x20, 0x08, 0x57, 0x71, 0x70, 0xbb, 0x6b, 0x63, 0x1a, 0xae, 0x4d,
0x42, 0xee, 0x7f, 0xc2, 0x6d, 0xfd, 0x08, 0xba, 0x39, 0xfa, 0x71, 0x6e, 0xec, 0xd8, 0x41, 0x8a,
0x17, 0xbf, 0x85, 0x93, 0x5c, 0xee, 0x40, 0xa7, 0xe4, 0x79, 0x9e, 0xdf, 0x2f, 0x90, 0xe0, 0x36,
0xbb, 0xce, 0xa9, 0xd4, 0x2c, 0x2b, 0x52, 0x6e, 0x15, 0x07, 0x9e, 0xb3, 0x67, 0xae, 0x16, 0xda,
0xb2, 0x6e, 0x48, 0x8c, 0x5c, 0x27, 0xf3, 0xa5, 0x54, 0xdc, 0x96, 0xcc, 0x64, 0x82, 0x15, 0x20,
0x57, 0x4c, 0x2a, 0xc8, 0xc1, 0x32, 0xc1, 0x15, 0xb7, 0x09, 0xf0, 0x05, 0x35, 0x56, 0x83, 0x1e,
0x9e, 0xb7, 0x12, 0xfd, 0x2b, 0x51, 0x93, 0x09, 0xda, 0x48, 0xb4, 0x95, 0x4e, 0xaf, 0x84, 0x84,
0x65, 0x91, 0xd2, 0xb9, 0x5e, 0x33, 0xa1, 0x85, 0x66, 0xde, 0x4d, 0x8b, 0x27, 0x9f, 0x7c, 0xf0,
0xb7, 0xf6, 0xcd, 0xb3, 0x57, 0x1c, 0x1c, 0x4d, 0x15, 0xdc, 0xd9, 0x19, 0x58, 0xa9, 0xc4, 0x30,
0x0a, 0xfa, 0x50, 0x1a, 0x3e, 0xc2, 0x21, 0x8e, 0x7a, 0x93, 0x93, 0x6a, 0x3f, 0x46, 0x6e, 0x3f,
0xee, 0xdf, 0x97, 0x86, 0x7f, 0x77, 0x67, 0xec, 0x89, 0xe1, 0x45, 0x30, 0x90, 0x0a, 0x1e, 0x92,
0xd5, 0xe8, 0x5f, 0x88, 0xa3, 0xff, 0x93, 0xe3, 0x8e, 0x1d, 0x4c, 0x7d, 0x1b, 0x77, 0x6b, 0xc3,
0xe5, 0x60, 0x1b, 0xae, 0x17, 0xe2, 0xe8, 0xf0, 0x97, 0x9b, 0xf9, 0x36, 0xee, 0xd6, 0x9b, 0x83,
0xb7, 0xf7, 0x31, 0xda, 0x7c, 0x84, 0x68, 0x72, 0x59, 0xd5, 0x04, 0x6d, 0x6b, 0x82, 0x76, 0x35,
0x41, 0x1b, 0x47, 0x70, 0xe5, 0x08, 0xde, 0x3a, 0x82, 0x77, 0x8e, 0xe0, 0x4f, 0x47, 0xf0, 0xcb,
0x17, 0x41, 0x8f, 0x83, 0xf6, 0xc3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x52, 0xa0, 0xb5, 0xc9,
0x64, 0x01, 0x00, 0x00,
}
func (m *IntOrString) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@@ -64,51 +110,44 @@ func (m *IntOrString) Marshal() (dAtA []byte, err error) {
}
func (m *IntOrString) MarshalTo(dAtA []byte) (int, error) {
var i int
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *IntOrString) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
dAtA[i] = 0x8
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Type))
dAtA[i] = 0x10
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.IntVal))
dAtA[i] = 0x1a
i++
i -= len(m.StrVal)
copy(dAtA[i:], m.StrVal)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.StrVal)))
i += copy(dAtA[i:], m.StrVal)
return i, nil
i--
dAtA[i] = 0x1a
i = encodeVarintGenerated(dAtA, i, uint64(m.IntVal))
i--
dAtA[i] = 0x10
i = encodeVarintGenerated(dAtA, i, uint64(m.Type))
i--
dAtA[i] = 0x8
return len(dAtA) - i, nil
}
func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
dAtA[offset+4] = uint8(v >> 32)
dAtA[offset+5] = uint8(v >> 40)
dAtA[offset+6] = uint8(v >> 48)
dAtA[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
offset -= sovGenerated(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
return base
}
func (m *IntOrString) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
n += 1 + sovGenerated(uint64(m.Type))
@@ -119,14 +158,7 @@ func (m *IntOrString) Size() (n int) {
}
func sovGenerated(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
return (math_bits.Len64(x|1) + 6) / 7
}
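The new sovGenerated body computes the protobuf varint length arithmetically instead of looping: a varint uses one byte per 7 bits with a minimum of one byte, which is exactly (bits.Len64(x|1)+6)/7. A small self-contained check (illustrative only, standard library math/bits):

package main

import (
	"fmt"
	"math/bits"
)

// loopLen mirrors the old loop-based varint size.
func loopLen(x uint64) int {
	n := 0
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

// bitsLen mirrors the new closed form: ceil(bitlen/7), at least 1.
func bitsLen(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

func main() {
	for _, x := range []uint64{0, 1, 127, 128, 16383, 16384, 1<<63 - 1, 1 << 63} {
		fmt.Println(x, loopLen(x), bitsLen(x), loopLen(x) == bitsLen(x))
	}
}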
func sozGenerated(x uint64) (n int) {
return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@@ -146,7 +178,7 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -174,7 +206,7 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
m.Type |= (Type(b) & 0x7F) << shift
m.Type |= Type(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -193,7 +225,7 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
m.IntVal |= (int32(b) & 0x7F) << shift
m.IntVal |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -212,7 +244,7 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -222,6 +254,9 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -236,6 +271,9 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -302,10 +340,13 @@ func skipGenerated(dAtA []byte) (n int, err error) {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthGenerated
}
iNdEx += length
if iNdEx < 0 {
return 0, ErrInvalidLengthGenerated
}
return iNdEx, nil
case 3:
for {
@@ -334,6 +375,9 @@ func skipGenerated(dAtA []byte) (n int, err error) {
return 0, err
}
iNdEx = start + next
if iNdEx < 0 {
return 0, ErrInvalidLengthGenerated
}
}
return iNdEx, nil
case 4:
@@ -352,30 +396,3 @@ var (
ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
)
func init() {
proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto", fileDescriptorGenerated)
}
var fileDescriptorGenerated = []byte{
// 292 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8f, 0x31, 0x4b, 0x33, 0x31,
0x1c, 0xc6, 0x93, 0xb7, 0x7d, 0x8b, 0x9e, 0xe0, 0x50, 0x1c, 0x8a, 0x43, 0x7a, 0x28, 0xc8, 0x0d,
0x9a, 0xac, 0xe2, 0xd8, 0xad, 0x20, 0x08, 0x57, 0x71, 0x70, 0xbb, 0x6b, 0x63, 0x1a, 0xae, 0x4d,
0x42, 0xee, 0x7f, 0xc2, 0x6d, 0xfd, 0x08, 0xba, 0x39, 0xfa, 0x71, 0x6e, 0xec, 0xd8, 0x41, 0x8a,
0x17, 0xbf, 0x85, 0x93, 0x5c, 0xee, 0x40, 0xa7, 0xe4, 0x79, 0x9e, 0xdf, 0x2f, 0x90, 0xe0, 0x36,
0xbb, 0xce, 0xa9, 0xd4, 0x2c, 0x2b, 0x52, 0x6e, 0x15, 0x07, 0x9e, 0xb3, 0x67, 0xae, 0x16, 0xda,
0xb2, 0x6e, 0x48, 0x8c, 0x5c, 0x27, 0xf3, 0xa5, 0x54, 0xdc, 0x96, 0xcc, 0x64, 0x82, 0x15, 0x20,
0x57, 0x4c, 0x2a, 0xc8, 0xc1, 0x32, 0xc1, 0x15, 0xb7, 0x09, 0xf0, 0x05, 0x35, 0x56, 0x83, 0x1e,
0x9e, 0xb7, 0x12, 0xfd, 0x2b, 0x51, 0x93, 0x09, 0xda, 0x48, 0xb4, 0x95, 0x4e, 0xaf, 0x84, 0x84,
0x65, 0x91, 0xd2, 0xb9, 0x5e, 0x33, 0xa1, 0x85, 0x66, 0xde, 0x4d, 0x8b, 0x27, 0x9f, 0x7c, 0xf0,
0xb7, 0xf6, 0xcd, 0xb3, 0x57, 0x1c, 0x1c, 0x4d, 0x15, 0xdc, 0xd9, 0x19, 0x58, 0xa9, 0xc4, 0x30,
0x0a, 0xfa, 0x50, 0x1a, 0x3e, 0xc2, 0x21, 0x8e, 0x7a, 0x93, 0x93, 0x6a, 0x3f, 0x46, 0x6e, 0x3f,
0xee, 0xdf, 0x97, 0x86, 0x7f, 0x77, 0x67, 0xec, 0x89, 0xe1, 0x45, 0x30, 0x90, 0x0a, 0x1e, 0x92,
0xd5, 0xe8, 0x5f, 0x88, 0xa3, 0xff, 0x93, 0xe3, 0x8e, 0x1d, 0x4c, 0x7d, 0x1b, 0x77, 0x6b, 0xc3,
0xe5, 0x60, 0x1b, 0xae, 0x17, 0xe2, 0xe8, 0xf0, 0x97, 0x9b, 0xf9, 0x36, 0xee, 0xd6, 0x9b, 0x83,
0xb7, 0xf7, 0x31, 0xda, 0x7c, 0x84, 0x68, 0x72, 0x59, 0xd5, 0x04, 0x6d, 0x6b, 0x82, 0x76, 0x35,
0x41, 0x1b, 0x47, 0x70, 0xe5, 0x08, 0xde, 0x3a, 0x82, 0x77, 0x8e, 0xe0, 0x4f, 0x47, 0xf0, 0xcb,
0x17, 0x41, 0x8f, 0x83, 0xf6, 0xc3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x52, 0xa0, 0xb5, 0xc9,
0x64, 0x01, 0x00, 0x00,
}

View File

@@ -29,7 +29,7 @@ option go_package = "intstr";
// inner type. This allows you to have, for example, a JSON field that can
// accept a name or number.
// TODO: Rename to Int32OrString
//
//
// +protobuf=true
// +protobuf.options.(gogoproto.goproto_stringer)=false
// +k8s:openapi-gen=true

View File

@@ -18,14 +18,15 @@ package intstr
import (
"encoding/json"
"errors"
"fmt"
"math"
"runtime/debug"
"strconv"
"strings"
"github.com/golang/glog"
"github.com/google/gofuzz"
"k8s.io/klog"
)
// IntOrString is a type that can hold an int32 or a string. When used in
@@ -44,7 +45,7 @@ type IntOrString struct {
}
// Type represents the stored type of IntOrString.
type Type int
type Type int64
const (
Int Type = iota // The IntOrString holds an int.
@@ -57,7 +58,7 @@ const (
// TODO: convert to (val int32)
func FromInt(val int) IntOrString {
if val > math.MaxInt32 || val < math.MinInt32 {
glog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack())
klog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack())
}
return IntOrString{Type: Int, IntVal: int32(val)}
}
@@ -121,11 +122,11 @@ func (intstr IntOrString) MarshalJSON() ([]byte, error) {
// the OpenAPI spec of this type.
//
// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
func (_ IntOrString) OpenAPISchemaType() []string { return []string{"string"} }
func (IntOrString) OpenAPISchemaType() []string { return []string{"string"} }
// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
// the OpenAPI spec of this type.
func (_ IntOrString) OpenAPISchemaFormat() string { return "int-or-string" }
func (IntOrString) OpenAPISchemaFormat() string { return "int-or-string" }
func (intstr *IntOrString) Fuzz(c fuzz.Continue) {
if intstr == nil {
@@ -142,7 +143,17 @@ func (intstr *IntOrString) Fuzz(c fuzz.Continue) {
}
}
func ValueOrDefault(intOrPercent *IntOrString, defaultValue IntOrString) *IntOrString {
if intOrPercent == nil {
return &defaultValue
}
return intOrPercent
}
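For illustration (not part of this diff), a minimal sketch of the IntOrString helpers above together with GetValueFromIntOrPercent, which appears just below; FromString is assumed from the same package:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	limit := intstr.FromString("25%") // percentage form
	fallback := intstr.FromInt(3)     // plain integer form

	// ValueOrDefault substitutes the default when the pointer is nil.
	chosen := intstr.ValueOrDefault(nil, fallback)
	fmt.Println(chosen.IntValue()) // 3

	// GetValueFromIntOrPercent resolves "25%" of 8, rounding up: 2.
	v, err := intstr.GetValueFromIntOrPercent(&limit, 8, true)
	fmt.Println(v, err)
}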
func GetValueFromIntOrPercent(intOrPercent *IntOrString, total int, roundUp bool) (int, error) {
if intOrPercent == nil {
return 0, errors.New("nil value for IntOrString")
}
value, isPercent, err := getIntOrPercentValue(intOrPercent)
if err != nil {
return 0, fmt.Errorf("invalid value for IntOrString: %v", err)

View File

@@ -19,6 +19,7 @@ package json
import (
"bytes"
"encoding/json"
"fmt"
"io"
)
@@ -34,6 +35,9 @@ func Marshal(v interface{}) ([]byte, error) {
return json.Marshal(v)
}
// limit recursive depth to prevent stack overflow errors
const maxDepth = 10000
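The depth counter threaded through convertMapNumbers and convertSliceNumbers below is checked against this limit on every recursion. For illustration (not part of this diff), a sketch of the number conversion these helpers implement, using the vendored k8s.io/apimachinery/pkg/util/json:

package main

import (
	"fmt"

	utiljson "k8s.io/apimachinery/pkg/util/json"
)

func main() {
	var v map[string]interface{}
	// Integers decode as int64 and decimals as float64 instead of everything becoming float64.
	if err := utiljson.Unmarshal([]byte(`{"replicas": 3, "ratio": 0.5}`), &v); err != nil {
		panic(err)
	}
	fmt.Printf("%T %T\n", v["replicas"], v["ratio"]) // int64 float64
}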
// Unmarshal unmarshals the given data
// If v is a *map[string]interface{}, numbers are converted to int64 or float64
func Unmarshal(data []byte, v interface{}) error {
@@ -48,7 +52,7 @@ func Unmarshal(data []byte, v interface{}) error {
return err
}
// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
return convertMapNumbers(*v)
return convertMapNumbers(*v, 0)
case *[]interface{}:
// Build a decoder from the given data
@@ -60,7 +64,7 @@ func Unmarshal(data []byte, v interface{}) error {
return err
}
// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
return convertSliceNumbers(*v)
return convertSliceNumbers(*v, 0)
default:
return json.Unmarshal(data, v)
@@ -69,16 +73,20 @@ func Unmarshal(data []byte, v interface{}) error {
// convertMapNumbers traverses the map, converting any json.Number values to int64 or float64.
// values which are map[string]interface{} or []interface{} are recursively visited
func convertMapNumbers(m map[string]interface{}) error {
func convertMapNumbers(m map[string]interface{}, depth int) error {
if depth > maxDepth {
return fmt.Errorf("exceeded max depth of %d", maxDepth)
}
var err error
for k, v := range m {
switch v := v.(type) {
case json.Number:
m[k], err = convertNumber(v)
case map[string]interface{}:
err = convertMapNumbers(v)
err = convertMapNumbers(v, depth+1)
case []interface{}:
err = convertSliceNumbers(v)
err = convertSliceNumbers(v, depth+1)
}
if err != nil {
return err
@@ -89,16 +97,20 @@ func convertMapNumbers(m map[string]interface{}) error {
// convertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64.
// values which are map[string]interface{} or []interface{} are recursively visited
func convertSliceNumbers(s []interface{}) error {
func convertSliceNumbers(s []interface{}, depth int) error {
if depth > maxDepth {
return fmt.Errorf("exceeded max depth of %d", maxDepth)
}
var err error
for i, v := range s {
switch v := v.(type) {
case json.Number:
s[i], err = convertNumber(v)
case map[string]interface{}:
err = convertMapNumbers(v)
err = convertMapNumbers(v, depth+1)
case []interface{}:
err = convertSliceNumbers(v)
err = convertSliceNumbers(v, depth+1)
}
if err != nil {
return err

View File

@@ -0,0 +1,93 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package naming
import (
"fmt"
"regexp"
goruntime "runtime"
"runtime/debug"
"strconv"
"strings"
)
// GetNameFromCallsite walks back through the call stack until we find a caller from outside of the ignoredPackages
// it returns back a shortpath/filename:line to aid in identification of this reflector when it starts logging
func GetNameFromCallsite(ignoredPackages ...string) string {
name := "????"
const maxStack = 10
for i := 1; i < maxStack; i++ {
_, file, line, ok := goruntime.Caller(i)
if !ok {
file, line, ok = extractStackCreator()
if !ok {
break
}
i += maxStack
}
if hasPackage(file, append(ignoredPackages, "/runtime/asm_")) {
continue
}
file = trimPackagePrefix(file)
name = fmt.Sprintf("%s:%d", file, line)
break
}
return name
}
// hasPackage returns true if the file is in one of the ignored packages.
func hasPackage(file string, ignoredPackages []string) bool {
for _, ignoredPackage := range ignoredPackages {
if strings.Contains(file, ignoredPackage) {
return true
}
}
return false
}
// trimPackagePrefix reduces duplicate values off the front of a package name.
func trimPackagePrefix(file string) string {
if l := strings.LastIndex(file, "/vendor/"); l >= 0 {
return file[l+len("/vendor/"):]
}
if l := strings.LastIndex(file, "/src/"); l >= 0 {
return file[l+5:]
}
if l := strings.LastIndex(file, "/pkg/"); l >= 0 {
return file[l+1:]
}
return file
}
var stackCreator = regexp.MustCompile(`(?m)^created by (.*)\n\s+(.*):(\d+) \+0x[[:xdigit:]]+$`)
// extractStackCreator retrieves the goroutine file and line that launched this stack. Returns false
// if the creator cannot be located.
// TODO: Go does not expose this via runtime https://github.com/golang/go/issues/11440
func extractStackCreator() (string, int, bool) {
stack := debug.Stack()
matches := stackCreator.FindStringSubmatch(string(stack))
if matches == nil || len(matches) != 4 {
return "", 0, false
}
line, err := strconv.Atoi(matches[3])
if err != nil {
return "", 0, false
}
return matches[2], line, true
}
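For illustration (not part of this diff), a minimal sketch of calling the new helper; the ignored package substring is a hypothetical example:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/naming"
)

func main() {
	// Walks up the call stack, skips frames whose file path contains an
	// ignored package substring, and returns something like "main.go:12".
	name := naming.GetNameFromCallsite("k8s.io/client-go/tools/cache")
	fmt.Println(name)
}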

View File

@@ -31,8 +31,8 @@ import (
"strconv"
"strings"
"github.com/golang/glog"
"golang.org/x/net/http2"
"k8s.io/klog"
)
// JoinPreservingTrailingSlash does a path.Join of the specified elements,
@@ -68,14 +68,17 @@ func IsProbableEOF(err error) bool {
if uerr, ok := err.(*url.Error); ok {
err = uerr.Err
}
msg := err.Error()
switch {
case err == io.EOF:
return true
case err.Error() == "http: can't write HTTP request on broken connection":
case msg == "http: can't write HTTP request on broken connection":
return true
case strings.Contains(err.Error(), "connection reset by peer"):
case strings.Contains(msg, "http2: server sent GOAWAY and closed the connection"):
return true
case strings.Contains(strings.ToLower(err.Error()), "use of closed network connection"):
case strings.Contains(msg, "connection reset by peer"):
return true
case strings.Contains(strings.ToLower(msg), "use of closed network connection"):
return true
}
return false
@@ -91,12 +94,16 @@ func SetOldTransportDefaults(t *http.Transport) *http.Transport {
// ProxierWithNoProxyCIDR allows CIDR rules in NO_PROXY
t.Proxy = NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment)
}
if t.DialContext == nil {
// If no custom dialer is set, use the default context dialer
if t.DialContext == nil && t.Dial == nil {
t.DialContext = defaultTransport.DialContext
}
if t.TLSHandshakeTimeout == 0 {
t.TLSHandshakeTimeout = defaultTransport.TLSHandshakeTimeout
}
if t.IdleConnTimeout == 0 {
t.IdleConnTimeout = defaultTransport.IdleConnTimeout
}
return t
}
@@ -106,15 +113,30 @@ func SetTransportDefaults(t *http.Transport) *http.Transport {
t = SetOldTransportDefaults(t)
// Allow clients to disable http2 if needed.
if s := os.Getenv("DISABLE_HTTP2"); len(s) > 0 {
glog.Infof("HTTP2 has been explicitly disabled")
} else {
klog.Infof("HTTP2 has been explicitly disabled")
} else if allowsHTTP2(t) {
if err := http2.ConfigureTransport(t); err != nil {
glog.Warningf("Transport failed http2 configuration: %v", err)
klog.Warningf("Transport failed http2 configuration: %v", err)
}
}
return t
}
func allowsHTTP2(t *http.Transport) bool {
if t.TLSClientConfig == nil || len(t.TLSClientConfig.NextProtos) == 0 {
// the transport expressed no NextProto preference, allow
return true
}
for _, p := range t.TLSClientConfig.NextProtos {
if p == http2.NextProtoTLS {
// the transport explicitly allowed http/2
return true
}
}
// the transport explicitly set NextProtos and excluded http/2
return false
}
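For illustration (not part of this diff), a sketch of how the NextProtos check above interacts with SetTransportDefaults; the vendored import path k8s.io/apimachinery/pkg/util/net is assumed:

package main

import (
	"crypto/tls"
	"net/http"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	// No NextProtos preference: HTTP/2 is configured (unless DISABLE_HTTP2 is set).
	t1 := utilnet.SetTransportDefaults(&http.Transport{})

	// Explicit NextProtos without "h2": allowsHTTP2 returns false, so HTTP/2 setup is skipped.
	t2 := utilnet.SetTransportDefaults(&http.Transport{
		TLSClientConfig: &tls.Config{NextProtos: []string{"http/1.1"}},
	})
	_, _ = t1, t2
}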
type RoundTripperWrapper interface {
http.RoundTripper
WrappedRoundTripper() http.RoundTripper
@@ -129,7 +151,18 @@ func DialerFor(transport http.RoundTripper) (DialFunc, error) {
switch transport := transport.(type) {
case *http.Transport:
return transport.DialContext, nil
// transport.DialContext takes precedence over transport.Dial
if transport.DialContext != nil {
return transport.DialContext, nil
}
// adapt transport.Dial to the DialWithContext signature
if transport.Dial != nil {
return func(ctx context.Context, net, addr string) (net.Conn, error) {
return transport.Dial(net, addr)
}, nil
}
// otherwise return nil
return nil, nil
case RoundTripperWrapper:
return DialerFor(transport.WrappedRoundTripper())
default:
@@ -167,10 +200,8 @@ func FormatURL(scheme string, host string, port int, path string) *url.URL {
}
func GetHTTPClient(req *http.Request) string {
if userAgent, ok := req.Header["User-Agent"]; ok {
if len(userAgent) > 0 {
return userAgent[0]
}
if ua := req.UserAgent(); len(ua) != 0 {
return ua
}
return "unknown"
}
@@ -311,9 +342,10 @@ type Dialer interface {
// ConnectWithRedirects uses dialer to send req, following up to 10 redirects (relative to
// originalLocation). It returns the opened net.Conn and the raw response bytes.
func ConnectWithRedirects(originalMethod string, originalLocation *url.URL, header http.Header, originalBody io.Reader, dialer Dialer) (net.Conn, []byte, error) {
// If requireSameHostRedirects is true, only redirects to the same host are permitted.
func ConnectWithRedirects(originalMethod string, originalLocation *url.URL, header http.Header, originalBody io.Reader, dialer Dialer, requireSameHostRedirects bool) (net.Conn, []byte, error) {
const (
maxRedirects = 10
maxRedirects = 9 // Fail on the 10th redirect
maxResponseSize = 16384 // play it safe to allow the potential for lots of / large headers
)
@@ -357,7 +389,7 @@ redirectLoop:
resp, err := http.ReadResponse(respReader, nil)
if err != nil {
// Unable to read the backend response; let the client handle it.
glog.Warningf("Error reading backend response: %v", err)
klog.Warningf("Error reading backend response: %v", err)
break redirectLoop
}
@@ -377,10 +409,6 @@ redirectLoop:
resp.Body.Close() // not used
// Reset the connection.
intermediateConn.Close()
intermediateConn = nil
// Prepare to follow the redirect.
redirectStr := resp.Header.Get("Location")
if redirectStr == "" {
@@ -394,6 +422,15 @@ redirectLoop:
if err != nil {
return nil, nil, fmt.Errorf("malformed Location header: %v", err)
}
// Only follow redirects to the same host. Otherwise, propagate the redirect response back.
if requireSameHostRedirects && location.Hostname() != originalLocation.Hostname() {
break redirectLoop
}
// Reset the connection.
intermediateConn.Close()
intermediateConn = nil
}
connToReturn := intermediateConn

View File

@@ -26,7 +26,7 @@ import (
"strings"
"github.com/golang/glog"
"k8s.io/klog"
)
type AddressFamily uint
@@ -53,6 +53,28 @@ type RouteFile struct {
parse func(input io.Reader) ([]Route, error)
}
// noRoutesError can be returned by ChooseBindAddress() in case of no routes
type noRoutesError struct {
message string
}
func (e noRoutesError) Error() string {
return e.message
}
// IsNoRoutesError checks if an error is of type noRoutesError
func IsNoRoutesError(err error) bool {
if err == nil {
return false
}
switch err.(type) {
case noRoutesError:
return true
default:
return false
}
}
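For illustration (not part of this diff), a sketch of consuming the typed error; ChooseBindAddress is assumed to live in the same vendored package, per the comment above:

package main

import (
	"fmt"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	// A nil bind address asks the helper to pick one from routes and interfaces.
	ip, err := utilnet.ChooseBindAddress(nil)
	if err != nil {
		if utilnet.IsNoRoutesError(err) {
			// No default route found in the IPv4 or IPv6 route files.
			fmt.Println("no default routes:", err)
			return
		}
		fmt.Println("other error:", err)
		return
	}
	fmt.Println("bind address:", ip)
}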
var (
v4File = RouteFile{name: ipv4RouteFile, parse: getIPv4DefaultRoutes}
v6File = RouteFile{name: ipv6RouteFile, parse: getIPv6DefaultRoutes}
@@ -171,7 +193,7 @@ func isInterfaceUp(intf *net.Interface) bool {
return false
}
if intf.Flags&net.FlagUp != 0 {
glog.V(4).Infof("Interface %v is up", intf.Name)
klog.V(4).Infof("Interface %v is up", intf.Name)
return true
}
return false
@@ -186,20 +208,20 @@ func isLoopbackOrPointToPoint(intf *net.Interface) bool {
func getMatchingGlobalIP(addrs []net.Addr, family AddressFamily) (net.IP, error) {
if len(addrs) > 0 {
for i := range addrs {
glog.V(4).Infof("Checking addr %s.", addrs[i].String())
klog.V(4).Infof("Checking addr %s.", addrs[i].String())
ip, _, err := net.ParseCIDR(addrs[i].String())
if err != nil {
return nil, err
}
if memberOf(ip, family) {
if ip.IsGlobalUnicast() {
glog.V(4).Infof("IP found %v", ip)
klog.V(4).Infof("IP found %v", ip)
return ip, nil
} else {
glog.V(4).Infof("Non-global unicast address found %v", ip)
klog.V(4).Infof("Non-global unicast address found %v", ip)
}
} else {
glog.V(4).Infof("%v is not an IPv%d address", ip, int(family))
klog.V(4).Infof("%v is not an IPv%d address", ip, int(family))
}
}
@@ -219,13 +241,13 @@ func getIPFromInterface(intfName string, forFamily AddressFamily, nw networkInte
if err != nil {
return nil, err
}
glog.V(4).Infof("Interface %q has %d addresses :%v.", intfName, len(addrs), addrs)
klog.V(4).Infof("Interface %q has %d addresses :%v.", intfName, len(addrs), addrs)
matchingIP, err := getMatchingGlobalIP(addrs, forFamily)
if err != nil {
return nil, err
}
if matchingIP != nil {
glog.V(4).Infof("Found valid IPv%d address %v for interface %q.", int(forFamily), matchingIP, intfName)
klog.V(4).Infof("Found valid IPv%d address %v for interface %q.", int(forFamily), matchingIP, intfName)
return matchingIP, nil
}
}
@@ -253,14 +275,14 @@ func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) {
return nil, fmt.Errorf("no interfaces found on host.")
}
for _, family := range []AddressFamily{familyIPv4, familyIPv6} {
glog.V(4).Infof("Looking for system interface with a global IPv%d address", uint(family))
klog.V(4).Infof("Looking for system interface with a global IPv%d address", uint(family))
for _, intf := range intfs {
if !isInterfaceUp(&intf) {
glog.V(4).Infof("Skipping: down interface %q", intf.Name)
klog.V(4).Infof("Skipping: down interface %q", intf.Name)
continue
}
if isLoopbackOrPointToPoint(&intf) {
glog.V(4).Infof("Skipping: LB or P2P interface %q", intf.Name)
klog.V(4).Infof("Skipping: LB or P2P interface %q", intf.Name)
continue
}
addrs, err := nw.Addrs(&intf)
@@ -268,7 +290,7 @@ func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) {
return nil, err
}
if len(addrs) == 0 {
glog.V(4).Infof("Skipping: no addresses on interface %q", intf.Name)
klog.V(4).Infof("Skipping: no addresses on interface %q", intf.Name)
continue
}
for _, addr := range addrs {
@@ -277,15 +299,15 @@ func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) {
return nil, fmt.Errorf("Unable to parse CIDR for interface %q: %s", intf.Name, err)
}
if !memberOf(ip, family) {
glog.V(4).Infof("Skipping: no address family match for %q on interface %q.", ip, intf.Name)
klog.V(4).Infof("Skipping: no address family match for %q on interface %q.", ip, intf.Name)
continue
}
// TODO: Decide if should open up to allow IPv6 LLAs in future.
if !ip.IsGlobalUnicast() {
glog.V(4).Infof("Skipping: non-global address %q on interface %q.", ip, intf.Name)
klog.V(4).Infof("Skipping: non-global address %q on interface %q.", ip, intf.Name)
continue
}
glog.V(4).Infof("Found global unicast address %q on interface %q.", ip, intf.Name)
klog.V(4).Infof("Found global unicast address %q on interface %q.", ip, intf.Name)
return ip, nil
}
}
@@ -347,7 +369,9 @@ func getAllDefaultRoutes() ([]Route, error) {
v6Routes, _ := v6File.extract()
routes = append(routes, v6Routes...)
if len(routes) == 0 {
return nil, fmt.Errorf("No default routes.")
return nil, noRoutesError{
message: fmt.Sprintf("no default routes found in %q or %q", v4File.name, v6File.name),
}
}
return routes, nil
}
@@ -357,23 +381,23 @@ func getAllDefaultRoutes() ([]Route, error) {
// an IPv4 IP, and then will look at each IPv6 route for an IPv6 IP.
func chooseHostInterfaceFromRoute(routes []Route, nw networkInterfacer) (net.IP, error) {
for _, family := range []AddressFamily{familyIPv4, familyIPv6} {
glog.V(4).Infof("Looking for default routes with IPv%d addresses", uint(family))
klog.V(4).Infof("Looking for default routes with IPv%d addresses", uint(family))
for _, route := range routes {
if route.Family != family {
continue
}
glog.V(4).Infof("Default route transits interface %q", route.Interface)
klog.V(4).Infof("Default route transits interface %q", route.Interface)
finalIP, err := getIPFromInterface(route.Interface, family, nw)
if err != nil {
return nil, err
}
if finalIP != nil {
glog.V(4).Infof("Found active IP %v ", finalIP)
klog.V(4).Infof("Found active IP %v ", finalIP)
return finalIP, nil
}
}
}
glog.V(4).Infof("No active IP found by looking at default routes")
klog.V(4).Infof("No active IP found by looking at default routes")
return nil, fmt.Errorf("unable to select an IP from default routes.")
}

View File

@@ -54,3 +54,20 @@ func IsConnectionReset(err error) bool {
}
return false
}
// Returns if the given err is "connection refused" error
func IsConnectionRefused(err error) bool {
if urlErr, ok := err.(*url.Error); ok {
err = urlErr.Err
}
if opErr, ok := err.(*net.OpError); ok {
err = opErr.Err
}
if osErr, ok := err.(*os.SyscallError); ok {
err = osErr.Err
}
if errno, ok := err.(syscall.Errno); ok && errno == syscall.ECONNREFUSED {
return true
}
return false
}
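For illustration (not part of this diff), a sketch of the unwrapping chain above in use; dialing a port with no listener typically yields ECONNREFUSED wrapped in a *net.OpError:

package main

import (
	"fmt"
	"net"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	// Usually refused on a local machine; the helper unwraps url/op/syscall layers.
	_, err := net.Dial("tcp", "127.0.0.1:1")
	fmt.Println(utilnet.IsConnectionRefused(err))
}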

View File

@@ -18,11 +18,12 @@ package runtime
import (
"fmt"
"net/http"
"runtime"
"sync"
"time"
"github.com/golang/glog"
"k8s.io/klog"
)
var (
@@ -40,11 +41,7 @@ var PanicHandlers = []func(interface{}){logPanic}
// called in case of panic. HandleCrash actually crashes, after calling the
// handlers and logging the panic message.
//
// TODO: remove this function. We are switching to a world where it's safe for
// apiserver to panic, since it will be restarted by kubelet. At the beginning
// of the Kubernetes project, nothing was going to restart apiserver and so
// catching panics was important. But it's actually much simpler for monitoring
// software if we just exit when an unexpected panic happens.
// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully.
func HandleCrash(additionalHandlers ...func(interface{})) {
if r := recover(); r != nil {
for _, fn := range PanicHandlers {
@@ -60,23 +57,26 @@ func HandleCrash(additionalHandlers ...func(interface{})) {
}
}
// logPanic logs the caller tree when a panic occurs.
// logPanic logs the caller tree when a panic occurs (except in the special case of http.ErrAbortHandler).
func logPanic(r interface{}) {
callers := getCallers(r)
glog.Errorf("Observed a panic: %#v (%v)\n%v", r, r, callers)
}
func getCallers(r interface{}) string {
callers := ""
for i := 0; true; i++ {
_, file, line, ok := runtime.Caller(i)
if !ok {
break
}
callers = callers + fmt.Sprintf("%v:%v\n", file, line)
if r == http.ErrAbortHandler {
// honor the http.ErrAbortHandler sentinel panic value:
// ErrAbortHandler is a sentinel panic value to abort a handler.
// While any panic from ServeHTTP aborts the response to the client,
// panicking with ErrAbortHandler also suppresses logging of a stack trace to the server's error log.
return
}
return callers
// Same as stdlib http server code. Manually allocate stack trace buffer size
// to prevent excessively large logs
const size = 64 << 10
stacktrace := make([]byte, size)
stacktrace = stacktrace[:runtime.Stack(stacktrace, false)]
if _, ok := r.(string); ok {
klog.Errorf("Observed a panic: %s\n%s", r, stacktrace)
} else {
klog.Errorf("Observed a panic: %#v (%v)\n%s", r, r, stacktrace)
}
}
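For illustration (not part of this diff), a minimal sketch of guarding a goroutine with HandleCrash plus an extra handler; the vendored import path k8s.io/apimachinery/pkg/util/runtime is assumed:

package main

import (
	"fmt"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

func doWork() {
	// The extra handler runs after the default PanicHandlers (which log via logPanic).
	// Note: ReallyCrash defaults to true, so the panic is re-raised afterwards.
	defer utilruntime.HandleCrash(func(r interface{}) {
		fmt.Println("cleaning up after panic:", r)
	})
	panic("boom")
}

func main() {
	defer func() { recover() }() // keep this sketch itself from exiting
	doWork()
}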
// ErrorHandlers is a list of functions which will be invoked when an unreturnable
@@ -111,7 +111,7 @@ func HandleError(err error) {
// logError prints an error with the call stack of the location it was reported
func logError(err error) {
glog.ErrorDepth(2, err)
klog.ErrorDepth(2, err)
}
type rudimentaryErrorBackoff struct {
@@ -151,13 +151,17 @@ func GetCaller() string {
// handlers to handle errors and panics the same way.
func RecoverFromPanic(err *error) {
if r := recover(); r != nil {
callers := getCallers(r)
// Same as stdlib http server code. Manually allocate stack trace buffer size
// to prevent excessively large logs
const size = 64 << 10
stacktrace := make([]byte, size)
stacktrace = stacktrace[:runtime.Stack(stacktrace, false)]
*err = fmt.Errorf(
"recovered from panic %q. (err=%v) Call stack:\n%v",
"recovered from panic %q. (err=%v) Call stack:\n%s",
r,
*err,
callers)
stacktrace)
}
}

View File

@@ -46,17 +46,19 @@ func ByteKeySet(theMap interface{}) Byte {
}
// Insert adds items to the set.
func (s Byte) Insert(items ...byte) {
func (s Byte) Insert(items ...byte) Byte {
for _, item := range items {
s[item] = Empty{}
}
return s
}
// Delete removes all items from the set.
func (s Byte) Delete(items ...byte) {
func (s Byte) Delete(items ...byte) Byte {
for _, item := range items {
delete(s, item)
}
return s
}
// Has returns true if and only if item is contained in the set.

View File

@@ -46,17 +46,19 @@ func IntKeySet(theMap interface{}) Int {
}
// Insert adds items to the set.
func (s Int) Insert(items ...int) {
func (s Int) Insert(items ...int) Int {
for _, item := range items {
s[item] = Empty{}
}
return s
}
// Delete removes all items from the set.
func (s Int) Delete(items ...int) {
func (s Int) Delete(items ...int) Int {
for _, item := range items {
delete(s, item)
}
return s
}
// Has returns true if and only if item is contained in the set.

vendor/k8s.io/apimachinery/pkg/util/sets/int32.go (generated, vendored, new file, 205 additions)
View File

@@ -0,0 +1,205 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by set-gen. DO NOT EDIT.
package sets
import (
"reflect"
"sort"
)
// sets.Int32 is a set of int32s, implemented via map[int32]struct{} for minimal memory consumption.
type Int32 map[int32]Empty
// NewInt32 creates a Int32 from a list of values.
func NewInt32(items ...int32) Int32 {
ss := Int32{}
ss.Insert(items...)
return ss
}
// Int32KeySet creates a Int32 from a keys of a map[int32](? extends interface{}).
// If the value passed in is not actually a map, this will panic.
func Int32KeySet(theMap interface{}) Int32 {
v := reflect.ValueOf(theMap)
ret := Int32{}
for _, keyValue := range v.MapKeys() {
ret.Insert(keyValue.Interface().(int32))
}
return ret
}
// Insert adds items to the set.
func (s Int32) Insert(items ...int32) Int32 {
for _, item := range items {
s[item] = Empty{}
}
return s
}
// Delete removes all items from the set.
func (s Int32) Delete(items ...int32) Int32 {
for _, item := range items {
delete(s, item)
}
return s
}
// Has returns true if and only if item is contained in the set.
func (s Int32) Has(item int32) bool {
_, contained := s[item]
return contained
}
// HasAll returns true if and only if all items are contained in the set.
func (s Int32) HasAll(items ...int32) bool {
for _, item := range items {
if !s.Has(item) {
return false
}
}
return true
}
// HasAny returns true if any items are contained in the set.
func (s Int32) HasAny(items ...int32) bool {
for _, item := range items {
if s.Has(item) {
return true
}
}
return false
}
// Difference returns a set of objects that are not in s2
// For example:
// s1 = {a1, a2, a3}
// s2 = {a1, a2, a4, a5}
// s1.Difference(s2) = {a3}
// s2.Difference(s1) = {a4, a5}
func (s Int32) Difference(s2 Int32) Int32 {
result := NewInt32()
for key := range s {
if !s2.Has(key) {
result.Insert(key)
}
}
return result
}
// Union returns a new set which includes items in either s1 or s2.
// For example:
// s1 = {a1, a2}
// s2 = {a3, a4}
// s1.Union(s2) = {a1, a2, a3, a4}
// s2.Union(s1) = {a1, a2, a3, a4}
func (s1 Int32) Union(s2 Int32) Int32 {
result := NewInt32()
for key := range s1 {
result.Insert(key)
}
for key := range s2 {
result.Insert(key)
}
return result
}
// Intersection returns a new set which includes the item in BOTH s1 and s2
// For example:
// s1 = {a1, a2}
// s2 = {a2, a3}
// s1.Intersection(s2) = {a2}
func (s1 Int32) Intersection(s2 Int32) Int32 {
var walk, other Int32
result := NewInt32()
if s1.Len() < s2.Len() {
walk = s1
other = s2
} else {
walk = s2
other = s1
}
for key := range walk {
if other.Has(key) {
result.Insert(key)
}
}
return result
}
// IsSuperset returns true if and only if s1 is a superset of s2.
func (s1 Int32) IsSuperset(s2 Int32) bool {
for item := range s2 {
if !s1.Has(item) {
return false
}
}
return true
}
// Equal returns true if and only if s1 is equal (as a set) to s2.
// Two sets are equal if their membership is identical.
// (In practice, this means same elements, order doesn't matter)
func (s1 Int32) Equal(s2 Int32) bool {
return len(s1) == len(s2) && s1.IsSuperset(s2)
}
type sortableSliceOfInt32 []int32
func (s sortableSliceOfInt32) Len() int { return len(s) }
func (s sortableSliceOfInt32) Less(i, j int) bool { return lessInt32(s[i], s[j]) }
func (s sortableSliceOfInt32) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// List returns the contents as a sorted int32 slice.
func (s Int32) List() []int32 {
res := make(sortableSliceOfInt32, 0, len(s))
for key := range s {
res = append(res, key)
}
sort.Sort(res)
return []int32(res)
}
// UnsortedList returns the slice with contents in random order.
func (s Int32) UnsortedList() []int32 {
res := make([]int32, 0, len(s))
for key := range s {
res = append(res, key)
}
return res
}
// Returns a single element from the set.
func (s Int32) PopAny() (int32, bool) {
for key := range s {
s.Delete(key)
return key, true
}
var zeroValue int32
return zeroValue, false
}
// Len returns the size of the set.
func (s Int32) Len() int {
return len(s)
}
func lessInt32(lhs, rhs int32) bool {
return lhs < rhs
}
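For illustration (not part of this diff), a short sketch of the new Int32 set's operations:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	a := sets.NewInt32(1, 2, 3)
	b := sets.NewInt32(2, 3, 4)

	fmt.Println(a.Difference(b).List())   // [1]
	fmt.Println(a.Intersection(b).List()) // [2 3]
	fmt.Println(a.Union(b).List())        // [1 2 3 4]
	fmt.Println(a.Has(2), a.HasAll(1, 4)) // true false
}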

View File

@@ -46,17 +46,19 @@ func Int64KeySet(theMap interface{}) Int64 {
}
// Insert adds items to the set.
func (s Int64) Insert(items ...int64) {
func (s Int64) Insert(items ...int64) Int64 {
for _, item := range items {
s[item] = Empty{}
}
return s
}
// Delete removes all items from the set.
func (s Int64) Delete(items ...int64) {
func (s Int64) Delete(items ...int64) Int64 {
for _, item := range items {
delete(s, item)
}
return s
}
// Has returns true if and only if item is contained in the set.

View File

@@ -46,17 +46,19 @@ func StringKeySet(theMap interface{}) String {
}
// Insert adds items to the set.
func (s String) Insert(items ...string) {
func (s String) Insert(items ...string) String {
for _, item := range items {
s[item] = Empty{}
}
return s
}
// Delete removes all items from the set.
func (s String) Delete(items ...string) {
func (s String) Delete(items ...string) String {
for _, item := range items {
delete(s, item)
}
return s
}
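With Insert and Delete now returning the receiver, construction and mutation can be chained. A small sketch (not part of this diff) using the String set from this package:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Insert/Delete return the set, so calls chain in one expression.
	s := sets.NewString("a", "b").Insert("c", "d").Delete("a")
	fmt.Println(s.List()) // [b c d]
}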
// Has returns true if and only if item is contained in the set.

View File

@@ -48,7 +48,7 @@ func (v *Error) ErrorBody() string {
var s string
switch v.Type {
case ErrorTypeRequired, ErrorTypeForbidden, ErrorTypeTooLong, ErrorTypeInternal:
s = fmt.Sprintf("%s", v.Type)
s = v.Type.String()
default:
value := v.BadValue
valueType := reflect.TypeOf(value)
@@ -116,6 +116,10 @@ const (
// This is similar to ErrorTypeInvalid, but the error will not include the
// too-long value. See TooLong().
ErrorTypeTooLong ErrorType = "FieldValueTooLong"
// ErrorTypeTooMany is used to report "too many". This is used to
// report that a given list has too many items. This is similar to FieldValueTooLong,
// but the error indicates quantity instead of length.
ErrorTypeTooMany ErrorType = "FieldValueTooMany"
// ErrorTypeInternal is used to report other errors that are not related
// to user input. See InternalError().
ErrorTypeInternal ErrorType = "InternalError"
@@ -138,6 +142,8 @@ func (t ErrorType) String() string {
return "Forbidden"
case ErrorTypeTooLong:
return "Too long"
case ErrorTypeTooMany:
return "Too many"
case ErrorTypeInternal:
return "Internal error"
default:
@@ -201,6 +207,13 @@ func TooLong(field *Path, value interface{}, maxLength int) *Error {
return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d characters", maxLength)}
}
// TooMany returns a *Error indicating "too many". This is used to
// report that a given list has too many items. This is similar to TooLong,
// but the returned error indicates quantity instead of length.
func TooMany(field *Path, actualQuantity, maxQuantity int) *Error {
return &Error{ErrorTypeTooMany, field.String(), actualQuantity, fmt.Sprintf("must have at most %d items", maxQuantity)}
}
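For illustration (not part of this diff), how the new TooMany error might be raised during validation; the field path and limits are hypothetical:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	// Hypothetical limit check: a list field holding 20 items where only 10 are allowed.
	p := field.NewPath("spec", "containers")
	err := field.TooMany(p, 20, 10)
	// Prints the field path, the "Too many" error type, the quantity and the detail.
	fmt.Println(err.Error())
}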
// InternalError returns a *Error indicating "internal error". This is used
// to signal that an error was found that was not directly related to user
// input. The err argument must be non-nil.

View File

@@ -21,6 +21,7 @@ import (
"math"
"net"
"regexp"
"strconv"
"strings"
"k8s.io/apimachinery/pkg/util/validation/field"
@@ -86,6 +87,8 @@ func IsFullyQualifiedName(fldPath *field.Path, name string) field.ErrorList {
const labelValueFmt string = "(" + qualifiedNameFmt + ")?"
const labelValueErrMsg string = "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character"
// LabelValueMaxLength is a label's max length
const LabelValueMaxLength int = 63
var labelValueRegexp = regexp.MustCompile("^" + labelValueFmt + "$")
@@ -106,6 +109,8 @@ func IsValidLabelValue(value string) []string {
const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?"
const dns1123LabelErrMsg string = "a DNS-1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character"
// DNS1123LabelMaxLength is a label's max length in DNS (RFC 1123)
const DNS1123LabelMaxLength int = 63
var dns1123LabelRegexp = regexp.MustCompile("^" + dns1123LabelFmt + "$")
@@ -125,6 +130,8 @@ func IsDNS1123Label(value string) []string {
const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*"
const dns1123SubdomainErrorMsg string = "a DNS-1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character"
// DNS1123SubdomainMaxLength is a subdomain's max length in DNS (RFC 1123)
const DNS1123SubdomainMaxLength int = 253
var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$")
@@ -144,6 +151,8 @@ func IsDNS1123Subdomain(value string) []string {
const dns1035LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?"
const dns1035LabelErrMsg string = "a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character"
// DNS1035LabelMaxLength is a label's max length in DNS (RFC 1035)
const DNS1035LabelMaxLength int = 63
var dns1035LabelRegexp = regexp.MustCompile("^" + dns1035LabelFmt + "$")
@@ -281,6 +290,7 @@ const percentErrMsg string = "a valid percent string must be a numeric string fo
var percentRegexp = regexp.MustCompile("^" + percentFmt + "$")
// IsValidPercent checks that string is in the form of a percentage
func IsValidPercent(percent string) []string {
if !percentRegexp.MatchString(percent) {
return []string{RegexError(percentErrMsg, percentFmt, "1%", "93%")}
@@ -389,3 +399,18 @@ func hasChDirPrefix(value string) []string {
}
return errs
}
// IsValidSocketAddr checks that string represents a valid socket address
// as defined in RFC 789. (e.g 0.0.0.0:10254 or [::]:10254))
func IsValidSocketAddr(value string) []string {
var errs []string
ip, port, err := net.SplitHostPort(value)
if err != nil {
errs = append(errs, "must be a valid socket address format, (e.g. 0.0.0.0:10254 or [::]:10254)")
return errs
}
portInt, _ := strconv.Atoi(port)
errs = append(errs, IsValidPortNum(portInt)...)
errs = append(errs, IsValidIP(ip)...)
return errs
}
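For illustration (not part of this diff), a sketch exercising the new validator:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation"
)

func main() {
	fmt.Println(validation.IsValidSocketAddr("0.0.0.0:10254")) // no errors
	fmt.Println(validation.IsValidSocketAddr("[::]:10254"))    // no errors
	fmt.Println(validation.IsValidSocketAddr("localhost"))     // missing port: format error
}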

View File

@@ -1,19 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package wait provides tools for polling or listening for changes
// to a condition.
package wait // import "k8s.io/apimachinery/pkg/util/wait"

View File

@@ -1,405 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package wait
import (
"context"
"errors"
"math/rand"
"sync"
"time"
"k8s.io/apimachinery/pkg/util/runtime"
)
// For any test of the style:
// ...
// <- time.After(timeout):
// t.Errorf("Timed out")
// The value for timeout should effectively be "forever." Obviously we don't want our tests to truly lock up forever, but 30s
// is long enough that it is effectively forever for the things that can slow down a run on a heavily contended machine
// (GC, seeks, etc), but not so long as to make a developer ctrl-c a test run if they do happen to break that test.
var ForeverTestTimeout = time.Second * 30
// NeverStop may be passed to Until to make it never stop.
var NeverStop <-chan struct{} = make(chan struct{})
// Group allows to start a group of goroutines and wait for their completion.
type Group struct {
wg sync.WaitGroup
}
func (g *Group) Wait() {
g.wg.Wait()
}
// StartWithChannel starts f in a new goroutine in the group.
// stopCh is passed to f as an argument. f should stop when stopCh is available.
func (g *Group) StartWithChannel(stopCh <-chan struct{}, f func(stopCh <-chan struct{})) {
g.Start(func() {
f(stopCh)
})
}
// StartWithContext starts f in a new goroutine in the group.
// ctx is passed to f as an argument. f should stop when ctx.Done() is available.
func (g *Group) StartWithContext(ctx context.Context, f func(context.Context)) {
g.Start(func() {
f(ctx)
})
}
// Start starts f in a new goroutine in the group.
func (g *Group) Start(f func()) {
g.wg.Add(1)
go func() {
defer g.wg.Done()
f()
}()
}
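// Example (illustrative sketch, not part of the original file): running two
// workers with Group and stopping them by closing a shared channel. The worker
// body is hypothetical; Group, StartWithChannel and Wait come from this package.
func exampleGroup() {
	stopCh := make(chan struct{})
	worker := func(stop <-chan struct{}) {
		<-stop // a real worker would loop until stop is closed
	}

	var g Group
	g.StartWithChannel(stopCh, worker)
	g.StartWithChannel(stopCh, worker)

	close(stopCh)
	g.Wait()
}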
// Forever calls f every period forever.
//
// Forever is syntactic sugar on top of Until.
func Forever(f func(), period time.Duration) {
Until(f, period, NeverStop)
}
// Until loops until stop channel is closed, running f every period.
//
// Until is syntactic sugar on top of JitterUntil with zero jitter factor and
// with sliding = true (which means the timer for period starts after the f
// completes).
func Until(f func(), period time.Duration, stopCh <-chan struct{}) {
JitterUntil(f, period, 0.0, true, stopCh)
}
// NonSlidingUntil loops until stop channel is closed, running f every
// period.
//
// NonSlidingUntil is syntactic sugar on top of JitterUntil with zero jitter
// factor, with sliding = false (meaning the timer for period starts at the same
// time as the function starts).
func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) {
JitterUntil(f, period, 0.0, false, stopCh)
}
// JitterUntil loops until stop channel is closed, running f every period.
//
// If jitterFactor is positive, the period is jittered before every run of f.
// If jitterFactor is not positive, the period is unchanged and not jittered.
//
// If sliding is true, the period is computed after f runs. If it is false then
// period includes the runtime for f.
//
// Close stopCh to stop. f may not be invoked if stop channel is already
// closed. Pass NeverStop if you don't want it to stop.
func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) {
var t *time.Timer
var sawTimeout bool
for {
select {
case <-stopCh:
return
default:
}
jitteredPeriod := period
if jitterFactor > 0.0 {
jitteredPeriod = Jitter(period, jitterFactor)
}
if !sliding {
t = resetOrReuseTimer(t, jitteredPeriod, sawTimeout)
}
func() {
defer runtime.HandleCrash()
f()
}()
if sliding {
t = resetOrReuseTimer(t, jitteredPeriod, sawTimeout)
}
// NOTE: b/c there is no priority selection in golang
// it is possible for this to race, meaning we could
// trigger t.C and stopCh, and t.C select falls through.
// In order to mitigate we re-check stopCh at the beginning
// of every loop to prevent extra executions of f().
select {
case <-stopCh:
return
case <-t.C:
sawTimeout = true
}
}
}
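// Example (illustrative sketch, not part of the original file): running a
// hypothetical sync function every 10 seconds with up to 10% jitter until
// stopCh is closed. With a jitterFactor of 0.0 this would be equivalent to
// Until(sync, 10*time.Second, stopCh). Only JitterUntil comes from this
// package; sync and stopCh are assumptions.
func exampleJitterUntil(sync func(), stopCh <-chan struct{}) {
	JitterUntil(sync, 10*time.Second, 0.1, true, stopCh)
}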
// Jitter returns a time.Duration between duration and duration + maxFactor *
// duration.
//
// This allows clients to avoid converging on periodic behavior. If maxFactor
// is 0.0, a suggested default value will be chosen.
func Jitter(duration time.Duration, maxFactor float64) time.Duration {
if maxFactor <= 0.0 {
maxFactor = 1.0
}
wait := duration + time.Duration(rand.Float64()*maxFactor*float64(duration))
return wait
}
// ErrWaitTimeout is returned when the condition exited without success.
var ErrWaitTimeout = errors.New("timed out waiting for the condition")
// ConditionFunc returns true if the condition is satisfied, or an error
// if the loop should be aborted.
type ConditionFunc func() (done bool, err error)
// Backoff holds parameters applied to a Backoff function.
type Backoff struct {
Duration time.Duration // the base duration
Factor float64 // Duration is multiplied by factor each iteration
Jitter float64 // The amount of jitter applied each iteration
Steps int // Exit with error after this many steps
}
// ExponentialBackoff repeats a condition check with exponential backoff.
//
// It checks the condition up to Steps times, increasing the wait by multiplying
// the previous duration by Factor.
//
// If Jitter is greater than zero, a random amount of each duration is added
// (between duration and duration*(1+jitter)).
//
// If the condition never returns true, ErrWaitTimeout is returned. All other
// errors terminate immediately.
func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error {
duration := backoff.Duration
for i := 0; i < backoff.Steps; i++ {
if i != 0 {
adjusted := duration
if backoff.Jitter > 0.0 {
adjusted = Jitter(duration, backoff.Jitter)
}
time.Sleep(adjusted)
duration = time.Duration(float64(duration) * backoff.Factor)
}
if ok, err := condition(); err != nil || ok {
return err
}
}
return ErrWaitTimeout
}
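// Example (illustrative sketch, not part of the original file): retrying a
// hypothetical flaky operation with exponential backoff. With these parameters
// the waits between attempts are roughly 100ms, 200ms, 400ms and 800ms (plus
// jitter); if all five attempts fail, ErrWaitTimeout is returned.
func exampleExponentialBackoff(tryOnce func() error) error {
	backoff := Backoff{
		Duration: 100 * time.Millisecond,
		Factor:   2.0,
		Jitter:   0.1,
		Steps:    5,
	}
	return ExponentialBackoff(backoff, func() (bool, error) {
		if err := tryOnce(); err != nil {
			return false, nil // not done yet, retry after the next backoff step
		}
		return true, nil
	})
}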
// Poll tries a condition func until it returns true, an error, or the timeout
// is reached.
//
// Poll always waits the interval before the run of 'condition'.
// 'condition' will always be invoked at least once.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// If you want to Poll something forever, see PollInfinite.
func Poll(interval, timeout time.Duration, condition ConditionFunc) error {
return pollInternal(poller(interval, timeout), condition)
}
func pollInternal(wait WaitFunc, condition ConditionFunc) error {
done := make(chan struct{})
defer close(done)
return WaitFor(wait, condition, done)
}
// PollImmediate tries a condition func until it returns true, an error, or the timeout
// is reached.
//
// PollImmediate always checks 'condition' before waiting for the interval. 'condition'
// will always be invoked at least once.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// If you want to Poll something forever, see PollInfinite.
func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error {
return pollImmediateInternal(poller(interval, timeout), condition)
}
func pollImmediateInternal(wait WaitFunc, condition ConditionFunc) error {
done, err := condition()
if err != nil {
return err
}
if done {
return nil
}
return pollInternal(wait, condition)
}
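// Example (illustrative sketch, not part of the original file): waiting up to
// one minute for a hypothetical readiness check, probing every two seconds.
// Only Poll comes from this package; isReady is an assumption.
func examplePoll(isReady func() bool) error {
	return Poll(2*time.Second, time.Minute, func() (bool, error) {
		return isReady(), nil
	})
}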
// PollInfinite tries a condition func until it returns true or an error
//
// PollInfinite always waits the interval before the run of 'condition'.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
func PollInfinite(interval time.Duration, condition ConditionFunc) error {
done := make(chan struct{})
defer close(done)
return PollUntil(interval, condition, done)
}
// PollImmediateInfinite tries a condition func until it returns true or an error.
//
// PollImmediateInfinite runs the 'condition' before waiting for the interval.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error {
done, err := condition()
if err != nil {
return err
}
if done {
return nil
}
return PollInfinite(interval, condition)
}
// PollUntil tries a condition func until it returns true, an error or stopCh is
// closed.
//
// PollUntil always waits interval before the first run of 'condition'.
// 'condition' will always be invoked at least once.
func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
return WaitFor(poller(interval, 0), condition, stopCh)
}
// PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed.
//
// PollImmediateUntil runs the 'condition' before waiting for the interval.
// 'condition' will always be invoked at least once.
func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
done, err := condition()
if err != nil {
return err
}
if done {
return nil
}
select {
case <-stopCh:
return ErrWaitTimeout
default:
return PollUntil(interval, condition, stopCh)
}
}
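// Example (illustrative sketch, not part of the original file): checking a
// hypothetical condition every second until it succeeds, returns an error, or
// stopCh is closed (in which case ErrWaitTimeout is returned). Only PollUntil
// comes from this package.
func examplePollUntil(ready ConditionFunc, stopCh <-chan struct{}) error {
	return PollUntil(time.Second, ready, stopCh)
}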
// WaitFunc creates a channel that receives an item every time a test
// should be executed and is closed when the last test should be invoked.
type WaitFunc func(done <-chan struct{}) <-chan struct{}
// WaitFor continually checks 'fn' as driven by 'wait'.
//
// WaitFor gets a channel from 'wait()', and then invokes 'fn' once for every value
// placed on the channel and once more when the channel is closed.
//
// If 'fn' returns an error the loop ends and that error is returned, and if
// 'fn' returns true the loop ends and nil is returned.
//
// ErrWaitTimeout will be returned if the channel is closed without fn ever
// returning true.
func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error {
c := wait(done)
for {
_, open := <-c
ok, err := fn()
if err != nil {
return err
}
if ok {
return nil
}
if !open {
break
}
}
return ErrWaitTimeout
}
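// Example (illustrative sketch, not part of the original file): driving WaitFor
// with the package's own poller so the condition runs every 500ms for up to 5s.
// The done channel and condition are assumptions; WaitFor and poller come from
// this package.
func exampleWaitFor(condition ConditionFunc) error {
	done := make(chan struct{})
	defer close(done)
	return WaitFor(poller(500*time.Millisecond, 5*time.Second), condition, done)
}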
// poller returns a WaitFunc that will send to the channel every interval until
// timeout has elapsed and then closes the channel.
//
// Over very short intervals you may receive no ticks before the channel is
// closed. A timeout of 0 is interpreted as infinity.
//
// Output ticks are not buffered. If the channel is not ready to receive an
// item, the tick is skipped.
func poller(interval, timeout time.Duration) WaitFunc {
return WaitFunc(func(done <-chan struct{}) <-chan struct{} {
ch := make(chan struct{})
go func() {
defer close(ch)
tick := time.NewTicker(interval)
defer tick.Stop()
var after <-chan time.Time
if timeout != 0 {
// time.After is more convenient, but it
// potentially leaves timers around much longer
// than necessary if we exit early.
timer := time.NewTimer(timeout)
after = timer.C
defer timer.Stop()
}
for {
select {
case <-tick.C:
// If the consumer isn't ready for this signal, drop it and
// check the other channels.
select {
case ch <- struct{}{}:
default:
}
case <-after:
return
case <-done:
return
}
}
}()
return ch
})
}
// resetOrReuseTimer avoids allocating a new timer if one is already in use.
// Not safe for multiple threads.
func resetOrReuseTimer(t *time.Timer, d time.Duration, sawTimeout bool) *time.Timer {
if t == nil {
return time.NewTimer(d)
}
if !t.Stop() && !sawTimeout {
<-t.C
}
t.Reset(d)
return t
}

View File

@@ -26,8 +26,8 @@ import (
"strings"
"unicode"
"github.com/ghodss/yaml"
"github.com/golang/glog"
"k8s.io/klog"
"sigs.k8s.io/yaml"
)
// ToJSON converts a single YAML document into a JSON document
@@ -217,11 +217,9 @@ func (d *YAMLOrJSONDecoder) Decode(into interface{}) error {
if d.decoder == nil {
buffer, origData, isJSON := GuessJSONStream(d.r, d.bufferSize)
if isJSON {
glog.V(4).Infof("decoding stream as JSON")
d.decoder = json.NewDecoder(buffer)
d.rawData = origData
} else {
glog.V(4).Infof("decoding stream as YAML")
d.decoder = NewYAMLToJSONDecoder(buffer)
}
}
@@ -230,7 +228,7 @@ func (d *YAMLOrJSONDecoder) Decode(into interface{}) error {
if syntax, ok := err.(*json.SyntaxError); ok {
data, readErr := ioutil.ReadAll(jsonDecoder.Buffered())
if readErr != nil {
glog.V(4).Infof("reading stream failed: %v", readErr)
klog.V(4).Infof("reading stream failed: %v", readErr)
}
js := string(data)