vendor: bump k8s to v0.25.4

Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
CrazyMax
2023-03-14 18:20:37 +01:00
parent 4a73abfd64
commit cfa6b4f7c8
889 changed files with 96934 additions and 11708 deletions


@@ -1,445 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clock
import (
"sync"
"time"
)
// PassiveClock allows for injecting fake or real clocks into code
// that needs to read the current time but does not support scheduling
// activity in the future.
type PassiveClock interface {
Now() time.Time
Since(time.Time) time.Duration
}
// Clock allows for injecting fake or real clocks into code that
// needs to do arbitrary things based on time.
type Clock interface {
PassiveClock
After(time.Duration) <-chan time.Time
AfterFunc(time.Duration, func()) Timer
NewTimer(time.Duration) Timer
Sleep(time.Duration)
NewTicker(time.Duration) Ticker
}
// RealClock really calls time.Now()
type RealClock struct{}
// Now returns the current time.
func (RealClock) Now() time.Time {
return time.Now()
}
// Since returns time since the specified timestamp.
func (RealClock) Since(ts time.Time) time.Duration {
return time.Since(ts)
}
// After is the same as time.After(d).
func (RealClock) After(d time.Duration) <-chan time.Time {
return time.After(d)
}
// AfterFunc is the same as time.AfterFunc(d, f).
func (RealClock) AfterFunc(d time.Duration, f func()) Timer {
return &realTimer{
timer: time.AfterFunc(d, f),
}
}
// NewTimer returns a new Timer.
func (RealClock) NewTimer(d time.Duration) Timer {
return &realTimer{
timer: time.NewTimer(d),
}
}
// NewTicker returns a new Ticker.
func (RealClock) NewTicker(d time.Duration) Ticker {
return &realTicker{
ticker: time.NewTicker(d),
}
}
// Sleep pauses the RealClock for duration d.
func (RealClock) Sleep(d time.Duration) {
time.Sleep(d)
}
// FakePassiveClock implements PassiveClock, but returns an arbitrary time.
type FakePassiveClock struct {
lock sync.RWMutex
time time.Time
}
// FakeClock implements Clock, but returns an arbitrary time.
type FakeClock struct {
FakePassiveClock
// waiters are waiting for the fake time to pass their specified time
waiters []fakeClockWaiter
}
type fakeClockWaiter struct {
targetTime time.Time
stepInterval time.Duration
skipIfBlocked bool
destChan chan time.Time
afterFunc func()
}
// NewFakePassiveClock returns a new FakePassiveClock.
func NewFakePassiveClock(t time.Time) *FakePassiveClock {
return &FakePassiveClock{
time: t,
}
}
// NewFakeClock returns a new FakeClock
func NewFakeClock(t time.Time) *FakeClock {
return &FakeClock{
FakePassiveClock: *NewFakePassiveClock(t),
}
}
// Now returns f's time.
func (f *FakePassiveClock) Now() time.Time {
f.lock.RLock()
defer f.lock.RUnlock()
return f.time
}
// Since returns time since the time in f.
func (f *FakePassiveClock) Since(ts time.Time) time.Duration {
f.lock.RLock()
defer f.lock.RUnlock()
return f.time.Sub(ts)
}
// SetTime sets the time on the FakePassiveClock.
func (f *FakePassiveClock) SetTime(t time.Time) {
f.lock.Lock()
defer f.lock.Unlock()
f.time = t
}
// After is the Fake version of time.After(d).
func (f *FakeClock) After(d time.Duration) <-chan time.Time {
f.lock.Lock()
defer f.lock.Unlock()
stopTime := f.time.Add(d)
ch := make(chan time.Time, 1) // Don't block!
f.waiters = append(f.waiters, fakeClockWaiter{
targetTime: stopTime,
destChan: ch,
})
return ch
}
// AfterFunc is the Fake version of time.AfterFunc(d, callback).
func (f *FakeClock) AfterFunc(d time.Duration, cb func()) Timer {
f.lock.Lock()
defer f.lock.Unlock()
stopTime := f.time.Add(d)
ch := make(chan time.Time, 1) // Don't block!
timer := &fakeTimer{
fakeClock: f,
waiter: fakeClockWaiter{
targetTime: stopTime,
destChan: ch,
afterFunc: cb,
},
}
f.waiters = append(f.waiters, timer.waiter)
return timer
}
// NewTimer is the Fake version of time.NewTimer(d).
func (f *FakeClock) NewTimer(d time.Duration) Timer {
f.lock.Lock()
defer f.lock.Unlock()
stopTime := f.time.Add(d)
ch := make(chan time.Time, 1) // Don't block!
timer := &fakeTimer{
fakeClock: f,
waiter: fakeClockWaiter{
targetTime: stopTime,
destChan: ch,
},
}
f.waiters = append(f.waiters, timer.waiter)
return timer
}
// NewTicker returns a new Ticker.
func (f *FakeClock) NewTicker(d time.Duration) Ticker {
f.lock.Lock()
defer f.lock.Unlock()
tickTime := f.time.Add(d)
ch := make(chan time.Time, 1) // hold one tick
f.waiters = append(f.waiters, fakeClockWaiter{
targetTime: tickTime,
stepInterval: d,
skipIfBlocked: true,
destChan: ch,
})
return &fakeTicker{
c: ch,
}
}
// Step moves clock by Duration, notifies anyone that's called After, Tick, or NewTimer
func (f *FakeClock) Step(d time.Duration) {
f.lock.Lock()
defer f.lock.Unlock()
f.setTimeLocked(f.time.Add(d))
}
// SetTime sets the time on a FakeClock.
func (f *FakeClock) SetTime(t time.Time) {
f.lock.Lock()
defer f.lock.Unlock()
f.setTimeLocked(t)
}
// Actually changes the time and checks any waiters. f must be write-locked.
func (f *FakeClock) setTimeLocked(t time.Time) {
f.time = t
newWaiters := make([]fakeClockWaiter, 0, len(f.waiters))
for i := range f.waiters {
w := &f.waiters[i]
if !w.targetTime.After(t) {
if w.skipIfBlocked {
select {
case w.destChan <- t:
default:
}
} else {
w.destChan <- t
}
if w.afterFunc != nil {
w.afterFunc()
}
if w.stepInterval > 0 {
for !w.targetTime.After(t) {
w.targetTime = w.targetTime.Add(w.stepInterval)
}
newWaiters = append(newWaiters, *w)
}
} else {
newWaiters = append(newWaiters, f.waiters[i])
}
}
f.waiters = newWaiters
}
// HasWaiters returns true if After or AfterFunc has been called on f but not yet satisfied
// (so you can write race-free tests).
func (f *FakeClock) HasWaiters() bool {
f.lock.RLock()
defer f.lock.RUnlock()
return len(f.waiters) > 0
}
// Sleep pauses the FakeClock for duration d.
func (f *FakeClock) Sleep(d time.Duration) {
f.Step(d)
}
// IntervalClock implements Clock, but each invocation of Now steps the clock forward the specified duration
type IntervalClock struct {
Time time.Time
Duration time.Duration
}
// Now returns i's time.
func (i *IntervalClock) Now() time.Time {
i.Time = i.Time.Add(i.Duration)
return i.Time
}
// Since returns time since the time in i.
func (i *IntervalClock) Since(ts time.Time) time.Duration {
return i.Time.Sub(ts)
}
// After is currently unimplemented, will panic.
// TODO: make interval clock use FakeClock so this can be implemented.
func (*IntervalClock) After(d time.Duration) <-chan time.Time {
panic("IntervalClock doesn't implement After")
}
// AfterFunc is currently unimplemented, will panic.
// TODO: make interval clock use FakeClock so this can be implemented.
func (*IntervalClock) AfterFunc(d time.Duration, cb func()) Timer {
panic("IntervalClock doesn't implement AfterFunc")
}
// NewTimer is currently unimplemented, will panic.
// TODO: make interval clock use FakeClock so this can be implemented.
func (*IntervalClock) NewTimer(d time.Duration) Timer {
panic("IntervalClock doesn't implement NewTimer")
}
// NewTicker is currently unimplemented, will panic.
// TODO: make interval clock use FakeClock so this can be implemented.
func (*IntervalClock) NewTicker(d time.Duration) Ticker {
panic("IntervalClock doesn't implement NewTicker")
}
// Sleep is currently unimplemented; will panic.
func (*IntervalClock) Sleep(d time.Duration) {
panic("IntervalClock doesn't implement Sleep")
}
// Timer allows for injecting fake or real timers into code that
// needs to do arbitrary things based on time.
type Timer interface {
C() <-chan time.Time
Stop() bool
Reset(d time.Duration) bool
}
// realTimer is backed by an actual time.Timer.
type realTimer struct {
timer *time.Timer
}
// C returns the underlying timer's channel.
func (r *realTimer) C() <-chan time.Time {
return r.timer.C
}
// Stop calls Stop() on the underlying timer.
func (r *realTimer) Stop() bool {
return r.timer.Stop()
}
// Reset calls Reset() on the underlying timer.
func (r *realTimer) Reset(d time.Duration) bool {
return r.timer.Reset(d)
}
// fakeTimer implements Timer based on a FakeClock.
type fakeTimer struct {
fakeClock *FakeClock
waiter fakeClockWaiter
}
// C returns the channel that notifies when this timer has fired.
func (f *fakeTimer) C() <-chan time.Time {
return f.waiter.destChan
}
// Stop conditionally stops the timer. If the timer has neither fired
// nor been stopped then this call stops the timer and returns true,
// otherwise this call returns false. This is like time.Timer::Stop.
func (f *fakeTimer) Stop() bool {
f.fakeClock.lock.Lock()
defer f.fakeClock.lock.Unlock()
// The timer has already fired or been stopped, unless it is found
// among the clock's waiters.
stopped := false
oldWaiters := f.fakeClock.waiters
newWaiters := make([]fakeClockWaiter, 0, len(oldWaiters))
seekChan := f.waiter.destChan
for i := range oldWaiters {
// Identify the timer's fakeClockWaiter by the identity of the
// destination channel, nothing else is necessarily unique and
// constant since the timer's creation.
if oldWaiters[i].destChan == seekChan {
stopped = true
} else {
newWaiters = append(newWaiters, oldWaiters[i])
}
}
f.fakeClock.waiters = newWaiters
return stopped
}
// Reset conditionally updates the firing time of the timer. If the
// timer has neither fired nor been stopped then this call resets the
// timer to the fake clock's "now" + d and returns true, otherwise
// it creates a new waiter, adds it to the clock, and returns true.
//
// It is not possible to return false, because a fake timer can be reset
// from any state (waiting to fire, already fired, and stopped).
//
// See the GoDoc for time.Timer::Reset for more context on why
// the return value of Reset() is not useful.
func (f *fakeTimer) Reset(d time.Duration) bool {
f.fakeClock.lock.Lock()
defer f.fakeClock.lock.Unlock()
waiters := f.fakeClock.waiters
seekChan := f.waiter.destChan
for i := range waiters {
if waiters[i].destChan == seekChan {
waiters[i].targetTime = f.fakeClock.time.Add(d)
return true
}
}
// No existing waiter, timer has already fired or been reset.
// We should still enable Reset() to succeed by creating a
// new waiter and adding it to the clock's waiters.
newWaiter := fakeClockWaiter{
targetTime: f.fakeClock.time.Add(d),
destChan: seekChan,
}
f.fakeClock.waiters = append(f.fakeClock.waiters, newWaiter)
return true
}
// Ticker defines the Ticker interface
type Ticker interface {
C() <-chan time.Time
Stop()
}
type realTicker struct {
ticker *time.Ticker
}
func (t *realTicker) C() <-chan time.Time {
return t.ticker.C
}
func (t *realTicker) Stop() {
t.ticker.Stop()
}
type fakeTicker struct {
c <-chan time.Time
}
func (t *fakeTicker) C() <-chan time.Time {
return t.c
}
func (t *fakeTicker) Stop() {
}
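This deleted fake clock moves to k8s.io/utils/clock in this bump (see the import swap in the wait package further down). A minimal test-style sketch of the API shown above, assuming the replacement testing subpackage keeps the same NewFakeClock/After/Step/HasWaiters surface:

package clock_test

import (
    "testing"
    "time"

    clocktesting "k8s.io/utils/clock/testing" // assumed new home of FakeClock in v0.25.x
)

func TestFakeClockFires(t *testing.T) {
    fc := clocktesting.NewFakeClock(time.Now())
    ch := fc.After(5 * time.Second) // registers a waiter; never blocks (buffered channel)
    if !fc.HasWaiters() {
        t.Fatal("expected a pending waiter")
    }
    fc.Step(5 * time.Second) // fires every waiter whose targetTime has been reached
    select {
    case <-ch:
        // received the advanced fake time
    default:
        t.Fatal("expected the After channel to have fired")
    }
}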


@@ -56,10 +56,10 @@ type lengthDelimitedFrameReader struct {
//
// The protocol is:
//
// stream: message ...
// message: prefix body
// prefix: 4 byte uint32 in BigEndian order, denotes length of body
// body: bytes (0..prefix)
// stream: message ...
// message: prefix body
// prefix: 4 byte uint32 in BigEndian order, denotes length of body
// body: bytes (0..prefix)
//
// If the buffer passed to Read is not long enough to contain an entire frame, io.ErrShortRead
// will be returned along with the number of bytes read.
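The comment above only specifies the wire format; a hypothetical writer for the same framing (writeFrame is not part of the vendored code; "encoding/binary" and "io" imports assumed):

func writeFrame(w io.Writer, body []byte) error {
    var prefix [4]byte
    binary.BigEndian.PutUint32(prefix[:], uint32(len(body))) // prefix: 4 byte uint32 in BigEndian order
    if _, err := w.Write(prefix[:]); err != nil {
        return err
    }
    _, err := w.Write(body) // body: bytes (0..prefix)
    return err
}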
@@ -132,14 +132,14 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) {
// Return whatever remaining data exists from an in progress frame
if n := len(r.remaining); n > 0 {
if n <= len(data) {
//lint:ignore SA4006,SA4010 underlying array of data is modified here.
//nolint:staticcheck // SA4006,SA4010 underlying array of data is modified here.
data = append(data[0:0], r.remaining...)
r.remaining = nil
return n, nil
}
n = len(data)
//lint:ignore SA4006,SA4010 underlying array of data is modified here.
//nolint:staticcheck // SA4006,SA4010 underlying array of data is modified here.
data = append(data[0:0], r.remaining[:n]...)
r.remaining = r.remaining[n:]
return n, io.ErrShortBuffer
@@ -157,7 +157,7 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) {
// and set m to it, which means we need to copy the partial result back into data and preserve
// the remaining result for subsequent reads.
if len(m) > n {
//lint:ignore SA4006,SA4010 underlying array of data is modified here.
//nolint:staticcheck // SA4006,SA4010 underlying array of data is modified here.
data = append(data[0:0], m[:n]...)
r.remaining = m[n:]
return n, io.ErrShortBuffer


@@ -18,12 +18,11 @@ package spdy
import (
"bufio"
"bytes"
"context"
"crypto/tls"
"encoding/base64"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
@@ -32,6 +31,7 @@ import (
"strings"
"time"
"golang.org/x/net/proxy"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -65,12 +65,6 @@ type SpdyRoundTripper struct {
// Used primarily for mocking the proxy discovery in tests.
proxier func(req *http.Request) (*url.URL, error)
// followRedirects indicates if the round tripper should examine responses for redirects and
// follow them.
followRedirects bool
// requireSameHostRedirects restricts redirect following to only follow redirects to the same host
// as the original request.
requireSameHostRedirects bool
// pingPeriod is a period for sending Ping frames over established
// connections.
pingPeriod time.Duration
@@ -82,37 +76,31 @@ var _ utilnet.Dialer = &SpdyRoundTripper{}
// NewRoundTripper creates a new SpdyRoundTripper that will use the specified
// tlsConfig.
func NewRoundTripper(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool) *SpdyRoundTripper {
func NewRoundTripper(tlsConfig *tls.Config) *SpdyRoundTripper {
return NewRoundTripperWithConfig(RoundTripperConfig{
TLS: tlsConfig,
FollowRedirects: followRedirects,
RequireSameHostRedirects: requireSameHostRedirects,
TLS: tlsConfig,
})
}
// NewRoundTripperWithProxy creates a new SpdyRoundTripper that will use the
// specified tlsConfig and proxy func.
func NewRoundTripperWithProxy(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool, proxier func(*http.Request) (*url.URL, error)) *SpdyRoundTripper {
func NewRoundTripperWithProxy(tlsConfig *tls.Config, proxier func(*http.Request) (*url.URL, error)) *SpdyRoundTripper {
return NewRoundTripperWithConfig(RoundTripperConfig{
TLS: tlsConfig,
FollowRedirects: followRedirects,
RequireSameHostRedirects: requireSameHostRedirects,
Proxier: proxier,
TLS: tlsConfig,
Proxier: proxier,
})
}
// NewRoundTripperWithProxy creates a new SpdyRoundTripper with the specified
// NewRoundTripperWithConfig creates a new SpdyRoundTripper with the specified
// configuration.
func NewRoundTripperWithConfig(cfg RoundTripperConfig) *SpdyRoundTripper {
if cfg.Proxier == nil {
cfg.Proxier = utilnet.NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment)
}
return &SpdyRoundTripper{
tlsConfig: cfg.TLS,
followRedirects: cfg.FollowRedirects,
requireSameHostRedirects: cfg.RequireSameHostRedirects,
proxier: cfg.Proxier,
pingPeriod: cfg.PingPeriod,
tlsConfig: cfg.TLS,
proxier: cfg.Proxier,
pingPeriod: cfg.PingPeriod,
}
}
@@ -125,9 +113,6 @@ type RoundTripperConfig struct {
// PingPeriod is a period for sending SPDY Pings on the connection.
// Optional.
PingPeriod time.Duration
FollowRedirects bool
RequireSameHostRedirects bool
}
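For downstream call sites the redirect flags simply disappear from the constructors; a sketch of a migrated caller using only the config fields shown above (the spdy alias and the 5s ping period are illustrative):

func newSPDYRoundTripper(tlsConfig *tls.Config) *spdy.SpdyRoundTripper {
    // before this bump: spdy.NewRoundTripper(tlsConfig, followRedirects, requireSameHostRedirects)
    return spdy.NewRoundTripperWithConfig(spdy.RoundTripperConfig{
        TLS:        tlsConfig,       // used for https targets
        PingPeriod: 5 * time.Second, // optional keep-alive pings
        // Proxier is optional; it defaults to NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment)
    })
}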
// TLSClientConfig implements pkg/util/net.TLSClientConfigHolder for proper TLS checking during
@@ -163,6 +148,18 @@ func (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) {
return s.dialWithoutProxy(req.Context(), req.URL)
}
switch proxyURL.Scheme {
case "socks5":
return s.dialWithSocks5Proxy(req, proxyURL)
case "https", "http", "":
return s.dialWithHttpProxy(req, proxyURL)
}
return nil, fmt.Errorf("proxy URL scheme not supported: %s", proxyURL.Scheme)
}
// dialWithHttpProxy dials the host specified by url through an http or an https proxy.
func (s *SpdyRoundTripper) dialWithHttpProxy(req *http.Request, proxyURL *url.URL) (net.Conn, error) {
// ensure we use a canonical host with proxyReq
targetHost := netutil.CanonicalAddr(req.URL)
@@ -173,20 +170,22 @@ func (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) {
Host: targetHost,
}
proxyReq = *proxyReq.WithContext(req.Context())
if pa := s.proxyAuth(proxyURL); pa != "" {
proxyReq.Header = http.Header{}
proxyReq.Header.Set("Proxy-Authorization", pa)
}
proxyDialConn, err := s.dialWithoutProxy(req.Context(), proxyURL)
proxyDialConn, err := s.dialWithoutProxy(proxyReq.Context(), proxyURL)
if err != nil {
return nil, err
}
//lint:ignore SA1019 ignore deprecated httputil.NewProxyClientConn
//nolint:staticcheck // SA1019 ignore deprecated httputil.NewProxyClientConn
proxyClientConn := httputil.NewProxyClientConn(proxyDialConn, nil)
_, err = proxyClientConn.Do(&proxyReq)
//lint:ignore SA1019 ignore deprecated httputil.ErrPersistEOF: it might be
//nolint:staticcheck // SA1019 ignore deprecated httputil.ErrPersistEOF: it might be
// returned from the invocation of proxyClientConn.Do
if err != nil && err != httputil.ErrPersistEOF {
return nil, err
@@ -194,9 +193,58 @@ func (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) {
rwc, _ := proxyClientConn.Hijack()
if req.URL.Scheme != "https" {
return rwc, nil
if req.URL.Scheme == "https" {
return s.tlsConn(proxyReq.Context(), rwc, targetHost)
}
return rwc, nil
}
// dialWithSocks5Proxy dials the host specified by url through a socks5 proxy.
func (s *SpdyRoundTripper) dialWithSocks5Proxy(req *http.Request, proxyURL *url.URL) (net.Conn, error) {
// ensure we use a canonical host with proxyReq
targetHost := netutil.CanonicalAddr(req.URL)
proxyDialAddr := netutil.CanonicalAddr(proxyURL)
var auth *proxy.Auth
if proxyURL.User != nil {
pass, _ := proxyURL.User.Password()
auth = &proxy.Auth{
User: proxyURL.User.Username(),
Password: pass,
}
}
dialer := s.Dialer
if dialer == nil {
dialer = &net.Dialer{
Timeout: 30 * time.Second,
}
}
proxyDialer, err := proxy.SOCKS5("tcp", proxyDialAddr, auth, dialer)
if err != nil {
return nil, err
}
// According to the implementation of proxy.SOCKS5, the type assertion will always succeed
contextDialer, ok := proxyDialer.(proxy.ContextDialer)
if !ok {
return nil, errors.New("SOCKS5 Dialer must implement ContextDialer")
}
proxyDialConn, err := contextDialer.DialContext(req.Context(), "tcp", targetHost)
if err != nil {
return nil, err
}
if req.URL.Scheme == "https" {
return s.tlsConn(req.Context(), proxyDialConn, targetHost)
}
return proxyDialConn, nil
}
// tlsConn returns a TLS client side connection using rwc as the underlying transport.
func (s *SpdyRoundTripper) tlsConn(ctx context.Context, rwc net.Conn, targetHost string) (net.Conn, error) {
host, _, err := net.SplitHostPort(targetHost)
if err != nil {
@@ -214,17 +262,8 @@ func (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) {
tlsConn := tls.Client(rwc, tlsConfig)
// need to manually call Handshake() so we can call VerifyHostname() below
if err := tlsConn.Handshake(); err != nil {
return nil, err
}
// Return if we were configured to skip validation
if tlsConfig.InsecureSkipVerify {
return tlsConn, nil
}
if err := tlsConn.VerifyHostname(tlsConfig.ServerName); err != nil {
if err := tlsConn.HandshakeContext(ctx); err != nil {
tlsConn.Close()
return nil, err
}
@@ -234,46 +273,20 @@ func (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) {
// dialWithoutProxy dials the host specified by url, using TLS if appropriate.
func (s *SpdyRoundTripper) dialWithoutProxy(ctx context.Context, url *url.URL) (net.Conn, error) {
dialAddr := netutil.CanonicalAddr(url)
dialer := s.Dialer
if dialer == nil {
dialer = &net.Dialer{}
}
if url.Scheme == "http" {
if s.Dialer == nil {
var d net.Dialer
return d.DialContext(ctx, "tcp", dialAddr)
} else {
return s.Dialer.DialContext(ctx, "tcp", dialAddr)
}
return dialer.DialContext(ctx, "tcp", dialAddr)
}
// TODO validate the TLSClientConfig is set up?
var conn *tls.Conn
var err error
if s.Dialer == nil {
conn, err = tls.Dial("tcp", dialAddr, s.tlsConfig)
} else {
conn, err = tls.DialWithDialer(s.Dialer, "tcp", dialAddr, s.tlsConfig)
tlsDialer := tls.Dialer{
NetDialer: dialer,
Config: s.tlsConfig,
}
if err != nil {
return nil, err
}
// Return if we were configured to skip validation
if s.tlsConfig != nil && s.tlsConfig.InsecureSkipVerify {
return conn, nil
}
host, _, err := net.SplitHostPort(dialAddr)
if err != nil {
return nil, err
}
if s.tlsConfig != nil && len(s.tlsConfig.ServerName) > 0 {
host = s.tlsConfig.ServerName
}
err = conn.VerifyHostname(host)
if err != nil {
return nil, err
}
return conn, nil
return tlsDialer.DialContext(ctx, "tcp", dialAddr)
}
// proxyAuth returns, for a given proxy URL, the value to be used for the Proxy-Authorization header
@@ -290,39 +303,20 @@ func (s *SpdyRoundTripper) proxyAuth(proxyURL *url.URL) string {
// clients may call SpdyRoundTripper.Connection() to retrieve the upgraded
// connection.
func (s *SpdyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
header := utilnet.CloneHeader(req.Header)
header.Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade)
header.Add(httpstream.HeaderUpgrade, HeaderSpdy31)
req = utilnet.CloneRequest(req)
req.Header.Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade)
req.Header.Add(httpstream.HeaderUpgrade, HeaderSpdy31)
var (
conn net.Conn
rawResponse []byte
err error
)
if s.followRedirects {
conn, rawResponse, err = utilnet.ConnectWithRedirects(req.Method, req.URL, header, req.Body, s, s.requireSameHostRedirects)
} else {
clone := utilnet.CloneRequest(req)
clone.Header = header
conn, err = s.Dial(clone)
}
conn, err := s.Dial(req)
if err != nil {
return nil, err
}
responseReader := bufio.NewReader(
io.MultiReader(
bytes.NewBuffer(rawResponse),
conn,
),
)
responseReader := bufio.NewReader(conn)
resp, err := http.ReadResponse(responseReader, nil)
if err != nil {
if conn != nil {
conn.Close()
}
conn.Close()
return nil, err
}


@@ -94,7 +94,7 @@ func (u responseUpgrader) UpgradeResponse(w http.ResponseWriter, req *http.Reque
hijacker, ok := w.(http.Hijacker)
if !ok {
errorMsg := fmt.Sprintf("unable to upgrade: unable to hijack response")
errorMsg := "unable to upgrade: unable to hijack response"
http.Error(w, errorMsg, http.StatusInternalServerError)
return nil
}


@@ -78,25 +78,25 @@ func init() {
var fileDescriptor_94e046ae3ce6121c = []byte{
// 292 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8f, 0x31, 0x4b, 0x33, 0x31,
0x1c, 0xc6, 0x93, 0xb7, 0x7d, 0x8b, 0x9e, 0xe0, 0x50, 0x1c, 0x8a, 0x43, 0x7a, 0x28, 0xc8, 0x0d,
0x9a, 0xac, 0xe2, 0xd8, 0xad, 0x20, 0x08, 0x57, 0x71, 0x70, 0xbb, 0x6b, 0x63, 0x1a, 0xae, 0x4d,
0x42, 0xee, 0x7f, 0xc2, 0x6d, 0xfd, 0x08, 0xba, 0x39, 0xfa, 0x71, 0x6e, 0xec, 0xd8, 0x41, 0x8a,
0x17, 0xbf, 0x85, 0x93, 0x5c, 0xee, 0x40, 0xa7, 0xe4, 0x79, 0x9e, 0xdf, 0x2f, 0x90, 0xe0, 0x36,
0xbb, 0xce, 0xa9, 0xd4, 0x2c, 0x2b, 0x52, 0x6e, 0x15, 0x07, 0x9e, 0xb3, 0x67, 0xae, 0x16, 0xda,
0xb2, 0x6e, 0x48, 0x8c, 0x5c, 0x27, 0xf3, 0xa5, 0x54, 0xdc, 0x96, 0xcc, 0x64, 0x82, 0x15, 0x20,
0x57, 0x4c, 0x2a, 0xc8, 0xc1, 0x32, 0xc1, 0x15, 0xb7, 0x09, 0xf0, 0x05, 0x35, 0x56, 0x83, 0x1e,
0x9e, 0xb7, 0x12, 0xfd, 0x2b, 0x51, 0x93, 0x09, 0xda, 0x48, 0xb4, 0x95, 0x4e, 0xaf, 0x84, 0x84,
0x65, 0x91, 0xd2, 0xb9, 0x5e, 0x33, 0xa1, 0x85, 0x66, 0xde, 0x4d, 0x8b, 0x27, 0x9f, 0x7c, 0xf0,
0xb7, 0xf6, 0xcd, 0xb3, 0x57, 0x1c, 0x1c, 0x4d, 0x15, 0xdc, 0xd9, 0x19, 0x58, 0xa9, 0xc4, 0x30,
0x0a, 0xfa, 0x50, 0x1a, 0x3e, 0xc2, 0x21, 0x8e, 0x7a, 0x93, 0x93, 0x6a, 0x3f, 0x46, 0x6e, 0x3f,
0xee, 0xdf, 0x97, 0x86, 0x7f, 0x77, 0x67, 0xec, 0x89, 0xe1, 0x45, 0x30, 0x90, 0x0a, 0x1e, 0x92,
0xd5, 0xe8, 0x5f, 0x88, 0xa3, 0xff, 0x93, 0xe3, 0x8e, 0x1d, 0x4c, 0x7d, 0x1b, 0x77, 0x6b, 0xc3,
0xe5, 0x60, 0x1b, 0xae, 0x17, 0xe2, 0xe8, 0xf0, 0x97, 0x9b, 0xf9, 0x36, 0xee, 0xd6, 0x9b, 0x83,
0xb7, 0xf7, 0x31, 0xda, 0x7c, 0x84, 0x68, 0x72, 0x59, 0xd5, 0x04, 0x6d, 0x6b, 0x82, 0x76, 0x35,
0x41, 0x1b, 0x47, 0x70, 0xe5, 0x08, 0xde, 0x3a, 0x82, 0x77, 0x8e, 0xe0, 0x4f, 0x47, 0xf0, 0xcb,
0x17, 0x41, 0x8f, 0x83, 0xf6, 0xc3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x52, 0xa0, 0xb5, 0xc9,
0x64, 0x01, 0x00, 0x00,
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x90, 0xb1, 0x4a, 0x03, 0x31,
0x1c, 0xc6, 0x13, 0x5b, 0x8b, 0x9e, 0xe0, 0x50, 0x1c, 0x8a, 0x43, 0x7a, 0x58, 0x90, 0x5b, 0x4c,
0x56, 0x71, 0xec, 0x56, 0x10, 0x84, 0x56, 0x1c, 0xdc, 0xee, 0xda, 0x98, 0x86, 0x6b, 0x93, 0x90,
0xfb, 0x9f, 0x70, 0x5b, 0x1f, 0x41, 0x37, 0x47, 0x1f, 0xe7, 0xc6, 0x8e, 0x1d, 0xa4, 0x78, 0xf1,
0x2d, 0x9c, 0xe4, 0x72, 0x07, 0x3a, 0x3a, 0x25, 0xdf, 0xf7, 0xfd, 0x7e, 0x19, 0x12, 0xdc, 0xa6,
0xd7, 0x19, 0x95, 0x9a, 0xa5, 0x79, 0xc2, 0xad, 0xe2, 0xc0, 0x33, 0xf6, 0xcc, 0xd5, 0x42, 0x5b,
0xd6, 0x0e, 0xb1, 0x91, 0xeb, 0x78, 0xbe, 0x94, 0x8a, 0xdb, 0x82, 0x99, 0x54, 0xb0, 0x1c, 0xe4,
0x8a, 0x49, 0x05, 0x19, 0x58, 0x26, 0xb8, 0xe2, 0x36, 0x06, 0xbe, 0xa0, 0xc6, 0x6a, 0xd0, 0xfd,
0x51, 0x23, 0xd1, 0xbf, 0x12, 0x35, 0xa9, 0xa0, 0xb5, 0x44, 0x1b, 0xe9, 0xfc, 0x4a, 0x48, 0x58,
0xe6, 0x09, 0x9d, 0xeb, 0x35, 0x13, 0x5a, 0x68, 0xe6, 0xdd, 0x24, 0x7f, 0xf2, 0xc9, 0x07, 0x7f,
0x6b, 0xde, 0xbc, 0x78, 0xc5, 0xc1, 0xc9, 0x44, 0xc1, 0x9d, 0x9d, 0x81, 0x95, 0x4a, 0xf4, 0xa3,
0xa0, 0x0b, 0x85, 0xe1, 0x03, 0x1c, 0xe2, 0xa8, 0x33, 0x3e, 0x2b, 0xf7, 0x43, 0xe4, 0xf6, 0xc3,
0xee, 0x7d, 0x61, 0xf8, 0x77, 0x7b, 0x4e, 0x3d, 0xd1, 0xbf, 0x0c, 0x7a, 0x52, 0xc1, 0x43, 0xbc,
0x1a, 0x1c, 0x84, 0x38, 0x3a, 0x1c, 0x9f, 0xb6, 0x6c, 0x6f, 0xe2, 0xdb, 0x69, 0xbb, 0xd6, 0x5c,
0x06, 0xb6, 0xe6, 0x3a, 0x21, 0x8e, 0x8e, 0x7f, 0xb9, 0x99, 0x6f, 0xa7, 0xed, 0x7a, 0x73, 0xf4,
0xf6, 0x3e, 0x44, 0x9b, 0x8f, 0x10, 0x8d, 0x27, 0x65, 0x45, 0xd0, 0xb6, 0x22, 0x68, 0x57, 0x11,
0xb4, 0x71, 0x04, 0x97, 0x8e, 0xe0, 0xad, 0x23, 0x78, 0xe7, 0x08, 0xfe, 0x74, 0x04, 0xbf, 0x7c,
0x11, 0xf4, 0x38, 0xfa, 0xc7, 0x17, 0xfe, 0x04, 0x00, 0x00, 0xff, 0xff, 0xdc, 0xc4, 0xf0, 0xa0,
0x81, 0x01, 0x00, 0x00,
}
func (m *IntOrString) Marshal() (dAtA []byte, err error) {


@@ -22,7 +22,7 @@ syntax = "proto2";
package k8s.io.apimachinery.pkg.util.intstr;
// Package-wide variables from generator "generated".
option go_package = "intstr";
option go_package = "k8s.io/apimachinery/pkg/util/intstr";
// IntOrString is a type that can hold an int32 or a string. When used in
// JSON or YAML marshalling and unmarshalling, it produces or consumes the


@@ -1,3 +1,4 @@
//go:build !notest
// +build !notest
/*


@@ -131,6 +131,10 @@ func (IntOrString) OpenAPISchemaType() []string { return []string{"string"} }
// the OpenAPI spec of this type.
func (IntOrString) OpenAPISchemaFormat() string { return "int-or-string" }
// OpenAPIV3OneOfTypes is used by the kube-openapi generator when constructing
// the OpenAPI v3 spec of this type.
func (IntOrString) OpenAPIV3OneOfTypes() []string { return []string{"integer", "string"} }
func ValueOrDefault(intOrPercent *IntOrString, defaultValue IntOrString) *IntOrString {
if intOrPercent == nil {
return &defaultValue
@@ -141,7 +145,7 @@ func ValueOrDefault(intOrPercent *IntOrString, defaultValue IntOrString) *IntOrS
// GetScaledValueFromIntOrPercent is meant to replace GetValueFromIntOrPercent.
// This method returns a scaled value from an IntOrString type. If the IntOrString
// is a percentage string value it's treated as a percentage and scaled appropriately
// in accordance to the total, if it's an int value it's treated as a a simple value and
// in accordance to the total, if it's an int value it's treated as a simple value and
// if it is a string value which is either non-numeric or numeric but lacking a trailing '%' it returns an error.
func GetScaledValueFromIntOrPercent(intOrPercent *IntOrString, total int, roundUp bool) (int, error) {
if intOrPercent == nil {
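A sketch of GetScaledValueFromIntOrPercent with a percentage string, per the doc comment above (intstr and fmt imports assumed):

func scaledExample() {
    pct := intstr.FromString("25%")
    v, err := intstr.GetScaledValueFromIntOrPercent(&pct, 8, true) // roundUp=true
    fmt.Println(v, err) // 2 <nil>: 25% of a total of 8
}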


@@ -17,10 +17,11 @@ limitations under the License.
package json
import (
"bytes"
"encoding/json"
"fmt"
"io"
kjson "sigs.k8s.io/json"
)
// NewEncoder delegates to json.NewEncoder
@@ -38,50 +39,11 @@ func Marshal(v interface{}) ([]byte, error) {
// limit recursive depth to prevent stack overflow errors
const maxDepth = 10000
// Unmarshal unmarshals the given data
// If v is a *map[string]interface{}, *[]interface{}, or *interface{} numbers
// are converted to int64 or float64
// Unmarshal unmarshals the given data.
// Object keys are case-sensitive.
// Numbers decoded into interface{} fields are converted to int64 or float64.
func Unmarshal(data []byte, v interface{}) error {
switch v := v.(type) {
case *map[string]interface{}:
// Build a decoder from the given data
decoder := json.NewDecoder(bytes.NewBuffer(data))
// Preserve numbers, rather than casting to float64 automatically
decoder.UseNumber()
// Run the decode
if err := decoder.Decode(v); err != nil {
return err
}
// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
return ConvertMapNumbers(*v, 0)
case *[]interface{}:
// Build a decoder from the given data
decoder := json.NewDecoder(bytes.NewBuffer(data))
// Preserve numbers, rather than casting to float64 automatically
decoder.UseNumber()
// Run the decode
if err := decoder.Decode(v); err != nil {
return err
}
// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
return ConvertSliceNumbers(*v, 0)
case *interface{}:
// Build a decoder from the given data
decoder := json.NewDecoder(bytes.NewBuffer(data))
// Preserve numbers, rather than casting to float64 automatically
decoder.UseNumber()
// Run the decode
if err := decoder.Decode(v); err != nil {
return err
}
// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
return ConvertInterfaceNumbers(v, 0)
default:
return json.Unmarshal(data, v)
}
return kjson.UnmarshalCaseSensitivePreserveInts(data, v)
}
// ConvertInterfaceNumbers converts any json.Number values to int64 or float64.
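Unmarshal now delegates to sigs.k8s.io/json but keeps the documented behavior: object keys are case-sensitive and whole numbers land as int64 rather than float64. A sketch (utiljson assumed as the import alias for this package):

func decodeReplicas() {
    var out map[string]interface{}
    _ = utiljson.Unmarshal([]byte(`{"replicas": 3}`), &out)
    fmt.Printf("%T\n", out["replicas"]) // int64, not float64
}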


@@ -45,7 +45,7 @@ import (
// and their field paths and types are exactly the same, then ExtractInto can be
// called with the root resource as the object and the subresource as the
// applyConfiguration. This works for "status", obviously, because status is
// represented by the exact same object as the root resource. This this does NOT
// represented by the exact same object as the root resource. This does NOT
// work, for example, with the "scale" subresources of Deployment, ReplicaSet and
// StatefulSet. While the spec.replicas, status.replicas fields are in the same
// exact field path locations as they are in autoscaling.Scale, the selector
@@ -75,6 +75,13 @@ func ExtractInto(object runtime.Object, objectType typed.ParseableType, fieldMan
if !ok {
return fmt.Errorf("unable to convert managed fields for %s to unstructured, expected map, got %T", fieldManager, u)
}
// set the type meta manually if it doesn't exist to avoid missing kind errors
// when decoding from unstructured JSON
if _, ok := m["kind"]; !ok && object.GetObjectKind().GroupVersionKind().Kind != "" {
m["kind"] = object.GetObjectKind().GroupVersionKind().Kind
m["apiVersion"] = object.GetObjectKind().GroupVersionKind().GroupVersion().String()
}
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(m, applyConfiguration); err != nil {
return fmt.Errorf("error extracting into obj from unstructured: %w", err)
}


@@ -0,0 +1,128 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package managedfields
import (
"fmt"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kube-openapi/pkg/schemaconv"
"k8s.io/kube-openapi/pkg/util/proto"
smdschema "sigs.k8s.io/structured-merge-diff/v4/schema"
"sigs.k8s.io/structured-merge-diff/v4/typed"
)
// groupVersionKindExtensionKey is the key used to lookup the
// GroupVersionKind value for an object definition from the
// definition's "extensions" map.
const groupVersionKindExtensionKey = "x-kubernetes-group-version-kind"
// GvkParser contains a Parser that allows introspecting the schema.
type GvkParser struct {
gvks map[schema.GroupVersionKind]string
parser typed.Parser
}
// Type returns a helper which can produce objects of the given type. Any
// errors are deferred until a further function is called.
func (p *GvkParser) Type(gvk schema.GroupVersionKind) *typed.ParseableType {
typeName, ok := p.gvks[gvk]
if !ok {
return nil
}
t := p.parser.Type(typeName)
return &t
}
// NewGVKParser builds a GVKParser from a proto.Models. This
// will automatically find the proper version of the object, and the
// corresponding schema information.
func NewGVKParser(models proto.Models, preserveUnknownFields bool) (*GvkParser, error) {
typeSchema, err := schemaconv.ToSchemaWithPreserveUnknownFields(models, preserveUnknownFields)
if err != nil {
return nil, fmt.Errorf("failed to convert models to schema: %v", err)
}
parser := GvkParser{
gvks: map[schema.GroupVersionKind]string{},
}
parser.parser = typed.Parser{Schema: smdschema.Schema{Types: typeSchema.Types}}
for _, modelName := range models.ListModels() {
model := models.LookupModel(modelName)
if model == nil {
panic(fmt.Sprintf("ListModels returns a model that can't be looked-up for: %v", modelName))
}
gvkList := parseGroupVersionKind(model)
for _, gvk := range gvkList {
if len(gvk.Kind) > 0 {
_, ok := parser.gvks[gvk]
if ok {
return nil, fmt.Errorf("duplicate entry for %v", gvk)
}
parser.gvks[gvk] = modelName
}
}
}
return &parser, nil
}
// Get and parse GroupVersionKind from the extension. Returns empty if it doesn't have one.
func parseGroupVersionKind(s proto.Schema) []schema.GroupVersionKind {
extensions := s.GetExtensions()
gvkListResult := []schema.GroupVersionKind{}
// Get the extensions
gvkExtension, ok := extensions[groupVersionKindExtensionKey]
if !ok {
return []schema.GroupVersionKind{}
}
// gvk extension must be a list of at least 1 element.
gvkList, ok := gvkExtension.([]interface{})
if !ok {
return []schema.GroupVersionKind{}
}
for _, gvk := range gvkList {
// gvk extension list must be a map with group, version, and
// kind fields
gvkMap, ok := gvk.(map[interface{}]interface{})
if !ok {
continue
}
group, ok := gvkMap["group"].(string)
if !ok {
continue
}
version, ok := gvkMap["version"].(string)
if !ok {
continue
}
kind, ok := gvkMap["kind"].(string)
if !ok {
continue
}
gvkListResult = append(gvkListResult, schema.GroupVersionKind{
Group: group,
Version: version,
Kind: kind,
})
}
return gvkListResult
}
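A sketch of resolving a parseable type through the new GvkParser (deploymentType is hypothetical; models would typically come from the server's OpenAPI document):

func deploymentType(models proto.Models) (*typed.ParseableType, error) {
    parser, err := managedfields.NewGVKParser(models, false) // preserveUnknownFields=false
    if err != nil {
        return nil, err
    }
    gvk := schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}
    pt := parser.Type(gvk) // nil when the kind is absent from the models
    if pt == nil {
        return nil, fmt.Errorf("no schema for %v", gvk)
    }
    return pt, nil
}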


@@ -17,7 +17,6 @@ limitations under the License.
package net
import (
"bufio"
"bytes"
"context"
"crypto/tls"
@@ -39,6 +38,7 @@ import (
"golang.org/x/net/http2"
"k8s.io/klog/v2"
netutils "k8s.io/utils/net"
)
// JoinPreservingTrailingSlash does a path.Join of the specified elements,
@@ -237,6 +237,29 @@ func DialerFor(transport http.RoundTripper) (DialFunc, error) {
}
}
// CloseIdleConnectionsFor close idles connections for the Transport.
// If the Transport is wrapped it iterates over the wrapped round trippers
// until it finds one that implements the CloseIdleConnections method.
// If the Transport does not have a CloseIdleConnections method
// then this function does nothing.
func CloseIdleConnectionsFor(transport http.RoundTripper) {
if transport == nil {
return
}
type closeIdler interface {
CloseIdleConnections()
}
switch transport := transport.(type) {
case closeIdler:
transport.CloseIdleConnections()
case RoundTripperWrapper:
CloseIdleConnectionsFor(transport.WrappedRoundTripper())
default:
klog.Warningf("unknown transport type: %T", transport)
}
}
type TLSClientConfigHolder interface {
TLSClientConfig() *tls.Config
}
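CloseIdleConnectionsFor recursively unwraps RoundTripperWrapper chains until something implements CloseIdleConnections; a sketch with a plain transport (utilnet assumed as the import alias for this package):

func closeIdle() {
    var rt http.RoundTripper = &http.Transport{}
    utilnet.CloseIdleConnectionsFor(rt)  // *http.Transport satisfies the closeIdler interface
    utilnet.CloseIdleConnectionsFor(nil) // nil transport is a guarded no-op
}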
@@ -289,7 +312,7 @@ func SourceIPs(req *http.Request) []net.IP {
// Use the first valid one.
parts := strings.Split(hdrForwardedFor, ",")
for _, part := range parts {
ip := net.ParseIP(strings.TrimSpace(part))
ip := netutils.ParseIPSloppy(strings.TrimSpace(part))
if ip != nil {
srcIPs = append(srcIPs, ip)
}
@@ -299,7 +322,7 @@ func SourceIPs(req *http.Request) []net.IP {
// Try the X-Real-Ip header.
hdrRealIp := hdr.Get("X-Real-Ip")
if hdrRealIp != "" {
ip := net.ParseIP(hdrRealIp)
ip := netutils.ParseIPSloppy(hdrRealIp)
// Only append the X-Real-Ip if it's not already contained in the X-Forwarded-For chain.
if ip != nil && !containsIP(srcIPs, ip) {
srcIPs = append(srcIPs, ip)
@@ -311,11 +334,11 @@ func SourceIPs(req *http.Request) []net.IP {
// Remote Address in Go's HTTP server is in the form host:port so we need to split that first.
host, _, err := net.SplitHostPort(req.RemoteAddr)
if err == nil {
remoteIP = net.ParseIP(host)
remoteIP = netutils.ParseIPSloppy(host)
}
// Fallback if Remote Address was just IP.
if remoteIP == nil {
remoteIP = net.ParseIP(req.RemoteAddr)
remoteIP = netutils.ParseIPSloppy(req.RemoteAddr)
}
// Don't duplicate remote IP if it's already the last address in the chain.
@@ -382,7 +405,7 @@ func NewProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error
cidrs := []*net.IPNet{}
for _, noProxyRule := range noProxyRules {
_, cidr, _ := net.ParseCIDR(noProxyRule)
_, cidr, _ := netutils.ParseCIDRSloppy(noProxyRule)
if cidr != nil {
cidrs = append(cidrs, cidr)
}
@@ -393,7 +416,7 @@ func NewProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error
}
return func(req *http.Request) (*url.URL, error) {
ip := net.ParseIP(req.URL.Hostname())
ip := netutils.ParseIPSloppy(req.URL.Hostname())
if ip == nil {
return delegate(req)
}
@@ -422,104 +445,6 @@ type Dialer interface {
Dial(req *http.Request) (net.Conn, error)
}
// ConnectWithRedirects uses dialer to send req, following up to 10 redirects (relative to
// originalLocation). It returns the opened net.Conn and the raw response bytes.
// If requireSameHostRedirects is true, only redirects to the same host are permitted.
func ConnectWithRedirects(originalMethod string, originalLocation *url.URL, header http.Header, originalBody io.Reader, dialer Dialer, requireSameHostRedirects bool) (net.Conn, []byte, error) {
const (
maxRedirects = 9 // Fail on the 10th redirect
maxResponseSize = 16384 // play it safe to allow the potential for lots of / large headers
)
var (
location = originalLocation
method = originalMethod
intermediateConn net.Conn
rawResponse = bytes.NewBuffer(make([]byte, 0, 256))
body = originalBody
)
defer func() {
if intermediateConn != nil {
intermediateConn.Close()
}
}()
redirectLoop:
for redirects := 0; ; redirects++ {
if redirects > maxRedirects {
return nil, nil, fmt.Errorf("too many redirects (%d)", redirects)
}
req, err := http.NewRequest(method, location.String(), body)
if err != nil {
return nil, nil, err
}
req.Header = header
intermediateConn, err = dialer.Dial(req)
if err != nil {
return nil, nil, err
}
// Peek at the backend response.
rawResponse.Reset()
respReader := bufio.NewReader(io.TeeReader(
io.LimitReader(intermediateConn, maxResponseSize), // Don't read more than maxResponseSize bytes.
rawResponse)) // Save the raw response.
resp, err := http.ReadResponse(respReader, nil)
if err != nil {
// Unable to read the backend response; let the client handle it.
klog.Warningf("Error reading backend response: %v", err)
break redirectLoop
}
switch resp.StatusCode {
case http.StatusFound:
// Redirect, continue.
default:
// Don't redirect.
break redirectLoop
}
// Redirected requests switch to "GET" according to the HTTP spec:
// https://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3
method = "GET"
// don't send a body when following redirects
body = nil
resp.Body.Close() // not used
// Prepare to follow the redirect.
redirectStr := resp.Header.Get("Location")
if redirectStr == "" {
return nil, nil, fmt.Errorf("%d response missing Location header", resp.StatusCode)
}
// We have to parse relative to the current location, NOT originalLocation. For example,
// if we request http://foo.com/a and get back "http://bar.com/b", the result should be
// http://bar.com/b. If we then make that request and get back a redirect to "/c", the result
// should be http://bar.com/c, not http://foo.com/c.
location, err = location.Parse(redirectStr)
if err != nil {
return nil, nil, fmt.Errorf("malformed Location header: %v", err)
}
// Only follow redirects to the same host. Otherwise, propagate the redirect response back.
if requireSameHostRedirects && location.Hostname() != originalLocation.Hostname() {
return nil, nil, fmt.Errorf("hostname mismatch: expected %s, found %s", originalLocation.Hostname(), location.Hostname())
}
// Reset the connection.
intermediateConn.Close()
intermediateConn = nil
}
connToReturn := intermediateConn
intermediateConn = nil // Don't close the connection when we return it.
return connToReturn, rawResponse.Bytes(), nil
}
// CloneRequest creates a shallow copy of the request along with a deep copy of the Headers.
func CloneRequest(req *http.Request) *http.Request {
r := new(http.Request)


@@ -27,6 +27,7 @@ import (
"strings"
"k8s.io/klog/v2"
netutils "k8s.io/utils/net"
)
type AddressFamily uint
@@ -221,7 +222,7 @@ func getMatchingGlobalIP(addrs []net.Addr, family AddressFamily) (net.IP, error)
if len(addrs) > 0 {
for i := range addrs {
klog.V(4).Infof("Checking addr %s.", addrs[i].String())
ip, _, err := net.ParseCIDR(addrs[i].String())
ip, _, err := netutils.ParseCIDRSloppy(addrs[i].String())
if err != nil {
return nil, err
}
@@ -336,9 +337,9 @@ func chooseIPFromHostInterfaces(nw networkInterfacer, addressFamilies AddressFam
continue
}
for _, addr := range addrs {
ip, _, err := net.ParseCIDR(addr.String())
ip, _, err := netutils.ParseCIDRSloppy(addr.String())
if err != nil {
return nil, fmt.Errorf("Unable to parse CIDR for interface %q: %s", intf.Name, err)
return nil, fmt.Errorf("unable to parse CIDR for interface %q: %s", intf.Name, err)
}
if !memberOf(ip, family) {
klog.V(4).Infof("Skipping: no address family match for %q on interface %q.", ip, intf.Name)
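ParseIPSloppy comes from k8s.io/utils/net and keeps the pre-Go-1.17 tolerance for leading zeros that net.ParseIP dropped; that behavior is an assumption about the utils package, not shown in this diff:

func compareParsers() {
    fmt.Println(net.ParseIP("10.008.0.1"))            // <nil> on Go 1.17+ (leading zeros rejected)
    fmt.Println(netutils.ParseIPSloppy("10.008.0.1")) // 10.8.0.1
}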


@@ -25,9 +25,9 @@ import (
var validSchemes = sets.NewString("http", "https", "")
// SplitSchemeNamePort takes a string of the following forms:
// * "<name>", returns "", "<name>","", true
// * "<name>:<port>", returns "", "<name>","<port>",true
// * "<scheme>:<name>:<port>", returns "<scheme>","<name>","<port>",true
// - "<name>", returns "", "<name>","", true
// - "<name>:<port>", returns "", "<name>","<port>",true
// - "<scheme>:<name>:<port>", returns "<scheme>","<name>","<port>",true
//
// Name must be non-empty or valid will be returned false.
// Scheme must be "http" or "https" if specified
@@ -57,9 +57,10 @@ func SplitSchemeNamePort(id string) (scheme, name, port string, valid bool) {
}
// JoinSchemeNamePort returns a string that specifies the scheme, name, and port:
// * "<name>"
// * "<name>:<port>"
// * "<scheme>:<name>:<port>"
// - "<name>"
// - "<name>:<port>"
// - "<scheme>:<name>:<port>"
//
// None of the parameters may contain a ':' character
// Name is required
// Scheme must be "", "http", or "https"
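The reformatted list above maps directly onto calls like the following (utilnet assumed as the import alias):

func splitExample() {
    scheme, name, port, valid := utilnet.SplitSchemeNamePort("https:etcd-client:2379")
    fmt.Println(scheme, name, port, valid) // https etcd-client 2379 true
}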


@@ -25,6 +25,7 @@ import (
// IPNetEqual checks if the two input IPNets are representing the same subnet.
// For example,
//
// 10.0.0.1/24 and 10.0.0.0/24 are the same subnet.
// 10.0.0.1/24 and 10.0.0.0/25 are not the same subnet.
func IPNetEqual(ipnet1, ipnet2 *net.IPNet) bool {


@@ -27,9 +27,10 @@ import (
)
var (
// ReallyCrash controls the behavior of HandleCrash and now defaults
// true. It's still exposed so components can optionally set to false
// to restore prior behavior.
// ReallyCrash controls the behavior of HandleCrash and defaults to
// true. It's exposed so components can optionally set to false
// to restore prior behavior. This flag is mostly used for tests to validate
// crash conditions.
ReallyCrash = true
)
@@ -141,7 +142,7 @@ func GetCaller() string {
runtime.Callers(3, pc[:])
f := runtime.FuncForPC(pc[0])
if f == nil {
return fmt.Sprintf("Unable to find caller")
return "Unable to find caller"
}
return f.Name()
}


@@ -28,7 +28,7 @@ type Byte map[byte]Empty
// NewByte creates a Byte from a list of values.
func NewByte(items ...byte) Byte {
ss := Byte{}
ss := make(Byte, len(items))
ss.Insert(items...)
return ss
}
@@ -87,6 +87,15 @@ func (s Byte) HasAny(items ...byte) bool {
return false
}
// Clone returns a new set which is a copy of the current set.
func (s Byte) Clone() Byte {
result := make(Byte, len(s))
for key := range s {
result.Insert(key)
}
return result
}
// Difference returns a set of objects that are not in s2
// For example:
// s1 = {a1, a2, a3}
@@ -110,10 +119,7 @@ func (s Byte) Difference(s2 Byte) Byte {
// s1.Union(s2) = {a1, a2, a3, a4}
// s2.Union(s1) = {a1, a2, a3, a4}
func (s1 Byte) Union(s2 Byte) Byte {
result := NewByte()
for key := range s1 {
result.Insert(key)
}
result := s1.Clone()
for key := range s2 {
result.Insert(key)
}


@@ -28,7 +28,7 @@ type Int map[int]Empty
// NewInt creates a Int from a list of values.
func NewInt(items ...int) Int {
ss := Int{}
ss := make(Int, len(items))
ss.Insert(items...)
return ss
}
@@ -87,6 +87,15 @@ func (s Int) HasAny(items ...int) bool {
return false
}
// Clone returns a new set which is a copy of the current set.
func (s Int) Clone() Int {
result := make(Int, len(s))
for key := range s {
result.Insert(key)
}
return result
}
// Difference returns a set of objects that are not in s2
// For example:
// s1 = {a1, a2, a3}
@@ -110,10 +119,7 @@ func (s Int) Difference(s2 Int) Int {
// s1.Union(s2) = {a1, a2, a3, a4}
// s2.Union(s1) = {a1, a2, a3, a4}
func (s1 Int) Union(s2 Int) Int {
result := NewInt()
for key := range s1 {
result.Insert(key)
}
result := s1.Clone()
for key := range s2 {
result.Insert(key)
}


@@ -28,7 +28,7 @@ type Int32 map[int32]Empty
// NewInt32 creates a Int32 from a list of values.
func NewInt32(items ...int32) Int32 {
ss := Int32{}
ss := make(Int32, len(items))
ss.Insert(items...)
return ss
}
@@ -87,6 +87,15 @@ func (s Int32) HasAny(items ...int32) bool {
return false
}
// Clone returns a new set which is a copy of the current set.
func (s Int32) Clone() Int32 {
result := make(Int32, len(s))
for key := range s {
result.Insert(key)
}
return result
}
// Difference returns a set of objects that are not in s2
// For example:
// s1 = {a1, a2, a3}
@@ -110,10 +119,7 @@ func (s Int32) Difference(s2 Int32) Int32 {
// s1.Union(s2) = {a1, a2, a3, a4}
// s2.Union(s1) = {a1, a2, a3, a4}
func (s1 Int32) Union(s2 Int32) Int32 {
result := NewInt32()
for key := range s1 {
result.Insert(key)
}
result := s1.Clone()
for key := range s2 {
result.Insert(key)
}


@@ -28,7 +28,7 @@ type Int64 map[int64]Empty
// NewInt64 creates a Int64 from a list of values.
func NewInt64(items ...int64) Int64 {
ss := Int64{}
ss := make(Int64, len(items))
ss.Insert(items...)
return ss
}
@@ -87,6 +87,15 @@ func (s Int64) HasAny(items ...int64) bool {
return false
}
// Clone returns a new set which is a copy of the current set.
func (s Int64) Clone() Int64 {
result := make(Int64, len(s))
for key := range s {
result.Insert(key)
}
return result
}
// Difference returns a set of objects that are not in s2
// For example:
// s1 = {a1, a2, a3}
@@ -110,10 +119,7 @@ func (s Int64) Difference(s2 Int64) Int64 {
// s1.Union(s2) = {a1, a2, a3, a4}
// s2.Union(s1) = {a1, a2, a3, a4}
func (s1 Int64) Union(s2 Int64) Int64 {
result := NewInt64()
for key := range s1 {
result.Insert(key)
}
result := s1.Clone()
for key := range s2 {
result.Insert(key)
}


@@ -28,7 +28,7 @@ type String map[string]Empty
// NewString creates a String from a list of values.
func NewString(items ...string) String {
ss := String{}
ss := make(String, len(items))
ss.Insert(items...)
return ss
}
@@ -87,6 +87,15 @@ func (s String) HasAny(items ...string) bool {
return false
}
// Clone returns a new set which is a copy of the current set.
func (s String) Clone() String {
result := make(String, len(s))
for key := range s {
result.Insert(key)
}
return result
}
// Difference returns a set of objects that are not in s2
// For example:
// s1 = {a1, a2, a3}
@@ -110,10 +119,7 @@ func (s String) Difference(s2 String) String {
// s1.Union(s2) = {a1, a2, a3, a4}
// s2.Union(s1) = {a1, a2, a3, a4}
func (s1 String) Union(s2 String) String {
result := NewString()
for key := range s1 {
result.Insert(key)
}
result := s1.Clone()
for key := range s2 {
result.Insert(key)
}
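A short sketch of the new Clone helper this bump adds to every set type (String shown; Byte/Int/Int32/Int64 are identical; sets and fmt imports assumed):

func cloneExample() {
    s1 := sets.NewString("a", "b")
    s2 := s1.Clone() // independent copy: mutating s2 does not touch s1
    s2.Insert("c")
    union := s1.Union(sets.NewString("d")) // Union now builds on a Clone of s1
    fmt.Println(s1.Has("c"), union.Has("d")) // false true
}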


@@ -42,19 +42,31 @@ func (v *Error) Error() string {
return fmt.Sprintf("%s: %s", v.Field, v.ErrorBody())
}
type OmitValueType struct{}
var omitValue = OmitValueType{}
// ErrorBody returns the error message without the field name. This is useful
// for building nice-looking higher-level error reporting.
func (v *Error) ErrorBody() string {
var s string
switch v.Type {
case ErrorTypeRequired, ErrorTypeForbidden, ErrorTypeTooLong, ErrorTypeInternal:
switch {
case v.Type == ErrorTypeRequired:
s = v.Type.String()
case v.Type == ErrorTypeForbidden:
s = v.Type.String()
case v.Type == ErrorTypeTooLong:
s = v.Type.String()
case v.Type == ErrorTypeInternal:
s = v.Type.String()
case v.BadValue == omitValue:
s = v.Type.String()
default:
value := v.BadValue
valueType := reflect.TypeOf(value)
if value == nil || valueType == nil {
value = "null"
} else if valueType.Kind() == reflect.Ptr {
} else if valueType.Kind() == reflect.Pointer {
if reflectValue := reflect.ValueOf(value); reflectValue.IsNil() {
value = "null"
} else {
@@ -123,6 +135,8 @@ const (
// ErrorTypeInternal is used to report other errors that are not related
// to user input. See InternalError().
ErrorTypeInternal ErrorType = "InternalError"
// ErrorTypeTypeInvalid is for the value did not match the schema type for that field
ErrorTypeTypeInvalid ErrorType = "FieldValueTypeInvalid"
)
// String converts a ErrorType into its corresponding canonical error message.
@@ -146,11 +160,18 @@ func (t ErrorType) String() string {
return "Too many"
case ErrorTypeInternal:
return "Internal error"
case ErrorTypeTypeInvalid:
return "Invalid value"
default:
panic(fmt.Sprintf("unrecognized validation error: %q", string(t)))
}
}
// TypeInvalid returns a *Error indicating "type is invalid"
func TypeInvalid(field *Path, value interface{}, detail string) *Error {
return &Error{ErrorTypeTypeInvalid, field.String(), value, detail}
}
// NotFound returns a *Error indicating "value not found". This is
// used to report failure to find a requested value (e.g. looking up an ID).
func NotFound(field *Path, value interface{}) *Error {
@@ -207,11 +228,40 @@ func TooLong(field *Path, value interface{}, maxLength int) *Error {
return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d bytes", maxLength)}
}
// TooLongMaxLength returns a *Error indicating "too long". This is used to
// report that the given value is too long. This is similar to
// Invalid, but the returned error will not include the too-long
// value. If maxLength is negative, no max length will be included in the message.
func TooLongMaxLength(field *Path, value interface{}, maxLength int) *Error {
var msg string
if maxLength >= 0 {
msg = fmt.Sprintf("may not be longer than %d", maxLength)
} else {
msg = "value is too long"
}
return &Error{ErrorTypeTooLong, field.String(), value, msg}
}
// TooMany returns a *Error indicating "too many". This is used to
// report that a given list has too many items. This is similar to TooLong,
// but the returned error indicates quantity instead of length.
func TooMany(field *Path, actualQuantity, maxQuantity int) *Error {
return &Error{ErrorTypeTooMany, field.String(), actualQuantity, fmt.Sprintf("must have at most %d items", maxQuantity)}
var msg string
if maxQuantity >= 0 {
msg = fmt.Sprintf("must have at most %d items", maxQuantity)
} else {
msg = "has too many items"
}
var actual interface{}
if actualQuantity >= 0 {
actual = actualQuantity
} else {
actual = omitValue
}
return &Error{ErrorTypeTooMany, field.String(), actual, msg}
}
// InternalError returns a *Error indicating "internal error". This is used
@@ -239,6 +289,9 @@ func NewErrorTypeMatcher(t ErrorType) utilerrors.Matcher {
// ToAggregate converts the ErrorList into an errors.Aggregate.
func (list ErrorList) ToAggregate() utilerrors.Aggregate {
if len(list) == 0 {
return nil
}
errs := make([]error, 0, len(list))
errorMsgs := sets.NewString()
for _, err := range list {
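A sketch using the two helpers added above, TooLongMaxLength and TooMany (validateWidget and its limits are hypothetical):

func validateWidget(name string, containers int) field.ErrorList {
    var errs field.ErrorList
    p := field.NewPath("spec")
    if len(name) > 63 {
        errs = append(errs, field.TooLongMaxLength(p.Child("name"), name, 63)) // "may not be longer than 63"
    }
    if containers > 100 {
        errs = append(errs, field.TooMany(p.Child("containers"), containers, 100)) // "must have at most 100 items"
    }
    return errs
}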


@@ -25,6 +25,7 @@ import (
"strings"
"k8s.io/apimachinery/pkg/util/validation/field"
netutils "k8s.io/utils/net"
)
const qnameCharFmt string = "[A-Za-z0-9]"
@@ -333,7 +334,7 @@ func IsValidPortName(port string) []string {
errs = append(errs, "must contain only alpha-numeric characters (a-z, 0-9), and hyphens (-)")
}
if !portNameOneLetterRegexp.MatchString(port) {
errs = append(errs, "must contain at least one letter or number (a-z, 0-9)")
errs = append(errs, "must contain at least one letter (a-z)")
}
if strings.Contains(port, "--") {
errs = append(errs, "must not contain consecutive hyphens")
@@ -346,7 +347,7 @@ func IsValidPortName(port string) []string {
// IsValidIP tests that the argument is a valid IP address.
func IsValidIP(value string) []string {
if net.ParseIP(value) == nil {
if netutils.ParseIPSloppy(value) == nil {
return []string{"must be a valid IP address, (e.g. 10.9.8.7 or 2001:db8::ffff)"}
}
return nil
@@ -355,7 +356,7 @@ func IsValidIP(value string) []string {
// IsValidIPv4Address tests that the argument is a valid IPv4 address.
func IsValidIPv4Address(fldPath *field.Path, value string) field.ErrorList {
var allErrors field.ErrorList
ip := net.ParseIP(value)
ip := netutils.ParseIPSloppy(value)
if ip == nil || ip.To4() == nil {
allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv4 address"))
}
@@ -365,7 +366,7 @@ func IsValidIPv4Address(fldPath *field.Path, value string) field.ErrorList {
// IsValidIPv6Address tests that the argument is a valid IPv6 address.
func IsValidIPv6Address(fldPath *field.Path, value string) field.ErrorList {
var allErrors field.ErrorList
ip := net.ParseIP(value)
ip := netutils.ParseIPSloppy(value)
if ip == nil || ip.To4() != nil {
allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv6 address"))
}


@@ -24,14 +24,16 @@ import (
"sync"
"time"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/utils/clock"
)
// For any test of the style:
// ...
// <- time.After(timeout):
// t.Errorf("Timed out")
//
// ...
// <- time.After(timeout):
// t.Errorf("Timed out")
//
// The value for timeout should effectively be "forever." Obviously we don't want our tests to truly lock up forever, but 30s
// is long enough that it is effectively forever for the things that can slow down a run on a heavily contended machine
// (GC, seeks, etc), but not so long as to make a developer ctrl-c a test run if they do happen to break that test.
@@ -166,6 +168,9 @@ func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan
// of every loop to prevent extra executions of f().
select {
case <-stopCh:
if !t.Stop() {
<-t.C()
}
return
case <-t.C():
}
@@ -285,13 +290,13 @@ func (b *Backoff) Step() time.Duration {
return duration
}
// contextForChannel derives a child context from a parent channel.
// ContextForChannel derives a child context from a parent channel.
//
// The derived context's Done channel is closed when the returned cancel function
// is called or when the parent channel is closed, whichever happens first.
//
// Note the caller must *always* call the CancelFunc, otherwise resources may be leaked.
func contextForChannel(parentCh <-chan struct{}) (context.Context, context.CancelFunc) {
func ContextForChannel(parentCh <-chan struct{}) (context.Context, context.CancelFunc) {
ctx, cancel := context.WithCancel(context.Background())
go func() {
@@ -461,7 +466,7 @@ func PollWithContext(ctx context.Context, interval, timeout time.Duration, condi
// PollUntil always waits interval before the first run of 'condition'.
// 'condition' will always be invoked at least once.
func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
ctx, cancel := contextForChannel(stopCh)
ctx, cancel := ContextForChannel(stopCh)
defer cancel()
return PollUntilWithContext(ctx, interval, condition.WithContext())
}
@@ -528,7 +533,7 @@ func PollImmediateWithContext(ctx context.Context, interval, timeout time.Durati
// PollImmediateUntil runs the 'condition' before waiting for the interval.
// 'condition' will always be invoked at least once.
func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
ctx, cancel := contextForChannel(stopCh)
ctx, cancel := ContextForChannel(stopCh)
defer cancel()
return PollImmediateUntilWithContext(ctx, interval, condition.WithContext())
}
@@ -563,7 +568,7 @@ func PollImmediateInfiniteWithContext(ctx context.Context, interval time.Duratio
return poll(ctx, true, poller(interval, 0), condition)
}
// Internally used, each of the the public 'Poll*' function defined in this
// Internally used, each of the public 'Poll*' function defined in this
// package should invoke this internal function with appropriate parameters.
// ctx: the context specified by the caller, for infinite polling pass
// a context that never gets cancelled or expired.
@@ -612,7 +617,7 @@ type WaitWithContextFunc func(ctx context.Context) <-chan struct{}
// WaitFor continually checks 'fn' as driven by 'wait'.
//
// WaitFor gets a channel from 'wait()'', and then invokes 'fn' once for every value
// WaitFor gets a channel from 'wait(), and then invokes 'fn' once for every value
// placed on the channel and once more when the channel is closed. If the channel is closed
// and 'fn' returns false without error, WaitFor returns ErrWaitTimeout.
//
@@ -626,14 +631,14 @@ type WaitWithContextFunc func(ctx context.Context) <-chan struct{}
// "uniform pseudo-random", the `fn` might still run one or multiple time,
// though eventually `WaitFor` will return.
func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error {
ctx, cancel := contextForChannel(done)
ctx, cancel := ContextForChannel(done)
defer cancel()
return WaitForWithContext(ctx, wait.WithContext(), fn.WithContext())
}
// WaitForWithContext continually checks 'fn' as driven by 'wait'.
//
// WaitForWithContext gets a channel from 'wait()'', and then invokes 'fn'
// WaitForWithContext gets a channel from 'wait(), and then invokes 'fn'
// once for every value placed on the channel and once more when the
// channel is closed. If the channel is closed and 'fn'
// returns false without error, WaitForWithContext returns ErrWaitTimeout.
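contextForChannel is now exported as ContextForChannel; a minimal sketch of bridging a stop channel to a context (waitForStop is a hypothetical caller):

func waitForStop(stopCh <-chan struct{}) {
    ctx, cancel := wait.ContextForChannel(stopCh)
    defer cancel() // always call cancel, or the bridging goroutine leaks
    <-ctx.Done()   // unblocks when stopCh closes or cancel is called
}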


@@ -59,6 +59,34 @@ func Unmarshal(data []byte, v interface{}) error {
}
}
// UnmarshalStrict unmarshals the given data
// strictly (erroring when there are duplicate fields).
func UnmarshalStrict(data []byte, v interface{}) error {
preserveIntFloat := func(d *json.Decoder) *json.Decoder {
d.UseNumber()
return d
}
switch v := v.(type) {
case *map[string]interface{}:
if err := yaml.UnmarshalStrict(data, v, preserveIntFloat); err != nil {
return err
}
return jsonutil.ConvertMapNumbers(*v, 0)
case *[]interface{}:
if err := yaml.UnmarshalStrict(data, v, preserveIntFloat); err != nil {
return err
}
return jsonutil.ConvertSliceNumbers(*v, 0)
case *interface{}:
if err := yaml.UnmarshalStrict(data, v, preserveIntFloat); err != nil {
return err
}
return jsonutil.ConvertInterfaceNumbers(v, 0)
default:
return yaml.UnmarshalStrict(data, v)
}
}
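A sketch of the stricter behavior documented above: duplicate fields are rejected and interface{} numbers stay int64/float64 (decodeStrict is a hypothetical wrapper):

func decodeStrict(data []byte) (map[string]interface{}, error) {
    var obj map[string]interface{}
    // e.g. []byte("replicas: 2\nreplicas: 3\n") fails here with a duplicate-field error
    if err := yaml.UnmarshalStrict(data, &obj); err != nil {
        return nil, err
    }
    return obj, nil // numeric values arrive as int64/float64 via ConvertMapNumbers
}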
// ToJSON converts a single YAML document into a JSON document
// or returns an error. If the document appears to be JSON the
// YAML decoding path is not used (so that error messages are