vendor: bump k8s to v0.25.4

Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
Author: CrazyMax
Date: 2023-03-14 18:20:37 +01:00
parent 4a73abfd64
commit cfa6b4f7c8
889 changed files with 96934 additions and 11708 deletions

vendor/k8s.io/client-go/rest/request.go

@@ -39,13 +39,13 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer/streaming"
utilclock "k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/watch"
restclientwatch "k8s.io/client-go/rest/watch"
"k8s.io/client-go/tools/metrics"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/klog/v2"
"k8s.io/utils/clock"
)
var (
@@ -82,6 +82,12 @@ func (r *RequestConstructionError) Error() string {
var noBackoff = &NoBackoff{}
type requestRetryFunc func(maxRetries int) WithRetry
func defaultRequestRetryFn(maxRetries int) WithRetry {
return &withRetry{maxRetries: maxRetries}
}
// Request allows for building up a request to a server in a chained fashion.
// Any errors are stored until the end of your call, so you only have to
// check once.
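To make the chained style described in the comment above concrete, here is a minimal usage sketch; the client c, the path, and the parameter are illustrative, and the only assumption is a configured *rest.RESTClient (imports: context, k8s.io/client-go/rest):

// Sketch only: the chain accumulates any construction error, and it surfaces
// once, from DoRaw, together with any error returned by the server.
func listNamespacesRaw(ctx context.Context, c *rest.RESTClient) ([]byte, error) {
    return c.Get().
        AbsPath("/api/v1/namespaces").
        Param("limit", "1").
        DoRaw(ctx)
}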
@@ -93,6 +99,7 @@ type Request struct {
rateLimiter flowcontrol.RateLimiter
backoff BackoffManager
timeout time.Duration
maxRetries int
// generic components accessible via method setters
verb string
@@ -109,9 +116,10 @@ type Request struct {
subresource string
// output
err error
body io.Reader
retry WithRetry
err error
body io.Reader
retryFn requestRetryFunc
}
// NewRequest creates a new request helper object for accessing runtime.Objects on a server.
@@ -142,7 +150,8 @@ func NewRequest(c *RESTClient) *Request {
backoff: backoff,
timeout: timeout,
pathPrefix: pathPrefix,
retry: &withRetry{maxRetries: 10},
maxRetries: 10,
retryFn: defaultRequestRetryFn,
warningHandler: c.warningHandler,
}
@@ -408,7 +417,10 @@ func (r *Request) Timeout(d time.Duration) *Request {
// function is specifically called with a different value.
// A zero maxRetries prevents it from doing retries and returns an error immediately.
func (r *Request) MaxRetries(maxRetries int) *Request {
r.retry.SetMaxRetries(maxRetries)
if maxRetries < 0 {
maxRetries = 0
}
r.maxRetries = maxRetries
return r
}
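Since NewRequest above now seeds maxRetries with 10 and the setter clamps negative values to zero, a caller that wants a Retry-After response surfaced immediately can disable retries explicitly. A hedged sketch (c and the path are illustrative, *rest.RESTClient assumed):

// Sketch: MaxRetries(0) disables retries, so a Retry-After response is
// returned to the caller as an error instead of being retried up to the
// default of 10 attempts.
func healthzNoRetry(ctx context.Context, c *rest.RESTClient) error {
    return c.Get().
        AbsPath("/healthz").
        MaxRetries(0).
        Do(ctx).
        Error()
}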
@@ -507,14 +519,17 @@ func (r Request) finalURLTemplate() url.URL {
newParams[k] = v
}
r.params = newParams
url := r.URL()
u := r.URL()
if u == nil {
return url.URL{}
}
segments := strings.Split(url.Path, "/")
segments := strings.Split(u.Path, "/")
groupIndex := 0
index := 0
trimmedBasePath := ""
if url != nil && r.c.base != nil && strings.Contains(url.Path, r.c.base.Path) {
p := strings.TrimPrefix(url.Path, r.c.base.Path)
if r.c.base != nil && strings.Contains(u.Path, r.c.base.Path) {
p := strings.TrimPrefix(u.Path, r.c.base.Path)
if !strings.HasPrefix(p, "/") {
p = "/" + p
}
@@ -525,7 +540,7 @@ func (r Request) finalURLTemplate() url.URL {
groupIndex = 1
}
if len(segments) <= 2 {
return *url
return *u
}
const CoreGroupPrefix = "api"
@@ -543,11 +558,11 @@ func (r Request) finalURLTemplate() url.URL {
// outlet here in case more API groups are added in future if ever possible:
// https://kubernetes.io/docs/concepts/overview/kubernetes-api/#api-groups
// if a wrong API groups name is encountered, return the {prefix} for url.Path
url.Path = "/{prefix}"
url.RawQuery = ""
return *url
u.Path = "/{prefix}"
u.RawQuery = ""
return *u
}
//switch segLength := len(segments) - index; segLength {
// switch segLength := len(segments) - index; segLength {
switch {
// case len(segments) - index == 1:
// resource (with no name) do nothing
@@ -570,8 +585,8 @@ func (r Request) finalURLTemplate() url.URL {
segments[index+3] = "{name}"
}
}
url.Path = path.Join(trimmedBasePath, path.Join(segments...))
return *url
u.Path = path.Join(trimmedBasePath, path.Join(segments...))
return *u
}
func (r *Request) tryThrottleWithInfo(ctx context.Context, retryInfo string) error {
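finalURLTemplate, updated in the hunks above, feeds the URL label of metrics.RequestLatency further down in request(); concrete namespace and resource names are rewritten to placeholders so the label stays low-cardinality. A standalone sketch of the idea, not the vendored implementation:

package main

import (
    "fmt"
    "path"
    "strings"
)

// templatePath is an illustrative reduction of what finalURLTemplate does for
// a namespaced, named resource under /apis: the namespace and name segments
// are replaced with placeholders, while group and version stay literal.
func templatePath(p string) string {
    segments := strings.Split(strings.TrimPrefix(p, "/"), "/")
    if len(segments) == 7 && segments[0] == "apis" && segments[3] == "namespaces" {
        segments[4] = "{namespace}"
        segments[6] = "{name}"
    }
    return "/" + path.Join(segments...)
}

func main() {
    fmt.Println(templatePath("/apis/apps/v1/namespaces/default/deployments/nginx"))
    // prints /apis/apps/v1/namespaces/{namespace}/deployments/{name}
}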
@@ -582,7 +597,9 @@ func (r *Request) tryThrottleWithInfo(ctx context.Context, retryInfo string) err
now := time.Now()
err := r.rateLimiter.Wait(ctx)
if err != nil {
err = fmt.Errorf("client rate limiter Wait returned an error: %w", err)
}
latency := time.Since(now)
var message string
@@ -619,12 +636,12 @@ type throttleSettings struct {
}
type throttledLogger struct {
clock utilclock.PassiveClock
clock clock.PassiveClock
settings []*throttleSettings
}
var globalThrottledLogger = &throttledLogger{
clock: utilclock.RealClock{},
clock: clock.RealClock{},
settings: []*throttleSettings{
{
logLevel: 2,
@@ -688,34 +705,21 @@ func (r *Request) Watch(ctx context.Context) (watch.Interface, error) {
}
return false
}
var retryAfter *RetryAfter
retry := r.retryFn(r.maxRetries)
url := r.URL().String()
for {
if err := retry.Before(ctx, r); err != nil {
return nil, retry.WrapPreviousError(err)
}
req, err := r.newHTTPRequest(ctx)
if err != nil {
return nil, err
}
r.backoff.Sleep(r.backoff.CalculateBackoff(r.URL()))
if retryAfter != nil {
// We are retrying the request that we already send to apiserver
// at least once before.
// This request should also be throttled with the client-internal rate limiter.
if err := r.tryThrottleWithInfo(ctx, retryAfter.Reason); err != nil {
return nil, err
}
retryAfter = nil
}
resp, err := client.Do(req)
updateURLMetrics(ctx, r, resp, err)
if r.c.base != nil {
if err != nil {
r.backoff.UpdateBackoff(r.c.base, err, 0)
} else {
r.backoff.UpdateBackoff(r.c.base, err, resp.StatusCode)
}
}
retry.After(ctx, r, resp, err)
if err == nil && resp.StatusCode == http.StatusOK {
return r.newStreamWatcher(resp)
}
@@ -723,14 +727,8 @@ func (r *Request) Watch(ctx context.Context) (watch.Interface, error) {
done, transformErr := func() (bool, error) {
defer readAndCloseResponseBody(resp)
var retry bool
retryAfter, retry = r.retry.NextRetry(req, resp, err, isErrRetryableFunc)
if retry {
err := r.retry.BeforeNextRetry(ctx, r.backoff, retryAfter, url, r.body)
if err == nil {
return false, nil
}
klog.V(4).Infof("Could not retry request - %v", err)
if retry.IsNextRetry(ctx, r, req, resp, err, isErrRetryableFunc) {
return false, nil
}
if resp == nil {
@@ -751,7 +749,7 @@ func (r *Request) Watch(ctx context.Context) (watch.Interface, error) {
// we need to return the error object from that.
err = transformErr
}
return nil, err
return nil, retry.WrapPreviousError(err)
}
}
}
@@ -793,7 +791,7 @@ func updateURLMetrics(ctx context.Context, req *Request, resp *http.Response, er
if err != nil {
metrics.RequestResult.Increment(ctx, "<error>", req.verb, url)
} else {
//Metrics for failure codes
// Metrics for failure codes
metrics.RequestResult.Increment(ctx, strconv.Itoa(resp.StatusCode), req.verb, url)
}
}
@@ -816,9 +814,13 @@ func (r *Request) Stream(ctx context.Context) (io.ReadCloser, error) {
client = http.DefaultClient
}
var retryAfter *RetryAfter
retry := r.retryFn(r.maxRetries)
url := r.URL().String()
for {
if err := retry.Before(ctx, r); err != nil {
return nil, err
}
req, err := r.newHTTPRequest(ctx)
if err != nil {
return nil, err
@@ -826,27 +828,9 @@ func (r *Request) Stream(ctx context.Context) (io.ReadCloser, error) {
if r.body != nil {
req.Body = ioutil.NopCloser(r.body)
}
r.backoff.Sleep(r.backoff.CalculateBackoff(r.URL()))
if retryAfter != nil {
// We are retrying the request that we already send to apiserver
// at least once before.
// This request should also be throttled with the client-internal rate limiter.
if err := r.tryThrottleWithInfo(ctx, retryAfter.Reason); err != nil {
return nil, err
}
retryAfter = nil
}
resp, err := client.Do(req)
updateURLMetrics(ctx, r, resp, err)
if r.c.base != nil {
if err != nil {
r.backoff.UpdateBackoff(r.URL(), err, 0)
} else {
r.backoff.UpdateBackoff(r.URL(), err, resp.StatusCode)
}
}
retry.After(ctx, r, resp, err)
if err != nil {
// we only retry on an HTTP response with 'Retry-After' header
return nil, err
@@ -861,14 +845,8 @@ func (r *Request) Stream(ctx context.Context) (io.ReadCloser, error) {
done, transformErr := func() (bool, error) {
defer resp.Body.Close()
var retry bool
retryAfter, retry = r.retry.NextRetry(req, resp, err, neverRetryError)
if retry {
err := r.retry.BeforeNextRetry(ctx, r.backoff, retryAfter, url, r.body)
if err == nil {
return false, nil
}
klog.V(4).Infof("Could not retry request - %v", err)
if retry.IsNextRetry(ctx, r, req, resp, err, neverRetryError) {
return false, nil
}
result := r.transformResponse(resp, req)
if err := result.Error(); err != nil {
@@ -926,7 +904,7 @@ func (r *Request) newHTTPRequest(ctx context.Context) (*http.Request, error) {
// fn at most once. It will return an error if a problem occurred prior to connecting to the
// server - the provided function is responsible for handling server errors.
func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Response)) error {
//Metrics for total request latency
// Metrics for total request latency
start := time.Now()
defer func() {
metrics.RequestLatency.Observe(ctx, r.verb, r.finalURLTemplate(), time.Since(start))
@@ -959,36 +937,43 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp
defer cancel()
}
isErrRetryableFunc := func(req *http.Request, err error) bool {
// "Connection reset by peer" or "apiserver is shutting down" are usually a transient errors.
// Thus in case of "GET" operations, we simply retry it.
// We are not automatically retrying "write" operations, as they are not idempotent.
if req.Method != "GET" {
return false
}
// For connection errors and apiserver shutdown errors retry.
if net.IsConnectionReset(err) || net.IsProbableEOF(err) {
return true
}
return false
}
// Right now we make about ten retry attempts if we get a Retry-After response.
var retryAfter *RetryAfter
retry := r.retryFn(r.maxRetries)
for {
if err := retry.Before(ctx, r); err != nil {
return retry.WrapPreviousError(err)
}
req, err := r.newHTTPRequest(ctx)
if err != nil {
return err
}
r.backoff.Sleep(r.backoff.CalculateBackoff(r.URL()))
if retryAfter != nil {
// We are retrying the request that we already send to apiserver
// at least once before.
// This request should also be throttled with the client-internal rate limiter.
if err := r.tryThrottleWithInfo(ctx, retryAfter.Reason); err != nil {
return err
}
retryAfter = nil
}
resp, err := client.Do(req)
updateURLMetrics(ctx, r, resp, err)
if err != nil {
r.backoff.UpdateBackoff(r.URL(), err, 0)
} else {
r.backoff.UpdateBackoff(r.URL(), err, resp.StatusCode)
// The value -1 or a value of 0 with a non-nil Body indicates that the length is unknown.
// https://pkg.go.dev/net/http#Request
if req.ContentLength >= 0 && !(req.Body != nil && req.ContentLength == 0) {
metrics.RequestSize.Observe(ctx, r.verb, r.URL().Host, float64(req.ContentLength))
}
retry.After(ctx, r, resp, err)
done := func() bool {
defer readAndCloseResponseBody(resp)
// if the the server returns an error in err, the response will be nil.
// if the server returns an error in err, the response will be nil.
f := func(req *http.Request, resp *http.Response) {
if resp == nil {
return
@@ -996,33 +981,15 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp
fn(req, resp)
}
var retry bool
retryAfter, retry = r.retry.NextRetry(req, resp, err, func(req *http.Request, err error) bool {
// "Connection reset by peer" or "apiserver is shutting down" are usually a transient errors.
// Thus in case of "GET" operations, we simply retry it.
// We are not automatically retrying "write" operations, as they are not idempotent.
if r.verb != "GET" {
return false
}
// For connection errors and apiserver shutdown errors retry.
if net.IsConnectionReset(err) || net.IsProbableEOF(err) {
return true
}
if retry.IsNextRetry(ctx, r, req, resp, err, isErrRetryableFunc) {
return false
})
if retry {
err := r.retry.BeforeNextRetry(ctx, r.backoff, retryAfter, req.URL.String(), r.body)
if err == nil {
return false
}
klog.V(4).Infof("Could not retry request - %v", err)
}
f(req, resp)
return true
}()
if done {
return err
return retry.WrapPreviousError(err)
}
}
}
@@ -1031,8 +998,8 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp
// processing.
//
// Error type:
// * If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError
// * http.Client.Do errors are returned directly.
// - If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError
// - http.Client.Do errors are returned directly.
func (r *Request) Do(ctx context.Context) Result {
var result Result
err := r.request(ctx, func(req *http.Request, resp *http.Response) {
@@ -1041,6 +1008,9 @@ func (r *Request) Do(ctx context.Context) Result {
if err != nil {
return Result{err: err}
}
if result.err == nil || len(result.body) > 0 {
metrics.ResponseSize.Observe(ctx, r.verb, r.URL().Host, float64(len(result.body)))
}
return result
}
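The Do error contract documented above (a *errors.StatusError when the server answers with a Status object, http.Client.Do errors passed through) is usually inspected with the helpers from k8s.io/apimachinery/pkg/api/errors. A minimal sketch, assuming a configured *rest.RESTClient and imports context, apierrors "k8s.io/apimachinery/pkg/api/errors", and k8s.io/client-go/rest:

// Sketch: telling the two documented error classes of Do apart.
func podExists(ctx context.Context, c *rest.RESTClient, ns, name string) (bool, error) {
    err := c.Get().
        AbsPath("/api/v1/namespaces", ns, "pods", name).
        Do(ctx).
        Error()
    switch {
    case err == nil:
        return true, nil
    case apierrors.IsNotFound(err):
        // the server responded with a Status object (*errors.StatusError)
        return false, nil
    default:
        // e.g. a transport error from http.Client.Do, returned directly
        return false, err
    }
}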
@@ -1057,6 +1027,9 @@ func (r *Request) DoRaw(ctx context.Context) ([]byte, error) {
if err != nil {
return nil, err
}
if result.err == nil || len(result.body) > 0 {
metrics.ResponseSize.Observe(ctx, r.verb, r.URL().Host, float64(len(result.body)))
}
return result.body, result.err
}
@@ -1172,13 +1145,13 @@ func truncateBody(body string) string {
// allocating a new string for the body output unless necessary. Uses a simple heuristic to determine
// whether the body is printable.
func glogBody(prefix string, body []byte) {
if klog.V(8).Enabled() {
if klogV := klog.V(8); klogV.Enabled() {
if bytes.IndexFunc(body, func(r rune) bool {
return r < 0x0a
}) != -1 {
klog.Infof("%s:\n%s", prefix, truncateBody(hex.Dump(body)))
klogV.Infof("%s:\n%s", prefix, truncateBody(hex.Dump(body)))
} else {
klog.Infof("%s: %s", prefix, truncateBody(string(body)))
klogV.Infof("%s: %s", prefix, truncateBody(string(body)))
}
}
}
@@ -1193,15 +1166,15 @@ const maxUnstructuredResponseTextBytes = 2048
// unexpected responses. The rough structure is:
//
// 1. Assume the server sends you something sane - JSON + well defined error objects + proper codes
// - this is the happy path
// - when you get this output, trust what the server sends
// 2. Guard against empty fields / bodies in received JSON and attempt to cull sufficient info from them to
// generate a reasonable facsimile of the original failure.
// - Be sure to use a distinct error type or flag that allows a client to distinguish between this and error 1 above
// 3. Handle true disconnect failures / completely malformed data by moving up to a more generic client error
// 4. Distinguish between various connection failures like SSL certificates, timeouts, proxy errors, unexpected
// initial contact, the presence of mismatched body contents from posted content types
// - Give these a separate distinct error type and capture as much as possible of the original message
// - this is the happy path
// - when you get this output, trust what the server sends
// 2. Guard against empty fields / bodies in received JSON and attempt to cull sufficient info from them to
// generate a reasonable facsimile of the original failure.
// - Be sure to use a distinct error type or flag that allows a client to distinguish between this and error 1 above
// 3. Handle true disconnect failures / completely malformed data by moving up to a more generic client error
// 4. Distinguish between various connection failures like SSL certificates, timeouts, proxy errors, unexpected
// initial contact, the presence of mismatched body contents from posted content types
// - Give these a separate distinct error type and capture as much as possible of the original message
//
// TODO: introduce transformation of generic http.Client.Do() errors that separates 4.
func (r *Request) transformUnstructuredResponseError(resp *http.Response, req *http.Request, body []byte) error {
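Point 2 of the list above asks for a distinct flag on errors reconstructed from unexpected bodies; on the caller side that distinction is typically probed with the apimachinery helpers. A hedged sketch, with the helper semantics assumed rather than taken from this diff (imports: errors, apierrors "k8s.io/apimachinery/pkg/api/errors"):

// Sketch: rough classification of an error returned by the request machinery.
func classify(err error) string {
    var statusErr *apierrors.StatusError
    switch {
    case err == nil:
        return "ok"
    case apierrors.IsUnexpectedServerError(err):
        return "Status reconstructed from an unexpected/non-JSON body (case 2)"
    case errors.As(err, &statusErr):
        return "well-formed Status sent by the server (case 1)"
    default:
        return "generic client or connection failure (cases 3 and 4)"
    }
}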