go.mod: update k8s deps to v0.26.2 (remove "replace" rule)

Replace rules are not inherited by consumers of buildx as a module, and as
such, consumers would default to using the v0.26.2 version. Removing the
replace rules also removes various (indirect) dependencies (although it
brings in some new packages from k8s itself).
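For illustration, a minimal go.mod fragment of the pattern that was dropped. The module path is buildx's real one, but the single rule and pinned version shown are hypothetical placeholders, not the exact rules removed by this commit:

    module github.com/docker/buildx

    require k8s.io/client-go v0.26.2

    // A replace rule is honored only when building this module directly;
    // modules that depend on buildx ignore it and resolve k8s.io/client-go
    // from their own go.mod, so a pin like the one below is not inherited.
    replace k8s.io/client-go => k8s.io/client-go v0.25.4 // hypothetical older pin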

The "azure" and "gcp" authentication packages in k8s.io/go-client are now
no longer functional, so removing those imports.
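For reference, the dropped imports were side-effect ("blank") imports of roughly this form; this is a sketch, not the actual buildx source file:

    import (
        // Blank imports that registered the in-tree "azure" and "gcp" auth
        // providers with client-go. As of v0.26 these providers are no longer
        // functional (they fail and point users at external credential
        // plugins), so the imports serve no purpose.
        _ "k8s.io/client-go/plugin/pkg/client/auth/azure"
        _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
    )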

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Sebastiaan van Stijn
2023-05-22 13:05:43 +02:00
parent 580820a4de
commit d582a21acd
443 changed files with 40329 additions and 28541 deletions


@@ -20,7 +20,6 @@ import (
"context"
"errors"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
@@ -519,7 +518,7 @@ func InClusterConfig() (*Config, error) {
return nil, ErrNotInCluster
}
token, err := ioutil.ReadFile(tokenFile)
token, err := os.ReadFile(tokenFile)
if err != nil {
return nil, err
}
@@ -572,10 +571,7 @@ func LoadTLSFiles(c *Config) error {
}
c.KeyData, err = dataFromSliceOrFile(c.KeyData, c.KeyFile)
if err != nil {
return err
}
return nil
return err
}
// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file,
@@ -585,7 +581,7 @@ func dataFromSliceOrFile(data []byte, file string) ([]byte, error) {
return data, nil
}
if len(file) > 0 {
fileData, err := ioutil.ReadFile(file)
fileData, err := os.ReadFile(file)
if err != nil {
return []byte{}, err
}
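The ioutil changes in this and the following files all follow the standard Go 1.16+ deprecation mapping; the small, self-contained sketch below shows the equivalents used throughout this diff:

    package main

    import (
        "io"
        "os"
        "strings"
    )

    func main() {
        // ioutil.ReadFile -> os.ReadFile
        data, _ := os.ReadFile("/etc/hostname")

        // ioutil.ReadAll -> io.ReadAll
        all, _ := io.ReadAll(strings.NewReader(string(data)))

        // ioutil.NopCloser -> io.NopCloser, ioutil.Discard -> io.Discard
        rc := io.NopCloser(strings.NewReader(string(all)))
        defer rc.Close()
        _, _ = io.Copy(io.Discard, rc)
    }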


@@ -55,6 +55,7 @@ func ConfigToExecCluster(config *Config) (*clientauthenticationapi.Cluster, erro
InsecureSkipTLSVerify: config.Insecure,
CertificateAuthorityData: caData,
ProxyURL: proxyURL,
DisableCompression: config.DisableCompression,
Config: config.ExecProvider.Config,
}, nil
}
@@ -79,6 +80,7 @@ func ExecClusterToConfig(cluster *clientauthenticationapi.Cluster) (*Config, err
ServerName: cluster.TLSServerName,
CAData: cluster.CertificateAuthorityData,
},
Proxy: proxy,
Proxy: proxy,
DisableCompression: cluster.DisableCompression,
}, nil
}


@@ -22,10 +22,10 @@ import (
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"mime"
"net/http"
"net/url"
"os"
"path"
"reflect"
"strconv"
@@ -34,6 +34,7 @@ import (
"time"
"golang.org/x/net/http2"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -116,8 +117,11 @@ type Request struct {
subresource string
// output
err error
body io.Reader
err error
// only one of body / bodyBytes may be set. requests using body are not retriable.
body io.Reader
bodyBytes []byte
retryFn requestRetryFunc
}
@@ -437,18 +441,21 @@ func (r *Request) Body(obj interface{}) *Request {
}
switch t := obj.(type) {
case string:
data, err := ioutil.ReadFile(t)
data, err := os.ReadFile(t)
if err != nil {
r.err = err
return r
}
glogBody("Request Body", data)
r.body = bytes.NewReader(data)
r.body = nil
r.bodyBytes = data
case []byte:
glogBody("Request Body", t)
r.body = bytes.NewReader(t)
r.body = nil
r.bodyBytes = t
case io.Reader:
r.body = t
r.bodyBytes = nil
case runtime.Object:
// callers may pass typed interface pointers, therefore we must check nil with reflection
if reflect.ValueOf(t).IsNil() {
@@ -465,7 +472,8 @@ func (r *Request) Body(obj interface{}) *Request {
return r
}
glogBody("Request Body", data)
r.body = bytes.NewReader(data)
r.body = nil
r.bodyBytes = data
r.SetHeader("Content-Type", r.c.content.ContentType)
default:
r.err = fmt.Errorf("unknown type used for body: %+v", obj)
@@ -825,9 +833,6 @@ func (r *Request) Stream(ctx context.Context) (io.ReadCloser, error) {
if err != nil {
return nil, err
}
if r.body != nil {
req.Body = ioutil.NopCloser(r.body)
}
resp, err := client.Do(req)
updateURLMetrics(ctx, r, resp, err)
retry.After(ctx, r, resp, err)
@@ -889,8 +894,20 @@ func (r *Request) requestPreflightCheck() error {
}
func (r *Request) newHTTPRequest(ctx context.Context) (*http.Request, error) {
var body io.Reader
switch {
case r.body != nil && r.bodyBytes != nil:
return nil, fmt.Errorf("cannot set both body and bodyBytes")
case r.body != nil:
body = r.body
case r.bodyBytes != nil:
// Create a new reader specifically for this request.
// Giving each request a dedicated reader allows retries to avoid races resetting the request body.
body = bytes.NewReader(r.bodyBytes)
}
url := r.URL().String()
req, err := http.NewRequest(r.verb, url, r.body)
req, err := http.NewRequest(r.verb, url, body)
if err != nil {
return nil, err
}
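This ties back to the struct comment above: a body held as bodyBytes can be wrapped in a fresh reader for every attempt, while an opaque io.Reader is consumed by the first attempt and cannot safely be replayed. A minimal sketch outside client-go, with hypothetical names:

    package main

    import (
        "bytes"
        "fmt"
        "io"
        "net/http"
    )

    // newAttemptRequest mirrors the pattern above: when the payload is available
    // as bytes, every attempt gets its own dedicated reader, so a retry never
    // reuses a reader that a previous attempt already drained.
    func newAttemptRequest(verb, url string, body io.Reader, bodyBytes []byte) (*http.Request, error) {
        if body != nil && bodyBytes != nil {
            return nil, fmt.Errorf("cannot set both body and bodyBytes")
        }
        if bodyBytes != nil {
            body = bytes.NewReader(bodyBytes) // fresh, rewindable reader per attempt
        }
        return http.NewRequest(verb, url, body)
    }

    func main() {
        payload := []byte(`{"kind":"Pod"}`)
        for attempt := 0; attempt < 3; attempt++ {
            req, err := newAttemptRequest(http.MethodPost, "http://example.invalid/api", nil, payload)
            fmt.Println(attempt, req != nil, err)
        }
    }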
@@ -1018,7 +1035,7 @@ func (r *Request) Do(ctx context.Context) Result {
func (r *Request) DoRaw(ctx context.Context) ([]byte, error) {
var result Result
err := r.request(ctx, func(req *http.Request, resp *http.Response) {
result.body, result.err = ioutil.ReadAll(resp.Body)
result.body, result.err = io.ReadAll(resp.Body)
glogBody("Response Body", result.body)
if resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent {
result.err = r.transformUnstructuredResponseError(resp, req, result.body)
@@ -1037,7 +1054,7 @@ func (r *Request) DoRaw(ctx context.Context) ([]byte, error) {
func (r *Request) transformResponse(resp *http.Response, req *http.Request) Result {
var body []byte
if resp.Body != nil {
data, err := ioutil.ReadAll(resp.Body)
data, err := io.ReadAll(resp.Body)
switch err.(type) {
case nil:
body = data
@@ -1179,7 +1196,7 @@ const maxUnstructuredResponseTextBytes = 2048
// TODO: introduce transformation of generic http.Client.Do() errors that separates 4.
func (r *Request) transformUnstructuredResponseError(resp *http.Response, req *http.Request, body []byte) error {
if body == nil && resp.Body != nil {
if data, err := ioutil.ReadAll(&io.LimitedReader{R: resp.Body, N: maxUnstructuredResponseTextBytes}); err == nil {
if data, err := io.ReadAll(&io.LimitedReader{R: resp.Body, N: maxUnstructuredResponseTextBytes}); err == nil {
body = data
}
}
@@ -1288,6 +1305,14 @@ func (r Result) StatusCode(statusCode *int) Result {
return r
}
// ContentType returns the "Content-Type" response header into the passed
// string, returning the Result for possible chaining. (Only valid if no
// error code was returned.)
func (r Result) ContentType(contentType *string) Result {
*contentType = r.contentType
return r
}
// Into stores the result into obj, if possible. If obj is nil it is ignored.
// If the returned object is of type Status and has .Status != StatusSuccess, the
// additional information in Status will be used to enrich the error.
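The new Result.ContentType accessor slots into the usual chained style; a hedged usage sketch (client construction elided, the helper name and path are illustrative):

    import (
        "context"

        "k8s.io/client-go/rest"
    )

    // fetchWithContentType captures the Content-Type header of the response
    // while still returning the raw body via the chained Result calls.
    func fetchWithContentType(ctx context.Context, c *rest.RESTClient, path string) (string, []byte, error) {
        var ct string
        body, err := c.Get().AbsPath(path).Do(ctx).ContentType(&ct).Raw()
        return ct, body, err
    }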


@@ -108,10 +108,13 @@ func (c *Config) TransportConfig() (*transport.Config, error) {
Groups: c.Impersonate.Groups,
Extra: c.Impersonate.Extra,
},
Dial: c.Dial,
Proxy: c.Proxy,
}
if c.Dial != nil {
conf.DialHolder = &transport.DialHolder{Dial: c.Dial}
}
if c.ExecProvider != nil && c.AuthProvider != nil {
return nil, errors.New("execProvider and authProvider cannot be used in combination")
}
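The custom dialer is now carried in a transport.DialHolder pointer rather than copied into the config directly, presumably so that transport configs holding custom dialers stay comparable and cacheable (the diff itself does not state the motivation). A small sketch of the wrapping, with a hypothetical helper name:

    import (
        "context"
        "net"

        "k8s.io/client-go/transport"
    )

    // wrapDial wraps a user-supplied dial function once, or returns nil when
    // no custom dialer is configured, matching the conditional above.
    func wrapDial(dial func(ctx context.Context, network, address string) (net.Conn, error)) *transport.DialHolder {
        if dial == nil {
            return nil
        }
        return &transport.DialHolder{Dial: dial}
    }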


@@ -20,7 +20,6 @@ import (
"context"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"time"
@@ -154,6 +153,11 @@ func (r *withRetry) IsNextRetry(ctx context.Context, restReq *Request, httpReq *
return false
}
if restReq.body != nil {
// we have an opaque reader, we can't safely reset it
return false
}
r.attempts++
r.retryAfter = &RetryAfter{Attempt: r.attempts}
if r.attempts > r.maxRetries {
@@ -210,18 +214,6 @@ func (r *withRetry) Before(ctx context.Context, request *Request) error {
return nil
}
// At this point we've made atleast one attempt, post which the response
// body should have been fully read and closed in order for it to be safe
// to reset the request body before we reconnect, in order for us to reuse
// the same TCP connection.
if seeker, ok := request.body.(io.Seeker); ok && request.body != nil {
if _, err := seeker.Seek(0, io.SeekStart); err != nil {
err = fmt.Errorf("failed to reset the request body while retrying a request: %v", err)
r.trackPreviousError(err)
return err
}
}
// if we are here, we have made attempt(s) at least once before.
if request.backoff != nil {
delay := request.backoff.CalculateBackoff(url)
@@ -345,7 +337,7 @@ func readAndCloseResponseBody(resp *http.Response) {
defer resp.Body.Close()
if resp.ContentLength <= maxBodySlurpSize {
io.Copy(ioutil.Discard, &io.LimitedReader{R: resp.Body, N: maxBodySlurpSize})
io.Copy(io.Discard, &io.LimitedReader{R: resp.Body, N: maxBodySlurpSize})
}
}
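Combined with the Body() changes earlier in this commit, callers effectively choose retriability by the body type they pass: a string file path, []byte, or runtime.Object is buffered into bodyBytes and can be retried, while a raw io.Reader hits the check above and opts the request out of retries. An illustrative sketch (the helper name is made up):

    import (
        "io"

        "k8s.io/client-go/rest"
    )

    // withBody shows the two paths: buffered data keeps the request retriable,
    // an opaque stream disables retries for it.
    func withBody(req *rest.Request, payload []byte, stream io.Reader) *rest.Request {
        if stream != nil {
            // io.Reader: stored as-is; IsNextRetry refuses to retry such requests.
            return req.Body(stream)
        }
        // []byte: stored in bodyBytes; each attempt gets a fresh reader.
        return req.Body(payload)
    }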