vendor: bump k8s dependencies to v0.29.2

Signed-off-by: CrazyMax <1951866+crazy-max@users.noreply.github.com>
CrazyMax
2024-02-24 16:41:41 +01:00
parent ae0a5e495a
commit 303e509bbf
761 changed files with 66147 additions and 29461 deletions

vendor/k8s.io/apimachinery/pkg/util/dump/dump.go (generated, vendored, new file)

@@ -0,0 +1,54 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dump
import (
"github.com/davecgh/go-spew/spew"
)
var prettyPrintConfig = &spew.ConfigState{
Indent: " ",
DisableMethods: true,
DisablePointerAddresses: true,
DisableCapacities: true,
}
// The config MUST NOT be changed because that could change the result of a hash operation
var prettyPrintConfigForHash = &spew.ConfigState{
Indent: " ",
SortKeys: true,
DisableMethods: true,
SpewKeys: true,
DisablePointerAddresses: true,
DisableCapacities: true,
}
// Pretty wraps spew.Sdump with Indent and disables methods like error() and String()
// The output may change over time, so for guaranteed output please take more direct control
func Pretty(a interface{}) string {
return prettyPrintConfig.Sdump(a)
}
// ForHash keeps the original Spew.Sprintf format to ensure the same checksum
func ForHash(a interface{}) string {
return prettyPrintConfigForHash.Sprintf("%#v", a)
}
// OneLine outputs the object in one line
func OneLine(a interface{}) string {
return prettyPrintConfig.Sprintf("%#v", a)
}
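For context, a minimal sketch of how the new dump helpers can be used; the struct and values here are hypothetical:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/dump"
)

type podSpec struct {
	Name     string
	Replicas int32
}

func main() {
	spec := podSpec{Name: "web", Replicas: 3}
	fmt.Println(dump.Pretty(spec))  // multi-line, indented, String()/error() methods disabled
	fmt.Println(dump.OneLine(spec)) // single-line %#v-style output
	fmt.Println(dump.ForHash(spec)) // deterministic output, safe for checksums
}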


@@ -214,7 +214,7 @@ func CreateAggregateFromMessageCountMap(m MessageCountMap) Aggregate {
return NewAggregate(result)
}
// Reduce will return err or, if err is an Aggregate and only has one item,
// Reduce will return err or nil, if err is an Aggregate and only has one item,
// the first item in the aggregate.
func Reduce(err error) error {
if agg, ok := err.(Aggregate); ok && err != nil {
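A small sketch of the behavior the corrected comment describes; the utilerrors alias is an assumption:

package main

import (
	"errors"
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	// A one-item Aggregate reduces to its single wrapped error.
	single := utilerrors.NewAggregate([]error{errors.New("boom")})
	fmt.Println(utilerrors.Reduce(single)) // boom

	// Any other error is returned unchanged.
	plain := errors.New("plain")
	fmt.Println(utilerrors.Reduce(plain) == plain) // true
}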


@@ -32,7 +32,7 @@ func NewLengthDelimitedFrameWriter(w io.Writer) io.Writer {
return &lengthDelimitedFrameWriter{w: w}
}
// Write writes a single frame to the nested writer, prepending it with the length in
// Write writes a single frame to the nested writer, prepending it with the length
// in bytes of data (as a 4 byte, bigendian uint32).
func (w *lengthDelimitedFrameWriter) Write(data []byte) (int, error) {
binary.BigEndian.PutUint32(w.h[:], uint32(len(data)))
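A runnable sketch of the framing the corrected comment describes, assuming this hunk is from k8s.io/apimachinery/pkg/util/framer:

package main

import (
	"bytes"
	"fmt"

	"k8s.io/apimachinery/pkg/util/framer"
)

func main() {
	var buf bytes.Buffer
	w := framer.NewLengthDelimitedFrameWriter(&buf)
	if _, err := w.Write([]byte("hello")); err != nil {
		panic(err)
	}
	// Frame layout: 4-byte big-endian length, then the payload.
	fmt.Printf("% x\n", buf.Bytes()) // 00 00 00 05 68 65 6c 6c 6f
}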


@@ -17,6 +17,7 @@ limitations under the License.
package httpstream
import (
"errors"
"fmt"
"io"
"net/http"
@@ -95,6 +96,26 @@ type Stream interface {
Identifier() uint32
}
// UpgradeFailureError encapsulates the cause for why the streaming
// upgrade request failed. Implements error interface.
type UpgradeFailureError struct {
Cause error
}
func (u *UpgradeFailureError) Error() string {
return fmt.Sprintf("unable to upgrade streaming request: %s", u.Cause)
}
// IsUpgradeFailure returns true if the passed error is (or wraps)
// an UpgradeFailureError.
func IsUpgradeFailure(err error) bool {
if err == nil {
return false
}
var upgradeErr *UpgradeFailureError
return errors.As(err, &upgradeErr)
}
// IsUpgradeRequest returns true if the given request is a connection upgrade request
func IsUpgradeRequest(req *http.Request) bool {
for _, h := range req.Header[http.CanonicalHeaderKey(HeaderConnection)] {
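A sketch of how callers can detect the new error type; the wrapping message text is made up:

package main

import (
	"errors"
	"fmt"

	"k8s.io/apimachinery/pkg/util/httpstream"
)

func main() {
	cause := errors.New("dial tcp 10.0.0.1:10250: connection refused")
	err := fmt.Errorf("exec failed: %w", &httpstream.UpgradeFailureError{Cause: cause})

	// errors.As walks the wrap chain, so wrapped upgrade failures match too.
	fmt.Println(httpstream.IsUpgradeFailure(err))   // true
	fmt.Println(httpstream.IsUpgradeFailure(cause)) // false
}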


@@ -23,7 +23,7 @@ import (
"encoding/base64"
"errors"
"fmt"
"io/ioutil"
"io"
"net"
"net/http"
"net/http/httputil"
@@ -38,6 +38,7 @@ import (
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/util/httpstream"
utilnet "k8s.io/apimachinery/pkg/util/net"
apiproxy "k8s.io/apimachinery/pkg/util/proxy"
"k8s.io/apimachinery/third_party/forked/golang/netutil"
)
@@ -68,6 +69,10 @@ type SpdyRoundTripper struct {
// pingPeriod is a period for sending Ping frames over established
// connections.
pingPeriod time.Duration
// upgradeTransport is an optional substitute for dialing if present. This field is
// mutually exclusive with the "tlsConfig", "Dialer", and "proxier".
upgradeTransport http.RoundTripper
}
var _ utilnet.TLSClientConfigHolder = &SpdyRoundTripper{}
@@ -76,43 +81,61 @@ var _ utilnet.Dialer = &SpdyRoundTripper{}
// NewRoundTripper creates a new SpdyRoundTripper that will use the specified
// tlsConfig.
func NewRoundTripper(tlsConfig *tls.Config) *SpdyRoundTripper {
func NewRoundTripper(tlsConfig *tls.Config) (*SpdyRoundTripper, error) {
return NewRoundTripperWithConfig(RoundTripperConfig{
TLS: tlsConfig,
TLS: tlsConfig,
UpgradeTransport: nil,
})
}
// NewRoundTripperWithProxy creates a new SpdyRoundTripper that will use the
// specified tlsConfig and proxy func.
func NewRoundTripperWithProxy(tlsConfig *tls.Config, proxier func(*http.Request) (*url.URL, error)) *SpdyRoundTripper {
func NewRoundTripperWithProxy(tlsConfig *tls.Config, proxier func(*http.Request) (*url.URL, error)) (*SpdyRoundTripper, error) {
return NewRoundTripperWithConfig(RoundTripperConfig{
TLS: tlsConfig,
Proxier: proxier,
TLS: tlsConfig,
Proxier: proxier,
UpgradeTransport: nil,
})
}
// NewRoundTripperWithConfig creates a new SpdyRoundTripper with the specified
// configuration.
func NewRoundTripperWithConfig(cfg RoundTripperConfig) *SpdyRoundTripper {
// configuration. Returns an error if the SpdyRoundTripper is misconfigured.
func NewRoundTripperWithConfig(cfg RoundTripperConfig) (*SpdyRoundTripper, error) {
// Process UpgradeTransport, which is mutually exclusive to TLSConfig and Proxier.
if cfg.UpgradeTransport != nil {
if cfg.TLS != nil || cfg.Proxier != nil {
return nil, fmt.Errorf("SpdyRoundTripper: UpgradeTransport is mutually exclusive to TLSConfig or Proxier")
}
tlsConfig, err := utilnet.TLSClientConfig(cfg.UpgradeTransport)
if err != nil {
return nil, fmt.Errorf("SpdyRoundTripper: Unable to retrieve TLSConfig from UpgradeTransport: %v", err)
}
cfg.TLS = tlsConfig
}
if cfg.Proxier == nil {
cfg.Proxier = utilnet.NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment)
}
return &SpdyRoundTripper{
tlsConfig: cfg.TLS,
proxier: cfg.Proxier,
pingPeriod: cfg.PingPeriod,
}
tlsConfig: cfg.TLS,
proxier: cfg.Proxier,
pingPeriod: cfg.PingPeriod,
upgradeTransport: cfg.UpgradeTransport,
}, nil
}
// RoundTripperConfig is a set of options for an SpdyRoundTripper.
type RoundTripperConfig struct {
// TLS configuration used by the round tripper.
// TLS configuration used by the round tripper if UpgradeTransport is not present.
TLS *tls.Config
// Proxier is a proxy function invoked on each request. Optional.
Proxier func(*http.Request) (*url.URL, error)
// PingPeriod is a period for sending SPDY Pings on the connection.
// Optional.
PingPeriod time.Duration
// UpgradeTransport is a substitute transport used for dialing. If set,
// this field will be used instead of "TLS" and "Proxier" for connection creation.
// Optional.
UpgradeTransport http.RoundTripper
}
// TLSClientConfig implements pkg/util/net.TLSClientConfigHolder for proper TLS checking during
@@ -123,7 +146,13 @@ func (s *SpdyRoundTripper) TLSClientConfig() *tls.Config {
// Dial implements k8s.io/apimachinery/pkg/util/net.Dialer.
func (s *SpdyRoundTripper) Dial(req *http.Request) (net.Conn, error) {
conn, err := s.dial(req)
var conn net.Conn
var err error
if s.upgradeTransport != nil {
conn, err = apiproxy.DialURL(req.Context(), req.URL, s.upgradeTransport)
} else {
conn, err = s.dial(req)
}
if err != nil {
return nil, err
}
@@ -337,7 +366,7 @@ func (s *SpdyRoundTripper) NewConnection(resp *http.Response) (httpstream.Connec
if (resp.StatusCode != http.StatusSwitchingProtocols) || !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) {
defer resp.Body.Close()
responseError := ""
responseErrorBytes, err := ioutil.ReadAll(resp.Body)
responseErrorBytes, err := io.ReadAll(resp.Body)
if err != nil {
responseError = "unable to read error from server response"
} else {
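Since the constructors now return an error, call sites need updating. A hedged sketch of the new shape; the TLS settings are placeholders:

package main

import (
	"crypto/tls"
	"log"
	"net/http"

	"k8s.io/apimachinery/pkg/util/httpstream/spdy"
)

func main() {
	rt, err := spdy.NewRoundTripper(&tls.Config{MinVersion: tls.VersionTLS12})
	if err != nil {
		log.Fatalf("misconfigured round tripper: %v", err)
	}
	_ = rt

	// UpgradeTransport is mutually exclusive with TLS and Proxier,
	// so this combination is rejected at construction time.
	if _, err := spdy.NewRoundTripperWithConfig(spdy.RoundTripperConfig{
		TLS:              &tls.Config{},
		UpgradeTransport: http.DefaultTransport,
	}); err != nil {
		log.Println(err)
	}
}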


@@ -54,7 +54,7 @@ const (
// FromInt creates an IntOrString object with an int32 value. It is
// your responsibility not to call this method with a value greater
// than int32.
// TODO: convert to (val int32)
// Deprecated: use FromInt32 instead.
func FromInt(val int) IntOrString {
if val > math.MaxInt32 || val < math.MinInt32 {
klog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack())
@@ -62,19 +62,24 @@ func FromInt(val int) IntOrString {
return IntOrString{Type: Int, IntVal: int32(val)}
}
// FromInt32 creates an IntOrString object with an int32 value.
func FromInt32(val int32) IntOrString {
return IntOrString{Type: Int, IntVal: val}
}
// FromString creates an IntOrString object with a string value.
func FromString(val string) IntOrString {
return IntOrString{Type: String, StrVal: val}
}
// Parse the given string and try to convert it to an integer before
// Parse the given string and try to convert it to an int32 integer before
// setting it as a string value.
func Parse(val string) IntOrString {
i, err := strconv.Atoi(val)
i, err := strconv.ParseInt(val, 10, 32)
if err != nil {
return FromString(val)
}
return FromInt(i)
return FromInt32(int32(i))
}
// UnmarshalJSON implements the json.Unmarshaller interface.
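A short sketch of the new FromInt32 and the int32-bounded Parse; the values are arbitrary:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// FromInt32 needs no overflow check, unlike the now-deprecated FromInt.
	port := intstr.FromInt32(8080)
	fmt.Println(port.IntValue()) // 8080

	// Parse only treats values that fit in int32 as integers.
	fmt.Println(intstr.Parse("80").Type == intstr.Int)            // true
	fmt.Println(intstr.Parse("4294967296").Type == intstr.String) // true: overflows int32
}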

File diff suppressed because it is too large


@@ -0,0 +1,57 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package managedfields
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/managedfields/internal"
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)
// FieldManager updates the managed fields and merges applied
// configurations.
type FieldManager = internal.FieldManager
// NewDefaultFieldManager creates a new FieldManager that merges apply requests
// and updates managed fields for other types of requests.
func NewDefaultFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (*FieldManager, error) {
f, err := internal.NewStructuredMergeManager(typeConverter, objectConverter, objectDefaulter, kind.GroupVersion(), hub, resetFields)
if err != nil {
return nil, fmt.Errorf("failed to create field manager: %v", err)
}
return internal.NewDefaultFieldManager(f, typeConverter, objectConverter, objectCreater, kind, subresource), nil
}
// NewDefaultCRDFieldManager creates a new FieldManager specifically for
// CRDs. This allows for the possibility of fields which are not defined
// in models, as well as having no models defined at all.
func NewDefaultCRDFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (_ *FieldManager, err error) {
f, err := internal.NewCRDStructuredMergeManager(typeConverter, objectConverter, objectDefaulter, kind.GroupVersion(), hub, resetFields)
if err != nil {
return nil, fmt.Errorf("failed to create field manager: %v", err)
}
return internal.NewDefaultFieldManager(f, typeConverter, objectConverter, objectCreater, kind, subresource), nil
}
// ValidateManagedFields checks that the given managedFields entries can be decoded.
func ValidateManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) error {
_, err := internal.DecodeManagedFields(encodedManagedFields)
return err
}
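A sketch of validating managedFields entries with the exported helper; the entry contents are illustrative:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/managedfields"
)

func main() {
	entries := []metav1.ManagedFieldsEntry{{
		Manager:    "kubectl",
		Operation:  metav1.ManagedFieldsOperationApply,
		APIVersion: "v1",
		FieldsType: "FieldsV1",
		FieldsV1:   &metav1.FieldsV1{Raw: []byte(`{"f:metadata":{"f:name":{}}}`)},
	}}
	// Fails if an entry has a bad operation, empty apiVersion, or unknown fieldsType.
	fmt.Println(managedfields.ValidateManagedFields(entries)) // <nil>
}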


@@ -0,0 +1,60 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal
import (
"sync"
"time"
)
// AtMostEvery will never run the method more than once every specified
// duration.
type AtMostEvery struct {
delay time.Duration
lastCall time.Time
mutex sync.Mutex
}
// NewAtMostEvery creates a new AtMostEvery, that will run the method at
// most every given duration.
func NewAtMostEvery(delay time.Duration) *AtMostEvery {
return &AtMostEvery{
delay: delay,
}
}
// updateLastCall returns true if the lastCall time has been updated,
// false if it was too early.
func (s *AtMostEvery) updateLastCall() bool {
s.mutex.Lock()
defer s.mutex.Unlock()
if time.Since(s.lastCall) < s.delay {
return false
}
s.lastCall = time.Now()
return true
}
// Do will run the method if enough time has passed, and return true.
// Otherwise, it does nothing and returns false.
func (s *AtMostEvery) Do(fn func()) bool {
if !s.updateLastCall() {
return false
}
fn()
return true
}
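AtMostEvery lives in an internal package and cannot be imported directly; a self-contained sketch of the same throttling pattern:

package main

import (
	"fmt"
	"sync"
	"time"
)

// atMostEvery mirrors the vendored internal.AtMostEvery.
type atMostEvery struct {
	delay    time.Duration
	lastCall time.Time
	mutex    sync.Mutex
}

func (s *atMostEvery) updateLastCall() bool {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	if time.Since(s.lastCall) < s.delay {
		return false
	}
	s.lastCall = time.Now()
	return true
}

func (s *atMostEvery) Do(fn func()) bool {
	if !s.updateLastCall() {
		return false
	}
	fn()
	return true
}

func main() {
	limiter := &atMostEvery{delay: time.Second}
	for i := 0; i < 3; i++ {
		// Only the first call within the delay window runs.
		fmt.Println(limiter.Do(func() { fmt.Println("logged") }))
	}
}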


@@ -0,0 +1,74 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
type buildManagerInfoManager struct {
fieldManager Manager
groupVersion schema.GroupVersion
subresource string
}
var _ Manager = &buildManagerInfoManager{}
// NewBuildManagerInfoManager creates a new Manager that converts the manager name into a unique identifier
// combining operation and version for update requests, and just operation for apply requests.
func NewBuildManagerInfoManager(f Manager, gv schema.GroupVersion, subresource string) Manager {
return &buildManagerInfoManager{
fieldManager: f,
groupVersion: gv,
subresource: subresource,
}
}
// Update implements Manager.
func (f *buildManagerInfoManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) {
manager, err := f.buildManagerInfo(manager, metav1.ManagedFieldsOperationUpdate)
if err != nil {
return nil, nil, fmt.Errorf("failed to build manager identifier: %v", err)
}
return f.fieldManager.Update(liveObj, newObj, managed, manager)
}
// Apply implements Manager.
func (f *buildManagerInfoManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) {
manager, err := f.buildManagerInfo(manager, metav1.ManagedFieldsOperationApply)
if err != nil {
return nil, nil, fmt.Errorf("failed to build manager identifier: %v", err)
}
return f.fieldManager.Apply(liveObj, appliedObj, managed, manager, force)
}
func (f *buildManagerInfoManager) buildManagerInfo(prefix string, operation metav1.ManagedFieldsOperationType) (string, error) {
managerInfo := metav1.ManagedFieldsEntry{
Manager: prefix,
Operation: operation,
APIVersion: f.groupVersion.String(),
Subresource: f.subresource,
}
if managerInfo.Manager == "" {
managerInfo.Manager = "unknown"
}
return BuildManagerIdentifier(&managerInfo)
}


@@ -0,0 +1,133 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal
import (
"fmt"
"sort"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)
type capManagersManager struct {
fieldManager Manager
maxUpdateManagers int
oldUpdatesManagerName string
}
var _ Manager = &capManagersManager{}
// NewCapManagersManager creates a new wrapped FieldManager which ensures that the number of managers from updates
// does not exceed maxUpdateManagers, by merging some of the oldest entries on each update.
func NewCapManagersManager(fieldManager Manager, maxUpdateManagers int) Manager {
return &capManagersManager{
fieldManager: fieldManager,
maxUpdateManagers: maxUpdateManagers,
oldUpdatesManagerName: "ancient-changes",
}
}
// Update implements Manager.
func (f *capManagersManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) {
object, managed, err := f.fieldManager.Update(liveObj, newObj, managed, manager)
if err != nil {
return object, managed, err
}
if managed, err = f.capUpdateManagers(managed); err != nil {
return nil, nil, fmt.Errorf("failed to cap update managers: %v", err)
}
return object, managed, nil
}
// Apply implements Manager.
func (f *capManagersManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) {
return f.fieldManager.Apply(liveObj, appliedObj, managed, fieldManager, force)
}
// capUpdateManagers merges a number of the oldest update entries into versioned buckets,
// such that the number of entries from updates does not exceed f.maxUpdateManagers.
func (f *capManagersManager) capUpdateManagers(managed Managed) (newManaged Managed, err error) {
// Gather all entries from updates
updaters := []string{}
for manager, fields := range managed.Fields() {
if !fields.Applied() {
updaters = append(updaters, manager)
}
}
if len(updaters) <= f.maxUpdateManagers {
return managed, nil
}
// If we have more than the maximum, sort the update entries by time, oldest first.
sort.Slice(updaters, func(i, j int) bool {
iTime, jTime, iSeconds, jSeconds := managed.Times()[updaters[i]], managed.Times()[updaters[j]], int64(0), int64(0)
if iTime != nil {
iSeconds = iTime.Unix()
}
if jTime != nil {
jSeconds = jTime.Unix()
}
if iSeconds != jSeconds {
return iSeconds < jSeconds
}
return updaters[i] < updaters[j]
})
// Merge the oldest updaters with versioned bucket managers until the number of updaters is under the cap
versionToFirstManager := map[string]string{}
for i, length := 0, len(updaters); i < len(updaters) && length > f.maxUpdateManagers; i++ {
manager := updaters[i]
vs := managed.Fields()[manager]
time := managed.Times()[manager]
version := string(vs.APIVersion())
// Create a new manager identifier for the versioned bucket entry.
// The version for this manager comes from the version of the update being merged into the bucket.
bucket, err := BuildManagerIdentifier(&metav1.ManagedFieldsEntry{
Manager: f.oldUpdatesManagerName,
Operation: metav1.ManagedFieldsOperationUpdate,
APIVersion: version,
})
if err != nil {
return managed, fmt.Errorf("failed to create bucket manager for version %v: %v", version, err)
}
// Merge the fieldsets if this is not the first time the version was seen.
// Otherwise, just record the manager name in versionToFirstManager.
if first, ok := versionToFirstManager[version]; ok {
// If the bucket doesn't exist yet, create one.
if _, ok := managed.Fields()[bucket]; !ok {
s := managed.Fields()[first]
delete(managed.Fields(), first)
managed.Fields()[bucket] = s
}
managed.Fields()[bucket] = fieldpath.NewVersionedSet(vs.Set().Union(managed.Fields()[bucket].Set()), vs.APIVersion(), vs.Applied())
delete(managed.Fields(), manager)
length--
// Use the time from the update being merged into the bucket, since it is more recent.
managed.Times()[bucket] = time
} else {
versionToFirstManager[version] = manager
}
}
return managed, nil
}


@@ -0,0 +1,89 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal
import (
"encoding/json"
"fmt"
"sort"
"strings"
"time"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
"sigs.k8s.io/structured-merge-diff/v4/merge"
)
// NewConflictError returns an error including details on the request's apply conflicts
func NewConflictError(conflicts merge.Conflicts) *errors.StatusError {
causes := []metav1.StatusCause{}
for _, conflict := range conflicts {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldManagerConflict,
Message: fmt.Sprintf("conflict with %v", printManager(conflict.Manager)),
Field: conflict.Path.String(),
})
}
return errors.NewApplyConflict(causes, getConflictMessage(conflicts))
}
func getConflictMessage(conflicts merge.Conflicts) string {
if len(conflicts) == 1 {
return fmt.Sprintf("Apply failed with 1 conflict: conflict with %v: %v", printManager(conflicts[0].Manager), conflicts[0].Path)
}
m := map[string][]fieldpath.Path{}
for _, conflict := range conflicts {
m[conflict.Manager] = append(m[conflict.Manager], conflict.Path)
}
uniqueManagers := []string{}
for manager := range m {
uniqueManagers = append(uniqueManagers, manager)
}
// Print conflicts by sorted managers.
sort.Strings(uniqueManagers)
messages := []string{}
for _, manager := range uniqueManagers {
messages = append(messages, fmt.Sprintf("conflicts with %v:", printManager(manager)))
for _, path := range m[manager] {
messages = append(messages, fmt.Sprintf("- %v", path))
}
}
return fmt.Sprintf("Apply failed with %d conflicts: %s", len(conflicts), strings.Join(messages, "\n"))
}
func printManager(manager string) string {
encodedManager := &metav1.ManagedFieldsEntry{}
if err := json.Unmarshal([]byte(manager), encodedManager); err != nil {
return fmt.Sprintf("%q", manager)
}
managerStr := fmt.Sprintf("%q", encodedManager.Manager)
if encodedManager.Subresource != "" {
managerStr = fmt.Sprintf("%s with subresource %q", managerStr, encodedManager.Subresource)
}
if encodedManager.Operation == metav1.ManagedFieldsOperationUpdate {
if encodedManager.Time == nil {
return fmt.Sprintf("%s using %v", managerStr, encodedManager.APIVersion)
}
return fmt.Sprintf("%s using %v at %v", managerStr, encodedManager.APIVersion, encodedManager.Time.UTC().Format(time.RFC3339))
}
return managerStr
}
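The package is internal, but the error it builds uses the public API. A sketch of the equivalent construction; the cause contents are made up:

package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// One StatusCause per conflicting field, plus a summary message,
	// mirroring what NewConflictError produces.
	causes := []metav1.StatusCause{{
		Type:    metav1.CauseTypeFieldManagerConflict,
		Message: `conflict with "kubectl" using v1`,
		Field:   ".spec.replicas",
	}}
	err := apierrors.NewApplyConflict(causes,
		`Apply failed with 1 conflict: conflict with "kubectl" using v1: .spec.replicas`)
	fmt.Println(apierrors.IsConflict(err)) // true: surfaces as a 409 Conflict
}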


@@ -0,0 +1,209 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal
import (
"fmt"
"reflect"
"time"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/klog/v2"
"sigs.k8s.io/structured-merge-diff/v4/merge"
)
// DefaultMaxUpdateManagers defines the default maximum retained number of managedFields entries from updates
// if the number of update managers exceeds this, the oldest entries will be merged until the number is below the maximum.
// TODO(jennybuckley): Determine if this is really the best value. Ideally we wouldn't unnecessarily merge too many entries.
const DefaultMaxUpdateManagers int = 10
// DefaultTrackOnCreateProbability defines the default probability that the field management of an object
// starts being tracked from the object's creation, instead of from the first time an apply request is made for the object.
const DefaultTrackOnCreateProbability float32 = 1
var atMostEverySecond = NewAtMostEvery(time.Second)
// FieldManager updates the managed fields and merges applied
// configurations.
type FieldManager struct {
fieldManager Manager
subresource string
}
// NewFieldManager creates a new FieldManager that decodes, manages, then re-encodes managedFields
// on update and apply requests.
func NewFieldManager(f Manager, subresource string) *FieldManager {
return &FieldManager{fieldManager: f, subresource: subresource}
}
// NewDefaultFieldManager is a helper function that wraps a Manager with certain default logic.
func NewDefaultFieldManager(f Manager, typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, subresource string) *FieldManager {
return NewFieldManager(
NewVersionCheckManager(
NewLastAppliedUpdater(
NewLastAppliedManager(
NewProbabilisticSkipNonAppliedManager(
NewCapManagersManager(
NewBuildManagerInfoManager(
NewManagedFieldsUpdater(
NewStripMetaManager(f),
), kind.GroupVersion(), subresource,
), DefaultMaxUpdateManagers,
), objectCreater, DefaultTrackOnCreateProbability,
), typeConverter, objectConverter, kind.GroupVersion(),
),
), kind,
), subresource,
)
}
func decodeLiveOrNew(liveObj, newObj runtime.Object, ignoreManagedFieldsFromRequestObject bool) (Managed, error) {
liveAccessor, err := meta.Accessor(liveObj)
if err != nil {
return nil, err
}
// We take the managedFields of the live object in case the request tries to
// manually set managedFields via a subresource.
if ignoreManagedFieldsFromRequestObject {
return emptyManagedFieldsOnErr(DecodeManagedFields(liveAccessor.GetManagedFields()))
}
// If the object doesn't have metadata, we should just return without trying to
// set the managedFields at all, so creates/updates/patches will work normally.
newAccessor, err := meta.Accessor(newObj)
if err != nil {
return nil, err
}
if isResetManagedFields(newAccessor.GetManagedFields()) {
return NewEmptyManaged(), nil
}
// If the managed field is empty or we failed to decode it,
// let's try the live object. This is to prevent clients who
// don't understand managedFields from deleting it accidentally.
managed, err := DecodeManagedFields(newAccessor.GetManagedFields())
if err != nil || len(managed.Fields()) == 0 {
return emptyManagedFieldsOnErr(DecodeManagedFields(liveAccessor.GetManagedFields()))
}
return managed, nil
}
func emptyManagedFieldsOnErr(managed Managed, err error) (Managed, error) {
if err != nil {
return NewEmptyManaged(), nil
}
return managed, nil
}
// Update is used when the object has already been merged (non-apply
// use-case), and simply updates the managed fields in the output
// object.
func (f *FieldManager) Update(liveObj, newObj runtime.Object, manager string) (object runtime.Object, err error) {
// First try to decode the managed fields provided in the update.
// This is necessary to allow directly updating managed fields.
isSubresource := f.subresource != ""
managed, err := decodeLiveOrNew(liveObj, newObj, isSubresource)
if err != nil {
return newObj, nil
}
RemoveObjectManagedFields(newObj)
if object, managed, err = f.fieldManager.Update(liveObj, newObj, managed, manager); err != nil {
return nil, err
}
if err = EncodeObjectManagedFields(object, managed); err != nil {
return nil, fmt.Errorf("failed to encode managed fields: %v", err)
}
return object, nil
}
// UpdateNoErrors is the same as Update, but it will not return
// errors. If an error happens, the object is returned with
// managedFields cleared.
func (f *FieldManager) UpdateNoErrors(liveObj, newObj runtime.Object, manager string) runtime.Object {
obj, err := f.Update(liveObj, newObj, manager)
if err != nil {
atMostEverySecond.Do(func() {
ns, name := "unknown", "unknown"
if accessor, err := meta.Accessor(newObj); err == nil {
ns = accessor.GetNamespace()
name = accessor.GetName()
}
klog.ErrorS(err, "[SHOULD NOT HAPPEN] failed to update managedFields", "versionKind",
newObj.GetObjectKind().GroupVersionKind(), "namespace", ns, "name", name)
})
// Explicitly remove managedFields on failure, so that
// we can't have garbage in it.
RemoveObjectManagedFields(newObj)
return newObj
}
return obj
}
// Returns true if the managedFields indicate that the user is trying to
// reset the managedFields, i.e. if the list is non-nil but empty, or if
// the list has one empty item.
func isResetManagedFields(managedFields []metav1.ManagedFieldsEntry) bool {
if len(managedFields) == 0 {
return managedFields != nil
}
if len(managedFields) == 1 {
return reflect.DeepEqual(managedFields[0], metav1.ManagedFieldsEntry{})
}
return false
}
// Apply is used when server-side apply is called, as it merges the
// object and updates the managed fields.
func (f *FieldManager) Apply(liveObj, appliedObj runtime.Object, manager string, force bool) (object runtime.Object, err error) {
// If the object doesn't have metadata, apply isn't allowed.
accessor, err := meta.Accessor(liveObj)
if err != nil {
return nil, fmt.Errorf("couldn't get accessor: %v", err)
}
// Decode the managed fields in the live object, since it isn't allowed in the patch.
managed, err := DecodeManagedFields(accessor.GetManagedFields())
if err != nil {
return nil, fmt.Errorf("failed to decode managed fields: %v", err)
}
object, managed, err = f.fieldManager.Apply(liveObj, appliedObj, managed, manager, force)
if err != nil {
if conflicts, ok := err.(merge.Conflicts); ok {
return nil, NewConflictError(conflicts)
}
return nil, err
}
if err = EncodeObjectManagedFields(object, managed); err != nil {
return nil, fmt.Errorf("failed to encode managed fields: %v", err)
}
return object, nil
}
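The reset semantics above are easy to get wrong; a self-contained mirror of isResetManagedFields (the package is internal):

package main

import (
	"fmt"
	"reflect"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// isReset mirrors the vendored isResetManagedFields helper.
func isReset(managedFields []metav1.ManagedFieldsEntry) bool {
	if len(managedFields) == 0 {
		return managedFields != nil
	}
	if len(managedFields) == 1 {
		return reflect.DeepEqual(managedFields[0], metav1.ManagedFieldsEntry{})
	}
	return false
}

func main() {
	fmt.Println(isReset(nil))                             // false: field simply absent
	fmt.Println(isReset([]metav1.ManagedFieldsEntry{}))   // true: explicit empty list
	fmt.Println(isReset([]metav1.ManagedFieldsEntry{{}})) // true: one empty entry
}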


@@ -0,0 +1,47 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal
import (
"bytes"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)
// EmptyFields represents a set with no paths
// It looks like metav1.Fields{Raw: []byte("{}")}
var EmptyFields = func() metav1.FieldsV1 {
f, err := SetToFields(*fieldpath.NewSet())
if err != nil {
panic("should never happen")
}
return f
}()
// FieldsToSet creates a set of paths from an input trie of fields
func FieldsToSet(f metav1.FieldsV1) (s fieldpath.Set, err error) {
err = s.FromJSON(bytes.NewReader(f.Raw))
return s, err
}
// SetToFields creates a trie of fields from an input set of paths
func SetToFields(s fieldpath.Set) (f metav1.FieldsV1, err error) {
f.Raw, err = s.ToJSON()
return f, err
}
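FieldsToSet and SetToFields are thin wrappers over the structured-merge-diff API; a sketch of the same round trip using that public API directly:

package main

import (
	"bytes"
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

func main() {
	// FieldsV1 wire format: a trie of fields.
	raw := []byte(`{"f:metadata":{"f:labels":{"f:app":{}}}}`)

	var set fieldpath.Set
	if err := set.FromJSON(bytes.NewReader(raw)); err != nil {
		panic(err)
	}
	out, err := set.ToJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // the same trie, re-serialized
}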


@@ -0,0 +1,50 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal
import (
"fmt"
"k8s.io/apimachinery/pkg/api/meta"
apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
"k8s.io/apimachinery/pkg/runtime"
)
// LastAppliedConfigAnnotation is the annotation used to store the previous
// configuration of a resource for use in a three way diff by UpdateApplyAnnotation.
//
// This is a copy of the corev1 annotation since we don't want to depend on the whole package.
const LastAppliedConfigAnnotation = "kubectl.kubernetes.io/last-applied-configuration"
// SetLastApplied sets the last-applied annotation to the given value on
// the object.
func SetLastApplied(obj runtime.Object, value string) error {
accessor, err := meta.Accessor(obj)
if err != nil {
panic(fmt.Sprintf("couldn't get accessor: %v", err))
}
var annotations = accessor.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
annotations[LastAppliedConfigAnnotation] = value
if err := apimachineryvalidation.ValidateAnnotationsSize(annotations); err != nil {
delete(annotations, LastAppliedConfigAnnotation)
}
accessor.SetAnnotations(annotations)
return nil
}


@@ -0,0 +1,171 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal
import (
"encoding/json"
"fmt"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
"sigs.k8s.io/structured-merge-diff/v4/merge"
)
type lastAppliedManager struct {
fieldManager Manager
typeConverter TypeConverter
objectConverter runtime.ObjectConvertor
groupVersion schema.GroupVersion
}
var _ Manager = &lastAppliedManager{}
// NewLastAppliedManager converts the client-side apply annotation to
// server-side apply managed fields
func NewLastAppliedManager(fieldManager Manager, typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, groupVersion schema.GroupVersion) Manager {
return &lastAppliedManager{
fieldManager: fieldManager,
typeConverter: typeConverter,
objectConverter: objectConverter,
groupVersion: groupVersion,
}
}
// Update implements Manager.
func (f *lastAppliedManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) {
return f.fieldManager.Update(liveObj, newObj, managed, manager)
}
// Apply will consider the last-applied annotation
// for upgrading an object managed by client-side apply to server-side apply
// without conflicts.
func (f *lastAppliedManager) Apply(liveObj, newObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) {
newLiveObj, newManaged, newErr := f.fieldManager.Apply(liveObj, newObj, managed, manager, force)
// Upgrade the client-side apply annotation only from kubectl server-side-apply.
// To opt-out of this behavior, users may specify a different field manager.
if manager != "kubectl" {
return newLiveObj, newManaged, newErr
}
// Check if we have conflicts
if newErr == nil {
return newLiveObj, newManaged, newErr
}
conflicts, ok := newErr.(merge.Conflicts)
if !ok {
return newLiveObj, newManaged, newErr
}
conflictSet := conflictsToSet(conflicts)
// Check if conflicts are allowed due to client-side apply,
// and if so, then force apply
allowedConflictSet, err := f.allowedConflictsFromLastApplied(liveObj)
if err != nil {
return newLiveObj, newManaged, newErr
}
if !conflictSet.Difference(allowedConflictSet).Empty() {
newConflicts := conflictsDifference(conflicts, allowedConflictSet)
return newLiveObj, newManaged, newConflicts
}
return f.fieldManager.Apply(liveObj, newObj, managed, manager, true)
}
func (f *lastAppliedManager) allowedConflictsFromLastApplied(liveObj runtime.Object) (*fieldpath.Set, error) {
var accessor, err = meta.Accessor(liveObj)
if err != nil {
panic(fmt.Sprintf("couldn't get accessor: %v", err))
}
// If there is no client-side apply annotation, then there is nothing to do
var annotations = accessor.GetAnnotations()
if annotations == nil {
return nil, fmt.Errorf("no last applied annotation")
}
var lastApplied, ok = annotations[LastAppliedConfigAnnotation]
if !ok || lastApplied == "" {
return nil, fmt.Errorf("no last applied annotation")
}
liveObjVersioned, err := f.objectConverter.ConvertToVersion(liveObj, f.groupVersion)
if err != nil {
return nil, fmt.Errorf("failed to convert live obj to versioned: %v", err)
}
liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned)
if err != nil {
return nil, fmt.Errorf("failed to convert live obj to typed: %v", err)
}
var lastAppliedObj = &unstructured.Unstructured{Object: map[string]interface{}{}}
err = json.Unmarshal([]byte(lastApplied), lastAppliedObj)
if err != nil {
return nil, fmt.Errorf("failed to decode last applied obj: %v in '%s'", err, lastApplied)
}
if lastAppliedObj.GetAPIVersion() != f.groupVersion.String() {
return nil, fmt.Errorf("expected version of last applied to match live object '%s', but got '%s': %v", f.groupVersion.String(), lastAppliedObj.GetAPIVersion(), err)
}
lastAppliedObjTyped, err := f.typeConverter.ObjectToTyped(lastAppliedObj)
if err != nil {
return nil, fmt.Errorf("failed to convert last applied to typed: %v", err)
}
lastAppliedObjFieldSet, err := lastAppliedObjTyped.ToFieldSet()
if err != nil {
return nil, fmt.Errorf("failed to create fieldset for last applied object: %v", err)
}
comparison, err := lastAppliedObjTyped.Compare(liveObjTyped)
if err != nil {
return nil, fmt.Errorf("failed to compare last applied object and live object: %v", err)
}
// Remove fields in last applied that are different, added, or missing in
// the live object.
// Because these last-applied fields don't match the live object's fields,
// we don't own them.
lastAppliedObjFieldSet = lastAppliedObjFieldSet.
Difference(comparison.Modified).
Difference(comparison.Added).
Difference(comparison.Removed)
return lastAppliedObjFieldSet, nil
}
// TODO: replace with merge.Conflicts.ToSet()
func conflictsToSet(conflicts merge.Conflicts) *fieldpath.Set {
conflictSet := fieldpath.NewSet()
for _, conflict := range []merge.Conflict(conflicts) {
conflictSet.Insert(conflict.Path)
}
return conflictSet
}
func conflictsDifference(conflicts merge.Conflicts, s *fieldpath.Set) merge.Conflicts {
newConflicts := []merge.Conflict{}
for _, conflict := range []merge.Conflict(conflicts) {
if !s.Has(conflict.Path) {
newConflicts = append(newConflicts, conflict)
}
}
return newConflicts
}


@@ -0,0 +1,102 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal
import (
"fmt"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
)
type lastAppliedUpdater struct {
fieldManager Manager
}
var _ Manager = &lastAppliedUpdater{}
// NewLastAppliedUpdater keeps the client-side apply annotation up to date with
// server-side apply managed fields
func NewLastAppliedUpdater(fieldManager Manager) Manager {
return &lastAppliedUpdater{
fieldManager: fieldManager,
}
}
// Update implements Manager.
func (f *lastAppliedUpdater) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) {
return f.fieldManager.Update(liveObj, newObj, managed, manager)
}
// Apply implements Manager. It also keeps the client-side apply annotation
// in sync with server-side apply managed fields.
func (f *lastAppliedUpdater) Apply(liveObj, newObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) {
liveObj, managed, err := f.fieldManager.Apply(liveObj, newObj, managed, manager, force)
if err != nil {
return liveObj, managed, err
}
// Sync the client-side apply annotation only from kubectl server-side apply.
// To opt-out of this behavior, users may specify a different field manager.
//
// If the client-side apply annotation doesn't exist,
// then continue because we have no annotation to update
if manager == "kubectl" && hasLastApplied(liveObj) {
lastAppliedValue, err := buildLastApplied(newObj)
if err != nil {
return nil, nil, fmt.Errorf("failed to build last-applied annotation: %v", err)
}
err = SetLastApplied(liveObj, lastAppliedValue)
if err != nil {
return nil, nil, fmt.Errorf("failed to set last-applied annotation: %v", err)
}
}
return liveObj, managed, err
}
func hasLastApplied(obj runtime.Object) bool {
var accessor, err = meta.Accessor(obj)
if err != nil {
panic(fmt.Sprintf("couldn't get accessor: %v", err))
}
var annotations = accessor.GetAnnotations()
if annotations == nil {
return false
}
lastApplied, ok := annotations[LastAppliedConfigAnnotation]
return ok && len(lastApplied) > 0
}
func buildLastApplied(obj runtime.Object) (string, error) {
obj = obj.DeepCopyObject()
var accessor, err = meta.Accessor(obj)
if err != nil {
panic(fmt.Sprintf("couldn't get accessor: %v", err))
}
// Remove the annotation from the object before encoding the object
var annotations = accessor.GetAnnotations()
delete(annotations, LastAppliedConfigAnnotation)
accessor.SetAnnotations(annotations)
lastApplied, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj)
if err != nil {
return "", fmt.Errorf("couldn't encode object into last applied annotation: %v", err)
}
return string(lastApplied), nil
}


@@ -0,0 +1,248 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal
import (
"encoding/json"
"fmt"
"sort"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)
// ManagedInterface groups a fieldpath.ManagedFields together with the timestamps associated with each operation.
type ManagedInterface interface {
// Fields gets the fieldpath.ManagedFields.
Fields() fieldpath.ManagedFields
// Times gets the timestamps associated with each operation.
Times() map[string]*metav1.Time
}
type managedStruct struct {
fields fieldpath.ManagedFields
times map[string]*metav1.Time
}
var _ ManagedInterface = &managedStruct{}
// Fields implements ManagedInterface.
func (m *managedStruct) Fields() fieldpath.ManagedFields {
return m.fields
}
// Times implements ManagedInterface.
func (m *managedStruct) Times() map[string]*metav1.Time {
return m.times
}
// NewEmptyManaged creates an empty ManagedInterface.
func NewEmptyManaged() ManagedInterface {
return NewManaged(fieldpath.ManagedFields{}, map[string]*metav1.Time{})
}
// NewManaged creates a ManagedInterface from a fieldpath.ManagedFields and the timestamps associated with each operation.
func NewManaged(f fieldpath.ManagedFields, t map[string]*metav1.Time) ManagedInterface {
return &managedStruct{
fields: f,
times: t,
}
}
// RemoveObjectManagedFields removes the ManagedFields from the object
// before we merge so that it doesn't appear in the ManagedFields
// recursively.
func RemoveObjectManagedFields(obj runtime.Object) {
accessor, err := meta.Accessor(obj)
if err != nil {
panic(fmt.Sprintf("couldn't get accessor: %v", err))
}
accessor.SetManagedFields(nil)
}
// EncodeObjectManagedFields converts and stores the fieldpath.ManagedFields into the object's ManagedFields
func EncodeObjectManagedFields(obj runtime.Object, managed ManagedInterface) error {
accessor, err := meta.Accessor(obj)
if err != nil {
panic(fmt.Sprintf("couldn't get accessor: %v", err))
}
encodedManagedFields, err := encodeManagedFields(managed)
if err != nil {
return fmt.Errorf("failed to convert back managed fields to API: %v", err)
}
accessor.SetManagedFields(encodedManagedFields)
return nil
}
// DecodeManagedFields converts ManagedFields from the wire format (api format)
// to the format used by sigs.k8s.io/structured-merge-diff
func DecodeManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) (ManagedInterface, error) {
managed := managedStruct{}
managed.fields = make(fieldpath.ManagedFields, len(encodedManagedFields))
managed.times = make(map[string]*metav1.Time, len(encodedManagedFields))
for i, encodedVersionedSet := range encodedManagedFields {
switch encodedVersionedSet.Operation {
case metav1.ManagedFieldsOperationApply, metav1.ManagedFieldsOperationUpdate:
default:
return nil, fmt.Errorf("operation must be `Apply` or `Update`")
}
if len(encodedVersionedSet.APIVersion) < 1 {
return nil, fmt.Errorf("apiVersion must not be empty")
}
switch encodedVersionedSet.FieldsType {
case "FieldsV1":
// Valid case.
case "":
return nil, fmt.Errorf("missing fieldsType in managed fields entry %d", i)
default:
return nil, fmt.Errorf("invalid fieldsType %q in managed fields entry %d", encodedVersionedSet.FieldsType, i)
}
manager, err := BuildManagerIdentifier(&encodedVersionedSet)
if err != nil {
return nil, fmt.Errorf("error decoding manager from %v: %v", encodedVersionedSet, err)
}
managed.fields[manager], err = decodeVersionedSet(&encodedVersionedSet)
if err != nil {
return nil, fmt.Errorf("error decoding versioned set from %v: %v", encodedVersionedSet, err)
}
managed.times[manager] = encodedVersionedSet.Time
}
return &managed, nil
}
// BuildManagerIdentifier creates a manager identifier string from a ManagedFieldsEntry
func BuildManagerIdentifier(encodedManager *metav1.ManagedFieldsEntry) (manager string, err error) {
encodedManagerCopy := *encodedManager
// Never include fields type in the manager identifier
encodedManagerCopy.FieldsType = ""
// Never include the fields in the manager identifier
encodedManagerCopy.FieldsV1 = nil
// Never include the time in the manager identifier
encodedManagerCopy.Time = nil
// For appliers, don't include the APIVersion in the manager identifier,
// so it will always have the same manager identifier each time it applies.
if encodedManager.Operation == metav1.ManagedFieldsOperationApply {
encodedManagerCopy.APIVersion = ""
}
// Use the remaining fields to build the manager identifier
b, err := json.Marshal(&encodedManagerCopy)
if err != nil {
return "", fmt.Errorf("error marshalling manager identifier: %v", err)
}
return string(b), nil
}
func decodeVersionedSet(encodedVersionedSet *metav1.ManagedFieldsEntry) (versionedSet fieldpath.VersionedSet, err error) {
fields := EmptyFields
if encodedVersionedSet.FieldsV1 != nil {
fields = *encodedVersionedSet.FieldsV1
}
set, err := FieldsToSet(fields)
if err != nil {
return nil, fmt.Errorf("error decoding set: %v", err)
}
return fieldpath.NewVersionedSet(&set, fieldpath.APIVersion(encodedVersionedSet.APIVersion), encodedVersionedSet.Operation == metav1.ManagedFieldsOperationApply), nil
}
// encodeManagedFields converts ManagedFields from the format used by
// sigs.k8s.io/structured-merge-diff to the wire format (api format)
func encodeManagedFields(managed ManagedInterface) (encodedManagedFields []metav1.ManagedFieldsEntry, err error) {
if len(managed.Fields()) == 0 {
return nil, nil
}
encodedManagedFields = []metav1.ManagedFieldsEntry{}
for manager := range managed.Fields() {
versionedSet := managed.Fields()[manager]
v, err := encodeManagerVersionedSet(manager, versionedSet)
if err != nil {
return nil, fmt.Errorf("error encoding versioned set for %v: %v", manager, err)
}
if t, ok := managed.Times()[manager]; ok {
v.Time = t
}
encodedManagedFields = append(encodedManagedFields, *v)
}
return sortEncodedManagedFields(encodedManagedFields)
}
func sortEncodedManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) (sortedManagedFields []metav1.ManagedFieldsEntry, err error) {
sort.Slice(encodedManagedFields, func(i, j int) bool {
p, q := encodedManagedFields[i], encodedManagedFields[j]
if p.Operation != q.Operation {
return p.Operation < q.Operation
}
pSeconds, qSeconds := int64(0), int64(0)
if p.Time != nil {
pSeconds = p.Time.Unix()
}
if q.Time != nil {
qSeconds = q.Time.Unix()
}
if pSeconds != qSeconds {
return pSeconds < qSeconds
}
if p.Manager != q.Manager {
return p.Manager < q.Manager
}
if p.APIVersion != q.APIVersion {
return p.APIVersion < q.APIVersion
}
return p.Subresource < q.Subresource
})
return encodedManagedFields, nil
}
func encodeManagerVersionedSet(manager string, versionedSet fieldpath.VersionedSet) (encodedVersionedSet *metav1.ManagedFieldsEntry, err error) {
encodedVersionedSet = &metav1.ManagedFieldsEntry{}
// Get as many fields as we can from the manager identifier
err = json.Unmarshal([]byte(manager), encodedVersionedSet)
if err != nil {
return nil, fmt.Errorf("error unmarshalling manager identifier %v: %v", manager, err)
}
// Get the APIVersion, Operation, and Fields from the VersionedSet
encodedVersionedSet.APIVersion = string(versionedSet.APIVersion())
if versionedSet.Applied() {
encodedVersionedSet.Operation = metav1.ManagedFieldsOperationApply
}
encodedVersionedSet.FieldsType = "FieldsV1"
fields, err := SetToFields(*versionedSet.Set())
if err != nil {
return nil, fmt.Errorf("error encoding set: %v", err)
}
encodedVersionedSet.FieldsV1 = &fields
return encodedVersionedSet, nil
}
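A sketch of what BuildManagerIdentifier produces, mirroring its field-stripping logic with the public types; the entry is illustrative:

package main

import (
	"encoding/json"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	entry := metav1.ManagedFieldsEntry{
		Manager:    "kubectl",
		Operation:  metav1.ManagedFieldsOperationApply,
		APIVersion: "v1",
	}
	// FieldsType, FieldsV1, and Time are never part of the identifier;
	// for Apply operations the APIVersion is dropped too, so an applier
	// keeps a stable identifier across version changes.
	entry.APIVersion = ""
	b, err := json.Marshal(&entry)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"manager":"kubectl","operation":"Apply"}
}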


@@ -0,0 +1,82 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)
type managedFieldsUpdater struct {
fieldManager Manager
}
var _ Manager = &managedFieldsUpdater{}
// NewManagedFieldsUpdater is responsible for updating the managedfields
// in the object, updating the time of the operation as necessary. For
// updates, it uses a hard-coded manager to detect if things have
// changed, and swaps back the correct manager after the operation is
// done.
func NewManagedFieldsUpdater(fieldManager Manager) Manager {
return &managedFieldsUpdater{
fieldManager: fieldManager,
}
}
// Update implements Manager.
func (f *managedFieldsUpdater) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) {
self := "current-operation"
object, managed, err := f.fieldManager.Update(liveObj, newObj, managed, self)
if err != nil {
return object, managed, err
}
// If the current operation took any fields from anything, it means the object changed,
// so update the timestamp of the managedFieldsEntry and merge with any previous updates from the same manager
if vs, ok := managed.Fields()[self]; ok {
delete(managed.Fields(), self)
if previous, ok := managed.Fields()[manager]; ok {
managed.Fields()[manager] = fieldpath.NewVersionedSet(vs.Set().Union(previous.Set()), vs.APIVersion(), vs.Applied())
} else {
managed.Fields()[manager] = vs
}
managed.Times()[manager] = &metav1.Time{Time: time.Now().UTC()}
}
return object, managed, nil
}
// Apply implements Manager.
func (f *managedFieldsUpdater) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) {
object, managed, err := f.fieldManager.Apply(liveObj, appliedObj, managed, fieldManager, force)
if err != nil {
return object, managed, err
}
if object != nil {
managed.Times()[fieldManager] = &metav1.Time{Time: time.Now().UTC()}
} else {
object = liveObj.DeepCopyObject()
RemoveObjectManagedFields(object)
}
return object, managed, nil
}


@@ -0,0 +1,52 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)
// Managed groups a fieldpath.ManagedFields together with the timestamps associated with each operation.
type Managed interface {
// Fields gets the fieldpath.ManagedFields.
Fields() fieldpath.ManagedFields
// Times gets the timestamps associated with each operation.
Times() map[string]*metav1.Time
}
// Manager updates the managed fields and merges applied configurations.
type Manager interface {
// Update is used when the object has already been merged (non-apply
// use-case), and simply updates the managed fields in the output
// object.
// * `liveObj` is not mutated by this function
// * `newObj` may be mutated by this function
// Returns the new object with managedFields removed, and the object's new
// proposed managedFields separately.
Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error)
// Apply is used when server-side apply is called, as it merges the
// object and updates the managed fields.
// * `liveObj` is not mutated by this function
// * `newObj` may be mutated by this function
// Returns the new object with managedFields removed, and the object's new
// proposed managedFields separately.
Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error)
}


@@ -0,0 +1,140 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal
import (
"encoding/json"
"errors"
"fmt"
"strconv"
"strings"
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
"sigs.k8s.io/structured-merge-diff/v4/value"
)
const (
// Field indicates that the content of this path element is a field's name
Field = "f"
// Value indicates that the content of this path element is a field's value
Value = "v"
// Index indicates that the content of this path element is an index in an array
Index = "i"
// Key indicates that the content of this path element is a key-value map
Key = "k"
// Separator separates the type of a path element from the contents
Separator = ":"
)
// NewPathElement parses a serialized path element
func NewPathElement(s string) (fieldpath.PathElement, error) {
split := strings.SplitN(s, Separator, 2)
if len(split) < 2 {
return fieldpath.PathElement{}, fmt.Errorf("missing colon: %v", s)
}
switch split[0] {
case Field:
return fieldpath.PathElement{
FieldName: &split[1],
}, nil
case Value:
val, err := value.FromJSON([]byte(split[1]))
if err != nil {
return fieldpath.PathElement{}, err
}
return fieldpath.PathElement{
Value: &val,
}, nil
case Index:
i, err := strconv.Atoi(split[1])
if err != nil {
return fieldpath.PathElement{}, err
}
return fieldpath.PathElement{
Index: &i,
}, nil
case Key:
kv := map[string]json.RawMessage{}
err := json.Unmarshal([]byte(split[1]), &kv)
if err != nil {
return fieldpath.PathElement{}, err
}
fields := value.FieldList{}
for k, v := range kv {
b, err := json.Marshal(v)
if err != nil {
return fieldpath.PathElement{}, err
}
val, err := value.FromJSON(b)
if err != nil {
return fieldpath.PathElement{}, err
}
fields = append(fields, value.Field{
Name: k,
Value: val,
})
}
return fieldpath.PathElement{
Key: &fields,
}, nil
default:
// Ignore unknown path element types
return fieldpath.PathElement{}, nil
}
}
// PathElementString serializes a path element
func PathElementString(pe fieldpath.PathElement) (string, error) {
switch {
case pe.FieldName != nil:
return Field + Separator + *pe.FieldName, nil
case pe.Key != nil:
kv := map[string]json.RawMessage{}
for _, k := range *pe.Key {
b, err := value.ToJSON(k.Value)
if err != nil {
return "", err
}
m := json.RawMessage{}
err = json.Unmarshal(b, &m)
if err != nil {
return "", err
}
kv[k.Name] = m
}
b, err := json.Marshal(kv)
if err != nil {
return "", err
}
return Key + Separator + string(b), nil
case pe.Value != nil:
b, err := value.ToJSON(*pe.Value)
if err != nil {
return "", err
}
return Value + Separator + string(b), nil
case pe.Index != nil:
return Index + Separator + strconv.Itoa(*pe.Index), nil
default:
return "", errors.New("invalid type of path element")
}
}
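// An illustrative sketch (not authoritative) of how the two functions above
// round-trip a path element through its serialized form, using the constants
// defined at the top of this file:
//
//	pe, _ := NewPathElement("f:metadata")      // field element, FieldName "metadata"
//	s, _ := PathElementString(pe)              // s == "f:metadata"
//	kpe, _ := NewPathElement(`k:{"name":"a"}`) // key element with one key field
//	_, _ = s, kpe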

View File

@@ -0,0 +1,89 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal
import (
"fmt"
"math/rand"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
)
type skipNonAppliedManager struct {
fieldManager Manager
objectCreater runtime.ObjectCreater
beforeApplyManagerName string
probability float32
}
var _ Manager = &skipNonAppliedManager{}
// NewSkipNonAppliedManager creates a new wrapped FieldManager that only starts tracking managers after the first apply.
func NewSkipNonAppliedManager(fieldManager Manager, objectCreater runtime.ObjectCreater) Manager {
return NewProbabilisticSkipNonAppliedManager(fieldManager, objectCreater, 0.0)
}
// NewProbabilisticSkipNonAppliedManager creates a new wrapped FieldManager that starts tracking managers after the first apply,
// or starts tracking on create with p probability.
func NewProbabilisticSkipNonAppliedManager(fieldManager Manager, objectCreater runtime.ObjectCreater, p float32) Manager {
return &skipNonAppliedManager{
fieldManager: fieldManager,
objectCreater: objectCreater,
beforeApplyManagerName: "before-first-apply",
probability: p,
}
}
// Update implements Manager.
func (f *skipNonAppliedManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) {
accessor, err := meta.Accessor(liveObj)
if err != nil {
return newObj, managed, nil
}
// If managed fields is empty, we need to determine whether to skip tracking managed fields.
if len(managed.Fields()) == 0 {
// Check if the operation is a create, by checking whether liveObj's UID is empty.
// If the operation is create, P(tracking managed fields) = f.probability
// If the operation is update, skip tracking managed fields, since we already know managed fields is empty.
if len(accessor.GetUID()) == 0 {
if f.probability <= rand.Float32() {
return newObj, managed, nil
}
} else {
return newObj, managed, nil
}
}
return f.fieldManager.Update(liveObj, newObj, managed, manager)
}
// Apply implements Manager.
func (f *skipNonAppliedManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) {
if len(managed.Fields()) == 0 {
gvk := appliedObj.GetObjectKind().GroupVersionKind()
emptyObj, err := f.objectCreater.New(gvk)
if err != nil {
return nil, nil, fmt.Errorf("failed to create empty object of type %v: %v", gvk, err)
}
liveObj, managed, err = f.fieldManager.Update(emptyObj, liveObj, managed, f.beforeApplyManagerName)
if err != nil {
return nil, nil, fmt.Errorf("failed to create manager for existing fields: %v", err)
}
}
return f.fieldManager.Apply(liveObj, appliedObj, managed, fieldManager, force)
}
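// An illustrative sketch of the intended wiring (names assumed): track
// fields only after the first apply, or probabilistically on create.
//
//	mgr := NewSkipNonAppliedManager(inner, scheme)                  // never track on create
//	mgr = NewProbabilisticSkipNonAppliedManager(inner, scheme, 0.1) // track ~10% of creates
//
// where `inner` is any Manager and `scheme` is a runtime.ObjectCreater,
// e.g. a *runtime.Scheme.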

View File

@@ -0,0 +1,90 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal
import (
"fmt"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)
type stripMetaManager struct {
fieldManager Manager
// stripSet is the set of field paths that should never be part of a managedFields entry.
stripSet *fieldpath.Set
}
var _ Manager = &stripMetaManager{}
// NewStripMetaManager creates a new Manager that strips metadata and typemeta fields from the manager's fieldset.
func NewStripMetaManager(fieldManager Manager) Manager {
return &stripMetaManager{
fieldManager: fieldManager,
stripSet: fieldpath.NewSet(
fieldpath.MakePathOrDie("apiVersion"),
fieldpath.MakePathOrDie("kind"),
fieldpath.MakePathOrDie("metadata"),
fieldpath.MakePathOrDie("metadata", "name"),
fieldpath.MakePathOrDie("metadata", "namespace"),
fieldpath.MakePathOrDie("metadata", "creationTimestamp"),
fieldpath.MakePathOrDie("metadata", "selfLink"),
fieldpath.MakePathOrDie("metadata", "uid"),
fieldpath.MakePathOrDie("metadata", "clusterName"),
fieldpath.MakePathOrDie("metadata", "generation"),
fieldpath.MakePathOrDie("metadata", "managedFields"),
fieldpath.MakePathOrDie("metadata", "resourceVersion"),
),
}
}
// Update implements Manager.
func (f *stripMetaManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) {
newObj, managed, err := f.fieldManager.Update(liveObj, newObj, managed, manager)
if err != nil {
return nil, nil, err
}
f.stripFields(managed.Fields(), manager)
return newObj, managed, nil
}
// Apply implements Manager.
func (f *stripMetaManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) {
newObj, managed, err := f.fieldManager.Apply(liveObj, appliedObj, managed, manager, force)
if err != nil {
return nil, nil, err
}
f.stripFields(managed.Fields(), manager)
return newObj, managed, nil
}
// stripFields removes a predefined set of paths found in typed from managed
func (f *stripMetaManager) stripFields(managed fieldpath.ManagedFields, manager string) {
vs, ok := managed[manager]
if ok {
if vs == nil {
panic(fmt.Sprintf("Found unexpected nil manager which should never happen: %s", manager))
}
newSet := vs.Set().Difference(f.stripSet)
if newSet.Empty() {
delete(managed, manager)
} else {
managed[manager] = fieldpath.NewVersionedSet(newSet, vs.APIVersion(), vs.Applied())
}
}
}

View File

@@ -0,0 +1,189 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal
import (
"fmt"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
"sigs.k8s.io/structured-merge-diff/v4/merge"
"sigs.k8s.io/structured-merge-diff/v4/typed"
)
type structuredMergeManager struct {
typeConverter TypeConverter
objectConverter runtime.ObjectConvertor
objectDefaulter runtime.ObjectDefaulter
groupVersion schema.GroupVersion
hubVersion schema.GroupVersion
updater merge.Updater
}
var _ Manager = &structuredMergeManager{}
// NewStructuredMergeManager creates a new Manager that merges apply requests
// and update managed fields for other types of requests.
func NewStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (Manager, error) {
if typeConverter == nil {
return nil, fmt.Errorf("typeconverter must not be nil")
}
return &structuredMergeManager{
typeConverter: typeConverter,
objectConverter: objectConverter,
objectDefaulter: objectDefaulter,
groupVersion: gv,
hubVersion: hub,
updater: merge.Updater{
Converter: newVersionConverter(typeConverter, objectConverter, hub), // This is the converter provided to SMD from k8s
IgnoredFields: resetFields,
},
}, nil
}
// NewCRDStructuredMergeManager creates a new Manager specifically for
// CRDs. This allows for the possibility of fields which are not defined
// in models, as well as having no models defined at all.
func NewCRDStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (_ Manager, err error) {
return &structuredMergeManager{
typeConverter: typeConverter,
objectConverter: objectConverter,
objectDefaulter: objectDefaulter,
groupVersion: gv,
hubVersion: hub,
updater: merge.Updater{
Converter: newCRDVersionConverter(typeConverter, objectConverter, hub),
IgnoredFields: resetFields,
},
}, nil
}
func objectGVKNN(obj runtime.Object) string {
name := "<unknown>"
namespace := "<unknown>"
if accessor, err := meta.Accessor(obj); err == nil {
name = accessor.GetName()
namespace = accessor.GetNamespace()
}
return fmt.Sprintf("%v/%v; %v", namespace, name, obj.GetObjectKind().GroupVersionKind())
}
// Update implements Manager.
func (f *structuredMergeManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) {
newObjVersioned, err := f.toVersioned(newObj)
if err != nil {
return nil, nil, fmt.Errorf("failed to convert new object (%v) to proper version (%v): %v", objectGVKNN(newObj), f.groupVersion, err)
}
liveObjVersioned, err := f.toVersioned(liveObj)
if err != nil {
return nil, nil, fmt.Errorf("failed to convert live object (%v) to proper version: %v", objectGVKNN(liveObj), err)
}
newObjTyped, err := f.typeConverter.ObjectToTyped(newObjVersioned, typed.AllowDuplicates)
if err != nil {
return nil, nil, fmt.Errorf("failed to convert new object (%v) to smd typed: %v", objectGVKNN(newObjVersioned), err)
}
liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned, typed.AllowDuplicates)
if err != nil {
return nil, nil, fmt.Errorf("failed to convert live object (%v) to smd typed: %v", objectGVKNN(liveObjVersioned), err)
}
apiVersion := fieldpath.APIVersion(f.groupVersion.String())
// TODO(apelisse) use the first return value when unions are implemented
_, managedFields, err := f.updater.Update(liveObjTyped, newObjTyped, apiVersion, managed.Fields(), manager)
if err != nil {
return nil, nil, fmt.Errorf("failed to update ManagedFields (%v): %v", objectGVKNN(newObjVersioned), err)
}
managed = NewManaged(managedFields, managed.Times())
return newObj, managed, nil
}
// Apply implements Manager.
func (f *structuredMergeManager) Apply(liveObj, patchObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) {
// Check that the patch object has the same version as the live object
if patchVersion := patchObj.GetObjectKind().GroupVersionKind().GroupVersion(); patchVersion != f.groupVersion {
return nil, nil,
errors.NewBadRequest(
fmt.Sprintf("Incorrect version specified in apply patch. "+
"Specified patch version: %s, expected: %s",
patchVersion, f.groupVersion))
}
patchObjMeta, err := meta.Accessor(patchObj)
if err != nil {
return nil, nil, fmt.Errorf("couldn't get accessor: %v", err)
}
if patchObjMeta.GetManagedFields() != nil {
return nil, nil, errors.NewBadRequest("metadata.managedFields must be nil")
}
liveObjVersioned, err := f.toVersioned(liveObj)
if err != nil {
return nil, nil, fmt.Errorf("failed to convert live object (%v) to proper version: %v", objectGVKNN(liveObj), err)
}
// Don't allow duplicates in the applied object.
patchObjTyped, err := f.typeConverter.ObjectToTyped(patchObj)
if err != nil {
return nil, nil, fmt.Errorf("failed to create typed patch object (%v): %v", objectGVKNN(patchObj), err)
}
liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned, typed.AllowDuplicates)
if err != nil {
return nil, nil, fmt.Errorf("failed to create typed live object (%v): %v", objectGVKNN(liveObjVersioned), err)
}
apiVersion := fieldpath.APIVersion(f.groupVersion.String())
newObjTyped, managedFields, err := f.updater.Apply(liveObjTyped, patchObjTyped, apiVersion, managed.Fields(), manager, force)
if err != nil {
return nil, nil, err
}
managed = NewManaged(managedFields, managed.Times())
if newObjTyped == nil {
return nil, managed, nil
}
newObj, err := f.typeConverter.TypedToObject(newObjTyped)
if err != nil {
return nil, nil, fmt.Errorf("failed to convert new typed object (%v) to object: %v", objectGVKNN(patchObj), err)
}
newObjVersioned, err := f.toVersioned(newObj)
if err != nil {
return nil, nil, fmt.Errorf("failed to convert new object (%v) to proper version: %v", objectGVKNN(patchObj), err)
}
f.objectDefaulter.Default(newObjVersioned)
newObjUnversioned, err := f.toUnversioned(newObjVersioned)
if err != nil {
return nil, nil, fmt.Errorf("failed to convert to unversioned (%v): %v", objectGVKNN(patchObj), err)
}
return newObjUnversioned, managed, nil
}
func (f *structuredMergeManager) toVersioned(obj runtime.Object) (runtime.Object, error) {
return f.objectConverter.ConvertToVersion(obj, f.groupVersion)
}
func (f *structuredMergeManager) toUnversioned(obj runtime.Object) (runtime.Object, error) {
return f.objectConverter.ConvertToVersion(obj, f.hubVersion)
}
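// An illustrative sketch of how this manager is typically assembled; the
// wiring below is assumed, not prescribed (a *runtime.Scheme satisfies both
// the converter and defaulter interfaces):
//
//	mgr, err := NewStructuredMergeManager(
//	    typeConverter, // e.g. from NewTypeConverter(openAPISchemas, false)
//	    scheme,        // runtime.ObjectConvertor
//	    scheme,        // runtime.ObjectDefaulter
//	    schema.GroupVersion{Group: "apps", Version: "v1"},                       // request version
//	    schema.GroupVersion{Group: "apps", Version: runtime.APIVersionInternal}, // hub version
//	    nil,           // resetFields
//	)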

View File

@@ -0,0 +1,193 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal
import (
"fmt"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kube-openapi/pkg/schemaconv"
"k8s.io/kube-openapi/pkg/validation/spec"
smdschema "sigs.k8s.io/structured-merge-diff/v4/schema"
"sigs.k8s.io/structured-merge-diff/v4/typed"
"sigs.k8s.io/structured-merge-diff/v4/value"
)
// TypeConverter allows you to convert from runtime.Object to
// typed.TypedValue and the other way around.
type TypeConverter interface {
ObjectToTyped(runtime.Object, ...typed.ValidationOptions) (*typed.TypedValue, error)
TypedToObject(*typed.TypedValue) (runtime.Object, error)
}
type typeConverter struct {
parser map[schema.GroupVersionKind]*typed.ParseableType
}
var _ TypeConverter = &typeConverter{}
func NewTypeConverter(openapiSpec map[string]*spec.Schema, preserveUnknownFields bool) (TypeConverter, error) {
typeSchema, err := schemaconv.ToSchemaFromOpenAPI(openapiSpec, preserveUnknownFields)
if err != nil {
return nil, fmt.Errorf("failed to convert models to schema: %v", err)
}
typeParser := typed.Parser{Schema: smdschema.Schema{Types: typeSchema.Types}}
tr := indexModels(&typeParser, openapiSpec)
return &typeConverter{parser: tr}, nil
}
func (c *typeConverter) ObjectToTyped(obj runtime.Object, opts ...typed.ValidationOptions) (*typed.TypedValue, error) {
gvk := obj.GetObjectKind().GroupVersionKind()
t := c.parser[gvk]
if t == nil {
return nil, NewNoCorrespondingTypeError(gvk)
}
switch o := obj.(type) {
case *unstructured.Unstructured:
return t.FromUnstructured(o.UnstructuredContent(), opts...)
default:
return t.FromStructured(obj, opts...)
}
}
func (c *typeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) {
return valueToObject(value.AsValue())
}
type deducedTypeConverter struct{}
// NewDeducedTypeConverter creates a TypeConverter for CRDs that don't have a
// schema. It implements the same interface (and creates the same types of
// objects), so that everything can still work the same. CRDs are merged
// with all their fields being "atomic" (lists included).
func NewDeducedTypeConverter() TypeConverter {
return deducedTypeConverter{}
}
// ObjectToTyped converts an object into a TypedValue with a "deduced type".
func (deducedTypeConverter) ObjectToTyped(obj runtime.Object, opts ...typed.ValidationOptions) (*typed.TypedValue, error) {
switch o := obj.(type) {
case *unstructured.Unstructured:
return typed.DeducedParseableType.FromUnstructured(o.UnstructuredContent(), opts...)
default:
return typed.DeducedParseableType.FromStructured(obj, opts...)
}
}
// TypedToObject transforms the typed value into a runtime.Object. That
// is not specific to deduced type.
func (deducedTypeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) {
return valueToObject(value.AsValue())
}
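// An illustrative sketch (not authoritative): the deduced converter
// round-trips unstructured content without any schema.
//
//	tc := NewDeducedTypeConverter()
//	tv, _ := tc.ObjectToTyped(&unstructured.Unstructured{Object: map[string]interface{}{
//	    "apiVersion": "v1",
//	    "kind":       "ConfigMap",
//	    "data":       map[string]interface{}{"k": "v"},
//	}})
//	obj, _ := tc.TypedToObject(tv) // an *unstructured.Unstructured again
//	_ = obj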
func valueToObject(val value.Value) (runtime.Object, error) {
vu := val.Unstructured()
switch o := vu.(type) {
case map[string]interface{}:
return &unstructured.Unstructured{Object: o}, nil
default:
return nil, fmt.Errorf("failed to convert value to unstructured for type %T", vu)
}
}
func indexModels(
typeParser *typed.Parser,
openAPISchemas map[string]*spec.Schema,
) map[schema.GroupVersionKind]*typed.ParseableType {
tr := map[schema.GroupVersionKind]*typed.ParseableType{}
for modelName, model := range openAPISchemas {
gvkList := parseGroupVersionKind(model.Extensions)
if len(gvkList) == 0 {
continue
}
parsedType := typeParser.Type(modelName)
for _, gvk := range gvkList {
if len(gvk.Kind) > 0 {
tr[schema.GroupVersionKind(gvk)] = &parsedType
}
}
}
return tr
}
// parseGroupVersionKind gets and parses the GroupVersionKind from the extensions map. Returns an empty list if there is none.
func parseGroupVersionKind(extensions map[string]interface{}) []schema.GroupVersionKind {
gvkListResult := []schema.GroupVersionKind{}
// Get the extensions
gvkExtension, ok := extensions["x-kubernetes-group-version-kind"]
if !ok {
return []schema.GroupVersionKind{}
}
// gvk extension must be a list of at least 1 element.
gvkList, ok := gvkExtension.([]interface{})
if !ok {
return []schema.GroupVersionKind{}
}
for _, gvk := range gvkList {
var group, version, kind string
// gvk extension list must be a map with group, version, and
// kind fields
if gvkMap, ok := gvk.(map[interface{}]interface{}); ok {
group, ok = gvkMap["group"].(string)
if !ok {
continue
}
version, ok = gvkMap["version"].(string)
if !ok {
continue
}
kind, ok = gvkMap["kind"].(string)
if !ok {
continue
}
} else if gvkMap, ok := gvk.(map[string]interface{}); ok {
group, ok = gvkMap["group"].(string)
if !ok {
continue
}
version, ok = gvkMap["version"].(string)
if !ok {
continue
}
kind, ok = gvkMap["kind"].(string)
if !ok {
continue
}
} else {
continue
}
gvkListResult = append(gvkListResult, schema.GroupVersionKind{
Group: group,
Version: version,
Kind: kind,
})
}
return gvkListResult
}

View File

@@ -0,0 +1,52 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal
import (
"fmt"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
type versionCheckManager struct {
fieldManager Manager
gvk schema.GroupVersionKind
}
var _ Manager = &versionCheckManager{}
// NewVersionCheckManager creates a manager that makes sure that the
// applied object is in the proper version.
func NewVersionCheckManager(fieldManager Manager, gvk schema.GroupVersionKind) Manager {
return &versionCheckManager{fieldManager: fieldManager, gvk: gvk}
}
// Update implements Manager.
func (f *versionCheckManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) {
// Nothing to do for updates, this is checked in many other places.
return f.fieldManager.Update(liveObj, newObj, managed, manager)
}
// Apply implements Manager.
func (f *versionCheckManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) {
if gvk := appliedObj.GetObjectKind().GroupVersionKind(); gvk != f.gvk {
return nil, nil, errors.NewBadRequest(fmt.Sprintf("invalid object type: %v", gvk))
}
return f.fieldManager.Apply(liveObj, appliedObj, managed, fieldManager, force)
}
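// An illustrative sketch (names assumed): any apply whose object is not in
// the expected GroupVersionKind is rejected with a BadRequest error.
//
//	mgr := NewVersionCheckManager(inner, schema.GroupVersionKind{
//	    Group: "apps", Version: "v1", Kind: "Deployment",
//	})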

View File

@@ -0,0 +1,123 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal
import (
"fmt"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
"sigs.k8s.io/structured-merge-diff/v4/merge"
"sigs.k8s.io/structured-merge-diff/v4/typed"
)
// versionConverter is an implementation of
// sigs.k8s.io/structured-merge-diff/merge.Converter
type versionConverter struct {
typeConverter TypeConverter
objectConvertor runtime.ObjectConvertor
hubGetter func(from schema.GroupVersion) schema.GroupVersion
}
var _ merge.Converter = &versionConverter{}
// NewVersionConverter builds a VersionConverter from a TypeConverter and an ObjectConvertor.
func newVersionConverter(t TypeConverter, o runtime.ObjectConvertor, h schema.GroupVersion) merge.Converter {
return &versionConverter{
typeConverter: t,
objectConvertor: o,
hubGetter: func(from schema.GroupVersion) schema.GroupVersion {
return schema.GroupVersion{
Group: from.Group,
Version: h.Version,
}
},
}
}
// NewCRDVersionConverter builds a VersionConverter for CRDs from a TypeConverter and an ObjectConvertor.
func newCRDVersionConverter(t TypeConverter, o runtime.ObjectConvertor, h schema.GroupVersion) merge.Converter {
return &versionConverter{
typeConverter: t,
objectConvertor: o,
hubGetter: func(from schema.GroupVersion) schema.GroupVersion {
return h
},
}
}
// Convert implements sigs.k8s.io/structured-merge-diff/merge.Converter
func (v *versionConverter) Convert(object *typed.TypedValue, version fieldpath.APIVersion) (*typed.TypedValue, error) {
// Convert the smd typed value to a kubernetes object.
objectToConvert, err := v.typeConverter.TypedToObject(object)
if err != nil {
return object, err
}
// Parse the target groupVersion.
groupVersion, err := schema.ParseGroupVersion(string(version))
if err != nil {
return object, err
}
// If attempting to convert to the same version as we already have, just return it.
fromVersion := objectToConvert.GetObjectKind().GroupVersionKind().GroupVersion()
if fromVersion == groupVersion {
return object, nil
}
// Convert to internal
internalObject, err := v.objectConvertor.ConvertToVersion(objectToConvert, v.hubGetter(fromVersion))
if err != nil {
return object, err
}
// Convert the object into the target version
convertedObject, err := v.objectConvertor.ConvertToVersion(internalObject, groupVersion)
if err != nil {
return object, err
}
// Convert the object back to a smd typed value and return it.
return v.typeConverter.ObjectToTyped(convertedObject)
}
// IsMissingVersionError returns true if the given error indicates that the
// requested version has no registered or corresponding type.
func (v *versionConverter) IsMissingVersionError(err error) bool {
return runtime.IsNotRegisteredError(err) || isNoCorrespondingTypeError(err)
}
type noCorrespondingTypeErr struct {
gvk schema.GroupVersionKind
}
func NewNoCorrespondingTypeError(gvk schema.GroupVersionKind) error {
return &noCorrespondingTypeErr{gvk: gvk}
}
func (k *noCorrespondingTypeErr) Error() string {
return fmt.Sprintf("no corresponding type for %v", k.gvk)
}
func isNoCorrespondingTypeError(err error) bool {
if err == nil {
return false
}
_, ok := err.(*noCorrespondingTypeErr)
return ok
}

View File

@@ -0,0 +1,261 @@
apiVersion: v1
kind: Node
metadata:
annotations:
container.googleapis.com/instance_id: "123456789321654789"
node.alpha.kubernetes.io/ttl: "0"
volumes.kubernetes.io/controller-managed-attach-detach: "true"
creationTimestamp: "2019-07-09T16:17:29Z"
labels:
kubernetes.io/arch: amd64
beta.kubernetes.io/fluentd-ds-ready: "true"
beta.kubernetes.io/instance-type: n1-standard-4
kubernetes.io/os: linux
cloud.google.com/gke-nodepool: default-pool
cloud.google.com/gke-os-distribution: cos
failure-domain.beta.kubernetes.io/region: us-central1
failure-domain.beta.kubernetes.io/zone: us-central1-b
topology.kubernetes.io/region: us-central1
topology.kubernetes.io/zone: us-central1-b
kubernetes.io/hostname: node-default-pool-something
name: node-default-pool-something
resourceVersion: "211582541"
selfLink: /api/v1/nodes/node-default-pool-something
uid: 0c24d0e1-a265-11e9-abe4-42010a80026b
spec:
podCIDR: 10.0.0.1/24
providerID: some-provider-id-of-some-sort
status:
addresses:
- address: 10.0.0.1
type: InternalIP
- address: 192.168.0.1
type: ExternalIP
- address: node-default-pool-something
type: Hostname
allocatable:
cpu: 3920m
ephemeral-storage: "104638878617"
hugepages-2Mi: "0"
memory: 12700100Ki
pods: "110"
capacity:
cpu: "4"
ephemeral-storage: 202086868Ki
hugepages-2Mi: "0"
memory: 15399364Ki
pods: "110"
conditions:
- lastHeartbeatTime: "2019-09-20T19:32:08Z"
lastTransitionTime: "2019-07-09T16:22:08Z"
message: containerd is functioning properly
reason: FrequentContainerdRestart
status: "False"
type: FrequentContainerdRestart
- lastHeartbeatTime: "2019-09-20T19:32:08Z"
lastTransitionTime: "2019-07-09T16:22:06Z"
message: docker overlay2 is functioning properly
reason: CorruptDockerOverlay2
status: "False"
type: CorruptDockerOverlay2
- lastHeartbeatTime: "2019-09-20T19:32:08Z"
lastTransitionTime: "2019-07-09T16:22:06Z"
message: node is functioning properly
reason: UnregisterNetDevice
status: "False"
type: FrequentUnregisterNetDevice
- lastHeartbeatTime: "2019-09-20T19:32:08Z"
lastTransitionTime: "2019-07-09T16:17:04Z"
message: kernel has no deadlock
reason: KernelHasNoDeadlock
status: "False"
type: KernelDeadlock
- lastHeartbeatTime: "2019-09-20T19:32:08Z"
lastTransitionTime: "2019-07-09T16:17:04Z"
message: Filesystem is not read-only
reason: FilesystemIsNotReadOnly
status: "False"
type: ReadonlyFilesystem
- lastHeartbeatTime: "2019-09-20T19:32:08Z"
lastTransitionTime: "2019-07-09T16:22:05Z"
message: kubelet is functioning properly
reason: FrequentKubeletRestart
status: "False"
type: FrequentKubeletRestart
- lastHeartbeatTime: "2019-09-20T19:32:08Z"
lastTransitionTime: "2019-07-09T16:22:06Z"
message: docker is functioning properly
reason: FrequentDockerRestart
status: "False"
type: FrequentDockerRestart
- lastHeartbeatTime: "2019-07-09T16:17:47Z"
lastTransitionTime: "2019-07-09T16:17:47Z"
message: RouteController created a route
reason: RouteCreated
status: "False"
type: NetworkUnavailable
- lastHeartbeatTime: "2019-09-20T19:32:50Z"
lastTransitionTime: "2019-07-09T16:17:29Z"
message: kubelet has sufficient disk space available
reason: KubeletHasSufficientDisk
status: "False"
type: OutOfDisk
- lastHeartbeatTime: "2019-09-20T19:32:50Z"
lastTransitionTime: "2019-07-09T16:17:29Z"
message: kubelet has sufficient memory available
reason: KubeletHasSufficientMemory
status: "False"
type: MemoryPressure
- lastHeartbeatTime: "2019-09-20T19:32:50Z"
lastTransitionTime: "2019-07-09T16:17:29Z"
message: kubelet has no disk pressure
reason: KubeletHasNoDiskPressure
status: "False"
type: DiskPressure
- lastHeartbeatTime: "2019-09-20T19:32:50Z"
lastTransitionTime: "2019-07-09T16:17:29Z"
message: kubelet has sufficient PID available
reason: KubeletHasSufficientPID
status: "False"
type: PIDPressure
- lastHeartbeatTime: "2019-09-20T19:32:50Z"
lastTransitionTime: "2019-07-09T16:17:49Z"
message: kubelet is posting ready status. AppArmor enabled
reason: KubeletReady
status: "True"
type: Ready
daemonEndpoints:
kubeletEndpoint:
Port: 10250
images:
- names:
- grafana/grafana@sha256:80e5e113a984d74836aa16f5b4524012099436b1a50df293f00ac6377fb512c8
- grafana/grafana:4.4.2
sizeBytes: 287008013
- names:
- registry.k8s.io/node-problem-detector@sha256:f95cab985c26b2f46e9bd43283e0bfa88860c14e0fb0649266babe8b65e9eb2b
- registry.k8s.io/node-problem-detector:v0.4.1
sizeBytes: 286572743
- names:
- grafana/grafana@sha256:7ff7f9b2501a5d55b55ce3f58d21771b1c5af1f2a4ab7dbf11bef7142aae7033
- grafana/grafana:4.2.0
sizeBytes: 277940263
- names:
- influxdb@sha256:7dddf03376348876ed4bdf33d6dfa3326f45a2bae0930dbd80781a374eb519bc
- influxdb:1.2.2
sizeBytes: 223948571
- names:
- gcr.io/stackdriver-agents/stackdriver-logging-agent@sha256:f8d5231b67b9c53f60068b535a11811d29d1b3efd53d2b79f2a2591ea338e4f2
- gcr.io/stackdriver-agents/stackdriver-logging-agent:0.6-1.6.0-1
sizeBytes: 223242132
- names:
- nginx@sha256:35779791c05d119df4fe476db8f47c0bee5943c83eba5656a15fc046db48178b
- nginx:1.10.1
sizeBytes: 180708613
- names:
- registry.k8s.io/fluentd-elasticsearch@sha256:b8c94527b489fb61d3d81ce5ad7f3ddbb7be71e9620a3a36e2bede2f2e487d73
- registry.k8s.io/fluentd-elasticsearch:v2.0.4
sizeBytes: 135716379
- names:
- nginx@sha256:00be67d6ba53d5318cd91c57771530f5251cfbe028b7be2c4b70526f988cfc9f
- nginx:latest
sizeBytes: 109357355
- names:
- registry.k8s.io/kubernetes-dashboard-amd64@sha256:dc4026c1b595435ef5527ca598e1e9c4343076926d7d62b365c44831395adbd0
- registry.k8s.io/kubernetes-dashboard-amd64:v1.8.3
sizeBytes: 102319441
- names:
- gcr.io/google_containers/kube-proxy:v1.11.10-gke.5
- registry.k8s.io/kube-proxy:v1.11.10-gke.5
sizeBytes: 102279340
- names:
- registry.k8s.io/event-exporter@sha256:7f9cd7cb04d6959b0aa960727d04fa86759008048c785397b7b0d9dff0007516
- registry.k8s.io/event-exporter:v0.2.3
sizeBytes: 94171943
- names:
- registry.k8s.io/prometheus-to-sd@sha256:6c0c742475363d537ff059136e5d5e4ab1f512ee0fd9b7ca42ea48bc309d1662
- registry.k8s.io/prometheus-to-sd:v0.3.1
sizeBytes: 88077694
- names:
- registry.k8s.io/fluentd-gcp-scaler@sha256:a5ace7506d393c4ed65eb2cbb6312c64ab357fcea16dff76b9055bc6e498e5ff
- registry.k8s.io/fluentd-gcp-scaler:0.5.1
sizeBytes: 86637208
- names:
- registry.k8s.io/heapster-amd64@sha256:9fae0af136ce0cf4f88393b3670f7139ffc464692060c374d2ae748e13144521
- registry.k8s.io/heapster-amd64:v1.6.0-beta.1
sizeBytes: 76016169
- names:
- registry.k8s.io/ingress-glbc-amd64@sha256:31d36bbd9c44caffa135fc78cf0737266fcf25e3cf0cd1c2fcbfbc4f7309cc52
- registry.k8s.io/ingress-glbc-amd64:v1.1.1
sizeBytes: 67801919
- names:
- registry.k8s.io/kube-addon-manager@sha256:d53486c3a0b49ebee019932878dc44232735d5622a51dbbdcec7124199020d09
- registry.k8s.io/kube-addon-manager:v8.7
sizeBytes: 63322109
- names:
- nginx@sha256:4aacdcf186934dcb02f642579314075910f1855590fd3039d8fa4c9f96e48315
- nginx:1.10-alpine
sizeBytes: 54042627
- names:
- registry.k8s.io/cpvpa-amd64@sha256:cfe7b0a11c9c8e18c87b1eb34fef9a7cbb8480a8da11fc2657f78dbf4739f869
- registry.k8s.io/cpvpa-amd64:v0.6.0
sizeBytes: 51785854
- names:
- registry.k8s.io/cluster-proportional-autoscaler-amd64@sha256:003f98d9f411ddfa6ff6d539196355e03ddd69fa4ed38c7ffb8fec6f729afe2d
- registry.k8s.io/cluster-proportional-autoscaler-amd64:1.1.2-r2
sizeBytes: 49648481
- names:
- registry.k8s.io/ip-masq-agent-amd64@sha256:1ffda57d87901bc01324c82ceb2145fe6a0448d3f0dd9cb65aa76a867cd62103
- registry.k8s.io/ip-masq-agent-amd64:v2.1.1
sizeBytes: 49612505
- names:
- registry.k8s.io/k8s-dns-kube-dns-amd64@sha256:b99fc3eee2a9f052f7eb4cc00f15eb12fc405fa41019baa2d6b79847ae7284a8
- registry.k8s.io/k8s-dns-kube-dns-amd64:1.14.10
sizeBytes: 49549457
- names:
- registry.k8s.io/rescheduler@sha256:156cfbfd05a5a815206fd2eeb6cbdaf1596d71ea4b415d3a6c43071dd7b99450
- registry.k8s.io/rescheduler:v0.4.0
sizeBytes: 48973149
- names:
- registry.k8s.io/event-exporter@sha256:16ca66e2b5dc7a1ce6a5aafcb21d0885828b75cdfc08135430480f7ad2364adc
- registry.k8s.io/event-exporter:v0.2.4
sizeBytes: 47261019
- names:
- registry.k8s.io/coredns@sha256:db2bf53126ed1c761d5a41f24a1b82a461c85f736ff6e90542e9522be4757848
- registry.k8s.io/coredns:1.1.3
sizeBytes: 45587362
- names:
- prom/prometheus@sha256:483f4c9d7733699ba79facca9f8bcce1cef1af43dfc3e7c5a1882aa85f53cb74
- prom/prometheus:v1.1.3
sizeBytes: 45493941
nodeInfo:
architecture: amd64
bootID: a32eca78-4ad4-4b76-9252-f143d6c2ae61
containerRuntimeVersion: docker://17.3.2
kernelVersion: 4.14.127+
kubeProxyVersion: v1.11.10-gke.5
kubeletVersion: v1.11.10-gke.5
machineID: 1739555e5b231057f0f9a0b5fa29511b
operatingSystem: linux
osImage: Container-Optimized OS from Google
systemUUID: 1739555E-5B23-1057-F0F9-A0B5FA29511B
volumesAttached:
- devicePath: /dev/disk/by-id/b9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049
name: kubernetes.io/pd/some-random-clusterb9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049
- devicePath: /dev/disk/by-id/b9772-pvc-8895a852-fd42-11e6-94d4-42010a800049
name: kubernetes.io/pd/some-random-clusterb9772-pvc-8895a852-fd42-11e6-94d4-42010a800049
- devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049
name: kubernetes.io/pd/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049
- devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049
name: kubernetes.io/pd/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049
- devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049
name: kubernetes.io/pd/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049
- devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283
name: kubernetes.io/pd/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283
volumesInUse:
- kubernetes.io/pd/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049
- kubernetes.io/pd/some-random-clusterb9772-pvc-8895a852-fd42-11e6-94d4-42010a800049
- kubernetes.io/pd/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049
- kubernetes.io/pd/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283
- kubernetes.io/pd/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049
- kubernetes.io/pd/some-random-clusterb9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049

View File

@@ -0,0 +1,121 @@
apiVersion: v1
kind: Pod
metadata:
labels:
app: some-app
plugin1: some-value
plugin2: some-value
plugin3: some-value
plugin4: some-value
name: some-name
namespace: default
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: ReplicaSet
name: some-name
uid: 0a9d2b9e-779e-11e7-b422-42010a8001be
spec:
containers:
- args:
- one
- two
- three
- four
- five
- six
- seven
- eight
- nine
env:
- name: VAR_3
valueFrom:
secretKeyRef:
key: some-other-key
name: some-other-name
- name: VAR_2
valueFrom:
secretKeyRef:
key: other-key
name: other-name
- name: VAR_1
valueFrom:
secretKeyRef:
key: some-key
name: some-name
image: some-image-name
imagePullPolicy: IfNotPresent
name: some-name
resources:
requests:
cpu: '0'
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: default-token-hu5jz
readOnly: true
dnsPolicy: ClusterFirst
nodeName: node-name
priority: 0
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: default
serviceAccountName: default
terminationGracePeriodSeconds: 30
tolerations:
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
tolerationSeconds: 300
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
tolerationSeconds: 300
volumes:
- name: default-token-hu5jz
secret:
defaultMode: 420
secretName: default-token-hu5jz
status:
conditions:
- lastProbeTime: null
lastTransitionTime: '2019-07-08T09:31:18Z'
status: 'True'
type: Initialized
- lastProbeTime: null
lastTransitionTime: '2019-07-08T09:41:59Z'
status: 'True'
type: Ready
- lastProbeTime: null
lastTransitionTime: null
status: 'True'
type: ContainersReady
- lastProbeTime: null
lastTransitionTime: '2019-07-08T09:31:18Z'
status: 'True'
type: PodScheduled
containerStatuses:
- containerID: docker://885e82a1ed0b7356541bb410a0126921ac42439607c09875cd8097dd5d7b5376
image: some-image-name
imageID: docker-pullable://some-image-id
lastState:
terminated:
containerID: docker://d57290f9e00fad626b20d2dd87a3cf69bbc22edae07985374f86a8b2b4e39565
exitCode: 255
finishedAt: '2019-07-08T09:39:09Z'
reason: Error
startedAt: '2019-07-08T09:38:54Z'
name: name
ready: true
restartCount: 6
state:
running:
startedAt: '2019-07-08T09:41:59Z'
hostIP: 10.0.0.1
phase: Running
podIP: 10.0.0.1
qosClass: BestEffort
startTime: '2019-07-08T09:31:18Z'

View File

@@ -0,0 +1,174 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package managedfields
import (
"fmt"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/managedfields/internal"
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)
var (
scaleGroupVersion = schema.GroupVersion{Group: "autoscaling", Version: "v1"}
replicasPathInScale = fieldpath.MakePathOrDie("spec", "replicas")
)
// ResourcePathMappings maps a group/version to its replicas path. The
// assumption is that all the paths correspond to leaf fields.
type ResourcePathMappings map[string]fieldpath.Path
// ScaleHandler manages the conversion of managed fields between a main
// resource and the scale subresource
type ScaleHandler struct {
parentEntries []metav1.ManagedFieldsEntry
groupVersion schema.GroupVersion
mappings ResourcePathMappings
}
// NewScaleHandler creates a new ScaleHandler
func NewScaleHandler(parentEntries []metav1.ManagedFieldsEntry, groupVersion schema.GroupVersion, mappings ResourcePathMappings) *ScaleHandler {
return &ScaleHandler{
parentEntries: parentEntries,
groupVersion: groupVersion,
mappings: mappings,
}
}
// ToSubresource filters the managed fields of the main resource and converts
// them so that they can be handled by scale.
// For the managed fields that have a replicas path it performs two changes:
// 1. APIVersion is changed to the APIVersion of the scale subresource
// 2. Replicas path of the main resource is transformed to the replicas path of
// the scale subresource
func (h *ScaleHandler) ToSubresource() ([]metav1.ManagedFieldsEntry, error) {
managed, err := internal.DecodeManagedFields(h.parentEntries)
if err != nil {
return nil, err
}
f := fieldpath.ManagedFields{}
t := map[string]*metav1.Time{}
for manager, versionedSet := range managed.Fields() {
path, ok := h.mappings[string(versionedSet.APIVersion())]
// Skip the entry if the APIVersion is unknown
if !ok || path == nil {
continue
}
if versionedSet.Set().Has(path) {
newVersionedSet := fieldpath.NewVersionedSet(
fieldpath.NewSet(replicasPathInScale),
fieldpath.APIVersion(scaleGroupVersion.String()),
versionedSet.Applied(),
)
f[manager] = newVersionedSet
t[manager] = managed.Times()[manager]
}
}
return managedFieldsEntries(internal.NewManaged(f, t))
}
// ToParent merges `scaleEntries` with the entries of the main resource and
// transforms them accordingly
func (h *ScaleHandler) ToParent(scaleEntries []metav1.ManagedFieldsEntry) ([]metav1.ManagedFieldsEntry, error) {
decodedParentEntries, err := internal.DecodeManagedFields(h.parentEntries)
if err != nil {
return nil, err
}
parentFields := decodedParentEntries.Fields()
decodedScaleEntries, err := internal.DecodeManagedFields(scaleEntries)
if err != nil {
return nil, err
}
scaleFields := decodedScaleEntries.Fields()
f := fieldpath.ManagedFields{}
t := map[string]*metav1.Time{}
for manager, versionedSet := range parentFields {
// Get the main resource "replicas" path
path, ok := h.mappings[string(versionedSet.APIVersion())]
// Drop the entry if the APIVersion is unknown.
if !ok {
continue
}
// If the parent entry does not have the replicas path or it is nil, just
// keep it as it is. The path is nil for Custom Resources without scale
// subresource.
if path == nil || !versionedSet.Set().Has(path) {
f[manager] = versionedSet
t[manager] = decodedParentEntries.Times()[manager]
continue
}
if _, ok := scaleFields[manager]; !ok {
// "Steal" the replicas path from the main resource entry
newSet := versionedSet.Set().Difference(fieldpath.NewSet(path))
if !newSet.Empty() {
newVersionedSet := fieldpath.NewVersionedSet(
newSet,
versionedSet.APIVersion(),
versionedSet.Applied(),
)
f[manager] = newVersionedSet
t[manager] = decodedParentEntries.Times()[manager]
}
} else {
// Field wasn't stolen, let's keep the entry as it is.
f[manager] = versionedSet
t[manager] = decodedParentEntries.Times()[manager]
delete(scaleFields, manager)
}
}
for manager, versionedSet := range scaleFields {
if !versionedSet.Set().Has(replicasPathInScale) {
continue
}
newVersionedSet := fieldpath.NewVersionedSet(
fieldpath.NewSet(h.mappings[h.groupVersion.String()]),
fieldpath.APIVersion(h.groupVersion.String()),
versionedSet.Applied(),
)
f[manager] = newVersionedSet
t[manager] = decodedParentEntries.Times()[manager]
}
return managedFieldsEntries(internal.NewManaged(f, t))
}
func managedFieldsEntries(entries internal.ManagedInterface) ([]metav1.ManagedFieldsEntry, error) {
obj := &unstructured.Unstructured{Object: map[string]interface{}{}}
if err := internal.EncodeObjectManagedFields(obj, entries); err != nil {
return nil, err
}
accessor, err := meta.Accessor(obj)
if err != nil {
panic(fmt.Sprintf("couldn't get accessor: %v", err))
}
return accessor.GetManagedFields(), nil
}
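// An illustrative sketch of the round trip (wiring assumed): translating a
// parent resource's managedFields to the scale subresource and back.
//
//	mappings := ResourcePathMappings{
//	    "apps/v1": fieldpath.MakePathOrDie("spec", "replicas"),
//	}
//	h := NewScaleHandler(accessor.GetManagedFields(),
//	    schema.GroupVersion{Group: "apps", Version: "v1"}, mappings)
//	scaleEntries, _ := h.ToSubresource() // entries rewritten to autoscaling/v1
//	// ... after the scale update ...
//	parentEntries, _ := h.ToParent(scaleEntries)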

View File

@@ -0,0 +1,47 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package managedfields
import (
"k8s.io/apimachinery/pkg/util/managedfields/internal"
"k8s.io/kube-openapi/pkg/validation/spec"
)
// TypeConverter allows you to convert from runtime.Object to
// typed.TypedValue and the other way around.
type TypeConverter = internal.TypeConverter
// NewDeducedTypeConverter creates a TypeConverter for CRDs that don't
// have a schema. It implements the same interface (and creates the
// same types of objects), so that everything can still work
// the same. CRDs are merged with all their fields being "atomic" (lists
// included).
func NewDeducedTypeConverter() TypeConverter {
return internal.NewDeducedTypeConverter()
}
// NewTypeConverter builds a TypeConverter from a map of OpenAPIV3 schemas.
// This will automatically find the proper version of the object, and the
// corresponding schema information.
// The keys to the map must be consistent with the names
// used by Refs within the schemas.
// The schemas should conform to the Kubernetes Structural Schema OpenAPI
// restrictions found in docs:
// https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#specifying-a-structural-schema
func NewTypeConverter(openapiSpec map[string]*spec.Schema, preserveUnknownFields bool) (TypeConverter, error) {
return internal.NewTypeConverter(openapiSpec, preserveUnknownFields)
}
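// An illustrative sketch of choosing a converter (usage assumed;
// openAPISchemas is a map[string]*spec.Schema keyed by model name):
//
//	tc, err := managedfields.NewTypeConverter(openAPISchemas, false) // schema-backed
//	dtc := managedfields.NewDeducedTypeConverter()                   // schema-less CRDs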

View File

@@ -20,6 +20,7 @@ import (
"errors"
"net"
"reflect"
"strings"
"syscall"
)
@@ -47,6 +48,11 @@ func IsConnectionReset(err error) bool {
return false
}
// IsHTTP2ConnectionLost returns true if the given err is the "http2: client connection lost" error.
func IsHTTP2ConnectionLost(err error) bool {
return err != nil && strings.Contains(err.Error(), "http2: client connection lost")
}
// IsConnectionRefused returns true if the given err is the "connection refused" error.
func IsConnectionRefused(err error) bool {
var errno syscall.Errno

122
vendor/k8s.io/apimachinery/pkg/util/proxy/dial.go generated vendored Normal file
View File

@@ -0,0 +1,122 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package proxy
import (
"context"
"crypto/tls"
"fmt"
"net"
"net/http"
"net/url"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/third_party/forked/golang/netutil"
"k8s.io/klog/v2"
)
// DialURL will dial the specified URL using the underlying dialer held by the passed
// RoundTripper. The primary use of this method is to support proxying upgradable connections.
// For this reason this method will prefer to negotiate http/1.1 if the URL scheme is https.
// If you wish to ensure ALPN negotiates http2 then set NextProtos=[]string{"h2"} in the
// TLSConfig of the http.Transport.
func DialURL(ctx context.Context, url *url.URL, transport http.RoundTripper) (net.Conn, error) {
dialAddr := netutil.CanonicalAddr(url)
dialer, err := utilnet.DialerFor(transport)
if err != nil {
klog.V(5).Infof("Unable to unwrap transport %T to get dialer: %v", transport, err)
}
switch url.Scheme {
case "http":
if dialer != nil {
return dialer(ctx, "tcp", dialAddr)
}
var d net.Dialer
return d.DialContext(ctx, "tcp", dialAddr)
case "https":
// Get the tls config from the transport if we recognize it
tlsConfig, err := utilnet.TLSClientConfig(transport)
if err != nil {
klog.V(5).Infof("Unable to unwrap transport %T to get at TLS config: %v", transport, err)
}
if dialer != nil {
// We have a dialer; use it to open the connection, then
// create a tls client using the connection.
netConn, err := dialer(ctx, "tcp", dialAddr)
if err != nil {
return nil, err
}
if tlsConfig == nil {
// tls.Client requires non-nil config
klog.Warning("using custom dialer with no TLSClientConfig. Defaulting to InsecureSkipVerify")
// tls.Handshake() requires ServerName or InsecureSkipVerify
tlsConfig = &tls.Config{
InsecureSkipVerify: true,
}
} else if len(tlsConfig.ServerName) == 0 && !tlsConfig.InsecureSkipVerify {
// tls.HandshakeContext() requires ServerName or InsecureSkipVerify
// infer the ServerName from the hostname we're connecting to.
inferredHost := dialAddr
if host, _, err := net.SplitHostPort(dialAddr); err == nil {
inferredHost = host
}
// Make a copy to avoid polluting the provided config
tlsConfigCopy := tlsConfig.Clone()
tlsConfigCopy.ServerName = inferredHost
tlsConfig = tlsConfigCopy
}
// Since this method is primarily used within a "Connection: Upgrade" call we assume the caller is
// going to write an HTTP/1.1 request to the wire. http2 should not be allowed in the TLSConfig.NextProtos,
// so we explicitly set that here. We only do this check if the TLSConfig supports http/1.1.
if supportsHTTP11(tlsConfig.NextProtos) {
tlsConfig = tlsConfig.Clone()
tlsConfig.NextProtos = []string{"http/1.1"}
}
tlsConn := tls.Client(netConn, tlsConfig)
if err := tlsConn.HandshakeContext(ctx); err != nil {
netConn.Close()
return nil, err
}
return tlsConn, nil
} else {
// Dial.
tlsDialer := tls.Dialer{
Config: tlsConfig,
}
return tlsDialer.DialContext(ctx, "tcp", dialAddr)
}
default:
return nil, fmt.Errorf("unknown scheme: %s", url.Scheme)
}
}
func supportsHTTP11(nextProtos []string) bool {
if len(nextProtos) == 0 {
return true
}
for _, proto := range nextProtos {
if proto == "http/1.1" {
return true
}
}
return false
}
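// An illustrative sketch (ctx and u assumed in scope): dialing an upgradable
// HTTPS connection; http/1.1 is negotiated because of the NextProtos handling
// above.
//
//	t := &http.Transport{TLSClientConfig: &tls.Config{ServerName: "example.com"}}
//	conn, err := DialURL(ctx, u, t) // u.Scheme == "https"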

18
vendor/k8s.io/apimachinery/pkg/util/proxy/doc.go generated vendored Normal file
View File

@@ -0,0 +1,18 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package proxy provides transport and upgrade support for proxies.
package proxy // import "k8s.io/apimachinery/pkg/util/proxy"

272
vendor/k8s.io/apimachinery/pkg/util/proxy/transport.go generated vendored Normal file
View File

@@ -0,0 +1,272 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package proxy
import (
"bytes"
"compress/flate"
"compress/gzip"
"fmt"
"io"
"net/http"
"net/url"
"path"
"strings"
"golang.org/x/net/html"
"golang.org/x/net/html/atom"
"k8s.io/klog/v2"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/sets"
)
// atomsToAttrs states which attributes of which tags require URL substitution.
// Sources: http://www.w3.org/TR/REC-html40/index/attributes.html
//
// http://www.w3.org/html/wg/drafts/html/master/index.html#attributes-1
var atomsToAttrs = map[atom.Atom]sets.String{
atom.A: sets.NewString("href"),
atom.Applet: sets.NewString("codebase"),
atom.Area: sets.NewString("href"),
atom.Audio: sets.NewString("src"),
atom.Base: sets.NewString("href"),
atom.Blockquote: sets.NewString("cite"),
atom.Body: sets.NewString("background"),
atom.Button: sets.NewString("formaction"),
atom.Command: sets.NewString("icon"),
atom.Del: sets.NewString("cite"),
atom.Embed: sets.NewString("src"),
atom.Form: sets.NewString("action"),
atom.Frame: sets.NewString("longdesc", "src"),
atom.Head: sets.NewString("profile"),
atom.Html: sets.NewString("manifest"),
atom.Iframe: sets.NewString("longdesc", "src"),
atom.Img: sets.NewString("longdesc", "src", "usemap"),
atom.Input: sets.NewString("src", "usemap", "formaction"),
atom.Ins: sets.NewString("cite"),
atom.Link: sets.NewString("href"),
atom.Object: sets.NewString("classid", "codebase", "data", "usemap"),
atom.Q: sets.NewString("cite"),
atom.Script: sets.NewString("src"),
atom.Source: sets.NewString("src"),
atom.Video: sets.NewString("poster", "src"),
// TODO: css URLs hidden in style elements.
}
// Transport is a transport for text/html content that replaces URLs in html
// content with the prefix of the proxy server
type Transport struct {
Scheme string
Host string
PathPrepend string
http.RoundTripper
}
// RoundTrip implements the http.RoundTripper interface
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
// Add reverse proxy headers.
forwardedURI := path.Join(t.PathPrepend, req.URL.EscapedPath())
if strings.HasSuffix(req.URL.Path, "/") {
forwardedURI = forwardedURI + "/"
}
req.Header.Set("X-Forwarded-Uri", forwardedURI)
if len(t.Host) > 0 {
req.Header.Set("X-Forwarded-Host", t.Host)
}
if len(t.Scheme) > 0 {
req.Header.Set("X-Forwarded-Proto", t.Scheme)
}
rt := t.RoundTripper
if rt == nil {
rt = http.DefaultTransport
}
resp, err := rt.RoundTrip(req)
if err != nil {
return nil, errors.NewServiceUnavailable(fmt.Sprintf("error trying to reach service: %v", err))
}
if redirect := resp.Header.Get("Location"); redirect != "" {
targetURL, err := url.Parse(redirect)
if err != nil {
return nil, errors.NewInternalError(fmt.Errorf("error trying to parse Location header: %v", err))
}
resp.Header.Set("Location", t.rewriteURL(targetURL, req.URL, req.Host))
return resp, nil
}
cType := resp.Header.Get("Content-Type")
cType = strings.TrimSpace(strings.SplitN(cType, ";", 2)[0])
if cType != "text/html" {
// Do nothing, simply pass through
return resp, nil
}
return t.rewriteResponse(req, resp)
}
var _ = net.RoundTripperWrapper(&Transport{})
func (t *Transport) WrappedRoundTripper() http.RoundTripper {
return t.RoundTripper
}
// rewriteURL rewrites a single URL to go through the proxy, if the URL refers
// to the same host as sourceURL, which is the page on which the target URL
// occurred, or if the URL matches the sourceRequestHost.
func (t *Transport) rewriteURL(url *url.URL, sourceURL *url.URL, sourceRequestHost string) string {
// Example:
// When API server processes a proxy request to a service (e.g. /api/v1/namespace/foo/service/bar/proxy/),
// the sourceURL.Host (i.e. req.URL.Host) is the endpoint IP address of the service. The
// sourceRequestHost (i.e. req.Host) is the Host header that specifies the host on which the
// URL is sought, which can be different from sourceURL.Host. For example, if the user sends the
// request through "kubectl proxy" locally (i.e. localhost:8001/api/v1/namespace/foo/service/bar/proxy/),
// sourceRequestHost is "localhost:8001".
//
// If the service's response URL contains non-empty host, and url.Host is equal to either sourceURL.Host
// or sourceRequestHost, we should not consider the returned URL to be a completely different host.
// It's the API server's responsibility to rewrite a same-host-and-absolute-path URL and append the
// necessary URL prefix (i.e. /api/v1/namespace/foo/service/bar/proxy/).
isDifferentHost := url.Host != "" && url.Host != sourceURL.Host && url.Host != sourceRequestHost
isRelative := !strings.HasPrefix(url.Path, "/")
if isDifferentHost || isRelative {
return url.String()
}
// Do not rewrite scheme and host if the Transport has empty scheme and host
// when targetURL already contains the sourceRequestHost
if !(url.Host == sourceRequestHost && t.Scheme == "" && t.Host == "") {
url.Scheme = t.Scheme
url.Host = t.Host
}
origPath := url.Path
// Do not rewrite URL if the sourceURL already contains the necessary prefix.
if strings.HasPrefix(url.Path, t.PathPrepend) {
return url.String()
}
url.Path = path.Join(t.PathPrepend, url.Path)
if strings.HasSuffix(origPath, "/") {
// Add back the trailing slash, which was stripped by path.Join().
url.Path += "/"
}
return url.String()
}
// rewriteHTML scans the HTML for tags with url-valued attributes, and updates
// those values with the urlRewriter function. The updated HTML is output to the
// writer.
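//
// A minimal usage sketch, mirroring rewriteResponse below (resp, req and t
// are illustrative):
//
//	var buf bytes.Buffer
//	err := rewriteHTML(resp.Body, &buf, func(u *url.URL) string {
//		return t.rewriteURL(u, req.URL, req.Host)
//	})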
func rewriteHTML(reader io.Reader, writer io.Writer, urlRewriter func(*url.URL) string) error {
// Note: This assumes the content is UTF-8.
tokenizer := html.NewTokenizer(reader)
var err error
for err == nil {
tokenType := tokenizer.Next()
switch tokenType {
case html.ErrorToken:
err = tokenizer.Err()
case html.StartTagToken, html.SelfClosingTagToken:
token := tokenizer.Token()
if urlAttrs, ok := atomsToAttrs[token.DataAtom]; ok {
for i, attr := range token.Attr {
if urlAttrs.Has(attr.Key) {
url, err := url.Parse(attr.Val)
if err != nil {
// Do not rewrite the URL if it isn't valid. We intentionally
// do not return an error here: a single unparsable attribute
// value should not make the entire body impossible to process.
continue
}
token.Attr[i].Val = urlRewriter(url)
}
}
}
_, err = writer.Write([]byte(token.String()))
default:
_, err = writer.Write(tokenizer.Raw())
}
}
if err != io.EOF {
return err
}
return nil
}
// rewriteResponse modifies an HTML response by updating absolute links referring
// to the original host to instead refer to the proxy transport.
func (t *Transport) rewriteResponse(req *http.Request, resp *http.Response) (*http.Response, error) {
origBody := resp.Body
defer origBody.Close()
newContent := &bytes.Buffer{}
var reader io.Reader = origBody
var writer io.Writer = newContent
encoding := resp.Header.Get("Content-Encoding")
switch encoding {
case "gzip":
var err error
reader, err = gzip.NewReader(reader)
if err != nil {
return nil, fmt.Errorf("errorf making gzip reader: %v", err)
}
gzw := gzip.NewWriter(writer)
defer gzw.Close()
writer = gzw
case "deflate":
var err error
reader = flate.NewReader(reader)
flw, err := flate.NewWriter(writer, flate.BestCompression)
if err != nil {
return nil, fmt.Errorf("errorf making flate writer: %v", err)
}
defer func() {
flw.Close()
flw.Flush()
}()
writer = flw
case "":
// This is fine
default:
// Some encoding we don't understand-- don't try to parse this
klog.Errorf("Proxy encountered encoding %v for text/html; can't understand this so not fixing links.", encoding)
return resp, nil
}
urlRewriter := func(targetURL *url.URL) string {
	return t.rewriteURL(targetURL, req.URL, req.Host)
}
err := rewriteHTML(reader, writer, urlRewriter)
if err != nil {
klog.Errorf("Failed to rewrite URLs: %v", err)
return resp, err
}
resp.Body = io.NopCloser(newContent)
// Update header node with new content-length
// TODO: Remove any hash/signature headers here?
resp.Header.Del("Content-Length")
resp.ContentLength = int64(newContent.Len())
return resp, nil
}

View File

@@ -0,0 +1,556 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package proxy
import (
"bufio"
"bytes"
"fmt"
"io"
"log"
"net"
"net/http"
"net/http/httputil"
"net/url"
"os"
"strings"
"time"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/httpstream"
utilnet "k8s.io/apimachinery/pkg/util/net"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"github.com/mxk/go-flowrate/flowrate"
"k8s.io/klog/v2"
)
// UpgradeRequestRoundTripper provides an additional method to decorate a request
// with any authentication or other protocol level information prior to performing
// an upgrade on the server. Any response will be handled by the intercepting
// proxy.
type UpgradeRequestRoundTripper interface {
http.RoundTripper
// WrapRequest takes a valid HTTP request and returns a suitably altered version
// of request with any HTTP level values required to complete the request half of
// an upgrade on the server. It does not get a chance to see the response and
// should bypass any request side logic that expects to see the response.
WrapRequest(*http.Request) (*http.Request, error)
}
// UpgradeAwareHandler is a handler for proxy requests that may require an upgrade
type UpgradeAwareHandler struct {
// UpgradeRequired will reject non-upgrade connections if true.
UpgradeRequired bool
// Location is the location of the upstream proxy. It is used as the location to Dial on the upstream server
// for upgrade requests unless UseRequestLocation is true.
Location *url.URL
// AppendLocationPath determines if the original path of the Location should be appended to the upstream proxy request path
AppendLocationPath bool
// Transport provides an optional round tripper to use to proxy. If nil, the default proxy transport is used
Transport http.RoundTripper
// UpgradeTransport, if specified, will be used as the backend transport when upgrade requests are provided.
// This allows clients to disable HTTP/2.
UpgradeTransport UpgradeRequestRoundTripper
// WrapTransport indicates whether the provided Transport should be wrapped with default proxy transport behavior (URL rewriting, X-Forwarded-* header setting)
WrapTransport bool
// UseRequestLocation will use the incoming request URL when talking to the backend server.
UseRequestLocation bool
// UseLocationHost overrides the HTTP host header in requests to the backend server to use the Host from Location.
// This will override the req.Host field of a request, while UseRequestLocation will override the req.URL field
// of a request. The req.URL.Host specifies the server to connect to, while the req.Host field
// specifies the Host header value to send in the HTTP request. If this is false, the incoming req.Host header will
// just be forwarded to the backend server.
UseLocationHost bool
// FlushInterval controls how often the standard HTTP proxy will flush content from the upstream.
FlushInterval time.Duration
// MaxBytesPerSec controls the maximum rate for an upstream connection. No rate is imposed if the value is zero.
MaxBytesPerSec int64
// Responder is passed errors that occur while setting up proxying.
Responder ErrorResponder
// RejectForwardingRedirects, if true, rejects redirect responses from the backend instead of forwarding them to the client.
RejectForwardingRedirects bool
}
const defaultFlushInterval = 200 * time.Millisecond
// ErrorResponder abstracts error reporting to the proxy handler to remove the need to hardcode a particular
// error format.
type ErrorResponder interface {
Error(w http.ResponseWriter, req *http.Request, err error)
}
// SimpleErrorResponder is the legacy implementation of ErrorResponder for callers that only
// service a single request/response per proxy.
type SimpleErrorResponder interface {
Error(err error)
}
func NewErrorResponder(r SimpleErrorResponder) ErrorResponder {
return simpleResponder{r}
}
type simpleResponder struct {
responder SimpleErrorResponder
}
func (r simpleResponder) Error(w http.ResponseWriter, req *http.Request, err error) {
r.responder.Error(err)
}
// upgradeRequestRoundTripper implements proxy.UpgradeRequestRoundTripper.
type upgradeRequestRoundTripper struct {
http.RoundTripper
upgrader http.RoundTripper
}
var (
_ UpgradeRequestRoundTripper = &upgradeRequestRoundTripper{}
_ utilnet.RoundTripperWrapper = &upgradeRequestRoundTripper{}
)
// WrappedRoundTripper returns the round tripper that a caller would use.
func (rt *upgradeRequestRoundTripper) WrappedRoundTripper() http.RoundTripper {
return rt.RoundTripper
}
// WrapRequest calls the nested upgrader and then copies the returned request
// fields onto the passed request.
func (rt *upgradeRequestRoundTripper) WrapRequest(req *http.Request) (*http.Request, error) {
resp, err := rt.upgrader.RoundTrip(req)
if err != nil {
return nil, err
}
return resp.Request, nil
}
// onewayRoundTripper captures the provided request - which is assumed to have
// been modified by other round trippers - and then returns a fake response.
type onewayRoundTripper struct{}
// RoundTrip returns a simple 200 OK response that captures the provided request.
func (onewayRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
return &http.Response{
Status: "200 OK",
StatusCode: http.StatusOK,
Body: io.NopCloser(&bytes.Buffer{}),
Request: req,
}, nil
}
// MirrorRequest is a round tripper that can be called to get back the calling request as
// the core round tripper in a chain.
var MirrorRequest http.RoundTripper = onewayRoundTripper{}
// NewUpgradeRequestRoundTripper takes two round trippers - one for the underlying TCP connection, and
// one that is able to write headers to an HTTP request. The request round tripper is used to set the
// request headers, and the resulting request is sent over the connection round tripper.
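//
// A usage sketch (connDialer and withAuthHeaders are hypothetical names): the
// request side is typically a chain of header-decorating round trippers
// terminated by MirrorRequest, so that WrapRequest returns the decorated
// request without performing any network I/O:
//
//	rt := NewUpgradeRequestRoundTripper(connDialer, withAuthHeaders(MirrorRequest))
//	decorated, err := rt.WrapRequest(req)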
func NewUpgradeRequestRoundTripper(connection, request http.RoundTripper) UpgradeRequestRoundTripper {
return &upgradeRequestRoundTripper{
RoundTripper: connection,
upgrader: request,
}
}
// normalizeLocation returns the result of parsing the full URL, with scheme set to http if missing
func normalizeLocation(location *url.URL) *url.URL {
normalized, _ := url.Parse(location.String())
if len(normalized.Scheme) == 0 {
normalized.Scheme = "http"
}
return normalized
}
// NewUpgradeAwareHandler creates a new proxy handler with a default flush interval. Responder is required for returning
// errors to the caller.
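//
// A minimal construction sketch (the location, responder, w and req are
// illustrative):
//
//	loc := &url.URL{Scheme: "https", Host: "10.0.0.1:10250"}
//	h := NewUpgradeAwareHandler(loc, nil, false, true, responder)
//	h.ServeHTTP(w, req)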
func NewUpgradeAwareHandler(location *url.URL, transport http.RoundTripper, wrapTransport, upgradeRequired bool, responder ErrorResponder) *UpgradeAwareHandler {
return &UpgradeAwareHandler{
Location: normalizeLocation(location),
Transport: transport,
WrapTransport: wrapTransport,
UpgradeRequired: upgradeRequired,
FlushInterval: defaultFlushInterval,
Responder: responder,
}
}
func proxyRedirectsforRootPath(path string, w http.ResponseWriter, req *http.Request) bool {
redirect := false
method := req.Method
// From pkg/genericapiserver/endpoints/handlers/proxy.go#ServeHTTP:
// Redirect requests with an empty path to a location that ends with a '/'
// This is essentially a hack for https://issue.k8s.io/4958.
// Note: Keep this code after tryUpgrade to not break that flow.
if len(path) == 0 && (method == http.MethodGet || method == http.MethodHead) {
var queryPart string
if len(req.URL.RawQuery) > 0 {
queryPart = "?" + req.URL.RawQuery
}
w.Header().Set("Location", req.URL.Path+"/"+queryPart)
w.WriteHeader(http.StatusMovedPermanently)
redirect = true
}
return redirect
}
// ServeHTTP handles the proxy request
func (h *UpgradeAwareHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
if h.tryUpgrade(w, req) {
return
}
if h.UpgradeRequired {
h.Responder.Error(w, req, errors.NewBadRequest("Upgrade request required"))
return
}
loc := *h.Location
loc.RawQuery = req.URL.RawQuery
// If the original request URL ended in '/', append a '/' at the end
// of the proxy URL.
if !strings.HasSuffix(loc.Path, "/") && strings.HasSuffix(req.URL.Path, "/") {
loc.Path += "/"
}
proxyRedirect := proxyRedirectsforRootPath(loc.Path, w, req)
if proxyRedirect {
return
}
if h.Transport == nil || h.WrapTransport {
h.Transport = h.defaultProxyTransport(req.URL, h.Transport)
}
// WithContext creates a shallow clone of the request with the same context.
newReq := req.WithContext(req.Context())
newReq.Header = utilnet.CloneHeader(req.Header)
if !h.UseRequestLocation {
newReq.URL = &loc
}
if h.UseLocationHost {
// exchanging req.Host with the backend location is necessary for backends that act on the HTTP host header (e.g. API gateways),
// because req.Host has preference over req.URL.Host in filling this header field
newReq.Host = h.Location.Host
}
// create the target location to use for the reverse proxy
reverseProxyLocation := &url.URL{Scheme: h.Location.Scheme, Host: h.Location.Host}
if h.AppendLocationPath {
reverseProxyLocation.Path = h.Location.Path
}
proxy := httputil.NewSingleHostReverseProxy(reverseProxyLocation)
proxy.Transport = h.Transport
proxy.FlushInterval = h.FlushInterval
proxy.ErrorLog = log.New(noSuppressPanicError{}, "", log.LstdFlags)
if h.RejectForwardingRedirects {
oldModifyResponse := proxy.ModifyResponse
proxy.ModifyResponse = func(response *http.Response) error {
code := response.StatusCode
if code >= 300 && code <= 399 && len(response.Header.Get("Location")) > 0 {
// close the original response
response.Body.Close()
msg := "the backend attempted to redirect this request, which is not permitted"
// replace the response
*response = http.Response{
StatusCode: http.StatusBadGateway,
Status: fmt.Sprintf("%d %s", response.StatusCode, http.StatusText(response.StatusCode)),
Body: io.NopCloser(strings.NewReader(msg)),
ContentLength: int64(len(msg)),
}
} else {
if oldModifyResponse != nil {
if err := oldModifyResponse(response); err != nil {
return err
}
}
}
return nil
}
}
if h.Responder != nil {
// if an optional error interceptor/responder was provided wire it
// the custom responder might be used for providing a unified error reporting
// or supporting retry mechanisms by not sending non-fatal errors to the clients
proxy.ErrorHandler = h.Responder.Error
}
proxy.ServeHTTP(w, newReq)
}
type noSuppressPanicError struct{}
func (noSuppressPanicError) Write(p []byte) (n int, err error) {
// skip "suppressing panic for copyResponse error in test; copy error" error message
// that ends up in CI tests on each kube-apiserver termination as noise and
// everybody thinks this is fatal.
if strings.Contains(string(p), "suppressing panic") {
return len(p), nil
}
return os.Stderr.Write(p)
}
// tryUpgrade returns true if the request was handled.
func (h *UpgradeAwareHandler) tryUpgrade(w http.ResponseWriter, req *http.Request) bool {
if !httpstream.IsUpgradeRequest(req) {
klog.V(6).Infof("Request was not an upgrade")
return false
}
var (
backendConn net.Conn
rawResponse []byte
err error
)
location := *h.Location
if h.UseRequestLocation {
location = *req.URL
location.Scheme = h.Location.Scheme
location.Host = h.Location.Host
if h.AppendLocationPath {
location.Path = singleJoiningSlash(h.Location.Path, location.Path)
}
}
clone := utilnet.CloneRequest(req)
// Only append X-Forwarded-For in the upgrade path, since httputil.NewSingleHostReverseProxy
// handles this in the non-upgrade path.
utilnet.AppendForwardedForHeader(clone)
klog.V(6).Infof("Connecting to backend proxy (direct dial) %s\n Headers: %v", &location, clone.Header)
if h.UseLocationHost {
clone.Host = h.Location.Host
}
clone.URL = &location
backendConn, err = h.DialForUpgrade(clone)
if err != nil {
klog.V(6).Infof("Proxy connection error: %v", err)
h.Responder.Error(w, req, err)
return true
}
defer backendConn.Close()
// determine the http response code from the backend by reading from rawResponse+backendConn
backendHTTPResponse, headerBytes, err := getResponse(io.MultiReader(bytes.NewReader(rawResponse), backendConn))
if err != nil {
klog.V(6).Infof("Proxy connection error: %v", err)
h.Responder.Error(w, req, err)
return true
}
if len(headerBytes) > len(rawResponse) {
// we read beyond the bytes stored in rawResponse, update rawResponse to the full set of bytes read from the backend
rawResponse = headerBytes
}
// If the backend did not upgrade the request, return an error to the client. If the response was
// an error, the error is forwarded directly after the connection is hijacked. Otherwise, just
// return a generic error here.
if backendHTTPResponse.StatusCode != http.StatusSwitchingProtocols && backendHTTPResponse.StatusCode < 400 {
err := fmt.Errorf("invalid upgrade response: status code %d", backendHTTPResponse.StatusCode)
klog.Errorf("Proxy upgrade error: %v", err)
h.Responder.Error(w, req, err)
return true
}
// Once the connection is hijacked, the ErrorResponder will no longer work, so
// hijacking should be the last step in the upgrade.
requestHijacker, ok := w.(http.Hijacker)
if !ok {
klog.V(6).Infof("Unable to hijack response writer: %T", w)
h.Responder.Error(w, req, fmt.Errorf("request connection cannot be hijacked: %T", w))
return true
}
requestHijackedConn, _, err := requestHijacker.Hijack()
if err != nil {
klog.V(6).Infof("Unable to hijack response: %v", err)
h.Responder.Error(w, req, fmt.Errorf("error hijacking connection: %v", err))
return true
}
defer requestHijackedConn.Close()
if backendHTTPResponse.StatusCode != http.StatusSwitchingProtocols {
// If the backend did not upgrade the request, echo the response from the backend to the client and return, closing the connection.
klog.V(6).Infof("Proxy upgrade error, status code %d", backendHTTPResponse.StatusCode)
// set read/write deadlines
deadline := time.Now().Add(10 * time.Second)
backendConn.SetReadDeadline(deadline)
requestHijackedConn.SetWriteDeadline(deadline)
// write the response to the client
err := backendHTTPResponse.Write(requestHijackedConn)
if err != nil && !strings.Contains(err.Error(), "use of closed network connection") {
klog.Errorf("Error proxying data from backend to client: %v", err)
}
// Indicate we handled the request
return true
}
// Forward raw response bytes back to client.
if len(rawResponse) > 0 {
klog.V(6).Infof("Writing %d bytes to hijacked connection", len(rawResponse))
if _, err = requestHijackedConn.Write(rawResponse); err != nil {
utilruntime.HandleError(fmt.Errorf("Error proxying response from backend to client: %v", err))
}
}
// Proxy the connection. This is bidirectional, so we need a goroutine
// to copy in each direction. Once one side of the connection exits, we
// exit the function which performs cleanup and in the process closes
// the other half of the connection in the defer.
writerComplete := make(chan struct{})
readerComplete := make(chan struct{})
go func() {
var writer io.WriteCloser
if h.MaxBytesPerSec > 0 {
writer = flowrate.NewWriter(backendConn, h.MaxBytesPerSec)
} else {
writer = backendConn
}
_, err := io.Copy(writer, requestHijackedConn)
if err != nil && !strings.Contains(err.Error(), "use of closed network connection") {
klog.Errorf("Error proxying data from client to backend: %v", err)
}
close(writerComplete)
}()
go func() {
var reader io.ReadCloser
if h.MaxBytesPerSec > 0 {
reader = flowrate.NewReader(backendConn, h.MaxBytesPerSec)
} else {
reader = backendConn
}
_, err := io.Copy(requestHijackedConn, reader)
if err != nil && !strings.Contains(err.Error(), "use of closed network connection") {
klog.Errorf("Error proxying data from backend to client: %v", err)
}
close(readerComplete)
}()
// Wait for one half of the connection to exit. Once it does, the defer will
// clean up the other half of the connection.
select {
case <-writerComplete:
case <-readerComplete:
}
klog.V(6).Infof("Disconnecting from backend proxy %s\n Headers: %v", &location, clone.Header)
return true
}
// FIXME: Taken from net/http/httputil/reverseproxy.go as singleJoiningSlash is not exported to be re-used.
// See-also: https://github.com/golang/go/issues/44290
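// For example:
//
//	singleJoiningSlash("/base/", "/p") // "/base/p"
//	singleJoiningSlash("/base", "p")   // "/base/p"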
func singleJoiningSlash(a, b string) string {
aslash := strings.HasSuffix(a, "/")
bslash := strings.HasPrefix(b, "/")
switch {
case aslash && bslash:
return a + b[1:]
case !aslash && !bslash:
return a + "/" + b
}
return a + b
}
func (h *UpgradeAwareHandler) DialForUpgrade(req *http.Request) (net.Conn, error) {
if h.UpgradeTransport == nil {
return dial(req, h.Transport)
}
updatedReq, err := h.UpgradeTransport.WrapRequest(req)
if err != nil {
return nil, err
}
return dial(updatedReq, h.UpgradeTransport)
}
// getResponse reads an http response from the given reader and returns the response,
// the bytes read from the reader, and any error encountered.
func getResponse(r io.Reader) (*http.Response, []byte, error) {
rawResponse := bytes.NewBuffer(make([]byte, 0, 256))
// Save the bytes read while reading the response headers into the rawResponse buffer
resp, err := http.ReadResponse(bufio.NewReader(io.TeeReader(r, rawResponse)), nil)
if err != nil {
return nil, nil, err
}
// return the http response and the raw bytes consumed from the reader in the process
return resp, rawResponse.Bytes(), nil
}
// dial dials the backend at req.URL and writes req to it.
func dial(req *http.Request, transport http.RoundTripper) (net.Conn, error) {
conn, err := DialURL(req.Context(), req.URL, transport)
if err != nil {
return nil, fmt.Errorf("error dialing backend: %v", err)
}
if err = req.Write(conn); err != nil {
conn.Close()
return nil, fmt.Errorf("error sending request: %v", err)
}
return conn, err
}
func (h *UpgradeAwareHandler) defaultProxyTransport(url *url.URL, internalTransport http.RoundTripper) http.RoundTripper {
scheme := url.Scheme
host := url.Host
suffix := h.Location.Path
if strings.HasSuffix(url.Path, "/") && !strings.HasSuffix(suffix, "/") {
suffix += "/"
}
pathPrepend := strings.TrimSuffix(url.Path, suffix)
rewritingTransport := &Transport{
Scheme: scheme,
Host: host,
PathPrepend: pathPrepend,
RoundTripper: internalTransport,
}
return &corsRemovingTransport{
RoundTripper: rewritingTransport,
}
}
// corsRemovingTransport is a wrapper for an internal transport. It removes CORS headers
// from the internal response.
// Implements pkg/util/net.RoundTripperWrapper
type corsRemovingTransport struct {
http.RoundTripper
}
var _ = utilnet.RoundTripperWrapper(&corsRemovingTransport{})
func (rt *corsRemovingTransport) RoundTrip(req *http.Request) (*http.Response, error) {
resp, err := rt.RoundTripper.RoundTrip(req)
if err != nil {
return nil, err
}
removeCORSHeaders(resp)
return resp, nil
}
func (rt *corsRemovingTransport) WrappedRoundTripper() http.RoundTripper {
return rt.RoundTripper
}
// removeCORSHeaders strips CORS headers sent from the backend.
// This should be called on all responses before returning.
func removeCORSHeaders(resp *http.Response) {
resp.Header.Del("Access-Control-Allow-Credentials")
resp.Header.Del("Access-Control-Allow-Headers")
resp.Header.Del("Access-Control-Allow-Methods")
resp.Header.Del("Access-Control-Allow-Origin")
}

View File

@@ -46,8 +46,22 @@ const (
// adds support for exit codes.
StreamProtocolV4Name = "v4.channel.k8s.io"
// The subprotocol "v5.channel.k8s.io" is used for remote command
// attachment/execution. It is the 5th version of the subprotocol and
// adds support for a CLOSE signal.
StreamProtocolV5Name = "v5.channel.k8s.io"
NonZeroExitCodeReason = metav1.StatusReason("NonZeroExitCode")
ExitCodeCauseType = metav1.CauseType("ExitCode")
// RemoteCommand stream identifiers. The first three identifiers (for STDIN,
// STDOUT, STDERR) are the same as their file descriptors.
StreamStdIn = 0
StreamStdOut = 1
StreamStdErr = 2
StreamErr = 3
StreamResize = 4
StreamClose = 255
)
var SupportedStreamingProtocols = []string{StreamProtocolV4Name, StreamProtocolV3Name, StreamProtocolV2Name, StreamProtocolV1Name}

View File

@@ -126,14 +126,17 @@ type rudimentaryErrorBackoff struct {
// OnError will block if it is called more often than the embedded period time.
// This will prevent overly tight hot error loops.
func (r *rudimentaryErrorBackoff) OnError(error) {
now := time.Now() // start the timer before acquiring the lock
r.lastErrorTimeLock.Lock()
d := now.Sub(r.lastErrorTime)
r.lastErrorTime = time.Now()
r.lastErrorTimeLock.Unlock()
// Do not sleep with the lock held because that causes all callers of HandleError to block.
// We only want the current goroutine to block.
// A negative or zero duration causes time.Sleep to return immediately.
// If the time moves backwards for any reason, do nothing.
time.Sleep(r.minPeriod - d)
}
// GetCaller returns the caller of the function that calls it.

View File

@@ -64,6 +64,20 @@ func (s Set[T]) Delete(items ...T) Set[T] {
return s
}
// Clear empties the set.
// It is preferable to replace the set with a newly constructed set,
// but not all callers can do that (when there are other references to the map).
// In some cases the set *won't* be fully cleared, e.g. a Set[float32] containing NaN
// can't be cleared because NaN can't be removed.
// For sets containing items of a type that is reflexive for ==,
// this is optimized to a single call to runtime.mapclear().
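//
// A minimal usage sketch:
//
//	s := New[string]("a", "b")
//	s.Clear() // s.Len() == 0; other references to s observe the empty set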
func (s Set[T]) Clear() Set[T] {
for key := range s {
delete(s, key)
}
return s
}
// Has returns true if and only if item is contained in the set.
func (s Set[T]) Has(item T) bool {
_, contained := s[item]

View File

@@ -200,12 +200,12 @@ func Invalid(field *Path, value interface{}, detail string) *Error {
// NotSupported returns a *Error indicating "unsupported value".
// This is used to report unknown values for enumerated fields (e.g. a list of
// valid values).
func NotSupported[T ~string](field *Path, value interface{}, validValues []T) *Error {
detail := ""
if len(validValues) > 0 {
quotedValues := make([]string, len(validValues))
for i, v := range validValues {
quotedValues[i] = strconv.Quote(fmt.Sprint(v))
}
detail = "supported values: " + strings.Join(quotedValues, ", ")
}

View File

@@ -191,7 +191,13 @@ func IsDNS1123Label(value string) []string {
errs = append(errs, MaxLenError(DNS1123LabelMaxLength))
}
if !dns1123LabelRegexp.MatchString(value) {
if dns1123SubdomainRegexp.MatchString(value) {
// It was a valid subdomain and not a valid label. Since we
// already checked length, it must be dots.
errs = append(errs, "must not contain dots")
} else {
errs = append(errs, RegexError(dns1123LabelErrMsg, dns1123LabelFmt, "my-name", "123-abc"))
}
}
return errs
}

502
vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go generated vendored Normal file
View File

@@ -0,0 +1,502 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package wait
import (
"context"
"math"
"sync"
"time"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/utils/clock"
)
// Backoff holds parameters applied to a Backoff function.
type Backoff struct {
// The initial duration.
Duration time.Duration
// Duration is multiplied by factor each iteration, if factor is not zero
// and the limits imposed by Steps and Cap have not been reached.
// Should not be negative.
// The jitter does not contribute to the updates to the duration parameter.
Factor float64
// The sleep at each iteration is the duration plus an additional
// amount chosen uniformly at random from the interval between
// zero and `jitter*duration`.
Jitter float64
// The remaining number of iterations in which the duration
// parameter may change (but progress can be stopped earlier by
// hitting the cap). If not positive, the duration is not
// changed. Used for exponential backoff in combination with
// Factor and Cap.
Steps int
// A limit on revised values of the duration parameter. If a
// multiplication by the factor parameter would make the duration
// exceed the cap then the duration is set to the cap and the
// steps parameter is set to zero.
Cap time.Duration
}
// Step returns an amount of time to sleep determined by the original
// Duration and Jitter. The backoff is mutated to update its Steps and
// Duration. A nil Backoff always has a zero-duration step.
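//
// An illustrative sketch of successive calls (values follow from Duration,
// Factor and Steps; jitter omitted):
//
//	b := &Backoff{Duration: time.Second, Factor: 2, Steps: 3}
//	b.Step() // 1s; Duration becomes 2s, Steps becomes 2
//	b.Step() // 2s; Duration becomes 4s, Steps becomes 1
//	b.Step() // 4s; Duration becomes 8s, Steps becomes 0
//	b.Step() // 8s; Steps exhausted, Duration no longer changes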
func (b *Backoff) Step() time.Duration {
if b == nil {
return 0
}
var nextDuration time.Duration
nextDuration, b.Duration, b.Steps = delay(b.Steps, b.Duration, b.Cap, b.Factor, b.Jitter)
return nextDuration
}
// DelayFunc returns a function that will compute the next interval to
// wait given the arguments in b. It does not mutate the original backoff,
// but the returned function is only safe to use from a single goroutine.
func (b Backoff) DelayFunc() DelayFunc {
steps := b.Steps
duration := b.Duration
cap := b.Cap
factor := b.Factor
jitter := b.Jitter
return func() time.Duration {
var nextDuration time.Duration
// jitter is applied per step and is not cumulative over multiple steps
nextDuration, duration, steps = delay(steps, duration, cap, factor, jitter)
return nextDuration
}
}
// Timer returns a timer implementation appropriate to this backoff's parameters
// for use with wait functions.
func (b Backoff) Timer() Timer {
if b.Steps > 1 || b.Jitter != 0 {
return &variableTimer{new: internalClock.NewTimer, fn: b.DelayFunc()}
}
if b.Duration > 0 {
return &fixedTimer{new: internalClock.NewTicker, interval: b.Duration}
}
return newNoopTimer()
}
// delay implements the core delay algorithm used in this package.
func delay(steps int, duration, cap time.Duration, factor, jitter float64) (_ time.Duration, next time.Duration, nextSteps int) {
// when steps is non-positive, do not alter the base duration
if steps < 1 {
if jitter > 0 {
return Jitter(duration, jitter), duration, 0
}
return duration, duration, 0
}
steps--
// calculate the next step's interval
if factor != 0 {
next = time.Duration(float64(duration) * factor)
if cap > 0 && next > cap {
next = cap
steps = 0
}
} else {
next = duration
}
// add jitter for this step
if jitter > 0 {
duration = Jitter(duration, jitter)
}
return duration, next, steps
}
// DelayWithReset returns a DelayFunc that will return the appropriate next interval to
// wait. Every resetInterval the backoff parameters are reset to their initial state.
// This method is safe to invoke from multiple goroutines, but all calls will advance
// the backoff state when Factor is set. If Factor is zero, this method is the same as
// invoking b.DelayFunc() since Steps has no impact without Factor. If resetInterval is
// zero no backoff will be performed, which is the same as calling DelayFunc with a
// zero factor and steps.
func (b Backoff) DelayWithReset(c clock.Clock, resetInterval time.Duration) DelayFunc {
if b.Factor <= 0 {
return b.DelayFunc()
}
if resetInterval <= 0 {
b.Steps = 0
b.Factor = 0
return b.DelayFunc()
}
return (&backoffManager{
backoff: b,
initialBackoff: b,
resetInterval: resetInterval,
clock: c,
lastStart: c.Now(),
timer: nil,
}).Step
}
// Until loops until stop channel is closed, running f every period.
//
// Until is syntactic sugar on top of JitterUntil with zero jitter factor and
// with sliding = true (which means the timer for period starts after the f
// completes).
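//
// A minimal usage sketch (refreshCache is a hypothetical func()):
//
//	stopCh := make(chan struct{})
//	go wait.Until(refreshCache, time.Minute, stopCh)
//	// ... when finished:
//	close(stopCh)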
func Until(f func(), period time.Duration, stopCh <-chan struct{}) {
JitterUntil(f, period, 0.0, true, stopCh)
}
// UntilWithContext loops until context is done, running f every period.
//
// UntilWithContext is syntactic sugar on top of JitterUntilWithContext
// with zero jitter factor and with sliding = true (which means the timer
// for period starts after the f completes).
func UntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) {
JitterUntilWithContext(ctx, f, period, 0.0, true)
}
// NonSlidingUntil loops until stop channel is closed, running f every
// period.
//
// NonSlidingUntil is syntactic sugar on top of JitterUntil with zero jitter
// factor, with sliding = false (meaning the timer for period starts at the same
// time as the function starts).
func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) {
JitterUntil(f, period, 0.0, false, stopCh)
}
// NonSlidingUntilWithContext loops until context is done, running f every
// period.
//
// NonSlidingUntilWithContext is syntactic sugar on top of JitterUntilWithContext
// with zero jitter factor, with sliding = false (meaning the timer for period
// starts at the same time as the function starts).
func NonSlidingUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) {
JitterUntilWithContext(ctx, f, period, 0.0, false)
}
// JitterUntil loops until stop channel is closed, running f every period.
//
// If jitterFactor is positive, the period is jittered before every run of f.
// If jitterFactor is not positive, the period is unchanged and not jittered.
//
// If sliding is true, the period is computed after f runs. If it is false then
// period includes the runtime for f.
//
// Close stopCh to stop. f may not be invoked if stop channel is already
// closed. Pass NeverStop if you don't want it to stop.
func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) {
BackoffUntil(f, NewJitteredBackoffManager(period, jitterFactor, &clock.RealClock{}), sliding, stopCh)
}
// BackoffUntil loops until stop channel is closed, run f every duration given by BackoffManager.
//
// If sliding is true, the period is computed after f runs. If it is false then
// period includes the runtime for f.
func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan struct{}) {
var t clock.Timer
for {
select {
case <-stopCh:
return
default:
}
if !sliding {
t = backoff.Backoff()
}
func() {
defer runtime.HandleCrash()
f()
}()
if sliding {
t = backoff.Backoff()
}
// NOTE: because there is no priority selection in golang
// it is possible for this to race, meaning we could
// trigger t.C and stopCh, and the t.C select falls through.
// To mitigate this we re-check stopCh at the beginning
// of every loop to prevent extra executions of f().
select {
case <-stopCh:
if !t.Stop() {
<-t.C()
}
return
case <-t.C():
}
}
}
// JitterUntilWithContext loops until context is done, running f every period.
//
// If jitterFactor is positive, the period is jittered before every run of f.
// If jitterFactor is not positive, the period is unchanged and not jittered.
//
// If sliding is true, the period is computed after f runs. If it is false then
// period includes the runtime for f.
//
// Cancel context to stop. f may not be invoked if context is already expired.
func JitterUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration, jitterFactor float64, sliding bool) {
JitterUntil(func() { f(ctx) }, period, jitterFactor, sliding, ctx.Done())
}
// backoffManager provides simple backoff behavior in a threadsafe manner to a caller.
type backoffManager struct {
backoff Backoff
initialBackoff Backoff
resetInterval time.Duration
clock clock.Clock
lock sync.Mutex
lastStart time.Time
timer clock.Timer
}
// Step returns the expected next duration to wait.
func (b *backoffManager) Step() time.Duration {
b.lock.Lock()
defer b.lock.Unlock()
switch {
case b.resetInterval == 0:
b.backoff = b.initialBackoff
case b.clock.Now().Sub(b.lastStart) > b.resetInterval:
b.backoff = b.initialBackoff
b.lastStart = b.clock.Now()
}
return b.backoff.Step()
}
// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer
// for exponential backoff. The returned timer must be drained before calling Backoff() the second
// time.
func (b *backoffManager) Backoff() clock.Timer {
b.lock.Lock()
defer b.lock.Unlock()
if b.timer == nil {
b.timer = b.clock.NewTimer(b.Step())
} else {
b.timer.Reset(b.Step())
}
return b.timer
}
// Timer returns a new Timer instance that shares the clock and the reset behavior with all other
// timers.
func (b *backoffManager) Timer() Timer {
return DelayFunc(b.Step).Timer(b.clock)
}
// BackoffManager manages backoff with a particular scheme based on its underlying implementation.
type BackoffManager interface {
// Backoff returns a shared clock.Timer that is Reset on every invocation. This method is not
// safe for use from multiple threads. It returns a timer for backoff, and caller shall backoff
// until Timer.C() drains. If the second Backoff() is called before the timer from the first
// Backoff() call finishes, the first timer will NOT be drained and result in undetermined
// behavior.
Backoff() clock.Timer
}
// Deprecated: Will be removed when the legacy polling functions are removed.
type exponentialBackoffManagerImpl struct {
backoff *Backoff
backoffTimer clock.Timer
lastBackoffStart time.Time
initialBackoff time.Duration
backoffResetDuration time.Duration
clock clock.Clock
}
// NewExponentialBackoffManager returns a manager for managing exponential backoff. Each backoff is jittered and
// backoff will not exceed the given max. If the backoff is not called within resetDuration, the backoff is reset.
// This backoff manager is used to reduce load during upstream unhealthiness.
//
// Deprecated: Will be removed when the legacy Poll methods are removed. Callers should construct a
// Backoff struct, use DelayWithReset() to get a DelayFunc that periodically resets itself, and then
// invoke Timer() when calling wait.BackoffUntil.
//
// Instead of:
//
// bm := wait.NewExponentialBackoffManager(init, max, reset, factor, jitter, clock)
// ...
// wait.BackoffUntil(..., bm.Backoff, ...)
//
// Use:
//
// delayFn := wait.Backoff{
// Duration: init,
// Cap: max,
// Steps: int(math.Ceil(float64(max) / float64(init))), // now a required argument
// Factor: factor,
// Jitter: jitter,
//	}.DelayWithReset(clock, reset)
//	wait.BackoffUntil(..., delayFn.Timer(clock), ...)
func NewExponentialBackoffManager(initBackoff, maxBackoff, resetDuration time.Duration, backoffFactor, jitter float64, c clock.Clock) BackoffManager {
return &exponentialBackoffManagerImpl{
backoff: &Backoff{
Duration: initBackoff,
Factor: backoffFactor,
Jitter: jitter,
// the current impl of wait.Backoff returns Backoff.Duration once steps are used up, which is not
// what we ideally need here, so we set it to max int and assume we will never use up the steps
Steps: math.MaxInt32,
Cap: maxBackoff,
},
backoffTimer: nil,
initialBackoff: initBackoff,
lastBackoffStart: c.Now(),
backoffResetDuration: resetDuration,
clock: c,
}
}
func (b *exponentialBackoffManagerImpl) getNextBackoff() time.Duration {
if b.clock.Now().Sub(b.lastBackoffStart) > b.backoffResetDuration {
b.backoff.Steps = math.MaxInt32
b.backoff.Duration = b.initialBackoff
}
b.lastBackoffStart = b.clock.Now()
return b.backoff.Step()
}
// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for exponential backoff.
// The returned timer must be drained before calling Backoff() the second time
func (b *exponentialBackoffManagerImpl) Backoff() clock.Timer {
if b.backoffTimer == nil {
b.backoffTimer = b.clock.NewTimer(b.getNextBackoff())
} else {
b.backoffTimer.Reset(b.getNextBackoff())
}
return b.backoffTimer
}
// Deprecated: Will be removed when the legacy polling functions are removed.
type jitteredBackoffManagerImpl struct {
clock clock.Clock
duration time.Duration
jitter float64
backoffTimer clock.Timer
}
// NewJitteredBackoffManager returns a BackoffManager that backs off with the given duration plus the
// given jitter. If the jitter is negative, backoff will not be jittered.
//
// Deprecated: Will be removed when the legacy Poll methods are removed. Callers should construct a
// Backoff struct and invoke Timer() when calling wait.BackoffUntil.
//
// Instead of:
//
// bm := wait.NewJitteredBackoffManager(duration, jitter, clock)
// ...
// wait.BackoffUntil(..., bm.Backoff, ...)
//
// Use:
//
// wait.BackoffUntil(..., wait.Backoff{Duration: duration, Jitter: jitter}.Timer(), ...)
func NewJitteredBackoffManager(duration time.Duration, jitter float64, c clock.Clock) BackoffManager {
return &jitteredBackoffManagerImpl{
clock: c,
duration: duration,
jitter: jitter,
backoffTimer: nil,
}
}
func (j *jitteredBackoffManagerImpl) getNextBackoff() time.Duration {
jitteredPeriod := j.duration
if j.jitter > 0.0 {
jitteredPeriod = Jitter(j.duration, j.jitter)
}
return jitteredPeriod
}
// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for jittered backoff.
// The returned timer must be drained before calling Backoff() the second time
func (j *jitteredBackoffManagerImpl) Backoff() clock.Timer {
backoff := j.getNextBackoff()
if j.backoffTimer == nil {
j.backoffTimer = j.clock.NewTimer(backoff)
} else {
j.backoffTimer.Reset(backoff)
}
return j.backoffTimer
}
// ExponentialBackoff repeats a condition check with exponential backoff.
//
// It repeatedly checks the condition and then sleeps, using `backoff.Step()`
// to determine the length of the sleep and adjust Duration and Steps.
// Stops and returns as soon as:
// 1. the condition check returns true or an error,
// 2. `backoff.Steps` checks of the condition have been done, or
// 3. a sleep truncated by the cap on duration has been completed.
// In case (1) the returned error is what the condition function returned.
// In all other cases, ErrWaitTimeout is returned.
//
// Since backoffs are often subject to cancellation, we recommend using
// ExponentialBackoffWithContext and passing a context to the method.
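//
// A minimal usage sketch (tryOnce is a hypothetical attempt; returning true
// stops the retries):
//
//	backoff := wait.Backoff{Duration: 10 * time.Millisecond, Factor: 2.0, Jitter: 0.1, Steps: 5}
//	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
//		return tryOnce()
//	})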
func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error {
for backoff.Steps > 0 {
if ok, err := runConditionWithCrashProtection(condition); err != nil || ok {
return err
}
if backoff.Steps == 1 {
break
}
time.Sleep(backoff.Step())
}
return ErrWaitTimeout
}
// ExponentialBackoffWithContext repeats a condition check with exponential backoff.
// It immediately returns an error if the condition returns an error, the context is cancelled
// or hits the deadline, or if the maximum attempts defined in backoff is exceeded (ErrWaitTimeout).
// If an error is returned by the condition the backoff stops immediately. The condition will
// never be invoked more than backoff.Steps times.
func ExponentialBackoffWithContext(ctx context.Context, backoff Backoff, condition ConditionWithContextFunc) error {
for backoff.Steps > 0 {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
if ok, err := runConditionWithCrashProtectionWithContext(ctx, condition); err != nil || ok {
return err
}
if backoff.Steps == 1 {
break
}
waitBeforeRetry := backoff.Step()
select {
case <-ctx.Done():
return ctx.Err()
case <-time.After(waitBeforeRetry):
}
}
return ErrWaitTimeout
}

51
vendor/k8s.io/apimachinery/pkg/util/wait/delay.go generated vendored Normal file
View File

@@ -0,0 +1,51 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package wait
import (
"context"
"sync"
"time"
"k8s.io/utils/clock"
)
// DelayFunc returns the next time interval to wait.
type DelayFunc func() time.Duration
// Timer takes an arbitrary delay function and returns a timer that can handle arbitrary interval changes.
// Use Backoff{...}.Timer() for simple delays and more efficient timers.
func (fn DelayFunc) Timer(c clock.Clock) Timer {
return &variableTimer{fn: fn, new: c.NewTimer}
}
// Until takes an arbitrary delay function and runs until cancelled or the condition indicates exit. This
// offers all of the functionality of the methods in this package.
func (fn DelayFunc) Until(ctx context.Context, immediate, sliding bool, condition ConditionWithContextFunc) error {
return loopConditionUntilContext(ctx, &variableTimer{fn: fn, new: internalClock.NewTimer}, immediate, sliding, condition)
}
// Concurrent returns a version of this DelayFunc that is safe for use by multiple goroutines that
// wish to share a single delay timer.
func (fn DelayFunc) Concurrent() DelayFunc {
var lock sync.Mutex
return func() time.Duration {
lock.Lock()
defer lock.Unlock()
return fn()
}
}

96
vendor/k8s.io/apimachinery/pkg/util/wait/error.go generated vendored Normal file
View File

@@ -0,0 +1,96 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package wait
import (
"context"
"errors"
)
// ErrWaitTimeout is returned when the condition was not satisfied in time.
//
// Deprecated: This type will be made private in favor of Interrupted()
// for checking errors or ErrorInterrupted(err) for returning a wrapped error.
var ErrWaitTimeout = ErrorInterrupted(errors.New("timed out waiting for the condition"))
// Interrupted returns true if the error indicates a Poll, ExponentialBackoff, or
// Until loop exited for any reason besides the condition returning true or an
// error. A loop is considered interrupted if the calling context is cancelled,
// the context reaches its deadline, or a backoff reaches its maximum allowed
// steps.
//
// Callers should use this method instead of comparing the error value directly to
// ErrWaitTimeout, as methods that cancel a context may not return that error.
//
// Instead of:
//
// err := wait.Poll(...)
// if err == wait.ErrWaitTimeout {
// log.Infof("Wait for operation exceeded")
// } else ...
//
// Use:
//
// err := wait.Poll(...)
// if wait.Interrupted(err) {
// log.Infof("Wait for operation exceeded")
// } else ...
func Interrupted(err error) bool {
switch {
case errors.Is(err, errWaitTimeout),
errors.Is(err, context.Canceled),
errors.Is(err, context.DeadlineExceeded):
return true
default:
return false
}
}
// errInterrupted wraps the cause of an interrupted wait; see ErrorInterrupted.
type errInterrupted struct {
cause error
}
// ErrorInterrupted returns an error that indicates the wait was ended
// early for a given reason. If no cause is provided a generic error
// will be used but callers are encouraged to provide a real cause for
// clarity in debugging.
func ErrorInterrupted(cause error) error {
switch cause.(type) {
case errInterrupted:
// no need to wrap twice since errInterrupted is only needed
// once in a chain
return cause
default:
return errInterrupted{cause}
}
}
// errWaitTimeout is the private version of the previous ErrWaitTimeout
// and is private to prevent direct comparison. Use ErrorInterrupted(err)
// to get an error that will return true for Interrupted(err).
var errWaitTimeout = errInterrupted{}
func (e errInterrupted) Unwrap() error { return e.cause }
func (e errInterrupted) Is(target error) bool { return target == errWaitTimeout }
func (e errInterrupted) Error() string {
if e.cause == nil {
// returns the same error message as historical behavior
return "timed out waiting for the condition"
}
return e.cause.Error()
}

95
vendor/k8s.io/apimachinery/pkg/util/wait/loop.go generated vendored Normal file
View File

@@ -0,0 +1,95 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package wait
import (
"context"
"time"
"k8s.io/apimachinery/pkg/util/runtime"
)
// loopConditionUntilContext executes the provided condition at intervals defined by
// the provided timer until the provided context is cancelled, the condition returns
// true, or the condition returns an error. If sliding is true, the period is computed
// after condition runs. If it is false then period includes the runtime for condition.
// If immediate is false the first delay happens before any call to condition, if
// immediate is true the condition will be invoked before waiting and guarantees that
// the condition is invoked at least once, regardless of whether the context has been
// cancelled. The returned error is the error returned by the last condition or the
// context error if the context was terminated.
//
// This is the common loop construct for all polling in the wait package.
func loopConditionUntilContext(ctx context.Context, t Timer, immediate, sliding bool, condition ConditionWithContextFunc) error {
defer t.Stop()
var timeCh <-chan time.Time
doneCh := ctx.Done()
if !sliding {
timeCh = t.C()
}
// If immediate is true the condition is guaranteed to be
// executed at least once. If we haven't requested immediate
// execution, delay once before checking the condition.
if immediate {
if ok, err := func() (bool, error) {
defer runtime.HandleCrash()
return condition(ctx)
}(); err != nil || ok {
return err
}
}
if sliding {
timeCh = t.C()
}
for {
// Wait for either the context to be cancelled or the timer to fire for the next invocation
select {
case <-doneCh:
return ctx.Err()
case <-timeCh:
}
// IMPORTANT: Because there is no channel priority selection in golang
// it is possible for very short timers to "win" the race in the previous select
// repeatedly even when the context has been canceled. We therefore must
// explicitly check for context cancellation on every loop and exit if true to
// guarantee that we don't invoke condition more than once after context has
// been cancelled.
if err := ctx.Err(); err != nil {
return err
}
if !sliding {
t.Next()
}
if ok, err := func() (bool, error) {
defer runtime.HandleCrash()
return condition(ctx)
}(); err != nil || ok {
return err
}
if sliding {
t.Next()
}
}
}

315
vendor/k8s.io/apimachinery/pkg/util/wait/poll.go generated vendored Normal file
View File

@@ -0,0 +1,315 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package wait
import (
"context"
"time"
)
// PollUntilContextCancel tries a condition func until it returns true, an error, or the context
// is cancelled or hits a deadline. condition will be invoked after the first interval if the
// context is not cancelled first. The returned error will be from ctx.Err(), the condition's
// err return value, or nil. If invoking condition takes longer than interval the next condition
// will be invoked immediately. When using very short intervals, condition may be invoked multiple
// times before a context cancellation is detected. If immediate is true, condition will be
// invoked before waiting and guarantees that condition is invoked at least once, regardless of
// whether the context has been cancelled.
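//
// A minimal usage sketch (endpointReady is a hypothetical readiness check):
//
//	err := wait.PollUntilContextCancel(ctx, 500*time.Millisecond, true,
//		func(ctx context.Context) (bool, error) {
//			return endpointReady(ctx)
//		})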
func PollUntilContextCancel(ctx context.Context, interval time.Duration, immediate bool, condition ConditionWithContextFunc) error {
return loopConditionUntilContext(ctx, Backoff{Duration: interval}.Timer(), immediate, false, condition)
}
// PollUntilContextTimeout will terminate polling after timeout duration by setting a context
// timeout. This is provided as a convenience function for callers not currently executing under
// a deadline and is equivalent to:
//
// deadlineCtx, deadlineCancel := context.WithTimeout(ctx, timeout)
// err := PollUntilContextCancel(deadlineCtx, interval, immediate, condition)
//
// The deadline context will be cancelled if the Poll succeeds before the timeout, simplifying
// inline usage. All other behavior is identical to PollUntilContextCancel.
func PollUntilContextTimeout(ctx context.Context, interval, timeout time.Duration, immediate bool, condition ConditionWithContextFunc) error {
deadlineCtx, deadlineCancel := context.WithTimeout(ctx, timeout)
defer deadlineCancel()
return loopConditionUntilContext(deadlineCtx, Backoff{Duration: interval}.Timer(), immediate, false, condition)
}
// Poll tries a condition func until it returns true, an error, or the timeout
// is reached.
//
// Poll always waits the interval before the run of 'condition'.
// 'condition' will always be invoked at least once.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// If you want to Poll something forever, see PollInfinite.
//
// Deprecated: This method does not return errors from context, use PollUntilContextTimeout.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func Poll(interval, timeout time.Duration, condition ConditionFunc) error {
return PollWithContext(context.Background(), interval, timeout, condition.WithContext())
}
// PollWithContext tries a condition func until it returns true, an error,
// or when the context expires or the timeout is reached, whichever
// happens first.
//
// PollWithContext always waits the interval before the run of 'condition'.
// 'condition' will always be invoked at least once.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// If you want to Poll something forever, see PollInfinite.
//
// Deprecated: This method does not return errors from context, use PollUntilContextTimeout.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func PollWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error {
return poll(ctx, false, poller(interval, timeout), condition)
}
// PollUntil tries a condition func until it returns true, an error or stopCh is
// closed.
//
// PollUntil always waits interval before the first run of 'condition'.
// 'condition' will always be invoked at least once.
//
// Deprecated: This method does not return errors from context, use PollUntilContextCancel.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
return PollUntilWithContext(ContextForChannel(stopCh), interval, condition.WithContext())
}
// PollUntilWithContext tries a condition func until it returns true,
// an error or the specified context is cancelled or expired.
//
// PollUntilWithContext always waits interval before the first run of 'condition'.
// 'condition' will always be invoked at least once.
//
// Deprecated: This method does not return errors from context, use PollUntilContextCancel.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func PollUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error {
return poll(ctx, false, poller(interval, 0), condition)
}
// PollInfinite tries a condition func until it returns true or an error
//
// PollInfinite always waits the interval before the run of 'condition'.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// Deprecated: This method does not return errors from context, use PollUntilContextCancel.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func PollInfinite(interval time.Duration, condition ConditionFunc) error {
return PollInfiniteWithContext(context.Background(), interval, condition.WithContext())
}
// PollInfiniteWithContext tries a condition func until it returns true or an error
//
// PollInfiniteWithContext always waits the interval before the run of 'condition'.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// Deprecated: This method does not return errors from context, use PollUntilContextCancel.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func PollInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error {
return poll(ctx, false, poller(interval, 0), condition)
}
// PollImmediate tries a condition func until it returns true, an error, or the timeout
// is reached.
//
// PollImmediate always checks 'condition' before waiting for the interval. 'condition'
// will always be invoked at least once.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// If you want to immediately Poll something forever, see PollImmediateInfinite.
//
// Deprecated: This method does not return errors from context, use PollUntilContextTimeout.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error {
return PollImmediateWithContext(context.Background(), interval, timeout, condition.WithContext())
}
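// pollImmediateMigrationExample is an illustrative sketch (not part of the
// upstream package): passing immediate=true to PollUntilContextTimeout
// reproduces PollImmediate's check-before-wait behavior. Interval and
// timeout values are placeholders.
func pollImmediateMigrationExample(ctx context.Context, cond ConditionWithContextFunc) error {
	return PollUntilContextTimeout(ctx, time.Second, 30*time.Second, true, cond)
}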
// PollImmediateWithContext tries a condition func until it returns true, an error,
// or the timeout is reached or the specified context expires, whichever happens first.
//
// PollImmediateWithContext always checks 'condition' before waiting for the interval.
// 'condition' will always be invoked at least once.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// If you want to immediately Poll something forever, see PollImmediateInfinite.
//
// Deprecated: This method does not return errors from context, use PollUntilContextTimeout.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func PollImmediateWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error {
return poll(ctx, true, poller(interval, timeout), condition)
}
// PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed.
//
// PollImmediateUntil runs the 'condition' before waiting for the interval.
// 'condition' will always be invoked at least once.
//
// Deprecated: This method does not return errors from context, use PollUntilContextCancel.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
return PollImmediateUntilWithContext(ContextForChannel(stopCh), interval, condition.WithContext())
}
// PollImmediateUntilWithContext tries a condition func until it returns true,
// an error or the specified context is cancelled or expired.
//
// PollImmediateUntilWithContext runs the 'condition' before waiting for the interval.
// 'condition' will always be invoked at least once.
//
// Deprecated: This method does not return errors from context, use PollUntilContextCancel.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func PollImmediateUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error {
return poll(ctx, true, poller(interval, 0), condition)
}
// PollImmediateInfinite tries a condition func until it returns true or an error
//
// PollImmediateInfinite runs the 'condition' before waiting for the interval.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// Deprecated: This method does not return errors from context, use PollUntilContextCancel.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error {
return PollImmediateInfiniteWithContext(context.Background(), interval, condition.WithContext())
}
// PollImmediateInfiniteWithContext tries a condition func until it returns true
// or an error or the specified context gets cancelled or expired.
//
// PollImmediateInfiniteWithContext runs the 'condition' before waiting for the interval.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// Deprecated: This method does not return errors from context, use PollUntilContextCancel.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func PollImmediateInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error {
return poll(ctx, true, poller(interval, 0), condition)
}
// Internally used; each of the public 'Poll*' functions defined in this
// package should invoke this internal function with appropriate parameters.
// ctx: the context specified by the caller; for infinite polling pass
// a context that never gets cancelled or expired.
// immediate: if true, the 'condition' will be invoked before waiting for the interval,
// in this case 'condition' will always be invoked at least once.
// wait: user specified WaitFunc function that controls at what interval the condition
// function should be invoked periodically and whether it is bound by a timeout.
// condition: user specified ConditionWithContextFunc function.
//
// Deprecated: will be removed in favor of loopConditionUntilContext.
func poll(ctx context.Context, immediate bool, wait waitWithContextFunc, condition ConditionWithContextFunc) error {
if immediate {
done, err := runConditionWithCrashProtectionWithContext(ctx, condition)
if err != nil {
return err
}
if done {
return nil
}
}
select {
case <-ctx.Done():
// returning ctx.Err() will break backward compatibility, use new PollUntilContext*
// methods instead
return ErrWaitTimeout
default:
return waitForWithContext(ctx, wait, condition)
}
}
// poller returns a WaitFunc that will send to the channel every interval until
// timeout has elapsed and then closes the channel.
//
// Over very short intervals you may receive no ticks before the channel is
// closed. A timeout of 0 is interpreted as an infinity, and in such a case
// it would be the caller's responsibility to close the done channel.
// Failure to do so would result in a leaked goroutine.
//
// Output ticks are not buffered. If the channel is not ready to receive an
// item, the tick is skipped.
//
// Deprecated: Will be removed in a future release.
func poller(interval, timeout time.Duration) waitWithContextFunc {
return waitWithContextFunc(func(ctx context.Context) <-chan struct{} {
ch := make(chan struct{})
go func() {
defer close(ch)
tick := time.NewTicker(interval)
defer tick.Stop()
var after <-chan time.Time
if timeout != 0 {
// time.After is more convenient, but it
// potentially leaves timers around much longer
// than necessary if we exit early.
timer := time.NewTimer(timeout)
after = timer.C
defer timer.Stop()
}
for {
select {
case <-tick.C:
// If the consumer isn't ready for this signal drop it and
// check the other channels.
select {
case ch <- struct{}{}:
default:
}
case <-after:
return
case <-ctx.Done():
return
}
}
}()
return ch
})
}
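// pollerDropExample is an illustrative in-package sketch (not upstream code)
// of the unbuffered-tick behavior documented above: with a 10ms interval and
// a consumer that takes ~35ms per tick, intermediate ticks are dropped rather
// than queued, so far fewer than timeout/interval ticks are observed.
func pollerDropExample(ctx context.Context) int {
	received := 0
	for range poller(10*time.Millisecond, 100*time.Millisecond)(ctx) {
		time.Sleep(35 * time.Millisecond) // slow consumer; ticks fired meanwhile are skipped
		received++
	}
	return received // roughly 3, not 10
}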

121
vendor/k8s.io/apimachinery/pkg/util/wait/timer.go generated vendored Normal file
View File

@@ -0,0 +1,121 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package wait
import (
"time"
"k8s.io/utils/clock"
)
// Timer abstracts how wait functions interact with the time runtime efficiently. Test
// code may implement this interface directly but package consumers are encouraged
// to use the Backoff type as the primary mechanism for acquiring a Timer. The
// interface is a simplification of clock.Timer to prevent misuse. Timers are not
// expected to be safe for calls from multiple goroutines.
type Timer interface {
// C returns a channel that will receive a struct{} each time the timer fires.
// The channel should not be waited on after Stop() is invoked. It is allowed
// to cache the returned value of C() for the lifetime of the Timer.
C() <-chan time.Time
// Next is invoked by wait functions to signal timers that the next interval
// should begin. You may only use Next() if you have drained the channel C().
// You should not call Next() after Stop() is invoked.
Next()
// Stop releases the timer. It is safe to invoke if no other methods have been
// called.
Stop()
}
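// timerLoopExample is an illustrative in-package sketch (not upstream code)
// of the contract above: receive from C(), and only after draining a tick
// call Next() to arm the following interval; Stop() releases the timer.
func timerLoopExample(done <-chan struct{}, t Timer, work func()) {
	defer t.Stop()
	for {
		select {
		case <-done:
			return
		case <-t.C(): // one tick drained from C()
			work()
			t.Next() // safe: C() was just drained
		}
	}
}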
type noopTimer struct {
closedCh <-chan time.Time
}
// newNoopTimer creates a timer with a unique channel to avoid contention
// for the channel's lock across multiple unrelated timers.
func newNoopTimer() noopTimer {
ch := make(chan time.Time)
close(ch)
return noopTimer{closedCh: ch}
}
func (t noopTimer) C() <-chan time.Time {
return t.closedCh
}
func (noopTimer) Next() {}
func (noopTimer) Stop() {}
type variableTimer struct {
fn DelayFunc
t clock.Timer
new func(time.Duration) clock.Timer
}
func (t *variableTimer) C() <-chan time.Time {
if t.t == nil {
d := t.fn()
t.t = t.new(d)
}
return t.t.C()
}
func (t *variableTimer) Next() {
if t.t == nil {
return
}
d := t.fn()
t.t.Reset(d)
}
func (t *variableTimer) Stop() {
if t.t == nil {
return
}
t.t.Stop()
t.t = nil
}
type fixedTimer struct {
interval time.Duration
t clock.Ticker
new func(time.Duration) clock.Ticker
}
func (t *fixedTimer) C() <-chan time.Time {
if t.t == nil {
t.t = t.new(t.interval)
}
return t.t.C()
}
func (t *fixedTimer) Next() {
	// no-op for fixed timers: the underlying ticker re-arms itself after
	// every interval, so there is nothing to reset between runs
}
func (t *fixedTimer) Stop() {
if t.t == nil {
return
}
t.t.Stop()
t.t = nil
}
var (
// RealTimer can be passed to methods that need a clock.Timer.
RealTimer = clock.RealClock{}.NewTimer
)
var (
// internalClock is used for test injection of clocks
internalClock = clock.RealClock{}
)

vendor/k8s.io/apimachinery/pkg/util/wait/wait.go generated vendored
View File

@@ -18,14 +18,11 @@ package wait
import (
"context"
"errors"
"math"
"math/rand"
"sync"
"time"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/utils/clock"
)
// For any test of the style:
@@ -83,113 +80,6 @@ func Forever(f func(), period time.Duration) {
Until(f, period, NeverStop)
}
// Until loops until stop channel is closed, running f every period.
//
// Until is syntactic sugar on top of JitterUntil with zero jitter factor and
// with sliding = true (which means the timer for period starts after the f
// completes).
func Until(f func(), period time.Duration, stopCh <-chan struct{}) {
JitterUntil(f, period, 0.0, true, stopCh)
}
// UntilWithContext loops until context is done, running f every period.
//
// UntilWithContext is syntactic sugar on top of JitterUntilWithContext
// with zero jitter factor and with sliding = true (which means the timer
// for period starts after the f completes).
func UntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) {
JitterUntilWithContext(ctx, f, period, 0.0, true)
}
// NonSlidingUntil loops until stop channel is closed, running f every
// period.
//
// NonSlidingUntil is syntactic sugar on top of JitterUntil with zero jitter
// factor, with sliding = false (meaning the timer for period starts at the same
// time as the function starts).
func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) {
JitterUntil(f, period, 0.0, false, stopCh)
}
// NonSlidingUntilWithContext loops until context is done, running f every
// period.
//
// NonSlidingUntilWithContext is syntactic sugar on top of JitterUntilWithContext
// with zero jitter factor, with sliding = false (meaning the timer for period
// starts at the same time as the function starts).
func NonSlidingUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) {
JitterUntilWithContext(ctx, f, period, 0.0, false)
}
// JitterUntil loops until stop channel is closed, running f every period.
//
// If jitterFactor is positive, the period is jittered before every run of f.
// If jitterFactor is not positive, the period is unchanged and not jittered.
//
// If sliding is true, the period is computed after f runs. If it is false then
// period includes the runtime for f.
//
// Close stopCh to stop. f may not be invoked if the stop channel is already
// closed. Pass NeverStop if you don't want it to stop.
func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) {
BackoffUntil(f, NewJitteredBackoffManager(period, jitterFactor, &clock.RealClock{}), sliding, stopCh)
}
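// jitterUntilExample is an illustrative sketch (not upstream code): run sync
// every ~10s with up to 20% added jitter, timing each period from the end of
// the previous run (sliding=true), until stopCh is closed.
func jitterUntilExample(stopCh <-chan struct{}, sync func()) {
	JitterUntil(sync, 10*time.Second, 0.2, true, stopCh)
}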
// BackoffUntil loops until stop channel is closed, run f every duration given by BackoffManager.
//
// If sliding is true, the period is computed after f runs. If it is false then
// period includes the runtime for f.
func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan struct{}) {
var t clock.Timer
for {
select {
case <-stopCh:
return
default:
}
if !sliding {
t = backoff.Backoff()
}
func() {
defer runtime.HandleCrash()
f()
}()
if sliding {
t = backoff.Backoff()
}
// NOTE: because there is no priority selection in Go's select statement,
// this can race: t.C and stopCh may both be ready and the t.C case may be
// chosen. To mitigate, we re-check stopCh at the beginning of every loop
// iteration to prevent extra executions of f().
select {
case <-stopCh:
if !t.Stop() {
<-t.C()
}
return
case <-t.C():
}
}
}
// JitterUntilWithContext loops until context is done, running f every period.
//
// If jitterFactor is positive, the period is jittered before every run of f.
// If jitterFactor is not positive, the period is unchanged and not jittered.
//
// If sliding is true, the period is computed after f runs. If it is false then
// period includes the runtime for f.
//
// Cancel context to stop. f may not be invoked if context is already expired.
func JitterUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration, jitterFactor float64, sliding bool) {
JitterUntil(func() { f(ctx) }, period, jitterFactor, sliding, ctx.Done())
}
// Jitter returns a time.Duration between duration and duration + maxFactor *
// duration.
//
@@ -203,9 +93,6 @@ func Jitter(duration time.Duration, maxFactor float64) time.Duration {
return wait
}
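// jitterExample is an illustrative sketch (not upstream code): with
// maxFactor=0.5 the result is drawn uniformly from [10s, 15s), which helps
// de-synchronize periodic workers that would otherwise fire in lockstep.
func jitterExample() time.Duration {
	return Jitter(10*time.Second, 0.5)
}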
// ErrWaitTimeout is returned when the condition exited without success.
var ErrWaitTimeout = errors.New("timed out waiting for the condition")
// ConditionFunc returns true if the condition is satisfied, or an error
// if the loop should be aborted.
type ConditionFunc func() (done bool, err error)
@@ -223,425 +110,80 @@ func (cf ConditionFunc) WithContext() ConditionWithContextFunc {
}
}
// runConditionWithCrashProtection runs a ConditionFunc with crash protection
func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) {
return runConditionWithCrashProtectionWithContext(context.TODO(), condition.WithContext())
// ContextForChannel provides a context that will be treated as cancelled
// when the provided parentCh is closed. The implementation returns
// context.Canceled for Err() if and only if the parentCh is closed.
func ContextForChannel(parentCh <-chan struct{}) context.Context {
return channelContext{stopCh: parentCh}
}
// runConditionWithCrashProtectionWithContext runs a
// ConditionWithContextFunc with crash protection.
var _ context.Context = channelContext{}
// channelContext will behave as if the context were cancelled when stopCh is
// closed.
type channelContext struct {
stopCh <-chan struct{}
}
func (c channelContext) Done() <-chan struct{} { return c.stopCh }
func (c channelContext) Err() error {
select {
case <-c.stopCh:
return context.Canceled
default:
return nil
}
}
func (c channelContext) Deadline() (time.Time, bool) { return time.Time{}, false }
func (c channelContext) Value(key any) any { return nil }
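// contextForChannelExample is an illustrative sketch (not upstream code):
// unlike the previous goroutine-based implementation further below, the
// returned context needs no CancelFunc and allocates no goroutine, so it is
// cheap to create in hot paths.
func contextForChannelExample(stopCh <-chan struct{}) error {
	ctx := ContextForChannel(stopCh)
	<-ctx.Done()     // unblocks once stopCh is closed
	return ctx.Err() // context.Canceled, per the contract above
}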
// runConditionWithCrashProtection runs a ConditionFunc with crash protection.
//
// Deprecated: Will be removed when the legacy polling methods are removed.
func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) {
defer runtime.HandleCrash()
return condition()
}
// runConditionWithCrashProtectionWithContext runs a ConditionWithContextFunc
// with crash protection.
//
// Deprecated: Will be removed when the legacy polling methods are removed.
func runConditionWithCrashProtectionWithContext(ctx context.Context, condition ConditionWithContextFunc) (bool, error) {
defer runtime.HandleCrash()
return condition(ctx)
}
// Backoff holds parameters applied to a Backoff function.
type Backoff struct {
// The initial duration.
Duration time.Duration
// Duration is multiplied by factor each iteration, if factor is not zero
// and the limits imposed by Steps and Cap have not been reached.
// Should not be negative.
// The jitter does not contribute to the updates to the duration parameter.
Factor float64
// The sleep at each iteration is the duration plus an additional
// amount chosen uniformly at random from the interval between
// zero and `jitter*duration`.
Jitter float64
// The remaining number of iterations in which the duration
// parameter may change (but progress can be stopped earlier by
// hitting the cap). If not positive, the duration is not
// changed. Used for exponential backoff in combination with
// Factor and Cap.
Steps int
// A limit on revised values of the duration parameter. If a
// multiplication by the factor parameter would make the duration
// exceed the cap then the duration is set to the cap and the
// steps parameter is set to zero.
Cap time.Duration
}
// Step (1) returns an amount of time to sleep determined by the
// original Duration and Jitter and (2) mutates the provided Backoff
// to update its Steps and Duration.
func (b *Backoff) Step() time.Duration {
if b.Steps < 1 {
if b.Jitter > 0 {
return Jitter(b.Duration, b.Jitter)
}
return b.Duration
}
b.Steps--
duration := b.Duration
// calculate the next step
if b.Factor != 0 {
b.Duration = time.Duration(float64(b.Duration) * b.Factor)
if b.Cap > 0 && b.Duration > b.Cap {
b.Duration = b.Cap
b.Steps = 0
}
}
if b.Jitter > 0 {
duration = Jitter(duration, b.Jitter)
}
return duration
}
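// backoffStepExample is an illustrative sketch (not upstream code) of how
// Step mutates the receiver: with Duration=1s, Factor=2, Steps=4, Cap=5s and
// no jitter, successive calls return 1s, 2s, 4s, then 5s thereafter (hitting
// the cap zeroes Steps, after which Duration is returned unchanged).
func backoffStepExample() []time.Duration {
	b := Backoff{Duration: time.Second, Factor: 2, Steps: 4, Cap: 5 * time.Second}
	steps := make([]time.Duration, 0, 5)
	for i := 0; i < 5; i++ {
		steps = append(steps, b.Step())
	}
	return steps // [1s 2s 4s 5s 5s]
}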
// ContextForChannel derives a child context from a parent channel.
//
// The derived context's Done channel is closed when the returned cancel function
// is called or when the parent channel is closed, whichever happens first.
//
// Note the caller must *always* call the CancelFunc, otherwise resources may be leaked.
func ContextForChannel(parentCh <-chan struct{}) (context.Context, context.CancelFunc) {
ctx, cancel := context.WithCancel(context.Background())
go func() {
select {
case <-parentCh:
cancel()
case <-ctx.Done():
}
}()
return ctx, cancel
}
// BackoffManager manages backoff with a particular scheme based on its underlying implementation. It provides
// an interface to return a timer for backoff, and the caller shall backoff until Timer.C() fires. If Backoff()
// is called again before the timer from the previous Backoff() call finishes, the first timer will NOT be
// drained, resulting in undefined behavior.
// The BackoffManager is supposed to be called in a single-threaded environment.
type BackoffManager interface {
Backoff() clock.Timer
}
type exponentialBackoffManagerImpl struct {
backoff *Backoff
backoffTimer clock.Timer
lastBackoffStart time.Time
initialBackoff time.Duration
backoffResetDuration time.Duration
clock clock.Clock
}
// NewExponentialBackoffManager returns a manager for managing exponential backoff. Each backoff is jittered and
// the backoff will not exceed the given max. If Backoff() is not called within resetDuration, the backoff is reset.
// This backoff manager is used to reduce load during upstream unhealthiness.
func NewExponentialBackoffManager(initBackoff, maxBackoff, resetDuration time.Duration, backoffFactor, jitter float64, c clock.Clock) BackoffManager {
return &exponentialBackoffManagerImpl{
backoff: &Backoff{
Duration: initBackoff,
Factor: backoffFactor,
Jitter: jitter,
// the current impl of wait.Backoff returns Backoff.Duration once steps are used up, which is not
// what we ideally need here; we set it to max int and assume we will never use up the steps
Steps: math.MaxInt32,
Cap: maxBackoff,
},
backoffTimer: nil,
initialBackoff: initBackoff,
lastBackoffStart: c.Now(),
backoffResetDuration: resetDuration,
clock: c,
}
}
func (b *exponentialBackoffManagerImpl) getNextBackoff() time.Duration {
if b.clock.Now().Sub(b.lastBackoffStart) > b.backoffResetDuration {
b.backoff.Steps = math.MaxInt32
b.backoff.Duration = b.initialBackoff
}
b.lastBackoffStart = b.clock.Now()
return b.backoff.Step()
}
// Backoff implements BackoffManager.Backoff; it returns a timer the caller can block on for exponential backoff.
// The returned timer must be drained before calling Backoff() a second time.
func (b *exponentialBackoffManagerImpl) Backoff() clock.Timer {
if b.backoffTimer == nil {
b.backoffTimer = b.clock.NewTimer(b.getNextBackoff())
} else {
b.backoffTimer.Reset(b.getNextBackoff())
}
return b.backoffTimer
}
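// backoffUntilExample is an illustrative sketch (not upstream code) wiring an
// exponential BackoffManager into BackoffUntil: delays start at 100ms, double
// up to a 10s cap with up to 100% jitter, and reset to 100ms after 2 minutes
// without a backoff.
func backoffUntilExample(stopCh <-chan struct{}, f func()) {
	m := NewExponentialBackoffManager(100*time.Millisecond, 10*time.Second, 2*time.Minute, 2.0, 1.0, clock.RealClock{})
	BackoffUntil(f, m, true, stopCh)
}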
type jitteredBackoffManagerImpl struct {
clock clock.Clock
duration time.Duration
jitter float64
backoffTimer clock.Timer
}
// NewJitteredBackoffManager returns a BackoffManager that backs off with the given duration plus the given
// jitter. If the jitter is negative, the backoff will not be jittered.
func NewJitteredBackoffManager(duration time.Duration, jitter float64, c clock.Clock) BackoffManager {
return &jitteredBackoffManagerImpl{
clock: c,
duration: duration,
jitter: jitter,
backoffTimer: nil,
}
}
func (j *jitteredBackoffManagerImpl) getNextBackoff() time.Duration {
jitteredPeriod := j.duration
if j.jitter > 0.0 {
jitteredPeriod = Jitter(j.duration, j.jitter)
}
return jitteredPeriod
}
// Backoff implements BackoffManager.Backoff; it returns a timer the caller can block on for jittered backoff.
// The returned timer must be drained before calling Backoff() a second time.
func (j *jitteredBackoffManagerImpl) Backoff() clock.Timer {
backoff := j.getNextBackoff()
if j.backoffTimer == nil {
j.backoffTimer = j.clock.NewTimer(backoff)
} else {
j.backoffTimer.Reset(backoff)
}
return j.backoffTimer
}
// ExponentialBackoff repeats a condition check with exponential backoff.
//
// It repeatedly checks the condition and then sleeps, using `backoff.Step()`
// to determine the length of the sleep and adjust Duration and Steps.
// Stops and returns as soon as:
// 1. the condition check returns true or an error,
// 2. `backoff.Steps` checks of the condition have been done, or
// 3. a sleep truncated by the cap on duration has been completed.
// In case (1) the returned error is what the condition function returned.
// In all other cases, ErrWaitTimeout is returned.
func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error {
for backoff.Steps > 0 {
if ok, err := runConditionWithCrashProtection(condition); err != nil || ok {
return err
}
if backoff.Steps == 1 {
break
}
time.Sleep(backoff.Step())
}
return ErrWaitTimeout
}
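// exponentialBackoffExample is an illustrative sketch (not upstream code):
// retry a flaky operation up to 5 times with exponentially growing sleeps
// (100ms, 200ms, 400ms, ...); a persistent failure surfaces as
// ErrWaitTimeout.
func exponentialBackoffExample(op func() error) error {
	backoff := Backoff{Duration: 100 * time.Millisecond, Factor: 2.0, Jitter: 0.1, Steps: 5}
	return ExponentialBackoff(backoff, func() (bool, error) {
		if err := op(); err != nil {
			return false, nil // treat as retryable; return err instead to abort
		}
		return true, nil
	})
}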
// Poll tries a condition func until it returns true, an error, or the timeout
// is reached.
//
// Poll always waits the interval before the run of 'condition'.
// 'condition' will always be invoked at least once.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// If you want to Poll something forever, see PollInfinite.
func Poll(interval, timeout time.Duration, condition ConditionFunc) error {
return PollWithContext(context.Background(), interval, timeout, condition.WithContext())
}
// PollWithContext tries a condition func until it returns true, an error,
// or when the context expires or the timeout is reached, whichever
// happens first.
//
// PollWithContext always waits the interval before the run of 'condition'.
// 'condition' will always be invoked at least once.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// If you want to Poll something forever, see PollInfinite.
func PollWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error {
return poll(ctx, false, poller(interval, timeout), condition)
}
// PollUntil tries a condition func until it returns true, an error or stopCh is
// closed.
//
// PollUntil always waits interval before the first run of 'condition'.
// 'condition' will always be invoked at least once.
func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
ctx, cancel := ContextForChannel(stopCh)
defer cancel()
return PollUntilWithContext(ctx, interval, condition.WithContext())
}
// PollUntilWithContext tries a condition func until it returns true,
// an error or the specified context is cancelled or expired.
//
// PollUntilWithContext always waits interval before the first run of 'condition'.
// 'condition' will always be invoked at least once.
func PollUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error {
return poll(ctx, false, poller(interval, 0), condition)
}
// PollInfinite tries a condition func until it returns true or an error
//
// PollInfinite always waits the interval before the run of 'condition'.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
func PollInfinite(interval time.Duration, condition ConditionFunc) error {
return PollInfiniteWithContext(context.Background(), interval, condition.WithContext())
}
// PollInfiniteWithContext tries a condition func until it returns true or an error
//
// PollInfiniteWithContext always waits the interval before the run of 'condition'.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
func PollInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error {
return poll(ctx, false, poller(interval, 0), condition)
}
// PollImmediate tries a condition func until it returns true, an error, or the timeout
// is reached.
//
// PollImmediate always checks 'condition' before waiting for the interval. 'condition'
// will always be invoked at least once.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// If you want to immediately Poll something forever, see PollImmediateInfinite.
func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error {
return PollImmediateWithContext(context.Background(), interval, timeout, condition.WithContext())
}
// PollImmediateWithContext tries a condition func until it returns true, an error,
// or the timeout is reached or the specified context expires, whichever happens first.
//
// PollImmediateWithContext always checks 'condition' before waiting for the interval.
// 'condition' will always be invoked at least once.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// If you want to immediately Poll something forever, see PollImmediateInfinite.
func PollImmediateWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error {
return poll(ctx, true, poller(interval, timeout), condition)
}
// PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed.
//
// PollImmediateUntil runs the 'condition' before waiting for the interval.
// 'condition' will always be invoked at least once.
func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
ctx, cancel := ContextForChannel(stopCh)
defer cancel()
return PollImmediateUntilWithContext(ctx, interval, condition.WithContext())
}
// PollImmediateUntilWithContext tries a condition func until it returns true,
// an error or the specified context is cancelled or expired.
//
// PollImmediateUntilWithContext runs the 'condition' before waiting for the interval.
// 'condition' will always be invoked at least once.
func PollImmediateUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error {
return poll(ctx, true, poller(interval, 0), condition)
}
// PollImmediateInfinite tries a condition func until it returns true or an error
//
// PollImmediateInfinite runs the 'condition' before waiting for the interval.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error {
return PollImmediateInfiniteWithContext(context.Background(), interval, condition.WithContext())
}
// PollImmediateInfiniteWithContext tries a condition func until it returns true
// or an error or the specified context gets cancelled or expired.
//
// PollImmediateInfiniteWithContext runs the 'condition' before waiting for the interval.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
func PollImmediateInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error {
return poll(ctx, true, poller(interval, 0), condition)
}
// Internally used; each of the public 'Poll*' functions defined in this
// package should invoke this internal function with appropriate parameters.
// ctx: the context specified by the caller; for infinite polling pass
// a context that never gets cancelled or expired.
// immediate: if true, the 'condition' will be invoked before waiting for the interval,
// in this case 'condition' will always be invoked at least once.
// wait: user specified WaitFunc function that controls at what interval the condition
// function should be invoked periodically and whether it is bound by a timeout.
// condition: user specified ConditionWithContextFunc function.
func poll(ctx context.Context, immediate bool, wait WaitWithContextFunc, condition ConditionWithContextFunc) error {
if immediate {
done, err := runConditionWithCrashProtectionWithContext(ctx, condition)
if err != nil {
return err
}
if done {
return nil
}
}
select {
case <-ctx.Done():
// returning ctx.Err() will break backward compatibility
return ErrWaitTimeout
default:
return WaitForWithContext(ctx, wait, condition)
}
}
// WaitFunc creates a channel that receives an item every time a test
// waitFunc creates a channel that receives an item every time a test
// should be executed and is closed when the last test should be invoked.
type WaitFunc func(done <-chan struct{}) <-chan struct{}
//
// Deprecated: Will be removed in a future release in favor of
// loopConditionUntilContext.
type waitFunc func(done <-chan struct{}) <-chan struct{}
// WithContext converts the WaitFunc to an equivalent WaitWithContextFunc
func (w WaitFunc) WithContext() WaitWithContextFunc {
func (w waitFunc) WithContext() waitWithContextFunc {
return func(ctx context.Context) <-chan struct{} {
return w(ctx.Done())
}
}
// WaitWithContextFunc creates a channel that receives an item every time a test
// waitWithContextFunc creates a channel that receives an item every time a test
// should be executed and is closed when the last test should be invoked.
//
// When the specified context gets cancelled or expires the function
// stops sending item and returns immediately.
type WaitWithContextFunc func(ctx context.Context) <-chan struct{}
//
// Deprecated: Will be removed in a future release in favor of
// loopConditionUntilContext.
type waitWithContextFunc func(ctx context.Context) <-chan struct{}
// WaitFor continually checks 'fn' as driven by 'wait'.
// waitForWithContext continually checks 'fn' as driven by 'wait'.
//
// WaitFor gets a channel from 'wait()', and then invokes 'fn' once for every value
// placed on the channel and once more when the channel is closed. If the channel is closed
// and 'fn' returns false without error, WaitFor returns ErrWaitTimeout.
//
// If 'fn' returns an error the loop ends and that error is returned. If
// 'fn' returns true the loop ends and nil is returned.
//
// ErrWaitTimeout will be returned if the 'done' channel is closed without fn ever
// returning true.
//
// When the done channel is closed, because the golang `select` statement is
// "uniform pseudo-random", the `fn` might still run one or multiple time,
// though eventually `WaitFor` will return.
func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error {
ctx, cancel := ContextForChannel(done)
defer cancel()
return WaitForWithContext(ctx, wait.WithContext(), fn.WithContext())
}
// WaitForWithContext continually checks 'fn' as driven by 'wait'.
//
// WaitForWithContext gets a channel from 'wait()'
// waitForWithContext gets a channel from 'wait()', and then invokes 'fn'
// once for every value placed on the channel and once more when the
// channel is closed. If the channel is closed and 'fn'
// returns false without error, WaitForWithContext returns ErrWaitTimeout.
// returns false without error, waitForWithContext returns ErrWaitTimeout.
//
// If 'fn' returns an error the loop ends and that error is returned. If
// 'fn' returns true the loop ends and nil is returned.
@@ -651,8 +193,11 @@ func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error {
//
// When the ctx.Done() channel is closed, because the golang `select` statement is
// "uniform pseudo-random", the `fn` might still run one or multiple times,
// though eventually `WaitForWithContext` will return.
func WaitForWithContext(ctx context.Context, wait WaitWithContextFunc, fn ConditionWithContextFunc) error {
// though eventually `waitForWithContext` will return.
//
// Deprecated: Will be removed in a future release in favor of
// loopConditionUntilContext.
func waitForWithContext(ctx context.Context, wait waitWithContextFunc, fn ConditionWithContextFunc) error {
waitCtx, cancel := context.WithCancel(context.Background())
defer cancel()
c := wait(waitCtx)
@@ -670,88 +215,9 @@ func WaitForWithContext(ctx context.Context, wait WaitWithContextFunc, fn Condit
return ErrWaitTimeout
}
case <-ctx.Done():
// returning ctx.Err() will break backward compatibility
// returning ctx.Err() will break backward compatibility, use new PollUntilContext*
// methods instead
return ErrWaitTimeout
}
}
}
// poller returns a WaitFunc that will send to the channel every interval until
// timeout has elapsed and then closes the channel.
//
// Over very short intervals you may receive no ticks before the channel is
// closed. A timeout of 0 is interpreted as an infinity, and in such a case
// it would be the caller's responsibility to close the done channel.
// Failure to do so would result in a leaked goroutine.
//
// Output ticks are not buffered. If the channel is not ready to receive an
// item, the tick is skipped.
func poller(interval, timeout time.Duration) WaitWithContextFunc {
return WaitWithContextFunc(func(ctx context.Context) <-chan struct{} {
ch := make(chan struct{})
go func() {
defer close(ch)
tick := time.NewTicker(interval)
defer tick.Stop()
var after <-chan time.Time
if timeout != 0 {
// time.After is more convenient, but it
// potentially leaves timers around much longer
// than necessary if we exit early.
timer := time.NewTimer(timeout)
after = timer.C
defer timer.Stop()
}
for {
select {
case <-tick.C:
// If the consumer isn't ready for this signal drop it and
// check the other channels.
select {
case ch <- struct{}{}:
default:
}
case <-after:
return
case <-ctx.Done():
return
}
}
}()
return ch
})
}
// ExponentialBackoffWithContext works with a request context and a Backoff. It ensures that the retry wait never
// exceeds the deadline specified by the request context.
func ExponentialBackoffWithContext(ctx context.Context, backoff Backoff, condition ConditionFunc) error {
for backoff.Steps > 0 {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
if ok, err := runConditionWithCrashProtection(condition); err != nil || ok {
return err
}
if backoff.Steps == 1 {
break
}
waitBeforeRetry := backoff.Step()
select {
case <-ctx.Done():
return ctx.Err()
case <-time.After(waitBeforeRetry):
}
}
return ErrWaitTimeout
}
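// exponentialBackoffWithContextExample is an illustrative sketch (not
// upstream code): the same retry loop, but it stops early and returns
// ctx.Err() when the request context is cancelled or its deadline passes.
func exponentialBackoffWithContextExample(ctx context.Context, op func() error) error {
	backoff := Backoff{Duration: 50 * time.Millisecond, Factor: 2.0, Steps: 4}
	return ExponentialBackoffWithContext(ctx, backoff, func() (bool, error) {
		return op() == nil, nil
	})
}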