Mirror of https://gitea.com/Lydanne/buildx.git (synced 2025-07-12 22:47:09 +08:00)
vendor: bump k8s to v0.25.4
Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
76  vendor/k8s.io/apimachinery/pkg/runtime/allocator.go  (generated, vendored, Normal file)

@@ -0,0 +1,76 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package runtime

import (
    "sync"
)

// AllocatorPool simply stores Allocator objects to avoid additional memory allocations
// by caching created but unused items for later reuse, relieving pressure on the garbage collector.
//
// Usage:
//
//  memoryAllocator := runtime.AllocatorPool.Get().(*runtime.Allocator)
//  defer runtime.AllocatorPool.Put(memoryAllocator)
//
// A note for future:
//
//  consider introducing multiple pools for storing buffers of different sizes
//  perhaps this could allow us to be more efficient.
var AllocatorPool = sync.Pool{
    New: func() interface{} {
        return &Allocator{}
    },
}

// Allocator knows how to allocate memory
// It exists to make the cost of object serialization cheaper.
// In some cases, it allows for allocating memory only once and then reusing it.
// This approach puts less load on GC and leads to less fragmented memory in general.
type Allocator struct {
    buf []byte
}

var _ MemoryAllocator = &Allocator{}

// Allocate reserves memory for n bytes only if the underlying array doesn't have enough capacity
// otherwise it returns previously allocated block of memory.
//
// Note that the returned array is not zeroed, it is the caller's
// responsibility to clean the memory if needed.
func (a *Allocator) Allocate(n uint64) []byte {
    if uint64(cap(a.buf)) >= n {
        a.buf = a.buf[:n]
        return a.buf
    }
    // grow the buffer
    size := uint64(2*cap(a.buf)) + n
    a.buf = make([]byte, size)
    a.buf = a.buf[:n]
    return a.buf
}

// SimpleAllocator a wrapper around make([]byte)
// conforms to the MemoryAllocator interface
type SimpleAllocator struct{}

var _ MemoryAllocator = &SimpleAllocator{}

func (sa *SimpleAllocator) Allocate(n uint64) []byte {
    return make([]byte, n)
}
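Note (editor's sketch, not part of the vendored diff): the pooled Allocator above is meant to feed the EncoderWithAllocator interface added to interfaces.go later in this commit. A minimal sketch of the intended call pattern, assuming the caller already holds an EncoderWithAllocator, an io.Writer, and a slice of objects (encodeAll, enc, w and objs are hypothetical names):

import (
    "io"

    "k8s.io/apimachinery/pkg/runtime"
)

// encodeAll reuses one pooled buffer across every serialization instead of
// allocating a fresh []byte per object.
func encodeAll(enc runtime.EncoderWithAllocator, w io.Writer, objs []runtime.Object) error {
    memAlloc := runtime.AllocatorPool.Get().(*runtime.Allocator)
    defer runtime.AllocatorPool.Put(memAlloc)
    for _, obj := range objs {
        // EncodeWithAllocator may hand back the same underlying buffer on each
        // call, so one object's bytes are written to w before the next reuses it.
        if err := enc.EncodeWithAllocator(obj, w, memAlloc); err != nil {
            return err
        }
    }
    return nil
}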
13  vendor/k8s.io/apimachinery/pkg/runtime/codec.go  (generated, vendored)

@@ -344,14 +344,15 @@ func NewMultiGroupVersioner(gv schema.GroupVersion, groupKinds ...schema.GroupKi
// Incoming kinds that match the provided groupKinds are preferred.
// Kind may be empty in the provided group kind, in which case any kind will match.
// Examples:
// gv=mygroup/__internal, groupKinds=mygroup/Foo, anothergroup/Bar
// KindForGroupVersionKinds(yetanother/v1/Baz, anothergroup/v1/Bar) -> mygroup/__internal/Bar (matched preferred group/kind)
//
// gv=mygroup/__internal, groupKinds=mygroup, anothergroup
// KindForGroupVersionKinds(yetanother/v1/Baz, anothergroup/v1/Bar) -> mygroup/__internal/Bar (matched preferred group)
// gv=mygroup/__internal, groupKinds=mygroup/Foo, anothergroup/Bar
// KindForGroupVersionKinds(yetanother/v1/Baz, anothergroup/v1/Bar) -> mygroup/__internal/Bar (matched preferred group/kind)
//
// gv=mygroup/__internal, groupKinds=mygroup, anothergroup
// KindForGroupVersionKinds(yetanother/v1/Baz, yetanother/v1/Bar) -> mygroup/__internal/Baz (no preferred group/kind match, uses first kind in list)
// gv=mygroup/__internal, groupKinds=mygroup, anothergroup
// KindForGroupVersionKinds(yetanother/v1/Baz, anothergroup/v1/Bar) -> mygroup/__internal/Bar (matched preferred group)
//
// gv=mygroup/__internal, groupKinds=mygroup, anothergroup
// KindForGroupVersionKinds(yetanother/v1/Baz, yetanother/v1/Bar) -> mygroup/__internal/Baz (no preferred group/kind match, uses first kind in list)
func NewCoercingMultiGroupVersioner(gv schema.GroupVersion, groupKinds ...schema.GroupKind) GroupVersioner {
    return multiGroupVersioner{target: gv, acceptedGroupKinds: groupKinds, coerce: true}
}
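Note (editor's sketch, not part of the vendored diff): a short illustration of the preference rules spelled out in the comment above, reusing the same made-up group and kind names; only the runtime and schema packages from this vendor tree are assumed.

target := schema.GroupVersion{Group: "mygroup", Version: "__internal"}
v := runtime.NewCoercingMultiGroupVersioner(target,
    schema.GroupKind{Group: "mygroup", Kind: "Foo"},
    schema.GroupKind{Group: "anothergroup", Kind: "Bar"},
)

gvk, ok := v.KindForGroupVersionKinds([]schema.GroupVersionKind{
    {Group: "yetanother", Version: "v1", Kind: "Baz"},
    {Group: "anothergroup", Version: "v1", Kind: "Bar"},
})
// ok is true and gvk is mygroup/__internal with Kind=Bar: the second incoming
// kind matched a preferred group/kind, so it is coerced into the target group version.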
2  vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go  (generated, vendored)

@@ -30,7 +30,7 @@ import (
// TODO: verify that the correct external version is chosen on encode...
func CheckCodec(c Codec, internalType Object, externalTypes ...schema.GroupVersionKind) error {
    if _, err := Encode(c, internalType); err != nil {
        return fmt.Errorf("Internal type not encodable: %v", err)
        return fmt.Errorf("internal type not encodable: %v", err)
    }
    for _, et := range externalTypes {
        typeMeta := TypeMeta{
262  vendor/k8s.io/apimachinery/pkg/runtime/converter.go  (generated, vendored)

@@ -22,6 +22,7 @@ import (
    "math"
    "os"
    "reflect"
    "sort"
    "strconv"
    "strings"
    "sync"
@@ -109,21 +110,141 @@ type unstructuredConverter struct {
// to Go types via reflection. It performs mismatch detection automatically and is intended for use by external
// test tools. Use DefaultUnstructuredConverter if you do not explicitly need mismatch detection.
func NewTestUnstructuredConverter(comparison conversion.Equalities) UnstructuredConverter {
    return NewTestUnstructuredConverterWithValidation(comparison)
}

// NewTestUnstrucutredConverterWithValidation allows for access to
// FromUnstructuredWithValidation from within tests.
func NewTestUnstructuredConverterWithValidation(comparison conversion.Equalities) *unstructuredConverter {
    return &unstructuredConverter{
        mismatchDetection: true,
        comparison:        comparison,
    }
}

// FromUnstructured converts an object from map[string]interface{} representation into a concrete type.
// fromUnstructuredContext provides options for informing the converter
// the state of its recursive walk through the conversion process.
type fromUnstructuredContext struct {
    // isInlined indicates whether the converter is currently in
    // an inlined field or not to determine whether it should
    // validate the matchedKeys yet or only collect them.
    // This should only be set from `structFromUnstructured`
    isInlined bool
    // matchedKeys is a stack of the set of all fields that exist in the
    // concrete go type of the object being converted into.
    // This should only be manipulated via `pushMatchedKeyTracker`,
    // `recordMatchedKey`, or `popAndVerifyMatchedKeys`
    matchedKeys []map[string]struct{}
    // parentPath collects the path that the conversion
    // takes as it traverses the unstructured json map.
    // It is used to report the full path to any unknown
    // fields that the converter encounters.
    parentPath []string
    // returnUnknownFields indicates whether or not
    // unknown field errors should be collected and
    // returned to the caller
    returnUnknownFields bool
    // unknownFieldErrors are the collection of
    // the full path to each unknown field in the
    // object.
    unknownFieldErrors []error
}

// pushMatchedKeyTracker adds a placeholder set for tracking
// matched keys for the given level. This should only be
// called from `structFromUnstructured`.
func (c *fromUnstructuredContext) pushMatchedKeyTracker() {
    if !c.returnUnknownFields {
        return
    }

    c.matchedKeys = append(c.matchedKeys, nil)
}

// recordMatchedKey initializes the last element of matchedKeys
// (if needed) and sets 'key'. This should only be called from
// `structFromUnstructured`.
func (c *fromUnstructuredContext) recordMatchedKey(key string) {
    if !c.returnUnknownFields {
        return
    }

    last := len(c.matchedKeys) - 1
    if c.matchedKeys[last] == nil {
        c.matchedKeys[last] = map[string]struct{}{}
    }
    c.matchedKeys[last][key] = struct{}{}
}

// popAndVerifyMatchedKeys pops the last element of matchedKeys,
// checks the matched keys against the data, and adds unknown
// field errors for any matched keys.
// `mapValue` is the value of sv containing all of the keys that exist at this level
// (ie. sv.MapKeys) in the source data.
// `matchedKeys` are all the keys found for that level in the destination object.
// This should only be called from `structFromUnstructured`.
func (c *fromUnstructuredContext) popAndVerifyMatchedKeys(mapValue reflect.Value) {
    if !c.returnUnknownFields {
        return
    }

    last := len(c.matchedKeys) - 1
    curMatchedKeys := c.matchedKeys[last]
    c.matchedKeys[last] = nil
    c.matchedKeys = c.matchedKeys[:last]
    for _, key := range mapValue.MapKeys() {
        if _, ok := curMatchedKeys[key.String()]; !ok {
            c.recordUnknownField(key.String())
        }
    }
}

func (c *fromUnstructuredContext) recordUnknownField(field string) {
    if !c.returnUnknownFields {
        return
    }

    pathLen := len(c.parentPath)
    c.pushKey(field)
    errPath := strings.Join(c.parentPath, "")
    c.parentPath = c.parentPath[:pathLen]
    c.unknownFieldErrors = append(c.unknownFieldErrors, fmt.Errorf(`unknown field "%s"`, errPath))
}

func (c *fromUnstructuredContext) pushIndex(index int) {
    if !c.returnUnknownFields {
        return
    }

    c.parentPath = append(c.parentPath, "[", strconv.Itoa(index), "]")
}

func (c *fromUnstructuredContext) pushKey(key string) {
    if !c.returnUnknownFields {
        return
    }

    if len(c.parentPath) > 0 {
        c.parentPath = append(c.parentPath, ".")
    }
    c.parentPath = append(c.parentPath, key)

}

// FromUnstructuredWIthValidation converts an object from map[string]interface{} representation into a concrete type.
// It uses encoding/json/Unmarshaler if object implements it or reflection if not.
func (c *unstructuredConverter) FromUnstructured(u map[string]interface{}, obj interface{}) error {
// It takes a validationDirective that indicates how to behave when it encounters unknown fields.
func (c *unstructuredConverter) FromUnstructuredWithValidation(u map[string]interface{}, obj interface{}, returnUnknownFields bool) error {
    t := reflect.TypeOf(obj)
    value := reflect.ValueOf(obj)
    if t.Kind() != reflect.Ptr || value.IsNil() {
    if t.Kind() != reflect.Pointer || value.IsNil() {
        return fmt.Errorf("FromUnstructured requires a non-nil pointer to an object, got %v", t)
    }
    err := fromUnstructured(reflect.ValueOf(u), value.Elem())

    fromUnstructuredContext := &fromUnstructuredContext{
        returnUnknownFields: returnUnknownFields,
    }
    err := fromUnstructured(reflect.ValueOf(u), value.Elem(), fromUnstructuredContext)
    if c.mismatchDetection {
        newObj := reflect.New(t.Elem()).Interface()
        newErr := fromUnstructuredViaJSON(u, newObj)
@@ -134,7 +255,23 @@ func (c *unstructuredConverter) FromUnstructured(u map[string]interface{}, obj i
            klog.Fatalf("FromUnstructured mismatch\nobj1: %#v\nobj2: %#v", obj, newObj)
        }
    }
    return err
    if err != nil {
        return err
    }
    if returnUnknownFields && len(fromUnstructuredContext.unknownFieldErrors) > 0 {
        sort.Slice(fromUnstructuredContext.unknownFieldErrors, func(i, j int) bool {
            return fromUnstructuredContext.unknownFieldErrors[i].Error() <
                fromUnstructuredContext.unknownFieldErrors[j].Error()
        })
        return NewStrictDecodingError(fromUnstructuredContext.unknownFieldErrors)
    }
    return nil
}

// FromUnstructured converts an object from map[string]interface{} representation into a concrete type.
// It uses encoding/json/Unmarshaler if object implements it or reflection if not.
func (c *unstructuredConverter) FromUnstructured(u map[string]interface{}, obj interface{}) error {
    return c.FromUnstructuredWithValidation(u, obj, false)
}
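Note (editor's sketch, not part of the vendored diff): outside of the test helpers, the usual entry point is runtime.DefaultUnstructuredConverter. The sketch below assumes the k8s.io/api core/v1 types are also vendored; the ConfigMap literal and the misspelled "dataa" key are purely illustrative.

import (
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/runtime"
)

func demoFromUnstructured() (*corev1.ConfigMap, error) {
    u := map[string]interface{}{
        "apiVersion": "v1",
        "kind":       "ConfigMap",
        "metadata":   map[string]interface{}{"name": "demo"},
        "dataa":      map[string]interface{}{"k": "v"}, // typo for "data": an unknown field
    }
    var cm corev1.ConfigMap
    // The non-strict path silently drops the unknown "dataa" key; the new
    // FromUnstructuredWithValidation(u, &cm, true) variant would instead return a
    // strict decoding error naming `unknown field "dataa"`, built by the
    // fromUnstructuredContext machinery above.
    if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u, &cm); err != nil {
        return nil, err
    }
    return &cm, nil
}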
func fromUnstructuredViaJSON(u map[string]interface{}, obj interface{}) error {
@@ -145,7 +282,7 @@ func fromUnstructuredViaJSON(u map[string]interface{}, obj interface{}) error {
    return json.Unmarshal(data, obj)
}

func fromUnstructured(sv, dv reflect.Value) error {
func fromUnstructured(sv, dv reflect.Value, ctx *fromUnstructuredContext) error {
    sv = unwrapInterface(sv)
    if !sv.IsValid() {
        dv.Set(reflect.Zero(dv.Type()))
@@ -154,7 +291,7 @@ func fromUnstructured(sv, dv reflect.Value) error {
    st, dt := sv.Type(), dv.Type()

    switch dt.Kind() {
    case reflect.Map, reflect.Slice, reflect.Ptr, reflect.Struct, reflect.Interface:
    case reflect.Map, reflect.Slice, reflect.Pointer, reflect.Struct, reflect.Interface:
        // Those require non-trivial conversion.
    default:
        // This should handle all simple types.
@@ -213,18 +350,19 @@ func fromUnstructured(sv, dv reflect.Value) error {

    switch dt.Kind() {
    case reflect.Map:
        return mapFromUnstructured(sv, dv)
        return mapFromUnstructured(sv, dv, ctx)
    case reflect.Slice:
        return sliceFromUnstructured(sv, dv)
    case reflect.Ptr:
        return pointerFromUnstructured(sv, dv)
        return sliceFromUnstructured(sv, dv, ctx)
    case reflect.Pointer:
        return pointerFromUnstructured(sv, dv, ctx)
    case reflect.Struct:
        return structFromUnstructured(sv, dv)
        return structFromUnstructured(sv, dv, ctx)
    case reflect.Interface:
        return interfaceFromUnstructured(sv, dv)
    default:
        return fmt.Errorf("unrecognized type: %v", dt.Kind())
    }

}

func fieldInfoFromField(structType reflect.Type, field int) *fieldInfo {
@@ -275,7 +413,7 @@ func unwrapInterface(v reflect.Value) reflect.Value {
    return v
}

func mapFromUnstructured(sv, dv reflect.Value) error {
func mapFromUnstructured(sv, dv reflect.Value, ctx *fromUnstructuredContext) error {
    st, dt := sv.Type(), dv.Type()
    if st.Kind() != reflect.Map {
        return fmt.Errorf("cannot restore map from %v", st.Kind())
@@ -293,7 +431,7 @@ func mapFromUnstructured(sv, dv reflect.Value) error {
    for _, key := range sv.MapKeys() {
        value := reflect.New(dt.Elem()).Elem()
        if val := unwrapInterface(sv.MapIndex(key)); val.IsValid() {
            if err := fromUnstructured(val, value); err != nil {
            if err := fromUnstructured(val, value, ctx); err != nil {
                return err
            }
        } else {
@@ -308,7 +446,7 @@ func mapFromUnstructured(sv, dv reflect.Value) error {
    return nil
}

func sliceFromUnstructured(sv, dv reflect.Value) error {
func sliceFromUnstructured(sv, dv reflect.Value, ctx *fromUnstructuredContext) error {
    st, dt := sv.Type(), dv.Type()
    if st.Kind() == reflect.String && dt.Elem().Kind() == reflect.Uint8 {
        // We store original []byte representation as string.
@@ -340,56 +478,88 @@ func sliceFromUnstructured(sv, dv reflect.Value) error {
        return nil
    }
    dv.Set(reflect.MakeSlice(dt, sv.Len(), sv.Cap()))

    pathLen := len(ctx.parentPath)
    defer func() {
        ctx.parentPath = ctx.parentPath[:pathLen]
    }()
    for i := 0; i < sv.Len(); i++ {
        if err := fromUnstructured(sv.Index(i), dv.Index(i)); err != nil {
        ctx.pushIndex(i)
        if err := fromUnstructured(sv.Index(i), dv.Index(i), ctx); err != nil {
            return err
        }
        ctx.parentPath = ctx.parentPath[:pathLen]
    }
    return nil
}

func pointerFromUnstructured(sv, dv reflect.Value) error {
func pointerFromUnstructured(sv, dv reflect.Value, ctx *fromUnstructuredContext) error {
    st, dt := sv.Type(), dv.Type()

    if st.Kind() == reflect.Ptr && sv.IsNil() {
    if st.Kind() == reflect.Pointer && sv.IsNil() {
        dv.Set(reflect.Zero(dt))
        return nil
    }
    dv.Set(reflect.New(dt.Elem()))
    switch st.Kind() {
    case reflect.Ptr, reflect.Interface:
        return fromUnstructured(sv.Elem(), dv.Elem())
    case reflect.Pointer, reflect.Interface:
        return fromUnstructured(sv.Elem(), dv.Elem(), ctx)
    default:
        return fromUnstructured(sv, dv.Elem())
        return fromUnstructured(sv, dv.Elem(), ctx)
    }
}

func structFromUnstructured(sv, dv reflect.Value) error {
func structFromUnstructured(sv, dv reflect.Value, ctx *fromUnstructuredContext) error {
    st, dt := sv.Type(), dv.Type()
    if st.Kind() != reflect.Map {
        return fmt.Errorf("cannot restore struct from: %v", st.Kind())
    }

    pathLen := len(ctx.parentPath)
    svInlined := ctx.isInlined
    defer func() {
        ctx.parentPath = ctx.parentPath[:pathLen]
        ctx.isInlined = svInlined
    }()
    if !svInlined {
        ctx.pushMatchedKeyTracker()
    }
    for i := 0; i < dt.NumField(); i++ {
        fieldInfo := fieldInfoFromField(dt, i)
        fv := dv.Field(i)

        if len(fieldInfo.name) == 0 {
            // This field is inlined.
            if err := fromUnstructured(sv, fv); err != nil {
            // This field is inlined, recurse into fromUnstructured again
            // with the same set of matched keys.
            ctx.isInlined = true
            if err := fromUnstructured(sv, fv, ctx); err != nil {
                return err
            }
            ctx.isInlined = svInlined
        } else {
            // This field is not inlined so we recurse into
            // child field of sv corresponding to field i of
            // dv, with a new set of matchedKeys and updating
            // the parentPath to indicate that we are one level
            // deeper.
            ctx.recordMatchedKey(fieldInfo.name)
            value := unwrapInterface(sv.MapIndex(fieldInfo.nameValue))
            if value.IsValid() {
                if err := fromUnstructured(value, fv); err != nil {
                ctx.isInlined = false
                ctx.pushKey(fieldInfo.name)
                if err := fromUnstructured(value, fv, ctx); err != nil {
                    return err
                }
                ctx.parentPath = ctx.parentPath[:pathLen]
                ctx.isInlined = svInlined
            } else {
                fv.Set(reflect.Zero(fv.Type()))
            }
        }
    }
    if !svInlined {
        ctx.popAndVerifyMatchedKeys(sv)
    }
    return nil
}

@@ -409,7 +579,7 @@ func (c *unstructuredConverter) ToUnstructured(obj interface{}) (map[string]inte
    } else {
        t := reflect.TypeOf(obj)
        value := reflect.ValueOf(obj)
        if t.Kind() != reflect.Ptr || value.IsNil() {
        if t.Kind() != reflect.Pointer || value.IsNil() {
            return nil, fmt.Errorf("ToUnstructured requires a non-nil pointer to an object, got %v", t)
        }
        u = map[string]interface{}{}
@@ -516,7 +686,7 @@ func toUnstructured(sv, dv reflect.Value) error {
        return mapToUnstructured(sv, dv)
    case reflect.Slice:
        return sliceToUnstructured(sv, dv)
    case reflect.Ptr:
    case reflect.Pointer:
        return pointerToUnstructured(sv, dv)
    case reflect.Struct:
        return structToUnstructured(sv, dv)
@@ -535,24 +705,13 @@ func mapToUnstructured(sv, dv reflect.Value) error {
    }
    if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 {
        if st.Key().Kind() == reflect.String {
            switch st.Elem().Kind() {
            // TODO It should be possible to reuse the slice for primitive types.
            // However, it is panicing in the following form.
            // case reflect.String, reflect.Bool,
            // reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
            // reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
            // sv.Set(sv)
            // return nil
            default:
                // We need to do a proper conversion.
            }
            dv.Set(reflect.MakeMap(mapStringInterfaceType))
            dv = dv.Elem()
            dt = dv.Type()
        }
        dv.Set(reflect.MakeMap(mapStringInterfaceType))
        dv = dv.Elem()
        dt = dv.Type()
    }
    if dt.Kind() != reflect.Map {
        return fmt.Errorf("cannot convert struct to: %v", dt.Kind())
        return fmt.Errorf("cannot convert map to: %v", dt.Kind())
    }

    if !st.Key().AssignableTo(dt.Key()) && !st.Key().ConvertibleTo(dt.Key()) {
@@ -593,20 +752,9 @@ func sliceToUnstructured(sv, dv reflect.Value) error {
        return nil
    }
    if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 {
        switch st.Elem().Kind() {
        // TODO It should be possible to reuse the slice for primitive types.
        // However, it is panicing in the following form.
        // case reflect.String, reflect.Bool,
        // reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
        // reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
        // sv.Set(sv)
        // return nil
        default:
            // We need to do a proper conversion.
            dv.Set(reflect.MakeSlice(reflect.SliceOf(dt), sv.Len(), sv.Cap()))
            dv = dv.Elem()
            dt = dv.Type()
        }
        dv.Set(reflect.MakeSlice(reflect.SliceOf(dt), sv.Len(), sv.Cap()))
        dv = dv.Elem()
        dt = dv.Type()
    }
    if dt.Kind() != reflect.Slice {
        return fmt.Errorf("cannot convert slice to: %v", dt.Kind())
@@ -642,7 +790,7 @@ func isZero(v reflect.Value) bool {
    case reflect.Map, reflect.Slice:
        // TODO: It seems that 0-len maps are ignored in it.
        return v.IsNil() || v.Len() == 0
    case reflect.Ptr, reflect.Interface:
    case reflect.Pointer, reflect.Interface:
        return v.IsNil()
    }
    return false
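Note (editor's sketch, not part of the vendored diff): the reverse direction, ToUnstructured, produces the map[string]interface{} form that the functions above consume. Same assumption that k8s.io/api is vendored; the ConfigMap is illustrative.

cm := &corev1.ConfigMap{Data: map[string]string{"k": "v"}}
cm.APIVersion, cm.Kind, cm.Name = "v1", "ConfigMap", "demo"

// u is a JSON-compatible map; empty fields are omitted via isZero above.
u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(cm)
if err != nil {
    panic(err)
}
_ = u["metadata"].(map[string]interface{})["name"] // "demo"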
33  vendor/k8s.io/apimachinery/pkg/runtime/error.go  (generated, vendored)

@@ -19,6 +19,7 @@ package runtime
import (
    "fmt"
    "reflect"
    "strings"

    "k8s.io/apimachinery/pkg/runtime/schema"
)
@@ -124,20 +125,30 @@ func IsMissingVersion(err error) bool {
// strictDecodingError is a base error type that is returned by a strict Decoder such
// as UniversalStrictDecoder.
type strictDecodingError struct {
    message string
    data    string
    errors []error
}

// NewStrictDecodingError creates a new strictDecodingError object.
func NewStrictDecodingError(message string, data string) error {
func NewStrictDecodingError(errors []error) error {
    return &strictDecodingError{
        message: message,
        data:    data,
        errors: errors,
    }
}

func (e *strictDecodingError) Error() string {
    return fmt.Sprintf("strict decoder error for %s: %s", e.data, e.message)
    var s strings.Builder
    s.WriteString("strict decoding error: ")
    for i, err := range e.errors {
        if i != 0 {
            s.WriteString(", ")
        }
        s.WriteString(err.Error())
    }
    return s.String()
}

func (e *strictDecodingError) Errors() []error {
    return e.errors
}

// IsStrictDecodingError returns true if the error indicates that the provided object
@@ -149,3 +160,13 @@ func IsStrictDecodingError(err error) bool {
    _, ok := err.(*strictDecodingError)
    return ok
}

// AsStrictDecodingError returns a strict decoding error
// containing all the strictness violations.
func AsStrictDecodingError(err error) (*strictDecodingError, bool) {
    if err == nil {
        return nil, false
    }
    strictErr, ok := err.(*strictDecodingError)
    return strictErr, ok
}
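Note (editor's sketch, not part of the vendored diff): with the error now carrying a slice of violations, a caller can keep the decoded object and surface every problem. A sketch of the checking pattern, assuming err came back from one of the strict decoders in this package:

if err != nil {
    strictErr, ok := runtime.AsStrictDecodingError(err)
    if !ok {
        return err // a real decoding failure, not a strictness violation
    }
    // Decoding itself succeeded; report each unknown/duplicate-field error.
    for _, violation := range strictErr.Errors() {
        fmt.Fprintln(os.Stderr, "strict decoding:", violation)
    }
}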
50  vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go  (generated, vendored)

@@ -137,31 +137,31 @@ func init() {
}

var fileDescriptor_9d3c45d7f546725c = []byte{
    // 378 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x8f, 0x4f, 0xab, 0x13, 0x31,
    0x14, 0xc5, 0x27, 0xaf, 0x85, 0x3e, 0xd3, 0xc2, 0x93, 0xb8, 0x70, 0x74, 0x91, 0x79, 0x74, 0xe5,
    0x5b, 0xbc, 0x04, 0x1e, 0x08, 0x6e, 0x3b, 0xa5, 0xa0, 0x88, 0x20, 0xc1, 0x3f, 0xe0, 0xca, 0x74,
    0x26, 0x4e, 0xc3, 0xd0, 0x9b, 0x21, 0xcd, 0x38, 0x76, 0xe7, 0x47, 0xf0, 0x63, 0x75, 0xd9, 0x65,
    0x57, 0xc5, 0x8e, 0x1f, 0xc2, 0xad, 0x34, 0x4d, 0x6b, 0xd5, 0x85, 0xbb, 0xe4, 0x9e, 0xf3, 0x3b,
    0xf7, 0x1e, 0xfc, 0xbc, 0x7c, 0xb6, 0x60, 0xda, 0xf0, 0xb2, 0x9e, 0x2a, 0x0b, 0xca, 0xa9, 0x05,
    0xff, 0xac, 0x20, 0x37, 0x96, 0x07, 0x41, 0x56, 0x7a, 0x2e, 0xb3, 0x99, 0x06, 0x65, 0x97, 0xbc,
    0x2a, 0x0b, 0x6e, 0x6b, 0x70, 0x7a, 0xae, 0x78, 0xa1, 0x40, 0x59, 0xe9, 0x54, 0xce, 0x2a, 0x6b,
    0x9c, 0x21, 0xc9, 0x01, 0x60, 0xe7, 0x00, 0xab, 0xca, 0x82, 0x05, 0xe0, 0xf1, 0x6d, 0xa1, 0xdd,
    0xac, 0x9e, 0xb2, 0xcc, 0xcc, 0x79, 0x61, 0x0a, 0xc3, 0x3d, 0x37, 0xad, 0x3f, 0xf9, 0x9f, 0xff,
    0xf8, 0xd7, 0x21, 0x6f, 0x78, 0x83, 0x07, 0x42, 0x36, 0x93, 0x2f, 0x4e, 0xc1, 0x42, 0x1b, 0x20,
    0x8f, 0x70, 0xc7, 0xca, 0x26, 0x46, 0xd7, 0xe8, 0xc9, 0x20, 0xed, 0xb5, 0xdb, 0xa4, 0x23, 0x64,
    0x23, 0xf6, 0xb3, 0xe1, 0x47, 0x7c, 0xf9, 0x66, 0x59, 0xa9, 0x57, 0xca, 0x49, 0x72, 0x87, 0xb1,
    0xac, 0xf4, 0x3b, 0x65, 0xf7, 0x90, 0x77, 0xdf, 0x4b, 0xc9, 0x6a, 0x9b, 0x44, 0xed, 0x36, 0xc1,
    0xa3, 0xd7, 0x2f, 0x82, 0x22, 0xce, 0x5c, 0xe4, 0x1a, 0x77, 0x4b, 0x0d, 0x79, 0x7c, 0xe1, 0xdd,
    0x83, 0xe0, 0xee, 0xbe, 0xd4, 0x90, 0x0b, 0xaf, 0x0c, 0x7f, 0x22, 0xdc, 0x7b, 0x0b, 0x25, 0x98,
    0x06, 0xc8, 0x7b, 0x7c, 0xe9, 0xc2, 0x36, 0x9f, 0xdf, 0xbf, 0xbb, 0x61, 0xff, 0xe9, 0xce, 0x8e,
    0xe7, 0xa5, 0xf7, 0x43, 0xf8, 0xe9, 0x60, 0x71, 0x0a, 0x3b, 0x36, 0xbc, 0xf8, 0xb7, 0x21, 0x19,
    0xe1, 0xab, 0xcc, 0x80, 0x53, 0xe0, 0x26, 0x90, 0x99, 0x5c, 0x43, 0x11, 0x77, 0xfc, 0xb1, 0x0f,
    0x43, 0xde, 0xd5, 0xf8, 0x4f, 0x59, 0xfc, 0xed, 0x27, 0x4f, 0x71, 0x3f, 0x8c, 0xf6, 0xab, 0xe3,
    0xae, 0xc7, 0x1f, 0x04, 0xbc, 0x3f, 0xfe, 0x2d, 0x89, 0x73, 0x5f, 0x7a, 0xbb, 0xda, 0xd1, 0x68,
    0xbd, 0xa3, 0xd1, 0x66, 0x47, 0xa3, 0xaf, 0x2d, 0x45, 0xab, 0x96, 0xa2, 0x75, 0x4b, 0xd1, 0xa6,
    0xa5, 0xe8, 0x7b, 0x4b, 0xd1, 0xb7, 0x1f, 0x34, 0xfa, 0xd0, 0x0b, 0x45, 0x7f, 0x05, 0x00, 0x00,
    0xff, 0xff, 0xe3, 0x33, 0x18, 0x0b, 0x50, 0x02, 0x00, 0x00,
    // 380 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xcf, 0xaa, 0x13, 0x31,
    0x14, 0xc6, 0x27, 0xb7, 0x85, 0x7b, 0x4d, 0x0b, 0x57, 0xe2, 0xc2, 0xd1, 0x45, 0xe6, 0xd2, 0x95,
    0x77, 0x61, 0x02, 0x17, 0x04, 0xb7, 0x9d, 0x52, 0x50, 0x44, 0x90, 0xe0, 0x1f, 0x70, 0x65, 0x3a,
    0x13, 0xa7, 0x61, 0xe8, 0xc9, 0x90, 0x66, 0x1c, 0xbb, 0xf3, 0x11, 0x7c, 0xac, 0x2e, 0xbb, 0xec,
    0xaa, 0xd8, 0xf1, 0x21, 0xdc, 0x4a, 0xd3, 0xb4, 0x56, 0x5d, 0x74, 0x97, 0x73, 0xbe, 0xef, 0xf7,
    0x9d, 0x73, 0x20, 0xf8, 0x45, 0xf9, 0x7c, 0xce, 0xb4, 0xe1, 0x65, 0x3d, 0x51, 0x16, 0x94, 0x53,
    0x73, 0xfe, 0x45, 0x41, 0x6e, 0x2c, 0x0f, 0x82, 0xac, 0xf4, 0x4c, 0x66, 0x53, 0x0d, 0xca, 0x2e,
    0x78, 0x55, 0x16, 0xdc, 0xd6, 0xe0, 0xf4, 0x4c, 0xf1, 0x42, 0x81, 0xb2, 0xd2, 0xa9, 0x9c, 0x55,
    0xd6, 0x38, 0x43, 0x92, 0x3d, 0xc0, 0x4e, 0x01, 0x56, 0x95, 0x05, 0x0b, 0xc0, 0xe3, 0xa7, 0x85,
    0x76, 0xd3, 0x7a, 0xc2, 0x32, 0x33, 0xe3, 0x85, 0x29, 0x0c, 0xf7, 0xdc, 0xa4, 0xfe, 0xec, 0x2b,
    0x5f, 0xf8, 0xd7, 0x3e, 0x6f, 0x70, 0x8b, 0xfb, 0x42, 0x36, 0xe3, 0xaf, 0x4e, 0xc1, 0x5c, 0x1b,
    0x20, 0x8f, 0x70, 0xc7, 0xca, 0x26, 0x46, 0x37, 0xe8, 0x49, 0x3f, 0xbd, 0x6c, 0x37, 0x49, 0x47,
    0xc8, 0x46, 0xec, 0x7a, 0x83, 0x4f, 0xf8, 0xea, 0xed, 0xa2, 0x52, 0xaf, 0x95, 0x93, 0xe4, 0x0e,
    0x63, 0x59, 0xe9, 0xf7, 0xca, 0xee, 0x20, 0xef, 0xbe, 0x97, 0x92, 0xe5, 0x26, 0x89, 0xda, 0x4d,
    0x82, 0x87, 0x6f, 0x5e, 0x06, 0x45, 0x9c, 0xb8, 0xc8, 0x0d, 0xee, 0x96, 0x1a, 0xf2, 0xf8, 0xc2,
    0xbb, 0xfb, 0xc1, 0xdd, 0x7d, 0xa5, 0x21, 0x17, 0x5e, 0x19, 0xfc, 0x42, 0xf8, 0xf2, 0x1d, 0x94,
    0x60, 0x1a, 0x20, 0x1f, 0xf0, 0x95, 0x0b, 0xd3, 0x7c, 0x7e, 0xef, 0xee, 0x96, 0x9d, 0xb9, 0x9d,
    0x1d, 0xd6, 0x4b, 0xef, 0x87, 0xf0, 0xe3, 0xc2, 0xe2, 0x18, 0x76, 0xb8, 0xf0, 0xe2, 0xff, 0x0b,
    0xc9, 0x10, 0x5f, 0x67, 0x06, 0x9c, 0x02, 0x37, 0x86, 0xcc, 0xe4, 0x1a, 0x8a, 0xb8, 0xe3, 0x97,
    0x7d, 0x18, 0xf2, 0xae, 0x47, 0x7f, 0xcb, 0xe2, 0x5f, 0x3f, 0x79, 0x86, 0x7b, 0xa1, 0xb5, 0x1b,
    0x1d, 0x77, 0x3d, 0xfe, 0x20, 0xe0, 0xbd, 0xd1, 0x1f, 0x49, 0x9c, 0xfa, 0xd2, 0xf1, 0x72, 0x4b,
    0xa3, 0xd5, 0x96, 0x46, 0xeb, 0x2d, 0x8d, 0xbe, 0xb5, 0x14, 0x2d, 0x5b, 0x8a, 0x56, 0x2d, 0x45,
    0xeb, 0x96, 0xa2, 0x1f, 0x2d, 0x45, 0xdf, 0x7f, 0xd2, 0xe8, 0x63, 0x72, 0xe6, 0xb7, 0xfc, 0x0e,
    0x00, 0x00, 0xff, 0xff, 0x1f, 0x32, 0xd5, 0x68, 0x68, 0x02, 0x00, 0x00,
}

func (m *RawExtension) Marshal() (dAtA []byte, err error) {
61  vendor/k8s.io/apimachinery/pkg/runtime/generated.proto  (generated, vendored)

@@ -22,7 +22,7 @@ syntax = "proto2";
package k8s.io.apimachinery.pkg.runtime;

// Package-wide variables from generator "generated".
option go_package = "runtime";
option go_package = "k8s.io/apimachinery/pkg/runtime";

// RawExtension is used to hold extensions in external versions.
//
@@ -31,32 +31,37 @@ option go_package = "runtime";
// various plugin types.
//
// // Internal package:
// type MyAPIObject struct {
//  runtime.TypeMeta `json:",inline"`
//  MyPlugin runtime.Object `json:"myPlugin"`
// }
// type PluginA struct {
//  AOption string `json:"aOption"`
// }
//
// type MyAPIObject struct {
//  runtime.TypeMeta `json:",inline"`
//  MyPlugin runtime.Object `json:"myPlugin"`
// }
//
// type PluginA struct {
//  AOption string `json:"aOption"`
// }
//
// // External package:
// type MyAPIObject struct {
//  runtime.TypeMeta `json:",inline"`
//  MyPlugin runtime.RawExtension `json:"myPlugin"`
// }
// type PluginA struct {
//  AOption string `json:"aOption"`
// }
//
// type MyAPIObject struct {
//  runtime.TypeMeta `json:",inline"`
//  MyPlugin runtime.RawExtension `json:"myPlugin"`
// }
//
// type PluginA struct {
//  AOption string `json:"aOption"`
// }
//
// // On the wire, the JSON will look something like this:
// {
//  "kind":"MyAPIObject",
//  "apiVersion":"v1",
//  "myPlugin": {
//   "kind":"PluginA",
//   "aOption":"foo",
//  },
// }
//
// {
//  "kind":"MyAPIObject",
//  "apiVersion":"v1",
//  "myPlugin": {
//   "kind":"PluginA",
//   "aOption":"foo",
//  },
// }
//
// So what happens? Decode first uses json or yaml to unmarshal the serialized data into
// your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked.
@@ -78,10 +83,12 @@ message RawExtension {

// TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type,
// like this:
// type MyAwesomeAPIObject struct {
//  runtime.TypeMeta `json:",inline"`
//  ... // other fields
// }
//
// type MyAwesomeAPIObject struct {
//  runtime.TypeMeta `json:",inline"`
//  ... // other fields
// }
//
// func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind
//
// TypeMeta is provided here for convenience. You may use it directly from this package or define
40  vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go  (generated, vendored)

@@ -69,6 +69,24 @@ type Encoder interface {
    Identifier() Identifier
}

// MemoryAllocator is responsible for allocating memory.
// By encapsulating memory allocation into its own interface, we can reuse the memory
// across many operations in places we know it can significantly improve the performance.
type MemoryAllocator interface {
    // Allocate reserves memory for n bytes.
    // Note that implementations of this method are not required to zero the returned array.
    // It is the caller's responsibility to clean the memory if needed.
    Allocate(n uint64) []byte
}

// EncoderWithAllocator serializes objects in a way that allows callers to manage any additional memory allocations.
type EncoderWithAllocator interface {
    Encoder
    // EncodeWithAllocator writes an object to a stream as Encode does.
    // In addition, it allows for providing a memory allocator for efficient memory usage during object serialization
    EncodeWithAllocator(obj Object, w io.Writer, memAlloc MemoryAllocator) error
}

// Decoder attempts to load an object from data.
type Decoder interface {
    // Decode attempts to deserialize the provided data using either the innate typing of the scheme or the
@@ -125,6 +143,9 @@ type SerializerInfo struct {
    // PrettySerializer, if set, can serialize this object in a form biased towards
    // readability.
    PrettySerializer Serializer
    // StrictSerializer, if set, deserializes this object strictly,
    // erring on unknown fields.
    StrictSerializer Serializer
    // StreamSerializer, if set, describes the streaming serialization format
    // for this media type.
    StreamSerializer *StreamSerializerInfo
@@ -150,7 +171,7 @@ type NegotiatedSerializer interface {
    // EncoderForVersion returns an encoder that ensures objects being written to the provided
    // serializer are in the provided group version.
    EncoderForVersion(serializer Encoder, gv GroupVersioner) Encoder
    // DecoderForVersion returns a decoder that ensures objects being read by the provided
    // DecoderToVersion returns a decoder that ensures objects being read by the provided
    // serializer are in the provided group version by default.
    DecoderToVersion(serializer Decoder, gv GroupVersioner) Decoder
}
@@ -204,6 +225,12 @@ type NestedObjectEncoder interface {

// NestedObjectDecoder is an optional interface that objects may implement to be given
// an opportunity to decode any nested Objects / RawExtensions during serialization.
// It is possible for DecodeNestedObjects to return a non-nil error but for the decoding
// to have succeeded in the case of strict decoding errors (e.g. unknown/duplicate fields).
// As such it is important for callers of DecodeNestedObjects to check to confirm whether
// an error is a runtime.StrictDecodingError before short circuiting.
// Similarly, implementations of DecodeNestedObjects should ensure that a runtime.StrictDecodingError
// is only returned when the rest of decoding has succeeded.
type NestedObjectDecoder interface {
    DecodeNestedObjects(d Decoder) error
}
@@ -281,14 +308,11 @@ type ResourceVersioner interface {
    ResourceVersion(obj Object) (string, error)
}

// SelfLinker provides methods for setting and retrieving the SelfLink field of an API object.
type SelfLinker interface {
    SetSelfLink(obj Object, selfLink string) error
    SelfLink(obj Object) (string, error)

    // Knowing Name is sometimes necessary to use a SelfLinker.
// Namer provides methods for retrieving name and namespace of an API object.
type Namer interface {
    // Name returns the name of a given object.
    Name(obj Object) (string, error)
    // Knowing Namespace is sometimes necessary to use a SelfLinker
    // Namespace returns the name of a given object.
    Namespace(obj Object) (string, error)
}
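Note (editor's sketch, not part of the vendored diff): the expanded NestedObjectDecoder comment implies a specific caller pattern. A sketch of it, assuming obj may implement NestedObjectDecoder and d is some runtime.Decoder:

if nested, ok := obj.(runtime.NestedObjectDecoder); ok {
    if err := nested.DecodeNestedObjects(d); err != nil {
        // Only short-circuit on real failures; a strict decoding error means
        // decoding succeeded and the violations can be surfaced separately.
        if !runtime.IsStrictDecodingError(err) {
            return err
        }
    }
}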
26  vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go  (generated, vendored)

@@ -43,17 +43,17 @@ func init() {
}

var fileDescriptor_0462724132518e0d = []byte{
    // 185 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0xcc, 0xaf, 0x6e, 0xc3, 0x30,
    0x10, 0xc7, 0x71, 0x9b, 0x0c, 0x0c, 0x0e, 0x0e, 0x1c, 0x1c, 0xda, 0x7c, 0x74, 0xb8, 0x2f, 0x50,
    0x5e, 0xe6, 0x24, 0x57, 0xc7, 0xb2, 0xfc, 0x47, 0x8e, 0x5d, 0xa9, 0xac, 0x8f, 0xd0, 0xc7, 0x0a,
    0x0c, 0x0c, 0x6c, 0xdc, 0x17, 0xa9, 0x64, 0x07, 0x94, 0xdd, 0x4f, 0xa7, 0xcf, 0xf7, 0xf3, 0x68,
    0xfe, 0x27, 0xa1, 0x3d, 0x9a, 0xdc, 0x51, 0x74, 0x94, 0x68, 0xc2, 0x0b, 0xb9, 0xc1, 0x47, 0xdc,
    0x1f, 0x32, 0x68, 0x2b, 0xfb, 0x51, 0x3b, 0x8a, 0x57, 0x0c, 0x46, 0x61, 0xcc, 0x2e, 0x69, 0x4b,
    0x38, 0xf5, 0x23, 0x59, 0x89, 0x8a, 0x1c, 0x45, 0x99, 0x68, 0x10, 0x21, 0xfa, 0xe4, 0xbf, 0x7e,
    0x9a, 0x13, 0xef, 0x4e, 0x04, 0xa3, 0xc4, 0xee, 0x44, 0x73, 0xdf, 0x7f, 0x4a, 0xa7, 0x31, 0x77,
    0xa2, 0xf7, 0x16, 0x95, 0x57, 0x1e, 0x2b, 0xef, 0xf2, 0xb9, 0xae, 0x3a, 0xea, 0xd5, 0xb2, 0x87,
    0xdf, 0x79, 0x03, 0xb6, 0x6c, 0xc0, 0xd6, 0x0d, 0xd8, 0xad, 0x00, 0x9f, 0x0b, 0xf0, 0xa5, 0x00,
    0x5f, 0x0b, 0xf0, 0x47, 0x01, 0x7e, 0x7f, 0x02, 0x3b, 0x7d, 0xb4, 0xf8, 0x2b, 0x00, 0x00, 0xff,
    0xff, 0xba, 0x7e, 0x65, 0xf4, 0xd6, 0x00, 0x00, 0x00,
    // 186 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0xce, 0xad, 0x8e, 0xc3, 0x30,
    0x0c, 0xc0, 0xf1, 0x84, 0x1e, 0x3c, 0x78, 0xc0, 0xb0, 0xec, 0x62, 0x7a, 0xf8, 0xf0, 0xa4, 0xf1,
    0xb1, 0xb4, 0xf5, 0xd2, 0x28, 0xca, 0x87, 0xd2, 0x64, 0xd2, 0xd8, 0x1e, 0x61, 0x8f, 0x55, 0x58,
    0x58, 0xb8, 0x66, 0x2f, 0x32, 0x29, 0x2d, 0x18, 0x1c, 0xf3, 0x5f, 0xd6, 0xcf, 0xf2, 0xd7, 0xd1,
    0xfc, 0x8d, 0x42, 0x7b, 0x34, 0xb9, 0xa5, 0xe8, 0x28, 0xd1, 0x88, 0x17, 0x72, 0xbd, 0x8f, 0xb8,
    0x2f, 0x64, 0xd0, 0x56, 0x76, 0x83, 0x76, 0x14, 0xaf, 0x18, 0x8c, 0xc2, 0x98, 0x5d, 0xd2, 0x96,
    0x70, 0xec, 0x06, 0xb2, 0x12, 0x15, 0x39, 0x8a, 0x32, 0x51, 0x2f, 0x42, 0xf4, 0xc9, 0x7f, 0x37,
    0x9b, 0x13, 0xef, 0x4e, 0x04, 0xa3, 0xc4, 0xee, 0xc4, 0xe6, 0x7e, 0x7e, 0x95, 0x4e, 0x43, 0x6e,
    0x45, 0xe7, 0x2d, 0x2a, 0xaf, 0x3c, 0x56, 0xde, 0xe6, 0x73, 0xad, 0x1a, 0x75, 0xda, 0xce, 0xfe,
    0x1f, 0xa6, 0x15, 0xd8, 0xbc, 0x02, 0x5b, 0x56, 0x60, 0xb7, 0x02, 0x7c, 0x2a, 0xc0, 0xe7, 0x02,
    0x7c, 0x29, 0xc0, 0x1f, 0x05, 0xf8, 0xfd, 0x09, 0xec, 0xd4, 0x7c, 0xf6, 0xf4, 0x2b, 0x00, 0x00,
    0xff, 0xff, 0x12, 0xb4, 0xae, 0x48, 0xf6, 0x00, 0x00, 0x00,
}
2  vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto  (generated, vendored)

@@ -22,5 +22,5 @@ syntax = "proto2";
package k8s.io.apimachinery.pkg.runtime.schema;

// Package-wide variables from generator "generated".
option go_package = "schema";
option go_package = "k8s.io/apimachinery/pkg/runtime/schema";
6  vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go  (generated, vendored)

@@ -191,7 +191,8 @@ func (gv GroupVersion) Identifier() string {
// if none of the options match the group. It prefers a match to group and version over just group.
// TODO: Move GroupVersion to a package under pkg/runtime, since it's used by scheme.
// TODO: Introduce an adapter type between GroupVersion and runtime.GroupVersioner, and use LegacyCodec(GroupVersion)
// in fewer places.
//
// in fewer places.
func (gv GroupVersion) KindForGroupVersionKinds(kinds []GroupVersionKind) (target GroupVersionKind, ok bool) {
    for _, gvk := range kinds {
        if gvk.Group == gv.Group && gvk.Version == gv.Version {
@@ -239,7 +240,8 @@ func (gv GroupVersion) WithResource(resource string) GroupVersionResource {
// GroupVersions can be used to represent a set of desired group versions.
// TODO: Move GroupVersions to a package under pkg/runtime, since it's used by scheme.
// TODO: Introduce an adapter type between GroupVersions and runtime.GroupVersioner, and use LegacyCodec(GroupVersion)
// in fewer places.
//
// in fewer places.
type GroupVersions []GroupVersion

// Identifier implements runtime.GroupVersioner interface.
vendor/k8s.io/apimachinery/pkg/runtime/scheme.go
generated
vendored
39
vendor/k8s.io/apimachinery/pkg/runtime/scheme.go
generated
vendored
@ -44,11 +44,11 @@ import (
|
||||
// Schemes are not expected to change at runtime and are only threadsafe after
|
||||
// registration is complete.
|
||||
type Scheme struct {
|
||||
// versionMap allows one to figure out the go type of an object with
|
||||
// gvkToType allows one to figure out the go type of an object with
|
||||
// the given version and name.
|
||||
gvkToType map[schema.GroupVersionKind]reflect.Type
|
||||
|
||||
// typeToGroupVersion allows one to find metadata for a given go object.
|
||||
// typeToGVK allows one to find metadata for a given go object.
|
||||
// The reflect.Type we index by should *not* be a pointer.
|
||||
typeToGVK map[reflect.Type][]schema.GroupVersionKind
|
||||
|
||||
@ -64,7 +64,7 @@ type Scheme struct {
|
||||
// resource field labels in that version to internal version.
|
||||
fieldLabelConversionFuncs map[schema.GroupVersionKind]FieldLabelConversionFunc
|
||||
|
||||
// defaulterFuncs is an array of interfaces to be called with an object to provide defaulting
|
||||
// defaulterFuncs is a map to funcs to be called with an object to provide defaulting
|
||||
// the provided object must be a pointer.
|
||||
defaulterFuncs map[reflect.Type]func(interface{})
|
||||
|
||||
@ -99,7 +99,7 @@ func NewScheme() *Scheme {
|
||||
versionPriority: map[string][]string{},
|
||||
schemeName: naming.GetNameFromCallsite(internalPackages...),
|
||||
}
|
||||
s.converter = conversion.NewConverter(s.nameFunc)
|
||||
s.converter = conversion.NewConverter(nil)
|
||||
|
||||
// Enable couple default conversions by default.
|
||||
utilruntime.Must(RegisterEmbeddedConversions(s))
|
||||
@ -107,28 +107,6 @@ func NewScheme() *Scheme {
|
||||
return s
|
||||
}
|
||||
|
||||
// nameFunc returns the name of the type that we wish to use to determine when two types attempt
|
||||
// a conversion. Defaults to the go name of the type if the type is not registered.
|
||||
func (s *Scheme) nameFunc(t reflect.Type) string {
|
||||
// find the preferred names for this type
|
||||
gvks, ok := s.typeToGVK[t]
|
||||
if !ok {
|
||||
return t.Name()
|
||||
}
|
||||
|
||||
for _, gvk := range gvks {
|
||||
internalGV := gvk.GroupVersion()
|
||||
internalGV.Version = APIVersionInternal // this is hacky and maybe should be passed in
|
||||
internalGVK := internalGV.WithKind(gvk.Kind)
|
||||
|
||||
if internalType, exists := s.gvkToType[internalGVK]; exists {
|
||||
return s.typeToGVK[internalType][0].Kind
|
||||
}
|
||||
}
|
||||
|
||||
return gvks[0].Kind
|
||||
}
|
||||
|
||||
// Converter allows access to the converter for the scheme
|
||||
func (s *Scheme) Converter() *conversion.Converter {
|
||||
return s.converter
|
||||
@ -140,7 +118,8 @@ func (s *Scheme) Converter() *conversion.Converter {
|
||||
// API group and version that would never be updated.
|
||||
//
|
||||
// TODO: there is discussion about removing unversioned and replacing it with objects that are manifest into
|
||||
// every version with particular schemas. Resolve this method at that point.
|
||||
//
|
||||
// every version with particular schemas. Resolve this method at that point.
|
||||
func (s *Scheme) AddUnversionedTypes(version schema.GroupVersion, types ...Object) {
|
||||
s.addObservedVersion(version)
|
||||
s.AddKnownTypes(version, types...)
|
||||
@ -163,7 +142,7 @@ func (s *Scheme) AddKnownTypes(gv schema.GroupVersion, types ...Object) {
|
||||
s.addObservedVersion(gv)
|
||||
for _, obj := range types {
|
||||
t := reflect.TypeOf(obj)
|
||||
if t.Kind() != reflect.Ptr {
|
||||
if t.Kind() != reflect.Pointer {
|
||||
panic("All types must be pointers to structs.")
|
||||
}
|
||||
t = t.Elem()
|
||||
@ -181,7 +160,7 @@ func (s *Scheme) AddKnownTypeWithName(gvk schema.GroupVersionKind, obj Object) {
|
||||
if len(gvk.Version) == 0 {
|
||||
panic(fmt.Sprintf("version is required on all types: %s %v", gvk, t))
|
||||
}
|
||||
if t.Kind() != reflect.Ptr {
|
||||
if t.Kind() != reflect.Pointer {
|
||||
panic("All types must be pointers to structs.")
|
||||
}
|
||||
t = t.Elem()
|
||||
@ -484,7 +463,7 @@ func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) (
|
||||
} else {
|
||||
// determine the incoming kinds with as few allocations as possible.
|
||||
t = reflect.TypeOf(in)
|
||||
if t.Kind() != reflect.Ptr {
|
||||
if t.Kind() != reflect.Pointer {
|
||||
return nil, fmt.Errorf("only pointer types may be converted: %v", t)
|
||||
}
|
||||
t = t.Elem()
|
||||
|
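Note (editor's sketch, not part of the vendored diff): the reflect.Pointer checks above are what enforce the "pointers to structs" rule when registering types. A minimal, hedged registration example; the Example type, group and version are hypothetical.

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
)

// Example is a hypothetical API type; embedding TypeMeta provides GetObjectKind.
type Example struct {
    metav1.TypeMeta `json:",inline"`
    Value           string `json:"value"`
}

func (in *Example) DeepCopyObject() runtime.Object {
    out := *in
    return &out
}

func register() *runtime.Scheme {
    s := runtime.NewScheme()
    gv := schema.GroupVersion{Group: "demo.example.com", Version: "v1"}
    // AddKnownTypes panics unless it is given pointers to structs.
    s.AddKnownTypes(gv, &Example{})
    return s
}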
19  vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go  (generated, vendored)

@@ -40,6 +40,7 @@ type serializerType struct {

    Serializer       runtime.Serializer
    PrettySerializer runtime.Serializer
    StrictSerializer runtime.Serializer

    AcceptStreamContentTypes []string
    StreamContentType        string
@@ -70,10 +71,20 @@ func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, option
        )
    }

    strictJSONSerializer := json.NewSerializerWithOptions(
        mf, scheme, scheme,
        json.SerializerOptions{Yaml: false, Pretty: false, Strict: true},
    )
    jsonSerializerType.StrictSerializer = strictJSONSerializer

    yamlSerializer := json.NewSerializerWithOptions(
        mf, scheme, scheme,
        json.SerializerOptions{Yaml: true, Pretty: false, Strict: options.Strict},
    )
    strictYAMLSerializer := json.NewSerializerWithOptions(
        mf, scheme, scheme,
        json.SerializerOptions{Yaml: true, Pretty: false, Strict: true},
    )
    protoSerializer := protobuf.NewSerializer(scheme, scheme)
    protoRawSerializer := protobuf.NewRawSerializer(scheme, scheme)

@@ -85,12 +96,16 @@ func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, option
            FileExtensions:     []string{"yaml"},
            EncodesAsText:      true,
            Serializer:         yamlSerializer,
            StrictSerializer:   strictYAMLSerializer,
        },
        {
            AcceptContentTypes: []string{runtime.ContentTypeProtobuf},
            ContentType:        runtime.ContentTypeProtobuf,
            FileExtensions:     []string{"pb"},
            Serializer:         protoSerializer,
            // note, strict decoding is unsupported for protobuf,
            // fall back to regular serializing
            StrictSerializer: protoSerializer,

            Framer:           protobuf.LengthDelimitedFramer,
            StreamSerializer: protoRawSerializer,
@@ -187,6 +202,7 @@ func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) Codec
            EncodesAsText:    d.EncodesAsText,
            Serializer:       d.Serializer,
            PrettySerializer: d.PrettySerializer,
            StrictSerializer: d.StrictSerializer,
        }

        mediaType, _, err := mime.ParseMediaType(info.MediaType)
@@ -243,7 +259,8 @@ func (f CodecFactory) SupportedMediaTypes() []runtime.SerializerInfo {
// invoke CodecForVersions. Callers that need only to read data should use UniversalDecoder().
//
// TODO: make this call exist only in pkg/api, and initialize it with the set of default versions.
// All other callers will be forced to request a Codec directly.
//
// All other callers will be forced to request a Codec directly.
func (f CodecFactory) LegacyCodec(version ...schema.GroupVersion) runtime.Codec {
    return versioning.NewDefaultingCodecForScheme(f.scheme, f.legacySerializer, f.universal, schema.GroupVersions(version), runtime.InternalGroupVersioner)
}
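Note (editor's sketch, not part of the vendored diff): the new StrictSerializer field is surfaced through SupportedMediaTypes. A hedged sketch of looking it up for JSON, assuming a scheme is already populated:

scheme := runtime.NewScheme()
codecs := serializer.NewCodecFactory(scheme)

info, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), runtime.ContentTypeJSON)
if !ok {
    panic("no JSON serializer registered")
}
// strict errors on unknown/duplicate fields; for protobuf this field falls
// back to the regular serializer, as noted in the diff above.
strict := info.StrictSerializer
_ = strict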
175
vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go
generated
vendored
175
vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go
generated
vendored
@ -20,10 +20,8 @@ import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"strconv"
|
||||
"unsafe"
|
||||
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"github.com/modern-go/reflect2"
|
||||
kjson "sigs.k8s.io/json"
|
||||
"sigs.k8s.io/yaml"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@ -68,6 +66,7 @@ func identifier(options SerializerOptions) runtime.Identifier {
|
||||
"name": "json",
|
||||
"yaml": strconv.FormatBool(options.Yaml),
|
||||
"pretty": strconv.FormatBool(options.Pretty),
|
||||
"strict": strconv.FormatBool(options.Strict),
|
||||
}
|
||||
identifier, err := json.Marshal(result)
|
||||
if err != nil {
|
||||
@ -110,79 +109,6 @@ type Serializer struct {
|
||||
var _ runtime.Serializer = &Serializer{}
|
||||
var _ recognizer.RecognizingDecoder = &Serializer{}
|
||||
|
||||
type customNumberExtension struct {
|
||||
jsoniter.DummyExtension
|
||||
}
|
||||
|
||||
func (cne *customNumberExtension) CreateDecoder(typ reflect2.Type) jsoniter.ValDecoder {
|
||||
if typ.String() == "interface {}" {
|
||||
return customNumberDecoder{}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type customNumberDecoder struct {
|
||||
}
|
||||
|
||||
func (customNumberDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
|
||||
switch iter.WhatIsNext() {
|
||||
case jsoniter.NumberValue:
|
||||
var number jsoniter.Number
|
||||
iter.ReadVal(&number)
|
||||
i64, err := strconv.ParseInt(string(number), 10, 64)
|
||||
if err == nil {
|
||||
*(*interface{})(ptr) = i64
|
||||
return
|
||||
}
|
||||
f64, err := strconv.ParseFloat(string(number), 64)
|
||||
if err == nil {
|
||||
*(*interface{})(ptr) = f64
|
||||
return
|
||||
}
|
||||
iter.ReportError("DecodeNumber", err.Error())
|
||||
default:
|
||||
*(*interface{})(ptr) = iter.Read()
|
||||
}
|
||||
}
|
||||
|
||||
// CaseSensitiveJSONIterator returns a jsoniterator API that's configured to be
|
||||
// case-sensitive when unmarshalling, and otherwise compatible with
|
||||
// the encoding/json standard library.
|
||||
func CaseSensitiveJSONIterator() jsoniter.API {
|
||||
config := jsoniter.Config{
|
||||
EscapeHTML: true,
|
||||
SortMapKeys: true,
|
||||
ValidateJsonRawMessage: true,
|
||||
CaseSensitive: true,
|
||||
}.Froze()
|
||||
// Force jsoniter to decode number to interface{} via int64/float64, if possible.
|
||||
config.RegisterExtension(&customNumberExtension{})
|
||||
return config
|
||||
}
|
||||
|
||||
// StrictCaseSensitiveJSONIterator returns a jsoniterator API that's configured to be
|
||||
// case-sensitive, but also disallows unknown fields when unmarshalling. It is compatible with
|
||||
// the encoding/json standard library.
|
||||
func StrictCaseSensitiveJSONIterator() jsoniter.API {
|
||||
config := jsoniter.Config{
|
||||
EscapeHTML: true,
|
||||
SortMapKeys: true,
|
||||
ValidateJsonRawMessage: true,
|
||||
CaseSensitive: true,
|
||||
DisallowUnknownFields: true,
|
||||
}.Froze()
|
||||
// Force jsoniter to decode number to interface{} via int64/float64, if possible.
|
||||
config.RegisterExtension(&customNumberExtension{})
|
||||
return config
|
||||
}
|
||||
|
||||
// Private copies of jsoniter to try to shield against possible mutations
|
||||
// from outside. Still does not protect from package level jsoniter.Register*() functions - someone calling them
|
||||
// in some other library will mess with every usage of the jsoniter library in the whole program.
|
||||
// See https://github.com/json-iterator/go/issues/265
|
||||
var caseSensitiveJSONIterator = CaseSensitiveJSONIterator()
|
||||
var strictCaseSensitiveJSONIterator = StrictCaseSensitiveJSONIterator()
|
||||
|
||||
// gvkWithDefaults returns group kind and version defaulting from provided default
|
||||
func gvkWithDefaults(actual, defaultGVK schema.GroupVersionKind) schema.GroupVersionKind {
|
||||
if len(actual.Kind) == 0 {
|
||||
@ -237,9 +163,25 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i
|
||||
types, _, err := s.typer.ObjectKinds(into)
|
||||
switch {
|
||||
case runtime.IsNotRegisteredError(err), isUnstructured:
|
||||
if err := caseSensitiveJSONIterator.Unmarshal(data, into); err != nil {
|
||||
strictErrs, err := s.unmarshal(into, data, originalData)
|
||||
if err != nil {
|
||||
return nil, actual, err
|
||||
}
|
||||
|
||||
// when decoding directly into a provided unstructured object,
|
||||
// extract the actual gvk decoded from the provided data,
|
||||
// and ensure it is non-empty.
|
||||
if isUnstructured {
|
||||
*actual = into.GetObjectKind().GroupVersionKind()
|
||||
if len(actual.Kind) == 0 {
|
||||
return nil, actual, runtime.NewMissingKindErr(string(originalData))
|
||||
}
|
||||
// TODO(109023): require apiVersion here as well once unstructuredJSONScheme#Decode does
|
||||
}
|
||||
|
||||
if len(strictErrs) > 0 {
|
||||
return into, actual, runtime.NewStrictDecodingError(strictErrs)
|
||||
}
|
||||
return into, actual, nil
|
||||
case err != nil:
|
||||
return nil, actual, err
|
||||
@ -261,35 +203,12 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i
		return nil, actual, err
	}

	if err := caseSensitiveJSONIterator.Unmarshal(data, obj); err != nil {
		return nil, actual, err
	}

	// If the deserializer is non-strict, return successfully here.
	if !s.options.Strict {
		return obj, actual, nil
	}

	// In strict mode pass the data through the YAMLToJSONStrict converter.
	// This is done to catch duplicate fields regardless of encoding (JSON or YAML). For JSON data,
	// the output would equal the input, unless there is a parsing error such as duplicate fields.
	// As we know this was successful in the non-strict case, the only error that may be returned here
	// is because of the newly-added strictness. Hence we know we can return the typed strictDecoderError:
	// the actual error is that the object contains duplicate fields.
	altered, err := yaml.YAMLToJSONStrict(originalData)
	strictErrs, err := s.unmarshal(obj, data, originalData)
	if err != nil {
		return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData))
		return nil, actual, err
	} else if len(strictErrs) > 0 {
		return obj, actual, runtime.NewStrictDecodingError(strictErrs)
	}
	// As performance is not an issue for now for the strict deserializer (one has to do the unmarshal
	// twice regardless), we take the sanitized, altered data that is guaranteed to have no duplicated
	// fields, and unmarshal this into a copy of the already-populated obj. Any error that occurs here is
	// due to a matching field not existing in the object. Hence we can return a typed strictDecoderError:
	// the actual error is that the object contains an unknown field.
	strictObj := obj.DeepCopyObject()
	if err := strictCaseSensitiveJSONIterator.Unmarshal(altered, strictObj); err != nil {
		return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData))
	}
	// Always return the same object as the non-strict serializer to avoid any deviations.
	return obj, actual, nil
}
@ -303,7 +222,7 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error {

func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error {
	if s.options.Yaml {
		json, err := caseSensitiveJSONIterator.Marshal(obj)
		json, err := json.Marshal(obj)
		if err != nil {
			return err
		}
@ -316,7 +235,7 @@ func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error {
	}

	if s.options.Pretty {
		data, err := caseSensitiveJSONIterator.MarshalIndent(obj, "", " ")
		data, err := json.MarshalIndent(obj, "", " ")
		if err != nil {
			return err
		}
@ -327,6 +246,50 @@ func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error {
	return encoder.Encode(obj)
}

// IsStrict indicates whether the serializer
// uses strict decoding or not
func (s *Serializer) IsStrict() bool {
	return s.options.Strict
}
func (s *Serializer) unmarshal(into runtime.Object, data, originalData []byte) (strictErrs []error, err error) {
	// If the deserializer is non-strict, return here.
	if !s.options.Strict {
		if err := kjson.UnmarshalCaseSensitivePreserveInts(data, into); err != nil {
			return nil, err
		}
		return nil, nil
	}

	if s.options.Yaml {
		// In strict mode pass the original data through the YAMLToJSONStrict converter.
		// This is done to catch duplicate fields in YAML that would have been dropped in the original YAMLToJSON conversion.
		// TODO: rework YAMLToJSONStrict to return warnings about duplicate fields without terminating so we don't have to do this twice.
		_, err := yaml.YAMLToJSONStrict(originalData)
		if err != nil {
			strictErrs = append(strictErrs, err)
		}
	}

	var strictJSONErrs []error
	if u, isUnstructured := into.(runtime.Unstructured); isUnstructured {
		// Unstructured is a custom unmarshaler that gets delegated
		// to, so in order to detect strict JSON errors we need
		// to unmarshal directly into the object.
		m := map[string]interface{}{}
		strictJSONErrs, err = kjson.UnmarshalStrict(data, &m)
		u.SetUnstructuredContent(m)
	} else {
		strictJSONErrs, err = kjson.UnmarshalStrict(data, into)
	}
	if err != nil {
		// fatal decoding error, not due to strictness
		return nil, err
	}
	strictErrs = append(strictErrs, strictJSONErrs...)
	return strictErrs, nil
}

// Identifier implements runtime.Encoder interface.
func (s *Serializer) Identifier() runtime.Identifier {
	return s.identifier
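Editor's note: the rewritten decode path above separates fatal errors from strict-decoding errors (unknown or duplicate fields), so a caller can keep the decoded value while still surfacing the strict findings. Below is a minimal illustrative sketch of that distinction, assuming sigs.k8s.io/json reports a duplicate key as a strict error rather than a fatal one; the example program and its sample data are hypothetical and not part of the vendored code.

	package main

	import (
		"fmt"

		kjson "sigs.k8s.io/json"
	)

	func main() {
		// "replicas" appears twice: lenient decoding keeps the last value,
		// strict decoding additionally reports the duplicate field.
		data := []byte(`{"replicas": 1, "replicas": 2}`)

		lenient := map[string]interface{}{}
		if err := kjson.UnmarshalCaseSensitivePreserveInts(data, &lenient); err != nil {
			fmt.Println("fatal:", err)
			return
		}

		strict := map[string]interface{}{}
		strictErrs, err := kjson.UnmarshalStrict(data, &strict)
		fmt.Println("value:", lenient, "fatal:", err, "strict errors:", strictErrs)
	}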
76  vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go  generated  vendored
@ -30,6 +30,7 @@ import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/runtime/serializer/recognizer"
	"k8s.io/apimachinery/pkg/util/framer"
	"k8s.io/klog/v2"
)

var (
@ -86,6 +87,7 @@ type Serializer struct {
}

var _ runtime.Serializer = &Serializer{}
var _ runtime.EncoderWithAllocator = &Serializer{}
var _ recognizer.RecognizingDecoder = &Serializer{}

const serializerIdentifier runtime.Identifier = "protobuf"
@ -161,22 +163,36 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i
	return unmarshalToObject(s.typer, s.creater, &actual, into, unk.Raw)
}

// Encode serializes the provided object to the given writer.
func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error {
	if co, ok := obj.(runtime.CacheableObject); ok {
		return co.CacheEncode(s.Identifier(), s.doEncode, w)
	}
	return s.doEncode(obj, w)
// EncodeWithAllocator writes an object to the provided writer.
// In addition, it allows for providing a memory allocator for efficient memory usage during object serialization.
func (s *Serializer) EncodeWithAllocator(obj runtime.Object, w io.Writer, memAlloc runtime.MemoryAllocator) error {
	return s.encode(obj, w, memAlloc)
}

func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error {
// Encode serializes the provided object to the given writer.
func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error {
	return s.encode(obj, w, &runtime.SimpleAllocator{})
}

func (s *Serializer) encode(obj runtime.Object, w io.Writer, memAlloc runtime.MemoryAllocator) error {
	if co, ok := obj.(runtime.CacheableObject); ok {
		return co.CacheEncode(s.Identifier(), func(obj runtime.Object, w io.Writer) error { return s.doEncode(obj, w, memAlloc) }, w)
	}
	return s.doEncode(obj, w, memAlloc)
}

func (s *Serializer) doEncode(obj runtime.Object, w io.Writer, memAlloc runtime.MemoryAllocator) error {
	if memAlloc == nil {
		klog.Error("a mandatory memory allocator wasn't provided, this might have a negative impact on performance, check invocations of EncodeWithAllocator method, falling back on runtime.SimpleAllocator")
		memAlloc = &runtime.SimpleAllocator{}
	}
	prefixSize := uint64(len(s.prefix))

	var unk runtime.Unknown
	switch t := obj.(type) {
	case *runtime.Unknown:
		estimatedSize := prefixSize + uint64(t.Size())
		data := make([]byte, estimatedSize)
		data := memAlloc.Allocate(estimatedSize)
		i, err := t.MarshalTo(data[prefixSize:])
		if err != nil {
			return err
@ -196,11 +212,11 @@ func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error {

	switch t := obj.(type) {
	case bufferedMarshaller:
		// this path performs a single allocation during write but requires the caller to implement
		// the more efficient Size and MarshalToSizedBuffer methods
		// this path performs a single allocation during write only when the Allocator wasn't provided
		// it also requires the caller to implement the more efficient Size and MarshalToSizedBuffer methods
		encodedSize := uint64(t.Size())
		estimatedSize := prefixSize + estimateUnknownSize(&unk, encodedSize)
		data := make([]byte, estimatedSize)
		data := memAlloc.Allocate(estimatedSize)

		i, err := unk.NestedMarshalTo(data[prefixSize:], t, encodedSize)
		if err != nil {
@ -221,7 +237,7 @@ func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error {
		unk.Raw = data

		estimatedSize := prefixSize + uint64(unk.Size())
		data = make([]byte, estimatedSize)
		data = memAlloc.Allocate(estimatedSize)

		i, err := unk.MarshalTo(data[prefixSize:])
		if err != nil {
@ -395,19 +411,33 @@ func unmarshalToObject(typer runtime.ObjectTyper, creater runtime.ObjectCreater,

// Encode serializes the provided object to the given writer. Overrides is ignored.
func (s *RawSerializer) Encode(obj runtime.Object, w io.Writer) error {
	if co, ok := obj.(runtime.CacheableObject); ok {
		return co.CacheEncode(s.Identifier(), s.doEncode, w)
	}
	return s.doEncode(obj, w)
	return s.encode(obj, w, &runtime.SimpleAllocator{})
}

func (s *RawSerializer) doEncode(obj runtime.Object, w io.Writer) error {
// EncodeWithAllocator writes an object to the provided writer.
// In addition, it allows for providing a memory allocator for efficient memory usage during object serialization.
func (s *RawSerializer) EncodeWithAllocator(obj runtime.Object, w io.Writer, memAlloc runtime.MemoryAllocator) error {
	return s.encode(obj, w, memAlloc)
}

func (s *RawSerializer) encode(obj runtime.Object, w io.Writer, memAlloc runtime.MemoryAllocator) error {
	if co, ok := obj.(runtime.CacheableObject); ok {
		return co.CacheEncode(s.Identifier(), func(obj runtime.Object, w io.Writer) error { return s.doEncode(obj, w, memAlloc) }, w)
	}
	return s.doEncode(obj, w, memAlloc)
}

func (s *RawSerializer) doEncode(obj runtime.Object, w io.Writer, memAlloc runtime.MemoryAllocator) error {
	if memAlloc == nil {
		klog.Error("a mandatory memory allocator wasn't provided, this might have a negative impact on performance, check invocations of EncodeWithAllocator method, falling back on runtime.SimpleAllocator")
		memAlloc = &runtime.SimpleAllocator{}
	}
	switch t := obj.(type) {
	case bufferedReverseMarshaller:
		// this path performs a single allocation during write but requires the caller to implement
		// the more efficient Size and MarshalToSizedBuffer methods
		// this path performs a single allocation during write only when the Allocator wasn't provided
		// it also requires the caller to implement the more efficient Size and MarshalToSizedBuffer methods
		encodedSize := uint64(t.Size())
		data := make([]byte, encodedSize)
		data := memAlloc.Allocate(encodedSize)

		n, err := t.MarshalToSizedBuffer(data)
		if err != nil {
@ -417,10 +447,10 @@ func (s *RawSerializer) doEncode(obj runtime.Object, w io.Writer) error {
		return err

	case bufferedMarshaller:
		// this path performs a single allocation during write but requires the caller to implement
		// the more efficient Size and MarshalTo methods
		// this path performs a single allocation during write only when the Allocator wasn't provided
		// it also requires the caller to implement the more efficient Size and MarshalTo methods
		encodedSize := uint64(t.Size())
		data := make([]byte, encodedSize)
		data := memAlloc.Allocate(encodedSize)

		n, err := t.MarshalTo(data)
		if err != nil {
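Editor's note: the allocator plumbing above lets an encoder reuse one buffer across many writes instead of allocating per object. The sketch below shows that pattern from the caller's side, assuming only the runtime.MemoryAllocator contract visible here (Allocate(n uint64) []byte); reusingAllocator and encodeAll are hypothetical names, not part of the vendored code.

	package example

	import (
		"io"

		"k8s.io/apimachinery/pkg/runtime"
	)

	// reusingAllocator grows its buffer as needed and hands the same backing
	// array back on every call, so repeated encodes avoid fresh allocations.
	type reusingAllocator struct{ buf []byte }

	func (a *reusingAllocator) Allocate(n uint64) []byte {
		if uint64(cap(a.buf)) < n {
			a.buf = make([]byte, n)
		}
		return a.buf[:n]
	}

	// encodeAll streams many objects through an allocator-aware encoder,
	// reusing one buffer for the whole batch.
	func encodeAll(e runtime.EncoderWithAllocator, w io.Writer, objs []runtime.Object) error {
		alloc := &reusingAllocator{}
		for _, obj := range objs {
			if err := e.EncodeWithAllocator(obj, w, alloc); err != nil {
				return err
			}
		}
		return nil
	}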
12  vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go  generated  vendored
@ -109,10 +109,16 @@ func (d *decoder) Decode(data []byte, gvk *schema.GroupVersionKind, into runtime
	for _, r := range skipped {
		out, actual, err := r.Decode(data, gvk, into)
		if err != nil {
			lastErr = err
			continue
			// if we got an object back from the decoder, and the
			// error was a strict decoding error (e.g. unknown or
			// duplicate fields), we still consider the recognizer
			// to have understood the object
			if out == nil || !runtime.IsStrictDecodingError(err) {
				lastErr = err
				continue
			}
		}
		return out, actual, nil
		return out, actual, err
	}

	if lastErr == nil {
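Editor's note: with this change a recognizing decoder can return a decoded object together with a strict decoding error instead of failing outright, leaving the caller to decide how to treat the warnings. A minimal caller-side sketch under that assumption; decodeLenient is a hypothetical helper, not part of the vendored code.

	package example

	import (
		"fmt"

		"k8s.io/apimachinery/pkg/runtime"
		"k8s.io/apimachinery/pkg/runtime/schema"
	)

	// decodeLenient tolerates strict-decoding errors (unknown or duplicate
	// fields) but fails on anything the decoder could not understand at all.
	func decodeLenient(d runtime.Decoder, data []byte) (runtime.Object, *schema.GroupVersionKind, error) {
		obj, gvk, err := d.Decode(data, nil, nil)
		if err != nil {
			if !runtime.IsStrictDecodingError(err) {
				return nil, nil, err // genuinely undecodable input
			}
			// obj is still populated; surface the strict error as a warning.
			fmt.Printf("decoded %v with warnings: %v\n", gvk, err)
		}
		return obj, gvk, nil
	}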
21  vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go  generated  vendored
@ -90,7 +90,6 @@ func (d *decoder) Decode(defaults *schema.GroupVersionKind, into runtime.Object)
		}
		// must read the rest of the frame (until we stop getting ErrShortBuffer)
		d.resetRead = true
		base = 0
		return nil, nil, ErrObjectTooLarge
	}
	if err != nil {
@ -135,3 +134,23 @@ func (e *encoder) Encode(obj runtime.Object) error {
	e.buf.Reset()
	return err
}

type encoderWithAllocator struct {
	writer       io.Writer
	encoder      runtime.EncoderWithAllocator
	memAllocator runtime.MemoryAllocator
}

// NewEncoderWithAllocator returns a new streaming encoder
func NewEncoderWithAllocator(w io.Writer, e runtime.EncoderWithAllocator, a runtime.MemoryAllocator) Encoder {
	return &encoderWithAllocator{
		writer:       w,
		encoder:      e,
		memAllocator: a,
	}
}

// Encode writes the provided object to the nested writer
func (e *encoderWithAllocator) Encode(obj runtime.Object) error {
	return e.encoder.EncodeWithAllocator(obj, e.writer, e.memAllocator)
}
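Editor's note: a small wiring sketch for the streaming encoder added above, assuming an allocator-aware serializer such as the protobuf Serializer (which asserts runtime.EncoderWithAllocator earlier in this diff) and using runtime.SimpleAllocator for simplicity; newStreamEncoder is a hypothetical name, not part of the vendored code.

	package example

	import (
		"io"

		"k8s.io/apimachinery/pkg/runtime"
		"k8s.io/apimachinery/pkg/runtime/serializer/streaming"
	)

	// newStreamEncoder wraps an allocator-aware serializer so every call to
	// Encode on the returned streaming.Encoder goes through EncodeWithAllocator.
	func newStreamEncoder(w io.Writer, s runtime.EncoderWithAllocator) streaming.Encoder {
		return streaming.NewEncoderWithAllocator(w, s, &runtime.SimpleAllocator{})
	}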
76  vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go  generated  vendored
@ -89,6 +89,8 @@ type codec struct {
	originalSchemeName string
}

var _ runtime.EncoderWithAllocator = &codec{}

var identifiersMap sync.Map

type codecIdentifier struct {
@ -133,17 +135,34 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru
		}
	}

	var strictDecodingErrs []error
	obj, gvk, err := c.decoder.Decode(data, defaultGVK, decodeInto)
	if err != nil {
		return nil, gvk, err
	}

	if d, ok := obj.(runtime.NestedObjectDecoder); ok {
		if err := d.DecodeNestedObjects(runtime.WithoutVersionDecoder{c.decoder}); err != nil {
			if strictErr, ok := runtime.AsStrictDecodingError(err); obj != nil && ok {
				// save the strictDecodingError and let the caller decide what to do with it
				strictDecodingErrs = append(strictDecodingErrs, strictErr.Errors()...)
			} else {
				return nil, gvk, err
			}
		}

	if d, ok := obj.(runtime.NestedObjectDecoder); ok {
		if err := d.DecodeNestedObjects(runtime.WithoutVersionDecoder{c.decoder}); err != nil {
			if strictErr, ok := runtime.AsStrictDecodingError(err); ok {
				// save the strictDecodingError and let the caller decide what to do with it
				strictDecodingErrs = append(strictDecodingErrs, strictErr.Errors()...)
			} else {
				return nil, gvk, err

			}
		}
	}

	// aggregate the strict decoding errors into one
	var strictDecodingErr error
	if len(strictDecodingErrs) > 0 {
		strictDecodingErr = runtime.NewStrictDecodingError(strictDecodingErrs)
	}
	// if we specify a target, use generic conversion.
	if into != nil {
		// perform defaulting if requested
@ -153,14 +172,14 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru

		// Short-circuit conversion if the into object is same object
		if into == obj {
			return into, gvk, nil
			return into, gvk, strictDecodingErr
		}

		if err := c.convertor.Convert(obj, into, c.decodeVersion); err != nil {
			return nil, gvk, err
		}

		return into, gvk, nil
		return into, gvk, strictDecodingErr
	}
	// perform defaulting if requested
@ -172,22 +191,43 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru
	if err != nil {
		return nil, gvk, err
	}
	return out, gvk, nil
	return out, gvk, strictDecodingErr
}

// EncodeWithAllocator ensures the provided object is output in the appropriate group and version, invoking
// conversion if necessary. Unversioned objects (according to the ObjectTyper) are output as is.
// In addition, it allows for providing a memory allocator for efficient memory usage during object serialization.
func (c *codec) EncodeWithAllocator(obj runtime.Object, w io.Writer, memAlloc runtime.MemoryAllocator) error {
	return c.encode(obj, w, memAlloc)
}

// Encode ensures the provided object is output in the appropriate group and version, invoking
// conversion if necessary. Unversioned objects (according to the ObjectTyper) are output as is.
func (c *codec) Encode(obj runtime.Object, w io.Writer) error {
	if co, ok := obj.(runtime.CacheableObject); ok {
		return co.CacheEncode(c.Identifier(), c.doEncode, w)
	}
	return c.doEncode(obj, w)
	return c.encode(obj, w, nil)
}

func (c *codec) doEncode(obj runtime.Object, w io.Writer) error {
func (c *codec) encode(obj runtime.Object, w io.Writer, memAlloc runtime.MemoryAllocator) error {
	if co, ok := obj.(runtime.CacheableObject); ok {
		return co.CacheEncode(c.Identifier(), func(obj runtime.Object, w io.Writer) error { return c.doEncode(obj, w, memAlloc) }, w)
	}
	return c.doEncode(obj, w, memAlloc)
}

func (c *codec) doEncode(obj runtime.Object, w io.Writer, memAlloc runtime.MemoryAllocator) error {
	encodeFn := c.encoder.Encode
	if memAlloc != nil {
		if encoder, supportsAllocator := c.encoder.(runtime.EncoderWithAllocator); supportsAllocator {
			encodeFn = func(obj runtime.Object, w io.Writer) error {
				return encoder.EncodeWithAllocator(obj, w, memAlloc)
			}
		} else {
			klog.V(6).Infof("a memory allocator was provided but the encoder %s doesn't implement the runtime.EncoderWithAllocator, using regular encoder.Encode method", c.encoder.Identifier())
		}
	}
	switch obj := obj.(type) {
	case *runtime.Unknown:
		return c.encoder.Encode(obj, w)
		return encodeFn(obj, w)
	case runtime.Unstructured:
		// An unstructured list can contain objects of multiple group version kinds. don't short-circuit just
		// because the top-level type matches our desired destination type. actually send the object to the converter
@ -196,14 +236,14 @@ func (c *codec) doEncode(obj runtime.Object, w io.Writer) error {
		// avoid conversion roundtrip if GVK is the right one already or is empty (yes, this is a hack, but the old behaviour we rely on in kubectl)
		objGVK := obj.GetObjectKind().GroupVersionKind()
		if len(objGVK.Version) == 0 {
			return c.encoder.Encode(obj, w)
			return encodeFn(obj, w)
		}
		targetGVK, ok := c.encodeVersion.KindForGroupVersionKinds([]schema.GroupVersionKind{objGVK})
		if !ok {
			return runtime.NewNotRegisteredGVKErrForTarget(c.originalSchemeName, objGVK, c.encodeVersion)
		}
		if targetGVK == objGVK {
			return c.encoder.Encode(obj, w)
			return encodeFn(obj, w)
		}
	}
}
@ -225,7 +265,7 @@ func (c *codec) doEncode(obj runtime.Object, w io.Writer) error {
		}
	}
	objectKind.SetGroupVersionKind(gvks[0])
	return c.encoder.Encode(obj, w)
	return encodeFn(obj, w)
}

// Perform a conversion if necessary
@ -241,7 +281,7 @@ func (c *codec) doEncode(obj runtime.Object, w io.Writer) error {
}

// Conversion is responsible for setting the proper group, version, and kind onto the outgoing object
return c.encoder.Encode(out, w)
return encodeFn(out, w)
}

// Identifier implements runtime.Encoder interface.
59  vendor/k8s.io/apimachinery/pkg/runtime/types.go  generated  vendored
@ -21,10 +21,12 @@ package runtime

// TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type,
// like this:
// type MyAwesomeAPIObject struct {
//      runtime.TypeMeta `json:",inline"`
//      ... // other fields
// }
//
//	type MyAwesomeAPIObject struct {
//		runtime.TypeMeta `json:",inline"`
//		... // other fields
//	}
//
// func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind
//
// TypeMeta is provided here for convenience. You may use it directly from this package or define
@ -53,32 +55,37 @@ const (
// various plugin types.
//
// // Internal package:
// type MyAPIObject struct {
//      runtime.TypeMeta `json:",inline"`
//      MyPlugin runtime.Object `json:"myPlugin"`
// }
// type PluginA struct {
//      AOption string `json:"aOption"`
// }
//
//	type MyAPIObject struct {
//		runtime.TypeMeta `json:",inline"`
//		MyPlugin runtime.Object `json:"myPlugin"`
//	}
//
//	type PluginA struct {
//		AOption string `json:"aOption"`
//	}
//
// // External package:
// type MyAPIObject struct {
//      runtime.TypeMeta `json:",inline"`
//      MyPlugin runtime.RawExtension `json:"myPlugin"`
// }
// type PluginA struct {
//      AOption string `json:"aOption"`
// }
//
//	type MyAPIObject struct {
//		runtime.TypeMeta `json:",inline"`
//		MyPlugin runtime.RawExtension `json:"myPlugin"`
//	}
//
//	type PluginA struct {
//		AOption string `json:"aOption"`
//	}
//
// // On the wire, the JSON will look something like this:
// {
//      "kind":"MyAPIObject",
//      "apiVersion":"v1",
//      "myPlugin": {
//              "kind":"PluginA",
//              "aOption":"foo",
//      },
// }
//
//	{
//		"kind":"MyAPIObject",
//		"apiVersion":"v1",
//		"myPlugin": {
//			"kind":"PluginA",
//			"aOption":"foo",
//		},
//	}
//
// So what happens? Decode first uses json or yaml to unmarshal the serialized data into
// your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked.
1  vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go  generated  vendored
@ -1,3 +1,4 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*