vendor: update github.com/hashicorp/hcl/v2 to v2.19.1

Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
CrazyMax
2023-10-19 14:49:10 +02:00
parent ad674e2666
commit 34b9a629a0
157 changed files with 20123 additions and 5438 deletions

View File

@ -49,6 +49,18 @@ type CapsuleOps struct {
// pointer identity of the encapsulated value.
RawEquals func(a, b interface{}) bool
// HashKey provides a hashing function for values of the corresponding
// capsule type. If defined, cty will use the resulting hashes as part
// of the implementation of sets whose element type is or contains the
// corresponding capsule type.
//
// If a capsule type defines HashKey then the function _must_ return
// an equal hash value for any two values that would cause Equals or
// RawEquals to return true when given those values. If a given type
// does not uphold that assumption then sets including this type will
// not behave correctly.
HashKey func(v interface{}) string
// ConversionFrom can provide conversions from the corresponding type to
// some other type when values of the corresponding type are used with
// the "convert" package. (The main cty package does not use this operation.)

View File

@ -43,14 +43,14 @@ func getConversion(in cty.Type, out cty.Type, unsafe bool) conversion {
out = out.WithoutOptionalAttributesDeep()
if !isKnown {
return cty.UnknownVal(out), nil
return prepareUnknownResult(in.Range(), dynamicReplace(in.Type(), out)), nil
}
if isNull {
// We'll pass through nulls, albeit type converted, and let
// the caller deal with whatever handling they want to do in
// case null values are considered valid in some applications.
return cty.NullVal(out), nil
return cty.NullVal(dynamicReplace(in.Type(), out)), nil
}
}
@ -199,3 +199,64 @@ func retConversion(conv conversion) Conversion {
return conv(in, cty.Path(nil))
}
}
// prepareUnknownResult can apply value refinements to a returned unknown value
// in certain cases where characteristics of the source value or type can
// transfer into range constraints on the result value.
func prepareUnknownResult(sourceRange cty.ValueRange, targetTy cty.Type) cty.Value {
sourceTy := sourceRange.TypeConstraint()
ret := cty.UnknownVal(targetTy)
if sourceRange.DefinitelyNotNull() {
ret = ret.RefineNotNull()
}
switch {
case sourceTy.IsObjectType() && targetTy.IsMapType():
// A map built from an object type always has the same number of
// elements as the source type has attributes.
return ret.Refine().CollectionLength(len(sourceTy.AttributeTypes())).NewValue()
case sourceTy.IsTupleType() && targetTy.IsListType():
// A list built from a tuple type always has the same number of
// elements as the source type has elements.
return ret.Refine().CollectionLength(sourceTy.Length()).NewValue()
case sourceTy.IsTupleType() && targetTy.IsSetType():
// When building a set from a tuple type we can't exactly constrain
// the length because some elements might coalesce, but we can
// guarantee an upper limit. We can also guarantee at least one
// element if the tuple isn't empty.
switch l := sourceTy.Length(); l {
case 0, 1:
return ret.Refine().CollectionLength(l).NewValue()
default:
return ret.Refine().
CollectionLengthLowerBound(1).
CollectionLengthUpperBound(sourceTy.Length()).
NewValue()
}
case sourceTy.IsCollectionType() && targetTy.IsCollectionType():
// NOTE: We only reach this function if there is an available
// conversion between the source and target type, so we don't
// need to repeat element type compatibility checks and such here.
//
// If the source value already has a refined length then we'll
// transfer those refinements to the result, because conversion
// does not change length (aside from set element coalescing).
b := ret.Refine()
if targetTy.IsSetType() {
if sourceRange.LengthLowerBound() > 0 {
// If the source has at least one element then the result
// must always have at least one too, because value coalescing
// cannot totally empty the set.
b = b.CollectionLengthLowerBound(1)
}
} else {
b = b.CollectionLengthLowerBound(sourceRange.LengthLowerBound())
}
b = b.CollectionLengthUpperBound(sourceRange.LengthUpperBound())
return b.NewValue()
default:
return ret
}
}
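
Through the public convert API, the effect of prepareUnknownResult is that converting an unknown value can now produce a refined unknown rather than a fully unconstrained one. A sketch of the expected behavior, assuming the convert package from this same module:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
)

func main() {
	// An unknown value of a two-element tuple type...
	in := cty.UnknownVal(cty.Tuple([]cty.Type{cty.String, cty.String}))

	// ...converted to a list type is still unknown, but its length should
	// now be refined to exactly two elements, because conversion cannot
	// change the element count of a tuple.
	out, err := convert.Convert(in, cty.List(cty.String))
	if err != nil {
		panic(err)
	}
	rng := out.Range()
	fmt.Println(rng.LengthLowerBound(), rng.LengthUpperBound()) // expected: 2 2
}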

View File

@ -39,6 +39,11 @@ func conversionCollectionToList(ety cty.Type, conv conversion) conversion {
return cty.NilVal, err
}
}
if val.IsNull() {
val = cty.NullVal(val.Type().WithoutOptionalAttributesDeep())
}
elems = append(elems, val)
i++
@ -50,7 +55,7 @@ func conversionCollectionToList(ety cty.Type, conv conversion) conversion {
if ety == cty.DynamicPseudoType {
return cty.ListValEmpty(val.Type().ElementType()), nil
}
return cty.ListValEmpty(ety), nil
return cty.ListValEmpty(ety.WithoutOptionalAttributesDeep()), nil
}
if !cty.CanListVal(elems) {
@ -88,6 +93,11 @@ func conversionCollectionToSet(ety cty.Type, conv conversion) conversion {
return cty.NilVal, err
}
}
if val.IsNull() {
val = cty.NullVal(val.Type().WithoutOptionalAttributesDeep())
}
elems = append(elems, val)
i++
@ -99,7 +109,7 @@ func conversionCollectionToSet(ety cty.Type, conv conversion) conversion {
if ety == cty.DynamicPseudoType {
return cty.SetValEmpty(val.Type().ElementType()), nil
}
return cty.SetValEmpty(ety), nil
return cty.SetValEmpty(ety.WithoutOptionalAttributesDeep()), nil
}
if !cty.CanSetVal(elems) {
@ -180,7 +190,7 @@ func conversionTupleToSet(tupleType cty.Type, setEty cty.Type, unsafe bool) conv
if len(tupleEtys) == 0 {
// Empty tuple short-circuit
return func(val cty.Value, path cty.Path) (cty.Value, error) {
return cty.SetValEmpty(setEty), nil
return cty.SetValEmpty(setEty.WithoutOptionalAttributesDeep()), nil
}
}
@ -242,6 +252,11 @@ func conversionTupleToSet(tupleType cty.Type, setEty cty.Type, unsafe bool) conv
return cty.NilVal, err
}
}
if val.IsNull() {
val = cty.NullVal(val.Type().WithoutOptionalAttributesDeep())
}
elems = append(elems, val)
i++
@ -265,7 +280,7 @@ func conversionTupleToList(tupleType cty.Type, listEty cty.Type, unsafe bool) co
if len(tupleEtys) == 0 {
// Empty tuple short-circuit
return func(val cty.Value, path cty.Path) (cty.Value, error) {
return cty.ListValEmpty(listEty), nil
return cty.ListValEmpty(listEty.WithoutOptionalAttributesDeep()), nil
}
}
@ -357,7 +372,7 @@ func conversionObjectToMap(objectType cty.Type, mapEty cty.Type, unsafe bool) co
if len(objectAtys) == 0 {
// Empty object short-circuit
return func(val cty.Value, path cty.Path) (cty.Value, error) {
return cty.MapValEmpty(mapEty), nil
return cty.MapValEmpty(mapEty.WithoutOptionalAttributesDeep()), nil
}
}
@ -448,13 +463,28 @@ func conversionMapToObject(mapType cty.Type, objType cty.Type, unsafe bool) conv
elemConvs[name] = getConversion(mapEty, objectAty, unsafe)
if elemConvs[name] == nil {
// If any of our element conversions are impossible, then the our
// whole conversion is impossible.
// This means that this conversion is impossible. Typically, we
// would give up at this point and declare the whole conversion
// impossible. But, if this attribute is optional then maybe we will
// be able to do this conversion anyway provided the actual concrete
// map doesn't have this value set.
//
// We only do this in "unsafe" mode, because we cannot guarantee
// that the returned conversion will actually succeed once applied.
if objType.AttributeOptional(name) && unsafe {
// This attribute is optional, so let's leave this conversion in
// as a nil, and we can error later if we actually have to
// convert this.
continue
}
// Otherwise, give up. This conversion is impossible as we have a
// required attribute that doesn't match the map's inner type.
return nil
}
}
// If we fall out here then a conversion is possible, using the
// If we fall out here then a conversion may be possible, using the
// element conversions in elemConvs
return func(val cty.Value, path cty.Path) (cty.Value, error) {
elems := make(map[string]cty.Value, len(elemConvs))
@ -474,12 +504,43 @@ func conversionMapToObject(mapType cty.Type, objType cty.Type, unsafe bool) conv
Key: name,
}
conv := elemConvs[name.AsString()]
if conv != nil {
// There are 3 cases here:
// 1. This attribute is not in elemConvs
// 2. This attribute is in elemConvs and is not nil
// 3. This attribute is in elemConvs and is nil.
// In case 1, we do not enter any of the branches below. This case
// means the attribute type is the same between the map and the
// object, and we don't need to do any conversion.
if conv, ok := elemConvs[name.AsString()]; conv != nil {
// This is case 2. The attribute type is different between the
// map and the object, and we know how to convert between them.
// So, we reset val to be the converted value and carry on.
val, err = conv(val, elemPath)
if err != nil {
return cty.NilVal, err
}
} else if ok {
// This is case 3 and it is an error. The attribute types are
// different between the map and the object, but we cannot
// convert between them.
//
// Now typically, this would be picked earlier on when we were
// building elemConvs. However, in the case of optional
// attributes there was a chance we could still convert the
// overall object even if this particular attribute was not
// convertible. This is because it might not have been set in
// the map, and we could skip over it here and set a null value.
//
// Since we reached this branch, we know that the map did actually
// contain a non-convertible optional attribute. This means we
// error.
return cty.NilVal, path.NewErrorf("map element type is incompatible with attribute %q: %s", name.AsString(), MismatchMessage(val.Type(), objType.AttributeType(name.AsString())))
}
if val.IsNull() {
val = cty.NullVal(val.Type().WithoutOptionalAttributesDeep())
}
elems[name.AsString()] = val
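
In terms of the public convert API, the optional-attribute handling above should allow a map to convert to an object type whose optional attributes have incompatible element types, provided the map does not actually define those keys; if it does define them, the conversion fails as described in the comments above. A sketch with invented attribute names, assuming missing optional attributes are filled with nulls as the comments describe:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
)

func main() {
	objTy := cty.ObjectWithOptionalAttrs(map[string]cty.Type{
		"name": cty.String,
		"tags": cty.List(cty.String),
	}, []string{"tags"})

	// "tags" is absent from the map, so the impossible string->list(string)
	// element conversion is never needed and the attribute is expected to
	// come out as a null list.
	got, err := convert.Convert(cty.MapVal(map[string]cty.Value{
		"name": cty.StringVal("web"),
	}), objTy)
	fmt.Println(got.GetAttr("tags").IsNull(), err) // expected: true <nil>

	// "tags" is present but cannot convert to list(string), so this is the
	// error case described in the comments above.
	_, err = convert.Convert(cty.MapVal(map[string]cty.Value{
		"name": cty.StringVal("web"),
		"tags": cty.StringVal("not-a-list"),
	}), objTy)
	fmt.Println(err != nil) // expected: true
}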

View File

@ -31,3 +31,107 @@ func dynamicFixup(wantType cty.Type) conversion {
func dynamicPassthrough(in cty.Value, path cty.Path) (cty.Value, error) {
return in, nil
}
// dynamicReplace aims to return the out type unchanged, but if it finds a
// dynamic type either directly or in any descendent elements it replaces them
// with the equivalent type from in.
//
// This function assumes that in and out are compatible from a Convert
// perspective, and will panic if it finds that they are not. For example if
// in is an object and out is a map, this function will still attempt to iterate
// through both as if they were the same.
func dynamicReplace(in, out cty.Type) cty.Type {
if in == cty.DynamicPseudoType || in == cty.NilType {
// Short circuit this case, there's no point worrying about this if in
// is a dynamic type or a nil type. Out is the best we can do.
return out
}
switch {
case out == cty.DynamicPseudoType:
// So replace out with in.
return in
case out.IsPrimitiveType(), out.IsCapsuleType():
// out is not dynamic and it doesn't contain descendent elements so just
// return it unchanged.
return out
case out.IsMapType():
var elemType cty.Type
// Maps are compatible with other maps or objects.
if in.IsMapType() {
elemType = dynamicReplace(in.ElementType(), out.ElementType())
}
if in.IsObjectType() {
var types []cty.Type
for _, t := range in.AttributeTypes() {
types = append(types, t)
}
unifiedType, _ := unify(types, true)
elemType = dynamicReplace(unifiedType, out.ElementType())
}
return cty.Map(elemType)
case out.IsObjectType():
// Objects are compatible with other objects and maps.
outTypes := map[string]cty.Type{}
if in.IsMapType() {
for attr, attrType := range out.AttributeTypes() {
outTypes[attr] = dynamicReplace(in.ElementType(), attrType)
}
}
if in.IsObjectType() {
for attr, attrType := range out.AttributeTypes() {
if !in.HasAttribute(attr) {
// If in does not have this attribute, then it is an
// optional attribute and there is nothing we can do except
// to return the type from out even if it is dynamic.
outTypes[attr] = attrType
continue
}
outTypes[attr] = dynamicReplace(in.AttributeType(attr), attrType)
}
}
return cty.Object(outTypes)
case out.IsSetType():
var elemType cty.Type
// Sets are compatible with other sets, lists, tuples.
if in.IsSetType() || in.IsListType() {
elemType = dynamicReplace(in.ElementType(), out.ElementType())
}
if in.IsTupleType() {
unifiedType, _ := unify(in.TupleElementTypes(), true)
elemType = dynamicReplace(unifiedType, out.ElementType())
}
return cty.Set(elemType)
case out.IsListType():
var elemType cty.Type
// Lists are compatible with other lists, sets, and tuples.
if in.IsSetType() || in.IsListType() {
elemType = dynamicReplace(in.ElementType(), out.ElementType())
}
if in.IsTupleType() {
unifiedType, _ := unify(in.TupleElementTypes(), true)
elemType = dynamicReplace(unifiedType, out.ElementType())
}
return cty.List(elemType)
case out.IsTupleType():
// Tuples are only compatible with other tuples
var types []cty.Type
for ix := 0; ix < len(out.TupleElementTypes()); ix++ {
types = append(types, dynamicReplace(in.TupleElementType(ix), out.TupleElementType(ix)))
}
return cty.Tuple(types)
default:
panic("unrecognized type " + out.FriendlyName())
}
}
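
dynamicReplace is unexported, but its effect is visible through Convert whenever the input is null or unknown and the target type contains cty.DynamicPseudoType. A sketch of the expected result:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
)

func main() {
	in := cty.NullVal(cty.Object(map[string]cty.Type{
		"a": cty.String,
		"b": cty.String,
	}))

	// The target's dynamic element type should be replaced with the concrete
	// type unified from the source object's attributes, so the null passes
	// through as cty.Map(cty.String) rather than cty.Map(cty.DynamicPseudoType).
	got, err := convert.Convert(in, cty.Map(cty.DynamicPseudoType))
	if err != nil {
		panic(err)
	}
	fmt.Println(got.Type().GoString()) // expected: cty.Map(cty.String)
}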

View File

@ -80,13 +80,19 @@ func conversionObjectToObject(in, out cty.Type, unsafe bool) conversion {
}
}
if val.IsNull() {
// Strip optional attributes out of the embedded type for null
// values.
val = cty.NullVal(val.Type().WithoutOptionalAttributesDeep())
}
attrVals[name] = val
}
for name := range outOptionals {
if _, exists := attrVals[name]; !exists {
wantTy := outAtys[name]
attrVals[name] = cty.NullVal(wantTy)
attrVals[name] = cty.NullVal(wantTy.WithoutOptionalAttributesDeep())
}
}

View File

@ -40,7 +40,7 @@ func GetConversionUnsafe(in cty.Type, out cty.Type) Conversion {
// This is a convenience wrapper around calling GetConversionUnsafe and then
// immediately passing the given value to the resulting function.
func Convert(in cty.Value, want cty.Type) (cty.Value, error) {
if in.Type().Equals(want) {
if in.Type().Equals(want.WithoutOptionalAttributesDeep()) {
return in, nil
}

View File

@ -447,7 +447,6 @@ func unifyTupleTypes(types []cty.Type, unsafe bool, hasDynamic bool) (cty.Type,
conversions[i] = GetConversion(ty, retTy)
}
if conversions[i] == nil {
// Shouldn't be reachable, since we were able to unify
return unifyTupleTypesToList(types, unsafe)
}
}
@ -483,8 +482,8 @@ func unifyTupleTypesToList(types []cty.Type, unsafe bool) (cty.Type, []Conversio
conversions[i] = GetConversion(ty, retTy)
}
if conversions[i] == nil {
// Shouldn't be reachable, since we were able to unify
return unifyObjectTypesToMap(types, unsafe)
// no conversion was found
return cty.NilType, nil
}
}
return retTy, conversions

vendor/github.com/zclconf/go-cty/cty/ctystrings/doc.go (generated, vendored, new file, 26 lines)
View File

@ -0,0 +1,26 @@
// Package ctystrings is a collection of string manipulation utilities which
// intend to help application developers implement string-manipulation
// functionality in a way that respects the cty model of strings, even when
// they are working in the realm of Go strings.
//
// cty strings are, internally, NFC-normalized as defined in Unicode Standard
// Annex #15 and encoded as UTF-8.
//
// When working with [cty.Value] of string type cty manages this
// automatically as an implementation detail, but when applications call
// [Value.AsString] they will receive a value that has been subjected to that
// normalization, and so may need to take that normalization into account when
// manipulating the resulting string or comparing it with other Go strings
// that did not originate in a [cty.Value].
//
// Although the core representation of [cty.String] only considers whole
// strings, it's also conventional in other locations such as the standard
// library functions to consider strings as being sequences of grapheme
// clusters as defined by Unicode Standard Annex #29, which adds further
// rules about combining multiple consecutive codepoints together into a
// single user-perceived character. Functions that work with substrings should
// always use grapheme clusters as their smallest unit of splitting strings,
// and never break strings in the middle of a grapheme cluster. The functions
// in this package respect that convention unless otherwise stated in their
// documentation.
package ctystrings

View File

@ -0,0 +1,14 @@
package ctystrings
import (
"golang.org/x/text/unicode/norm"
)
// Normalize applies NFC normalization to the given string, returning the
// transformed string.
//
// This function achieves the same effect as wrapping a string in a value
// using [cty.StringVal] and then unwrapping it again using [Value.AsString].
func Normalize(str string) string {
return norm.NFC.String(str)
}
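
A small usage sketch of Normalize, relying only on the documented behavior that cty.StringVal applies the same NFC normalization:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/ctystrings"
)

func main() {
	// "e" followed by a combining acute accent (the NFD form of "é").
	raw := "caf\u0065\u0301"

	// NFC normalization composes it into the single codepoint U+00E9, which
	// is also what cty.StringVal stores internally.
	fmt.Println(ctystrings.Normalize(raw) == "caf\u00e9")     // true
	fmt.Println(cty.StringVal(raw).AsString() == "caf\u00e9") // true
}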

View File

@ -0,0 +1,139 @@
package ctystrings
import (
"fmt"
"unicode/utf8"
"github.com/apparentlymart/go-textseg/v13/textseg"
"golang.org/x/text/unicode/norm"
)
// SafeKnownPrefix takes a string intended to represent a known prefix of
// another string and modifies it so that it would be safe to use with
// byte-based prefix matching against another NFC-normalized string. It
// also takes into account grapheme cluster boundaries and trims off any
// suffix that could potentially be an incomplete grapheme cluster.
//
// Specifically, SafeKnownPrefix first applies NFC normalization to the prefix
// and then trims off one or more characters from the end of the string which
// could potentially be transformed into a different character if another
// string were appended to it. For example, a trailing latin letter will
// typically be trimmed because appending a combining diacritic mark would
// transform it into a different character.
//
// This transformation is important whenever the remainder of the string is
// arbitrary user input not directly controlled by the application. If an
// application can guarantee that the remainder of the string will not begin
// with combining marks then it is safe to instead just normalize the prefix
// string with [Normalize].
//
// Note that this function is conservative: it takes into account both
// normalization boundaries and grapheme cluster boundaries as defined by
// Unicode Standard Annex #29, and so it may trim more than is strictly
// necessary.
func SafeKnownPrefix(prefix string) string {
prefix = Normalize(prefix)
// Our starting approach here is essentially what a streaming parser would
// do when consuming a Unicode string in chunks and needing to determine
// what prefix of the current buffer is safe to process without waiting for
// more information, which is described in TR15 section 13.1
// "Buffering with Unicode Normalization":
// https://unicode.org/reports/tr15/#Buffering_with_Unicode_Normalization
//
// The general idea here is to find the last character in the string that
// could potentially start a sequence of codepoints that would combine
// together, and then truncate the string to exclude that character and
// everything after it.
form := norm.NFC
lastBoundary := form.LastBoundary([]byte(prefix))
if lastBoundary != -1 && lastBoundary != len(prefix) {
prefix = prefix[:lastBoundary]
// If we get here then we've already shortened the prefix and so
// further analysis below is unnecessary because it would be relying
// on an incomplete prefix anyway.
return prefix
}
// Now we'll use the textseg package's grapheme cluster scanner to scan
// as far through the string as we can without the scanner telling us
// that it would need more bytes to decide.
//
// This step is conservative because the grapheme cluster rules are not
// designed with prefix-matching in mind. In the base case we'll just
// always discard the last grapheme cluster, although we do have some
// special cases for trailing codepoints that can't possibly combine with
// subsequent codepoints to form a single grapheme cluster and which seem
// likely to arise often in practical use.
remain := []byte(prefix)
prevBoundary := 0
thisBoundary := 0
for len(remain) > 0 {
advance, _, err := textseg.ScanGraphemeClusters(remain, false)
if err != nil {
// ScanGraphemeClusters should never return an error because
// any sequence of valid UTF-8 encodings is valid input.
panic(fmt.Sprintf("textseg.ScanGraphemeClusters returned error: %s", err))
}
if advance == 0 {
// If we have at least one byte remaining but the scanner cannot
// advance then that means the remainder might be an incomplete
// grapheme cluster and so we need to stop here, discarding the
// rest of the input. However, we do now know that we can safely
// include what we found on the previous iteration of this loop.
prevBoundary = thisBoundary
break
}
prevBoundary = thisBoundary
thisBoundary += advance
remain = remain[advance:]
}
// This is our heuristic for detecting cases where we can be sure that
// the above algorithm was too conservative because the last segment
// we found is definitely not subject to the grapheme cluster "do not split"
// rules.
suspect := prefix[prevBoundary:thisBoundary]
if sequenceMustEndGraphemeCluster(suspect) {
prevBoundary = thisBoundary
}
return prefix[:prevBoundary]
}
// sequenceMustEndGraphemeCluster is a heuristic we use to avoid discarding
// the final grapheme cluster of a prefix in SafeKnownPrefix by recognizing
// that a particular sequence is one known to not be subject to any of
// the UAX29 "do not break" rules.
//
// If this function returns true then it is safe to include the given byte
// sequence at the end of a safe prefix. Otherwise we don't know whether or
// not it is safe.
func sequenceMustEndGraphemeCluster(s string) bool {
// For now we're only considering sequences that represent a single
// codepoint. We'll assume that any sequence of two or more codepoints
// that could be a grapheme cluster might be extendable.
if utf8.RuneCountInString(s) != 1 {
return false
}
r, _ := utf8.DecodeRuneInString(s)
// Our initial ruleset is focused on characters that are commonly used
// as delimiters in text intended for both human and machine use, such
// as JSON documents.
//
// We don't include any letters or digits of any script here intentionally
// because those are the ones most likely to be subject to combining rules
// in either current or future Unicode specifications.
//
// We can safely grow this set over time, but we should be very careful
// about shrinking it because it could cause value refinements to loosen
// and thus cause results that were once known to become unknown.
switch r {
case '-', '_', ':', ';', '/', '\\', ',', '.', '(', ')', '{', '}', '[', ']', '|', '?', '!', '~', ' ', '\t', '@', '#', '$', '%', '^', '&', '*', '+', '"', '\'':
return true
default:
return false
}
}
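
A usage sketch for SafeKnownPrefix. The exact amount trimmed depends on the input, so the example relies only on the documented guarantee that the result is a prefix that stays safe for byte-wise matching against any NFC-normalized continuation of the string:

package main

import (
	"fmt"
	"strings"

	"github.com/zclconf/go-cty/cty/ctystrings"
)

func main() {
	// The trailing letter could still combine with following codepoints, so
	// SafeKnownPrefix may trim it (and possibly a little more) off.
	userPrefix := "report-draft"
	safe := ctystrings.SafeKnownPrefix(userPrefix)

	// Any NFC-normalized string that really does start with userPrefix is
	// guaranteed to also start with the trimmed-down safe prefix.
	full := ctystrings.Normalize("report-draft-v2-final")
	fmt.Println(strings.HasPrefix(full, safe)) // true
}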

View File

@ -66,7 +66,7 @@ func elementIterator(val Value) ElementIterator {
idx: -1,
}
case val.ty.IsSetType():
rawSet := val.v.(set.Set)
rawSet := val.v.(set.Set[interface{}])
return &setElementIterator{
ety: val.ty.ElementType(),
setIt: rawSet.Iterator(),
@ -139,7 +139,7 @@ func (it *mapElementIterator) Next() bool {
type setElementIterator struct {
ety Type
setIt *set.Iterator
setIt *set.Iterator[interface{}]
}
func (it *setElementIterator) Element() (Value, Value) {

View File

@ -10,6 +10,9 @@ type Parameter struct {
// value, but callers may use it for documentation, etc.
Name string
// Description is an optional description for the argument.
Description string
// A type that any argument for this parameter must conform to.
// cty.DynamicPseudoType can be used, either at top-level or nested
// in a parameterized type, to indicate that any type should be

View File

@ -14,6 +14,9 @@ type Function struct {
// Spec is the specification of a function, used to instantiate
// a new Function.
type Spec struct {
// Description is an optional description for the function specification.
Description string
// Params is a description of the positional parameters for the function.
// The standard checking logic rejects any calls that do not provide
// arguments conforming to this definition, freeing the function
@ -36,6 +39,19 @@ type Spec struct {
// depending on its arguments.
Type TypeFunc
// RefineResult is an optional callback for describing additional
// refinements for the result value beyond what can be described using
// a type constraint.
//
// A refinement callback should always return the same builder it was
// given, typically after modifying it using the methods of
// [cty.RefinementBuilder].
//
// Any refinements described by this callback must hold for the entire
// range of results from the function. For refinements that only apply
// to certain results, use direct refinement within [Impl] instead.
RefineResult func(*cty.RefinementBuilder) *cty.RefinementBuilder
// Impl is the ImplFunc that implements the function's behavior.
//
// Functions are expected to behave as pure functions, and not create
@ -230,6 +246,22 @@ func (f Function) Call(args []cty.Value) (val cty.Value, err error) {
return cty.NilVal, err
}
if refineResult := f.spec.RefineResult; refineResult != nil {
// If this function has a refinement callback then we'll refine
// our result value in the same way regardless of how we return.
// It's the function author's responsibility to ensure that the
// refinements they specify are valid for the full range of possible
// return values from the function. If not, this will panic when
// detecting an inconsistency.
defer func() {
if val != cty.NilVal {
if val.IsKnown() || val.Type() != cty.DynamicPseudoType {
val = val.RefineWith(refineResult)
}
}
}()
}
// Type checking already dealt with most situations relating to our
// parameter specification, but we still need to deal with unknown
// values and marked values.
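
A sketch of a function spec using the new RefineResult callback together with the deferred refinement in Call shown above. The upperFunc name is invented; the refinement mirrors the refineNonNull helper referenced by the stdlib functions later in this change:

package main

import (
	"fmt"
	"strings"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function"
)

var upperFunc = function.New(&function.Spec{
	Description: "Converts the given string to uppercase.",
	Params: []function.Parameter{
		{Name: "str", Type: cty.String},
	},
	Type: function.StaticReturnType(cty.String),
	// The result is never null for any argument, so this refinement holds
	// for the function's entire result range, as required.
	RefineResult: func(b *cty.RefinementBuilder) *cty.RefinementBuilder {
		return b.NotNull()
	},
	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
		return cty.StringVal(strings.ToUpper(args[0].AsString())), nil
	},
})

func main() {
	// Even with an unknown argument, the deferred refinement in Call means
	// the unknown result is at least known to be non-null.
	v, _ := upperFunc.Call([]cty.Value{cty.UnknownVal(cty.String)})
	fmt.Println(v.IsKnown(), v.Range().DefinitelyNotNull()) // expected: false true
}
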
@ -344,3 +376,62 @@ func (f Function) VarParam() *Parameter {
ret := *f.spec.VarParam
return &ret
}
// Description returns a human-readable description of the function.
func (f Function) Description() string {
return f.spec.Description
}
// WithNewDescriptions returns a new function that has the same signature
// and implementation as the receiver but has the function description and
// the parameter descriptions replaced with those given in the arguments.
//
// All descriptions may be given as an empty string to specify that there
// should be no description at all.
//
// The paramDescs argument must match the number of parameters
// the receiver expects, or this function will panic. If the function has a
// VarParam then that counts as one parameter for the sake of this rule. The
// given descriptions will be assigned in order starting with the positional
// arguments in their declared order, followed by the variadic parameter if
// any.
//
// As a special case, WithNewDescriptions will accept a paramDescs which
// does not cover the receiver's variadic parameter (if any), so that it's
// possible to add a variadic parameter to a function which didn't previously
// have one without that being a breaking change for an existing caller using
// WithNewDescriptions against that function. In this case the base description
// of the variadic parameter will be preserved.
func (f Function) WithNewDescriptions(funcDesc string, paramDescs []string) Function {
retSpec := *f.spec // shallow copy of the receiver
retSpec.Description = funcDesc
retSpec.Params = make([]Parameter, len(f.spec.Params))
copy(retSpec.Params, f.spec.Params) // shallow copy of positional parameters
if f.spec.VarParam != nil {
retVarParam := *f.spec.VarParam // shallow copy of variadic parameter
retSpec.VarParam = &retVarParam
}
if retSpec.VarParam != nil {
if with, without := len(retSpec.Params)+1, len(retSpec.Params); len(paramDescs) != with && len(paramDescs) != without {
panic(fmt.Sprintf("paramDescs must have length of either %d or %d", with, without))
}
} else {
if want := len(retSpec.Params); len(paramDescs) != want {
panic(fmt.Sprintf("paramDescs must have length %d", want))
}
}
posParamDescs := paramDescs[:len(retSpec.Params)]
varParamDescs := paramDescs[len(retSpec.Params):] // guaranteed to be zero or one elements because of the rules above
for i, desc := range posParamDescs {
retSpec.Params[i].Description = desc
}
for _, desc := range varParamDescs {
retSpec.VarParam.Description = desc
}
return New(&retSpec)
}
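
A usage sketch for WithNewDescriptions, assuming stdlib.UpperFunc with its single positional parameter and no variadic parameter:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	// One positional parameter and no variadic parameter, so exactly one
	// parameter description must be supplied.
	documented := stdlib.UpperFunc.WithNewDescriptions(
		"Converts the given string to uppercase.",
		[]string{"The string to convert."},
	)
	fmt.Println(documented.Description())
	fmt.Println(documented.Params()[0].Description)
}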

View File

@ -6,6 +6,7 @@ import (
)
var NotFunc = function.New(&function.Spec{
Description: `Applies the logical NOT operation to the given boolean value.`,
Params: []function.Parameter{
{
Name: "val",
@ -14,13 +15,15 @@ var NotFunc = function.New(&function.Spec{
AllowMarked: true,
},
},
Type: function.StaticReturnType(cty.Bool),
Type: function.StaticReturnType(cty.Bool),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
return args[0].Not(), nil
},
})
var AndFunc = function.New(&function.Spec{
Description: `Applies the logical AND operation to the given boolean values.`,
Params: []function.Parameter{
{
Name: "a",
@ -35,13 +38,15 @@ var AndFunc = function.New(&function.Spec{
AllowMarked: true,
},
},
Type: function.StaticReturnType(cty.Bool),
Type: function.StaticReturnType(cty.Bool),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
return args[0].And(args[1]), nil
},
})
var OrFunc = function.New(&function.Spec{
Description: `Applies the logical OR operation to the given boolean values.`,
Params: []function.Parameter{
{
Name: "a",
@ -56,7 +61,8 @@ var OrFunc = function.New(&function.Spec{
AllowMarked: true,
},
},
Type: function.StaticReturnType(cty.Bool),
Type: function.StaticReturnType(cty.Bool),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
return args[0].Or(args[1]), nil
},

View File

@ -30,6 +30,7 @@ func BytesVal(buf []byte) cty.Value {
// BytesLen is a Function that returns the length of the buffer encapsulated
// in a Bytes value.
var BytesLenFunc = function.New(&function.Spec{
Description: `Returns the total number of bytes in the given buffer.`,
Params: []function.Parameter{
{
Name: "buf",
@ -37,7 +38,8 @@ var BytesLenFunc = function.New(&function.Spec{
AllowDynamicType: true,
},
},
Type: function.StaticReturnType(cty.Number),
Type: function.StaticReturnType(cty.Number),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
bufPtr := args[0].EncapsulatedValue().(*[]byte)
return cty.NumberIntVal(int64(len(*bufPtr))), nil
@ -46,6 +48,7 @@ var BytesLenFunc = function.New(&function.Spec{
// BytesSlice is a Function that returns a slice of the given Bytes value.
var BytesSliceFunc = function.New(&function.Spec{
Description: `Extracts a subslice from the given buffer.`,
Params: []function.Parameter{
{
Name: "buf",
@ -63,7 +66,8 @@ var BytesSliceFunc = function.New(&function.Spec{
AllowDynamicType: true,
},
},
Type: function.StaticReturnType(Bytes),
Type: function.StaticReturnType(Bytes),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
bufPtr := args[0].EncapsulatedValue().(*[]byte)

View File

@ -12,6 +12,7 @@ import (
)
var HasIndexFunc = function.New(&function.Spec{
Description: `Returns true if the given collection can be indexed with the given key without producing an error, or false otherwise.`,
Params: []function.Parameter{
{
Name: "collection",
@ -31,12 +32,14 @@ var HasIndexFunc = function.New(&function.Spec{
}
return cty.Bool, nil
},
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
return args[0].HasIndex(args[1]), nil
},
})
var IndexFunc = function.New(&function.Spec{
Description: `Returns the element with the given key from the given collection, or raises an error if there is no such element.`,
Params: []function.Parameter{
{
Name: "collection",
@ -106,11 +109,13 @@ var IndexFunc = function.New(&function.Spec{
})
var LengthFunc = function.New(&function.Spec{
Description: `Returns the number of elements in the given collection.`,
Params: []function.Parameter{
{
Name: "collection",
Type: cty.DynamicPseudoType,
AllowDynamicType: true,
AllowUnknown: true,
AllowMarked: true,
},
},
@ -121,12 +126,14 @@ var LengthFunc = function.New(&function.Spec{
}
return cty.Number, nil
},
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
return args[0].Length(), nil
},
})
var ElementFunc = function.New(&function.Spec{
Description: `Returns the element with the given index from the given list or tuple, applying the modulo operation to the given index if it's greater than the number of elements.`,
Params: []function.Parameter{
{
Name: "list",
@ -206,9 +213,11 @@ var ElementFunc = function.New(&function.Spec{
// CoalesceListFunc is a function that takes any number of list arguments
// and returns the first one that isn't empty.
var CoalesceListFunc = function.New(&function.Spec{
Params: []function.Parameter{},
Description: `Returns the first of the given sequences that has a length greater than zero.`,
Params: []function.Parameter{},
VarParam: &function.Parameter{
Name: "vals",
Description: `List or tuple values to test in the given order.`,
Type: cty.DynamicPseudoType,
AllowUnknown: true,
AllowDynamicType: true,
@ -245,6 +254,7 @@ var CoalesceListFunc = function.New(&function.Spec{
return last, nil
},
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
for _, arg := range args {
if !arg.IsKnown() {
@ -270,13 +280,15 @@ var CoalesceListFunc = function.New(&function.Spec{
// CompactFunc is a function that takes a list of strings and returns a new list
// with any empty string elements removed.
var CompactFunc = function.New(&function.Spec{
Description: `Removes all empty string elements from the given list of strings.`,
Params: []function.Parameter{
{
Name: "list",
Type: cty.List(cty.String),
},
},
Type: function.StaticReturnType(cty.List(cty.String)),
Type: function.StaticReturnType(cty.List(cty.String)),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
listVal := args[0]
if !listVal.IsWhollyKnown() {
@ -306,6 +318,7 @@ var CompactFunc = function.New(&function.Spec{
// ContainsFunc is a function that determines whether a given list or
// set contains a given single value as one of its elements.
var ContainsFunc = function.New(&function.Spec{
Description: `Returns true if the given value is a value in the given list, tuple, or set, or false otherwise.`,
Params: []function.Parameter{
{
Name: "list",
@ -316,7 +329,8 @@ var ContainsFunc = function.New(&function.Spec{
Type: cty.DynamicPseudoType,
},
},
Type: function.StaticReturnType(cty.Bool),
Type: function.StaticReturnType(cty.Bool),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
arg := args[0]
ty := arg.Type()
@ -364,6 +378,7 @@ var ContainsFunc = function.New(&function.Spec{
// DistinctFunc is a function that takes a list and returns a new list
// with any duplicate elements removed.
var DistinctFunc = function.New(&function.Spec{
Description: `Removes any duplicate values from the given list, preserving the order of remaining elements.`,
Params: []function.Parameter{
{
Name: "list",
@ -373,6 +388,7 @@ var DistinctFunc = function.New(&function.Spec{
Type: func(args []cty.Value) (cty.Type, error) {
return args[0].Type(), nil
},
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
listVal := args[0]
@ -399,14 +415,17 @@ var DistinctFunc = function.New(&function.Spec{
// ChunklistFunc is a function that splits a single list into fixed-size chunks,
// returning a list of lists.
var ChunklistFunc = function.New(&function.Spec{
Description: `Splits a single list into multiple lists where each has at most the given number of elements.`,
Params: []function.Parameter{
{
Name: "list",
Description: `The list to split into chunks.`,
Type: cty.List(cty.DynamicPseudoType),
AllowMarked: true,
},
{
Name: "size",
Description: `The maximum length of each chunk. All but the last element of the result is guaranteed to be of exactly this size.`,
Type: cty.Number,
AllowMarked: true,
},
@ -414,6 +433,7 @@ var ChunklistFunc = function.New(&function.Spec{
Type: func(args []cty.Value) (cty.Type, error) {
return cty.List(args[0].Type()), nil
},
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
listVal := args[0]
sizeVal := args[1]
@ -471,6 +491,7 @@ var ChunklistFunc = function.New(&function.Spec{
// FlattenFunc is a function that takes a list and replaces any elements
// that are lists with a flattened sequence of the list contents.
var FlattenFunc = function.New(&function.Spec{
Description: `Transforms a list, set, or tuple value into a tuple by replacing any given elements that are themselves sequences with a flattened tuple of all of the nested elements concatenated together.`,
Params: []function.Parameter{
{
Name: "list",
@ -500,6 +521,7 @@ var FlattenFunc = function.New(&function.Spec{
}
return cty.Tuple(tys), nil
},
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
inputList := args[0]
@ -525,6 +547,7 @@ func flattener(flattenList cty.Value) ([]cty.Value, []cty.ValueMarks, bool) {
if len(flattenListMarks) > 0 {
markses = append(markses, flattenListMarks)
}
if !flattenList.Length().IsKnown() {
// If we don't know the length of what we're flattening then we can't
// predict the length of our result yet either.
@ -542,7 +565,7 @@ func flattener(flattenList cty.Value) ([]cty.Value, []cty.ValueMarks, bool) {
isKnown = false
}
if val.Type().IsListType() || val.Type().IsSetType() || val.Type().IsTupleType() {
if !val.IsNull() && (val.Type().IsListType() || val.Type().IsSetType() || val.Type().IsTupleType()) {
if !val.IsKnown() {
isKnown = false
_, unknownMarks := val.Unmark()
@ -566,9 +589,11 @@ func flattener(flattenList cty.Value) ([]cty.Value, []cty.ValueMarks, bool) {
// KeysFunc is a function that takes a map and returns a sorted list of the map keys.
var KeysFunc = function.New(&function.Spec{
Description: `Returns a list of the keys of the given map in lexicographical order.`,
Params: []function.Parameter{
{
Name: "inputMap",
Description: `The map to extract keys from. May instead be an object-typed value, in which case the result is a tuple of the object attributes.`,
Type: cty.DynamicPseudoType,
AllowUnknown: true,
AllowMarked: true,
@ -595,6 +620,7 @@ var KeysFunc = function.New(&function.Spec{
return cty.DynamicPseudoType, function.NewArgErrorf(0, "must have map or object type")
}
},
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
// We must unmark the value before we can use ElementIterator on it, and
// then re-apply the same marks (possibly none) when we return. Since we
@ -641,6 +667,7 @@ var KeysFunc = function.New(&function.Spec{
// LookupFunc is a function that performs dynamic lookups of map types.
var LookupFunc = function.New(&function.Spec{
Description: `Returns the value of the element with the given key from the given map, or returns the default value if there is no such element.`,
Params: []function.Parameter{
{
Name: "inputMap",
@ -733,7 +760,8 @@ var LookupFunc = function.New(&function.Spec{
// If more than one given map or object defines the same key then the one that
// is later in the argument sequence takes precedence.
var MergeFunc = function.New(&function.Spec{
Params: []function.Parameter{},
Description: `Merges all of the elements from the given maps into a single map, or the attributes from given objects into a single object.`,
Params: []function.Parameter{},
VarParam: &function.Parameter{
Name: "maps",
Type: cty.DynamicPseudoType,
@ -814,6 +842,7 @@ var MergeFunc = function.New(&function.Spec{
return cty.Object(attrs), nil
},
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
outputMap := make(map[string]cty.Value)
var markses []cty.ValueMarks // remember any marked maps/objects we find
@ -849,6 +878,7 @@ var MergeFunc = function.New(&function.Spec{
// ReverseListFunc takes a sequence and produces a new sequence of the same length
// with all of the same elements as the given sequence but in reverse order.
var ReverseListFunc = function.New(&function.Spec{
Description: `Returns the given list with its elements in reverse order.`,
Params: []function.Parameter{
{
Name: "list",
@ -872,6 +902,7 @@ var ReverseListFunc = function.New(&function.Spec{
return cty.NilType, function.NewArgErrorf(0, "can only reverse list or tuple values, not %s", argTy.FriendlyName())
}
},
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
in, marks := args[0].Unmark()
inVals := in.AsValueSlice()
@ -897,11 +928,14 @@ var ReverseListFunc = function.New(&function.Spec{
// preserving the ordering of all of the input lists. Otherwise the result is a
// set of tuples.
var SetProductFunc = function.New(&function.Spec{
Params: []function.Parameter{},
Description: `Calculates the cartesian product of two or more sets.`,
Params: []function.Parameter{},
VarParam: &function.Parameter{
Name: "sets",
Type: cty.DynamicPseudoType,
AllowMarked: true,
Name: "sets",
Description: "The sets to consider. Also accepts lists and tuples, and if all arguments are of list or tuple type then the result will preserve the input ordering",
Type: cty.DynamicPseudoType,
AllowMarked: true,
AllowUnknown: true,
},
Type: func(args []cty.Value) (retType cty.Type, err error) {
if len(args) < 2 {
@ -943,6 +977,7 @@ var SetProductFunc = function.New(&function.Spec{
}
return cty.Set(cty.Tuple(elemTys)), nil
},
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
ety := retType.ElementType()
var retMarks cty.ValueMarks
@ -955,7 +990,7 @@ var SetProductFunc = function.New(&function.Spec{
// Continue processing after we find an argument with unknown
// length to ensure that we cover all the marks
if !arg.Length().IsKnown() {
if !(arg.IsKnown() && arg.Length().IsKnown()) {
hasUnknownLength = true
continue
}
@ -967,7 +1002,62 @@ var SetProductFunc = function.New(&function.Spec{
}
if hasUnknownLength {
return cty.UnknownVal(retType).WithMarks(retMarks), nil
defer func() {
// We're definitely going to return from somewhere in this
// branch and however we do it we must reapply the marks
// on the way out.
ret = ret.WithMarks(retMarks)
}()
ret := cty.UnknownVal(retType)
// Even if we don't know the exact length we may be able to
// constrain the upper and lower bounds of the resulting length.
maxLength := 1
for _, arg := range args {
arg, _ := arg.Unmark() // safe to discard marks because "retMarks" already contains them all
argRng := arg.Range()
ty := argRng.TypeConstraint()
var argMaxLen int
if ty.IsCollectionType() {
argMaxLen = argRng.LengthUpperBound()
} else if ty.IsTupleType() {
argMaxLen = ty.Length()
} else {
// Should not get here but if we do then we'll just
// bail out with an unrefined unknown value.
return ret, nil
}
// The upper bound of a totally-unrefined collection is
// math.MaxInt, which will quickly get us to integer overflow
// here, and so out of pragmatism we'll just impose a reasonable
// upper limit on what is a useful bound to track and return
// unrefined for unusually-large input.
if argMaxLen > 1024 { // arbitrarily-decided threshold
return ret, nil
}
maxLength *= argMaxLen
if maxLength > 2048 { // arbitrarily-decided threshold
return ret, nil
}
if maxLength < 0 { // Seems like we already overflowed, then.
return ret, nil
}
}
if maxLength == 0 {
// This refinement will typically allow the unknown value to
// collapse into a known empty collection.
ret = ret.Refine().CollectionLength(0).NewValue()
} else {
// If we know there's a nonzero maximum number of elements then
// set element coalescing cannot reduce to fewer than one
// element.
ret = ret.Refine().
CollectionLengthLowerBound(1).
CollectionLengthUpperBound(maxLength).
NewValue()
}
return ret, nil
}
if total == 0 {
@ -1037,6 +1127,7 @@ var SetProductFunc = function.New(&function.Spec{
// SliceFunc is a function that extracts some consecutive elements
// from within a list.
var SliceFunc = function.New(&function.Spec{
Description: `Extracts a subslice of the given list or tuple value.`,
Params: []function.Parameter{
{
Name: "list",
@ -1079,6 +1170,7 @@ var SliceFunc = function.New(&function.Spec{
}
return cty.Tuple(argTy.TupleElementTypes()[startIndex:endIndex]), nil
},
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
inputList, marks := args[0].Unmark()
@ -1158,9 +1250,10 @@ func sliceIndexes(args []cty.Value) (int, int, bool, error) {
// ValuesFunc is a function that returns a list of the map values,
// in the order of the sorted keys.
var ValuesFunc = function.New(&function.Spec{
Description: `Returns the values of elements of a given map, or the values of attributes of a given object, in lexicographic order by key or attribute name.`,
Params: []function.Parameter{
{
Name: "values",
Name: "mapping",
Type: cty.DynamicPseudoType,
AllowMarked: true,
},
@ -1192,6 +1285,7 @@ var ValuesFunc = function.New(&function.Spec{
}
return cty.NilType, errors.New("values() requires a map as the first argument")
},
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
mapVar := args[0]
@ -1225,6 +1319,7 @@ var ValuesFunc = function.New(&function.Spec{
// ZipmapFunc is a function that constructs a map from a list of keys
// and a corresponding list of values.
var ZipmapFunc = function.New(&function.Spec{
Description: `Constructs a map from a list of keys and a corresponding list of values, which must both be of the same length.`,
Params: []function.Parameter{
{
Name: "keys",
@ -1279,6 +1374,7 @@ var ZipmapFunc = function.New(&function.Spec{
return cty.NilType, errors.New("values argument must be a list or tuple value")
}
},
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
keys := args[0]
values := args[1]

View File

@ -1,6 +1,7 @@
package stdlib
import (
"fmt"
"strconv"
"github.com/zclconf/go-cty/cty"
@ -18,6 +19,7 @@ import (
// a tuple.
func MakeToFunc(wantTy cty.Type) function.Function {
return function.New(&function.Spec{
Description: fmt.Sprintf("Converts the given value to %s, or raises an error if that conversion is impossible.", wantTy.FriendlyName()),
Params: []function.Parameter{
{
Name: "v",
@ -85,3 +87,36 @@ func MakeToFunc(wantTy cty.Type) function.Function {
},
})
}
// AssertNotNullFunc is a function which does nothing except return an error
// if the argument given to it is null.
//
// This could be useful in some cases where the automatic refinement of
// nullability isn't precise enough, because the result is guaranteed to not
// be null and can therefore allow downstream comparisons to null to return
// a known value even if the value is otherwise unknown.
var AssertNotNullFunc = function.New(&function.Spec{
Description: "Returns the given value varbatim if it is non-null, or raises an error if it's null.",
Params: []function.Parameter{
{
Name: "v",
Type: cty.DynamicPseudoType,
// NOTE: We intentionally don't set AllowNull here, and so
// the function system will automatically reject a null argument
// for us before calling Impl.
},
},
Type: func(args []cty.Value) (cty.Type, error) {
return args[0].Type(), nil
},
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
// Our argument doesn't set AllowNull: true, so we're guaranteed to
// have a non-null value in args[0].
return args[0], nil
},
})
func AssertNotNull(v cty.Value) (cty.Value, error) {
return AssertNotNullFunc.Call([]cty.Value{v})
}
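
A usage sketch for AssertNotNull, showing the refinement that the deferred RefineResult handling attaches to an otherwise unknown result:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	v := cty.UnknownVal(cty.String) // unknown, and might turn out to be null

	nn, err := stdlib.AssertNotNull(v)
	if err != nil {
		panic(err)
	}

	// Still unknown, but now refined as definitely not null, which can let
	// downstream comparisons against null produce known results.
	fmt.Println(v.Range().DefinitelyNotNull())  // false
	fmt.Println(nn.Range().DefinitelyNotNull()) // true
}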

View File

@ -11,6 +11,7 @@ import (
)
var CSVDecodeFunc = function.New(&function.Spec{
Description: `Parses the given string as Comma Separated Values (as defined by RFC 4180) and returns a map of objects representing the table of data, using the first row as a header row to define the object attributes.`,
Params: []function.Parameter{
{
Name: "str",
@ -42,6 +43,7 @@ var CSVDecodeFunc = function.New(&function.Spec{
}
return cty.List(cty.Object(atys)), nil
},
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
ety := retType.ElementType()
atys := ety.AttributeTypes()

View File

@ -12,6 +12,7 @@ import (
)
var FormatDateFunc = function.New(&function.Spec{
Description: `Formats a timestamp given in RFC 3339 syntax into another timestamp in some other machine-oriented time syntax, as described in the format string.`,
Params: []function.Parameter{
{
Name: "format",
@ -22,7 +23,8 @@ var FormatDateFunc = function.New(&function.Spec{
Type: cty.String,
},
},
Type: function.StaticReturnType(cty.String),
Type: function.StaticReturnType(cty.String),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
formatStr := args[0].AsString()
timeStr := args[1].AsString()
@ -205,6 +207,7 @@ var FormatDateFunc = function.New(&function.Spec{
// TimeAddFunc is a function that adds a duration to a timestamp, returning a new timestamp.
var TimeAddFunc = function.New(&function.Spec{
Description: `Adds the duration represented by the given duration string to the given RFC 3339 timestamp string, returning another RFC 3339 timestamp.`,
Params: []function.Parameter{
{
Name: "timestamp",
@ -279,67 +282,6 @@ func FormatDate(format cty.Value, timestamp cty.Value) (cty.Value, error) {
return FormatDateFunc.Call([]cty.Value{format, timestamp})
}
func parseTimestamp(ts string) (time.Time, error) {
t, err := time.Parse(time.RFC3339, ts)
if err != nil {
switch err := err.(type) {
case *time.ParseError:
// If err is s time.ParseError then its string representation is not
// appropriate since it relies on details of Go's strange date format
// representation, which a caller of our functions is not expected
// to be familiar with.
//
// Therefore we do some light transformation to get a more suitable
// error that should make more sense to our callers. These are
// still not awesome error messages, but at least they refer to
// the timestamp portions by name rather than by Go's example
// values.
if err.LayoutElem == "" && err.ValueElem == "" && err.Message != "" {
// For some reason err.Message is populated with a ": " prefix
// by the time package.
return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp%s", err.Message)
}
var what string
switch err.LayoutElem {
case "2006":
what = "year"
case "01":
what = "month"
case "02":
what = "day of month"
case "15":
what = "hour"
case "04":
what = "minute"
case "05":
what = "second"
case "Z07:00":
what = "UTC offset"
case "T":
return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: missing required time introducer 'T'")
case ":", "-":
if err.ValueElem == "" {
return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: end of string where %q is expected", err.LayoutElem)
} else {
return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: found %q where %q is expected", err.ValueElem, err.LayoutElem)
}
default:
// Should never get here, because time.RFC3339 includes only the
// above portions, but since that might change in future we'll
// be robust here.
what = "timestamp segment"
}
if err.ValueElem == "" {
return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: end of string before %s", what)
} else {
return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: cannot use %q as %s", err.ValueElem, what)
}
}
return time.Time{}, err
}
return t, nil
}
// splitDataFormat is a bufio.SplitFunc used to tokenize a date format.
func splitDateFormat(data []byte, atEOF bool) (advance int, token []byte, err error) {
if len(data) == 0 {
@ -416,6 +358,75 @@ func startsDateFormatVerb(b byte) bool {
return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z')
}
func parseTimestamp(ts string) (time.Time, error) {
t, err := parseStrictRFC3339(ts)
if err != nil {
switch err := err.(type) {
case *time.ParseError:
// If err is a time.ParseError then its string representation is not
// appropriate since it relies on details of Go's strange date format
// representation, which a caller of our functions is not expected
// to be familiar with.
//
// Therefore we do some light transformation to get a more suitable
// error that should make more sense to our callers. These are
// still not awesome error messages, but at least they refer to
// the timestamp portions by name rather than by Go's example
// values.
if err.LayoutElem == "" && err.ValueElem == "" && err.Message != "" {
// For some reason err.Message is populated with a ": " prefix
// by the time package.
return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp%s", err.Message)
}
var what string
switch err.LayoutElem {
case "2006":
what = "year"
case "01":
what = "month"
case "02":
what = "day of month"
case "15":
what = "hour"
case "04":
what = "minute"
case "05":
what = "second"
case "Z07:00":
what = "UTC offset"
case "T":
return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: missing required time introducer 'T'")
case ":", "-":
if err.ValueElem == "" {
return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: end of string where %q is expected", err.LayoutElem)
} else {
return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: found %q where %q is expected", err.ValueElem, err.LayoutElem)
}
default:
// Should never get here, because RFC3339 includes only the
// above portions.
what = "timestamp segment"
}
if err.ValueElem == "" {
return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: end of string before %s", what)
} else {
switch {
case what == "hour" && strings.Contains(err.ValueElem, ":"):
return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: hour must be between 0 and 23 inclusive")
case what == "hour" && len(err.ValueElem) != 2:
return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: hour must have exactly two digits")
case what == "minute" && len(err.ValueElem) != 2:
return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: minute must have exactly two digits")
default:
return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: cannot use %q as %s", err.ValueElem, what)
}
}
}
return time.Time{}, err
}
return t, nil
}
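
The strict parser itself is unexported, but its effect is visible through the timestamp-taking stdlib functions. A sketch using FormatDate; the exact error wording is an expectation, not a guarantee:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	// A single-digit hour is not valid RFC 3339, even though Go's time.Parse
	// is lenient about it, so this should now be rejected with a message
	// naming the offending component (e.g. the hour must have two digits).
	_, err := stdlib.FormatDate(
		cty.StringVal("YYYY-MM-DD"),
		cty.StringVal("2023-10-19T9:00:00Z"),
	)
	fmt.Println(err != nil) // true
}
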
// TimeAdd adds a duration to a timestamp, returning a new timestamp.
//
// In the HCL language, timestamps are conventionally represented as

View File

@ -0,0 +1,219 @@
package stdlib
import (
"errors"
"strconv"
"time"
)
// This file inlines some RFC3339 parsing code that was added to the Go standard
// library's "time" package during the Go 1.20 development period but then
// reverted prior to release to follow the Go proposals process first.
//
// Our goal is to support only valid RFC3339 strings regardless of what version
// of Go is being used, because the Go stdlib is just an implementation detail
// of the cty stdlib and so these functions should not vary their behavior
// significantly due to being compiled against a different Go version.
//
// These inline copies of the code from upstream should likely stay here
// indefinitely even if functionality like this _is_ accepted in a later version
// of Go, because this now defines cty's definition of RFC3339 parsing as
// intentionally independent of Go's.
func parseStrictRFC3339(str string) (time.Time, error) {
t, ok := parseRFC3339(str)
if !ok {
// If parsing failed then we'll try to use time.Parse to gather up a
// helpful error object.
_, err := time.Parse(time.RFC3339, str)
if err != nil {
return time.Time{}, err
}
// The parse template syntax cannot correctly validate RFC 3339.
// Explicitly check for cases that Parse is unable to validate for.
// See https://go.dev/issue/54580.
num2 := func(str string) byte { return 10*(str[0]-'0') + (str[1] - '0') }
switch {
case str[len("2006-01-02T")+1] == ':': // hour must be two digits
return time.Time{}, &time.ParseError{
Layout: time.RFC3339,
Value: str,
LayoutElem: "15",
ValueElem: str[len("2006-01-02T"):][:1],
Message: ": hour must have two digits",
}
case str[len("2006-01-02T15:04:05")] == ',': // sub-second separator must be a period
return time.Time{}, &time.ParseError{
Layout: time.RFC3339,
Value: str,
LayoutElem: ".",
ValueElem: ",",
Message: ": sub-second separator must be a period",
}
case str[len(str)-1] != 'Z':
switch {
case num2(str[len(str)-len("07:00"):]) >= 24: // timezone hour must be in range
return time.Time{}, &time.ParseError{
Layout: time.RFC3339,
Value: str,
LayoutElem: "Z07:00",
ValueElem: str[len(str)-len("Z07:00"):],
Message: ": timezone hour out of range",
}
case num2(str[len(str)-len("00"):]) >= 60: // timezone minute must be in range
return time.Time{}, &time.ParseError{
Layout: time.RFC3339,
Value: str,
LayoutElem: "Z07:00",
ValueElem: str[len(str)-len("Z07:00"):],
Message: ": timezone minute out of range",
}
}
default: // unknown error; should not occur
return time.Time{}, &time.ParseError{
Layout: time.RFC3339,
Value: str,
LayoutElem: time.RFC3339,
ValueElem: str,
Message: "",
}
}
}
return t, nil
}
func parseRFC3339(s string) (time.Time, bool) {
// parseUint parses s as an unsigned decimal integer and
// verifies that it is within some range.
// If it is invalid or out-of-range,
// it sets ok to false and returns the min value.
ok := true
parseUint := func(s string, min, max int) (x int) {
for _, c := range []byte(s) {
if c < '0' || '9' < c {
ok = false
return min
}
x = x*10 + int(c) - '0'
}
if x < min || max < x {
ok = false
return min
}
return x
}
// Parse the date and time.
if len(s) < len("2006-01-02T15:04:05") {
return time.Time{}, false
}
year := parseUint(s[0:4], 0, 9999) // e.g., 2006
month := parseUint(s[5:7], 1, 12) // e.g., 01
day := parseUint(s[8:10], 1, daysIn(time.Month(month), year)) // e.g., 02
hour := parseUint(s[11:13], 0, 23) // e.g., 15
min := parseUint(s[14:16], 0, 59) // e.g., 04
sec := parseUint(s[17:19], 0, 59) // e.g., 05
if !ok || !(s[4] == '-' && s[7] == '-' && s[10] == 'T' && s[13] == ':' && s[16] == ':') {
return time.Time{}, false
}
s = s[19:]
// Parse the fractional second.
var nsec int
if len(s) >= 2 && s[0] == '.' && isDigit(s, 1) {
n := 2
for ; n < len(s) && isDigit(s, n); n++ {
}
nsec, _, _ = parseNanoseconds(s, n)
s = s[n:]
}
// Parse the time zone.
loc := time.UTC
if len(s) != 1 || s[0] != 'Z' {
if len(s) != len("-07:00") {
return time.Time{}, false
}
hr := parseUint(s[1:3], 0, 23) // e.g., 07
mm := parseUint(s[4:6], 0, 59) // e.g., 00
if !ok || !((s[0] == '-' || s[0] == '+') && s[3] == ':') {
return time.Time{}, false
}
zoneOffsetSecs := (hr*60 + mm) * 60
if s[0] == '-' {
zoneOffsetSecs = -zoneOffsetSecs
}
loc = time.FixedZone("", zoneOffsetSecs)
}
t := time.Date(year, time.Month(month), day, hour, min, sec, nsec, loc)
return t, true
}
func isDigit(s string, i int) bool {
if len(s) <= i {
return false
}
c := s[i]
return '0' <= c && c <= '9'
}
func parseNanoseconds(value string, nbytes int) (ns int, rangeErrString string, err error) {
if value[0] != '.' && value[0] != ',' {
err = errBadTimestamp
return
}
if nbytes > 10 {
value = value[:10]
nbytes = 10
}
if ns, err = strconv.Atoi(value[1:nbytes]); err != nil {
return
}
if ns < 0 {
rangeErrString = "fractional second"
return
}
// We need nanoseconds, which means scaling by the number
// of missing digits in the format, maximum length 10.
scaleDigits := 10 - nbytes
for i := 0; i < scaleDigits; i++ {
ns *= 10
}
return
}
// These are internal errors used by the date parsing code and are not ever
// returned by public functions.
var errBadTimestamp = errors.New("bad value for field")
// daysBefore[m] counts the number of days in a non-leap year
// before month m begins. There is an entry for m=12, counting
// the number of days before January of next year (365).
var daysBefore = [...]int32{
0,
31,
31 + 28,
31 + 28 + 31,
31 + 28 + 31 + 30,
31 + 28 + 31 + 30 + 31,
31 + 28 + 31 + 30 + 31 + 30,
31 + 28 + 31 + 30 + 31 + 30 + 31,
31 + 28 + 31 + 30 + 31 + 30 + 31 + 31,
31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30,
31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31,
31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30,
31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30 + 31,
}
func daysIn(m time.Month, year int) int {
if m == time.February && isLeap(year) {
return 29
}
return int(daysBefore[m] - daysBefore[m-1])
}
func isLeap(year int) bool {
return year%4 == 0 && (year%100 != 0 || year%400 == 0)
}
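
A minimal test-style sketch of the behavior the strict parser above guarantees, assuming it sits in package stdlib next to these functions and imports "testing"; the example timestamps are illustrative only.

func TestParseStrictRFC3339Sketch(t *testing.T) {
	// A canonical UTC timestamp parses successfully.
	if _, err := parseStrictRFC3339("2017-05-01T10:30:00Z"); err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	// A comma as the sub-second separator is valid ISO 8601 but not RFC 3339,
	// so the explicit check above turns it into a descriptive error.
	if _, err := parseStrictRFC3339("2017-05-01T10:30:00,5Z"); err == nil {
		t.Fatal("expected an error for a comma sub-second separator")
	}
	// Out-of-range timezone hours are also rejected, either by time.Parse
	// itself or by the explicit range check above.
	if _, err := parseStrictRFC3339("2017-05-01T10:30:00+25:00"); err == nil {
		t.Fatal("expected an error for an out-of-range timezone hour")
	}
}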

View File

@ -18,33 +18,7 @@ import (
//go:generate gofmt -w format_fsm.go
var FormatFunc = function.New(&function.Spec{
Params: []function.Parameter{
{
Name: "format",
Type: cty.String,
},
},
VarParam: &function.Parameter{
Name: "args",
Type: cty.DynamicPseudoType,
AllowNull: true,
},
Type: function.StaticReturnType(cty.String),
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
for _, arg := range args[1:] {
if !arg.IsWhollyKnown() {
// We require all nested values to be known because the only
// thing we can do for a collection/structural type is print
// it as JSON and that requires it to be wholly known.
return cty.UnknownVal(cty.String), nil
}
}
str, err := formatFSM(args[0].AsString(), args[1:])
return cty.StringVal(str), err
},
})
var FormatListFunc = function.New(&function.Spec{
Description: `Constructs a string by applying formatting verbs to a series of arguments, using a similar syntax to the C function \"printf\".`,
Params: []function.Parameter{
{
Name: "format",
@ -57,7 +31,46 @@ var FormatListFunc = function.New(&function.Spec{
AllowNull: true,
AllowUnknown: true,
},
Type: function.StaticReturnType(cty.List(cty.String)),
Type: function.StaticReturnType(cty.String),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
for _, arg := range args[1:] {
if !arg.IsWhollyKnown() {
// We require all nested values to be known because the only
// thing we can do for a collection/structural type is print
// it as JSON and that requires it to be wholly known.
// However, we might be able to refine the result with a
// known prefix, if there are literal characters before the
// first formatting verb.
f := args[0].AsString()
if idx := strings.IndexByte(f, '%'); idx > 0 {
prefix := f[:idx]
return cty.UnknownVal(cty.String).Refine().StringPrefix(prefix).NewValue(), nil
}
return cty.UnknownVal(cty.String), nil
}
}
str, err := formatFSM(args[0].AsString(), args[1:])
return cty.StringVal(str), err
},
})
var FormatListFunc = function.New(&function.Spec{
Description: `Constructs a list of strings by applying formatting verbs to a series of arguments, using a similar syntax to the C function \"printf\".`,
Params: []function.Parameter{
{
Name: "format",
Type: cty.String,
},
},
VarParam: &function.Parameter{
Name: "args",
Type: cty.DynamicPseudoType,
AllowNull: true,
AllowUnknown: true,
},
Type: function.StaticReturnType(cty.List(cty.String)),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
fmtVal := args[0]
args = args[1:]
@ -162,7 +175,7 @@ var FormatListFunc = function.New(&function.Spec{
// We require all nested values to be known because the only
// thing we can do for a collection/structural type is print
// it as JSON and that requires it to be wholly known.
ret = append(ret, cty.UnknownVal(cty.String))
ret = append(ret, cty.UnknownVal(cty.String).RefineNotNull())
continue Results
}
}
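
As the comment in the Impl above notes, when any argument is wholly unknown the formatted result is an unknown string that can still carry a prefix refinement taken from the literal text before the first verb. A minimal caller-side sketch, assuming the exported FormatFunc and the cty refinement APIs used in this change; the commented outputs are expectations, and the refinement may conservatively shorten the prefix.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	// The second argument is unknown, so the formatted result can't be
	// computed yet, but the literal text before the first verb ("Hello, ")
	// becomes a known prefix refinement on the unknown result.
	v, err := stdlib.FormatFunc.Call([]cty.Value{
		cty.StringVal("Hello, %s!"),
		cty.UnknownVal(cty.String),
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(v.IsKnown())                     // expected: false
	fmt.Printf("%q\n", v.Range().StringPrefix()) // expected: a prefix of "Hello, "
}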

View File

@ -9,6 +9,7 @@ import (
)
var EqualFunc = function.New(&function.Spec{
Description: `Returns true if the two given values are equal, or false otherwise.`,
Params: []function.Parameter{
{
Name: "a",
@ -25,13 +26,15 @@ var EqualFunc = function.New(&function.Spec{
AllowNull: true,
},
},
Type: function.StaticReturnType(cty.Bool),
Type: function.StaticReturnType(cty.Bool),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
return args[0].Equals(args[1]), nil
},
})
var NotEqualFunc = function.New(&function.Spec{
Description: `Returns false if the two given values are equal, or true otherwise.`,
Params: []function.Parameter{
{
Name: "a",
@ -48,14 +51,16 @@ var NotEqualFunc = function.New(&function.Spec{
AllowNull: true,
},
},
Type: function.StaticReturnType(cty.Bool),
Type: function.StaticReturnType(cty.Bool),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
return args[0].Equals(args[1]).Not(), nil
},
})
var CoalesceFunc = function.New(&function.Spec{
Params: []function.Parameter{},
Description: `Returns the first of the given arguments that isn't null, or raises an error if there are no non-null arguments.`,
Params: []function.Parameter{},
VarParam: &function.Parameter{
Name: "vals",
Type: cty.DynamicPseudoType,
@ -74,6 +79,7 @@ var CoalesceFunc = function.New(&function.Spec{
}
return retType, nil
},
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
for _, argVal := range args {
if !argVal.IsKnown() {
@ -89,6 +95,10 @@ var CoalesceFunc = function.New(&function.Spec{
},
})
func refineNonNull(b *cty.RefinementBuilder) *cty.RefinementBuilder {
return b.NotNull()
}
// Equal determines whether the two given values are equal, returning a
// bool value.
func Equal(a cty.Value, b cty.Value) (cty.Value, error) {

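The refineNonNull helper above is wired in as RefineResult on most specs in this package; it tells the function machinery that even an unknown result is guaranteed not to be null. A minimal caller-side sketch of the observable effect, assuming the exported EqualFunc; the commented outputs are expectations.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	// Comparing an unknown number yields an unknown bool, but because the
	// spec declares RefineResult: refineNonNull the caller can already rely
	// on the eventual result never being null.
	v, err := stdlib.EqualFunc.Call([]cty.Value{
		cty.UnknownVal(cty.Number),
		cty.NumberIntVal(5),
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(v.IsKnown())                   // expected: false
	fmt.Println(v.Range().DefinitelyNotNull()) // expected: true
}
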
View File

@ -1,28 +1,55 @@
package stdlib
import (
"bytes"
"strings"
"unicode/utf8"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/function"
"github.com/zclconf/go-cty/cty/json"
)
var JSONEncodeFunc = function.New(&function.Spec{
Description: `Returns a string containing a JSON representation of the given value.`,
Params: []function.Parameter{
{
Name: "val",
Type: cty.DynamicPseudoType,
AllowUnknown: true,
AllowDynamicType: true,
AllowNull: true,
},
},
Type: function.StaticReturnType(cty.String),
Type: function.StaticReturnType(cty.String),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
val := args[0]
if !val.IsWhollyKnown() {
// We can't serialize unknowns, so if the value is unknown or
// contains any _nested_ unknowns then our result must be
// unknown.
return cty.UnknownVal(retType), nil
// unknown. However, we might still be able to at least constrain
// the prefix of our string so that downstreams can sniff for
// whether it's valid JSON and what result types it could have.
valRng := val.Range()
if valRng.CouldBeNull() {
// If null is possible then we can't constrain the result
// beyond the type constraint, because the very first character
// of the string is what distinguishes a null.
return cty.UnknownVal(retType), nil
}
b := cty.UnknownVal(retType).Refine()
ty := valRng.TypeConstraint()
switch {
case ty == cty.String:
b = b.StringPrefixFull(`"`)
case ty.IsObjectType() || ty.IsMapType():
b = b.StringPrefixFull("{")
case ty.IsTupleType() || ty.IsListType() || ty.IsSetType():
b = b.StringPrefixFull("[")
}
return b.NewValue(), nil
}
if val.IsNull() {
@ -34,11 +61,17 @@ var JSONEncodeFunc = function.New(&function.Spec{
return cty.NilVal, err
}
// json.Marshal should already produce a trimmed string, but we'll
// make sure it always is because our unknown value refinements above
// assume there will be no leading whitespace before the value.
buf = bytes.TrimSpace(buf)
return cty.StringVal(string(buf)), nil
},
})
var JSONDecodeFunc = function.New(&function.Spec{
Description: `Parses the given string as JSON and returns a value corresponding to what the JSON document describes.`,
Params: []function.Parameter{
{
Name: "str",
@ -48,6 +81,42 @@ var JSONDecodeFunc = function.New(&function.Spec{
Type: func(args []cty.Value) (cty.Type, error) {
str := args[0]
if !str.IsKnown() {
// If the string isn't known then we can't fully parse it, but
// if the value has been refined with a prefix then we may at
// least be able to reject obviously-invalid syntax and maybe
// even predict the result type. It's safe to return a specific
// result type only if parsing a full document with this prefix
// would return exactly that type or fail with a syntax error.
rng := str.Range()
if prefix := strings.TrimSpace(rng.StringPrefix()); prefix != "" {
// If we know at least one character then it should be one
// of the few characters that can introduce a JSON value.
switch r, _ := utf8.DecodeRuneInString(prefix); r {
case '{', '[':
// These can start object values and array values
// respectively, but we can't actually form a full
// object type constraint or tuple type constraint
// without knowing all of the attributes, so we
// will still return DynamicPseudoType in this case.
case '"':
// This means that the result will either be a string
// or parsing will fail.
return cty.String, nil
case 't', 'f':
// Must either be a boolean value or a syntax error.
return cty.Bool, nil
case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.':
// These characters would all start the "number" production.
return cty.Number, nil
case 'n':
// n is valid to begin the keyword "null" but that doesn't
// give us any extra type information.
default:
// No other characters are valid as the beginning of a
// JSON value, so we can safely return an early error.
return cty.NilType, function.NewArgErrorf(0, "a JSON document cannot begin with the character %q", r)
}
}
return cty.DynamicPseudoType, nil
}
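
These two refinements are designed to compose: jsonencode promises the first character of an unknown result based on the argument's type, and jsondecode's type callback uses a refined prefix to predict the result type or reject invalid syntax early. A minimal sketch, assuming the exported JSONEncodeFunc/JSONDecodeFunc and the function package's ReturnTypeForValues; the commented outputs are expectations.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	// Encoding an unknown-but-non-null string: the result is unknown, but it
	// is refined to begin with a double quote.
	enc, err := stdlib.JSONEncodeFunc.Call([]cty.Value{
		cty.UnknownVal(cty.String).RefineNotNull(),
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", enc.Range().StringPrefix()) // expected: "\""

	// That prefix is enough for jsondecode's type callback to predict that
	// any successful decode must produce a string.
	ty, err := stdlib.JSONDecodeFunc.ReturnTypeForValues([]cty.Value{enc})
	if err != nil {
		panic(err)
	}
	fmt.Println(ty.Equals(cty.String)) // expected: true
}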

View File

@ -11,6 +11,7 @@ import (
)
var AbsoluteFunc = function.New(&function.Spec{
Description: `If the given number is negative then returns its positive equivalent, or otherwise returns the given number unchanged.`,
Params: []function.Parameter{
{
Name: "num",
@ -19,13 +20,15 @@ var AbsoluteFunc = function.New(&function.Spec{
AllowMarked: true,
},
},
Type: function.StaticReturnType(cty.Number),
Type: function.StaticReturnType(cty.Number),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
return args[0].Absolute(), nil
},
})
var AddFunc = function.New(&function.Spec{
Description: `Returns the sum of the two given numbers.`,
Params: []function.Parameter{
{
Name: "a",
@ -38,7 +41,8 @@ var AddFunc = function.New(&function.Spec{
AllowDynamicType: true,
},
},
Type: function.StaticReturnType(cty.Number),
Type: function.StaticReturnType(cty.Number),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
// big.Float.Add can panic if the input values are opposing infinities,
// so we must catch that here in order to remain within
@ -59,6 +63,7 @@ var AddFunc = function.New(&function.Spec{
})
var SubtractFunc = function.New(&function.Spec{
Description: `Returns the difference between the two given numbers.`,
Params: []function.Parameter{
{
Name: "a",
@ -71,7 +76,8 @@ var SubtractFunc = function.New(&function.Spec{
AllowDynamicType: true,
},
},
Type: function.StaticReturnType(cty.Number),
Type: function.StaticReturnType(cty.Number),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
// big.Float.Sub can panic if the input values are infinities,
// so we must catch that here in order to remain within
@ -92,6 +98,7 @@ var SubtractFunc = function.New(&function.Spec{
})
var MultiplyFunc = function.New(&function.Spec{
Description: `Returns the product of the two given numbers.`,
Params: []function.Parameter{
{
Name: "a",
@ -104,7 +111,8 @@ var MultiplyFunc = function.New(&function.Spec{
AllowDynamicType: true,
},
},
Type: function.StaticReturnType(cty.Number),
Type: function.StaticReturnType(cty.Number),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
// big.Float.Mul can panic if the input values are both zero or both
// infinity, so we must catch that here in order to remain within
@ -126,6 +134,7 @@ var MultiplyFunc = function.New(&function.Spec{
})
var DivideFunc = function.New(&function.Spec{
Description: `Divides the first given number by the second.`,
Params: []function.Parameter{
{
Name: "a",
@ -138,7 +147,8 @@ var DivideFunc = function.New(&function.Spec{
AllowDynamicType: true,
},
},
Type: function.StaticReturnType(cty.Number),
Type: function.StaticReturnType(cty.Number),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
// big.Float.Quo can panic if the input values are both zero or both
// infinity, so we must catch that here in order to remain within
@ -160,6 +170,7 @@ var DivideFunc = function.New(&function.Spec{
})
var ModuloFunc = function.New(&function.Spec{
Description: `Divides the first given number by the second and then returns the remainder.`,
Params: []function.Parameter{
{
Name: "a",
@ -172,7 +183,8 @@ var ModuloFunc = function.New(&function.Spec{
AllowDynamicType: true,
},
},
Type: function.StaticReturnType(cty.Number),
Type: function.StaticReturnType(cty.Number),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
// big.Float.Mul can panic if the input values are both zero or both
// infinity, so we must catch that here in order to remain within
@ -194,90 +206,107 @@ var ModuloFunc = function.New(&function.Spec{
})
var GreaterThanFunc = function.New(&function.Spec{
Description: `Returns true if and only if the first number is greater than the second.`,
Params: []function.Parameter{
{
Name: "a",
Type: cty.Number,
AllowUnknown: true,
AllowDynamicType: true,
AllowMarked: true,
},
{
Name: "b",
Type: cty.Number,
AllowUnknown: true,
AllowDynamicType: true,
AllowMarked: true,
},
},
Type: function.StaticReturnType(cty.Bool),
Type: function.StaticReturnType(cty.Bool),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
return args[0].GreaterThan(args[1]), nil
},
})
var GreaterThanOrEqualToFunc = function.New(&function.Spec{
Description: `Returns true if and only if the first number is greater than or equal to the second.`,
Params: []function.Parameter{
{
Name: "a",
Type: cty.Number,
AllowUnknown: true,
AllowDynamicType: true,
AllowMarked: true,
},
{
Name: "b",
Type: cty.Number,
AllowUnknown: true,
AllowDynamicType: true,
AllowMarked: true,
},
},
Type: function.StaticReturnType(cty.Bool),
Type: function.StaticReturnType(cty.Bool),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
return args[0].GreaterThanOrEqualTo(args[1]), nil
},
})
var LessThanFunc = function.New(&function.Spec{
Description: `Returns true if and only if the first number is less than the second.`,
Params: []function.Parameter{
{
Name: "a",
Type: cty.Number,
AllowUnknown: true,
AllowDynamicType: true,
AllowMarked: true,
},
{
Name: "b",
Type: cty.Number,
AllowUnknown: true,
AllowDynamicType: true,
AllowMarked: true,
},
},
Type: function.StaticReturnType(cty.Bool),
Type: function.StaticReturnType(cty.Bool),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
return args[0].LessThan(args[1]), nil
},
})
var LessThanOrEqualToFunc = function.New(&function.Spec{
Description: `Returns true if and only if the first number is less than or equal to the second.`,
Params: []function.Parameter{
{
Name: "a",
Type: cty.Number,
AllowUnknown: true,
AllowDynamicType: true,
AllowMarked: true,
},
{
Name: "b",
Type: cty.Number,
AllowUnknown: true,
AllowDynamicType: true,
AllowMarked: true,
},
},
Type: function.StaticReturnType(cty.Bool),
Type: function.StaticReturnType(cty.Bool),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
return args[0].LessThanOrEqualTo(args[1]), nil
},
})
var NegateFunc = function.New(&function.Spec{
Description: `Multiplies the given number by -1.`,
Params: []function.Parameter{
{
Name: "num",
@ -286,20 +315,23 @@ var NegateFunc = function.New(&function.Spec{
AllowMarked: true,
},
},
Type: function.StaticReturnType(cty.Number),
Type: function.StaticReturnType(cty.Number),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
return args[0].Negate(), nil
},
})
var MinFunc = function.New(&function.Spec{
Params: []function.Parameter{},
Description: `Returns the numerically smallest of all of the given numbers.`,
Params: []function.Parameter{},
VarParam: &function.Parameter{
Name: "numbers",
Type: cty.Number,
AllowDynamicType: true,
},
Type: function.StaticReturnType(cty.Number),
Type: function.StaticReturnType(cty.Number),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
if len(args) == 0 {
return cty.NilVal, fmt.Errorf("must pass at least one number")
@ -317,13 +349,15 @@ var MinFunc = function.New(&function.Spec{
})
var MaxFunc = function.New(&function.Spec{
Params: []function.Parameter{},
Description: `Returns the numerically greatest of all of the given numbers.`,
Params: []function.Parameter{},
VarParam: &function.Parameter{
Name: "numbers",
Type: cty.Number,
AllowDynamicType: true,
},
Type: function.StaticReturnType(cty.Number),
Type: function.StaticReturnType(cty.Number),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
if len(args) == 0 {
return cty.NilVal, fmt.Errorf("must pass at least one number")
@ -341,6 +375,7 @@ var MaxFunc = function.New(&function.Spec{
})
var IntFunc = function.New(&function.Spec{
Description: `Discards any fractional portion of the given number.`,
Params: []function.Parameter{
{
Name: "num",
@ -348,7 +383,8 @@ var IntFunc = function.New(&function.Spec{
AllowDynamicType: true,
},
},
Type: function.StaticReturnType(cty.Number),
Type: function.StaticReturnType(cty.Number),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
bf := args[0].AsBigFloat()
if bf.IsInt() {
@ -363,13 +399,15 @@ var IntFunc = function.New(&function.Spec{
// CeilFunc is a function that returns the closest whole number greater
// than or equal to the given value.
var CeilFunc = function.New(&function.Spec{
Description: `Returns the smallest whole number that is greater than or equal to the given value.`,
Params: []function.Parameter{
{
Name: "num",
Type: cty.Number,
},
},
Type: function.StaticReturnType(cty.Number),
Type: function.StaticReturnType(cty.Number),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
f := args[0].AsBigFloat()
@ -392,13 +430,15 @@ var CeilFunc = function.New(&function.Spec{
// FloorFunc is a function that returns the closest whole number lesser
// than or equal to the given value.
var FloorFunc = function.New(&function.Spec{
Description: `Returns the greatest whole number that is less than or equal to the given value.`,
Params: []function.Parameter{
{
Name: "num",
Type: cty.Number,
},
},
Type: function.StaticReturnType(cty.Number),
Type: function.StaticReturnType(cty.Number),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
f := args[0].AsBigFloat()
@ -420,6 +460,7 @@ var FloorFunc = function.New(&function.Spec{
// LogFunc is a function that returns the logarithm of a given number in a given base.
var LogFunc = function.New(&function.Spec{
Description: `Returns the logarithm of the given number in the given base.`,
Params: []function.Parameter{
{
Name: "num",
@ -430,7 +471,8 @@ var LogFunc = function.New(&function.Spec{
Type: cty.Number,
},
},
Type: function.StaticReturnType(cty.Number),
Type: function.StaticReturnType(cty.Number),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
var num float64
if err := gocty.FromCtyValue(args[0], &num); err != nil {
@ -448,6 +490,7 @@ var LogFunc = function.New(&function.Spec{
// PowFunc is a function that returns the result of raising a given number to a given power.
var PowFunc = function.New(&function.Spec{
Description: `Returns the given number raised to the given power (exponentiation).`,
Params: []function.Parameter{
{
Name: "num",
@ -458,7 +501,8 @@ var PowFunc = function.New(&function.Spec{
Type: cty.Number,
},
},
Type: function.StaticReturnType(cty.Number),
Type: function.StaticReturnType(cty.Number),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
var num float64
if err := gocty.FromCtyValue(args[0], &num); err != nil {
@ -477,13 +521,15 @@ var PowFunc = function.New(&function.Spec{
// SignumFunc is a function that determines the sign of a number, returning a
// number between -1 and 1 to represent the sign.
var SignumFunc = function.New(&function.Spec{
Description: `Returns 0 if the given number is zero, 1 if the given number is positive, or -1 if the given number is negative.`,
Params: []function.Parameter{
{
Name: "num",
Type: cty.Number,
},
},
Type: function.StaticReturnType(cty.Number),
Type: function.StaticReturnType(cty.Number),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
var num int
if err := gocty.FromCtyValue(args[0], &num); err != nil {
@ -502,6 +548,7 @@ var SignumFunc = function.New(&function.Spec{
// ParseIntFunc is a function that parses a string argument and returns an integer of the specified base.
var ParseIntFunc = function.New(&function.Spec{
Description: `Parses the given string as a number of the given base, or raises an error if the string contains invalid characters.`,
Params: []function.Parameter{
{
Name: "number",
@ -519,6 +566,7 @@ var ParseIntFunc = function.New(&function.Spec{
}
return cty.Number, nil
},
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
var numstr string

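The recover guards described in the arithmetic Impls above exist because of a documented quirk of math/big: for example, big.Float.Add panics with big.ErrNaN when the operands are infinities with opposite signs. A standalone sketch of that guard pattern follows; it is not the vendored implementation itself, and the helper name is made up.

package main

import (
	"fmt"
	"math/big"
)

// addCatchingNaN is a hypothetical helper mirroring the guard pattern the
// comments above describe: big.Float.Add panics with big.ErrNaN when the
// operands are infinities with opposite signs, so the panic is converted
// into an ordinary error.
func addCatchingNaN(a, b *big.Float) (sum *big.Float, err error) {
	defer func() {
		if r := recover(); r != nil {
			if _, ok := r.(big.ErrNaN); ok {
				err = fmt.Errorf("can't compute sum of opposing infinities")
				return
			}
			panic(r) // not a panic we recognize, so propagate it
		}
	}()
	return new(big.Float).Add(a, b), nil
}

func main() {
	posInf := new(big.Float).SetInf(false) // +Inf
	negInf := new(big.Float).SetInf(true)  // -Inf
	if _, err := addCatchingNaN(posInf, negInf); err != nil {
		fmt.Println(err)
	}
}
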
View File

@ -10,6 +10,7 @@ import (
)
var RegexFunc = function.New(&function.Spec{
Description: `Applies the given regular expression pattern to the given string and returns information about a single match, or raises an error if there is no match.`,
Params: []function.Parameter{
{
Name: "pattern",
@ -32,6 +33,7 @@ var RegexFunc = function.New(&function.Spec{
}
return retTy, err
},
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
if retType == cty.DynamicPseudoType {
return cty.DynamicVal, nil
@ -54,6 +56,7 @@ var RegexFunc = function.New(&function.Spec{
})
var RegexAllFunc = function.New(&function.Spec{
Description: `Applies the given regular expression pattern to the given string and returns a list of information about all non-overlapping matches, or an empty list if there are no matches.`,
Params: []function.Parameter{
{
Name: "pattern",
@ -77,6 +80,7 @@ var RegexAllFunc = function.New(&function.Spec{
}
return cty.List(retTy), err
},
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
ety := retType.ElementType()
if ety == cty.DynamicPseudoType {

View File

@ -9,7 +9,8 @@ import (
)
var ConcatFunc = function.New(&function.Spec{
Params: []function.Parameter{},
Description: `Concatenates together all of the given lists or tuples into a single sequence, preserving the input order.`,
Params: []function.Parameter{},
VarParam: &function.Parameter{
Name: "seqs",
Type: cty.DynamicPseudoType,
@ -73,6 +74,7 @@ var ConcatFunc = function.New(&function.Spec{
}
return cty.Tuple(etys), nil
},
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
switch {
case retType.IsListType():
@ -137,11 +139,13 @@ var ConcatFunc = function.New(&function.Spec{
})
var RangeFunc = function.New(&function.Spec{
Description: `Returns a list of numbers spread evenly over a particular range.`,
VarParam: &function.Parameter{
Name: "params",
Type: cty.Number,
},
Type: function.StaticReturnType(cty.List(cty.Number)),
Type: function.StaticReturnType(cty.List(cty.Number)),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
var start, end, step cty.Value
switch len(args) {

View File

@ -10,6 +10,7 @@ import (
)
var SetHasElementFunc = function.New(&function.Spec{
Description: `Returns true if the given set contains the given element, or false otherwise.`,
Params: []function.Parameter{
{
Name: "set",
@ -22,13 +23,15 @@ var SetHasElementFunc = function.New(&function.Spec{
AllowDynamicType: true,
},
},
Type: function.StaticReturnType(cty.Bool),
Type: function.StaticReturnType(cty.Bool),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
return args[0].HasElement(args[1]), nil
},
})
var SetUnionFunc = function.New(&function.Spec{
Description: `Returns the union of all given sets.`,
Params: []function.Parameter{
{
Name: "first_set",
@ -41,13 +44,15 @@ var SetUnionFunc = function.New(&function.Spec{
Type: cty.Set(cty.DynamicPseudoType),
AllowDynamicType: true,
},
Type: setOperationReturnType,
Type: setOperationReturnType,
RefineResult: refineNonNull,
Impl: setOperationImpl(func(s1, s2 cty.ValueSet) cty.ValueSet {
return s1.Union(s2)
}, true),
})
var SetIntersectionFunc = function.New(&function.Spec{
Description: `Returns the intersection of all given sets.`,
Params: []function.Parameter{
{
Name: "first_set",
@ -60,13 +65,15 @@ var SetIntersectionFunc = function.New(&function.Spec{
Type: cty.Set(cty.DynamicPseudoType),
AllowDynamicType: true,
},
Type: setOperationReturnType,
Type: setOperationReturnType,
RefineResult: refineNonNull,
Impl: setOperationImpl(func(s1, s2 cty.ValueSet) cty.ValueSet {
return s1.Intersection(s2)
}, false),
})
var SetSubtractFunc = function.New(&function.Spec{
Description: `Returns the relative complement of the two given sets.`,
Params: []function.Parameter{
{
Name: "a",
@ -79,13 +86,15 @@ var SetSubtractFunc = function.New(&function.Spec{
AllowDynamicType: true,
},
},
Type: setOperationReturnType,
Type: setOperationReturnType,
RefineResult: refineNonNull,
Impl: setOperationImpl(func(s1, s2 cty.ValueSet) cty.ValueSet {
return s1.Subtract(s2)
}, false),
})
var SetSymmetricDifferenceFunc = function.New(&function.Spec{
Description: `Returns the symmetric difference of the two given sets.`,
Params: []function.Parameter{
{
Name: "first_set",
@ -98,7 +107,8 @@ var SetSymmetricDifferenceFunc = function.New(&function.Spec{
Type: cty.Set(cty.DynamicPseudoType),
AllowDynamicType: true,
},
Type: setOperationReturnType,
Type: setOperationReturnType,
RefineResult: refineNonNull,
Impl: setOperationImpl(func(s1, s2 cty.ValueSet) cty.ValueSet {
return s1.SymmetricDifference(s2)
}, false),

View File

@ -14,6 +14,7 @@ import (
)
var UpperFunc = function.New(&function.Spec{
Description: "Returns the given string with all Unicode letters translated to their uppercase equivalents.",
Params: []function.Parameter{
{
Name: "str",
@ -21,7 +22,8 @@ var UpperFunc = function.New(&function.Spec{
AllowDynamicType: true,
},
},
Type: function.StaticReturnType(cty.String),
Type: function.StaticReturnType(cty.String),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
in := args[0].AsString()
out := strings.ToUpper(in)
@ -30,6 +32,7 @@ var UpperFunc = function.New(&function.Spec{
})
var LowerFunc = function.New(&function.Spec{
Description: "Returns the given string with all Unicode letters translated to their lowercase equivalents.",
Params: []function.Parameter{
{
Name: "str",
@ -37,7 +40,8 @@ var LowerFunc = function.New(&function.Spec{
AllowDynamicType: true,
},
},
Type: function.StaticReturnType(cty.String),
Type: function.StaticReturnType(cty.String),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
in := args[0].AsString()
out := strings.ToLower(in)
@ -46,6 +50,7 @@ var LowerFunc = function.New(&function.Spec{
})
var ReverseFunc = function.New(&function.Spec{
Description: "Returns the given string with all of its Unicode characters in reverse order.",
Params: []function.Parameter{
{
Name: "str",
@ -53,7 +58,8 @@ var ReverseFunc = function.New(&function.Spec{
AllowDynamicType: true,
},
},
Type: function.StaticReturnType(cty.String),
Type: function.StaticReturnType(cty.String),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
in := []byte(args[0].AsString())
out := make([]byte, len(in))
@ -73,48 +79,75 @@ var ReverseFunc = function.New(&function.Spec{
})
var StrlenFunc = function.New(&function.Spec{
Description: "Returns the number of Unicode characters (technically: grapheme clusters) in the given string.",
Params: []function.Parameter{
{
Name: "str",
Type: cty.String,
AllowUnknown: true,
AllowDynamicType: true,
},
},
Type: function.StaticReturnType(cty.Number),
RefineResult: func(b *cty.RefinementBuilder) *cty.RefinementBuilder {
// String length is never null and never negative.
// (We might refine the lower bound even more inside Impl.)
return b.NotNull().NumberRangeLowerBound(cty.NumberIntVal(0), true)
},
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
in := args[0].AsString()
l := 0
inB := []byte(in)
for i := 0; i < len(in); {
d, _, _ := textseg.ScanGraphemeClusters(inB[i:], true)
l++
i += d
if !args[0].IsKnown() {
ret := cty.UnknownVal(cty.Number)
// We may still be able to return a constrained result based on the
// refined range of the unknown value.
inRng := args[0].Range()
if inRng.TypeConstraint() == cty.String {
prefixLen := int64(graphemeClusterCount(inRng.StringPrefix()))
ret = ret.Refine().NumberRangeLowerBound(cty.NumberIntVal(prefixLen), true).NewValue()
}
return ret, nil
}
in := args[0].AsString()
l := graphemeClusterCount(in)
return cty.NumberIntVal(int64(l)), nil
},
})
func graphemeClusterCount(in string) int {
l := 0
inB := []byte(in)
for i := 0; i < len(in); {
d, _, _ := textseg.ScanGraphemeClusters(inB[i:], true)
l++
i += d
}
return l
}
var SubstrFunc = function.New(&function.Spec{
Description: "Extracts a substring from the given string.",
Params: []function.Parameter{
{
Name: "str",
Description: "The input string.",
Type: cty.String,
AllowDynamicType: true,
},
{
Name: "offset",
Description: "The starting offset in Unicode characters.",
Type: cty.Number,
AllowDynamicType: true,
},
{
Name: "length",
Description: "The maximum length of the result in Unicode characters.",
Type: cty.Number,
AllowDynamicType: true,
},
},
Type: function.StaticReturnType(cty.String),
Type: function.StaticReturnType(cty.String),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
in := []byte(args[0].AsString())
var offset, length int
@ -197,17 +230,21 @@ var SubstrFunc = function.New(&function.Spec{
})
var JoinFunc = function.New(&function.Spec{
Description: "Concatenates together the elements of all given lists with a delimiter, producing a single string.",
Params: []function.Parameter{
{
Name: "separator",
Type: cty.String,
Name: "separator",
Description: "Delimiter to insert between the given strings.",
Type: cty.String,
},
},
VarParam: &function.Parameter{
Name: "lists",
Type: cty.List(cty.String),
Name: "lists",
Description: "One or more lists of strings to join.",
Type: cty.List(cty.String),
},
Type: function.StaticReturnType(cty.String),
Type: function.StaticReturnType(cty.String),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
sep := args[0].AsString()
listVals := args[1:]
@ -244,20 +281,32 @@ var JoinFunc = function.New(&function.Spec{
})
var SortFunc = function.New(&function.Spec{
Description: "Applies a lexicographic sort to the elements of the given list.",
Params: []function.Parameter{
{
Name: "list",
Type: cty.List(cty.String),
Name: "list",
Type: cty.List(cty.String),
AllowUnknown: true,
},
},
Type: function.StaticReturnType(cty.List(cty.String)),
Type: function.StaticReturnType(cty.List(cty.String)),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
listVal := args[0]
if !listVal.IsWhollyKnown() {
// If some of the element values aren't known yet then we
// can't yet predict the order of the result.
return cty.UnknownVal(retType), nil
// can't yet predict the order of the result, but we can be
// sure that the length won't change.
ret := cty.UnknownVal(retType)
if listVal.Type().IsListType() {
rng := listVal.Range()
ret = ret.Refine().
CollectionLengthLowerBound(rng.LengthLowerBound()).
CollectionLengthUpperBound(rng.LengthUpperBound()).
NewValue()
}
return ret, nil
}
if listVal.LengthInt() == 0 { // Easy path
return listVal, nil
@ -282,17 +331,21 @@ var SortFunc = function.New(&function.Spec{
})
var SplitFunc = function.New(&function.Spec{
Description: "Produces a list of one or more strings by splitting the given string at all instances of a given separator substring.",
Params: []function.Parameter{
{
Name: "separator",
Type: cty.String,
Name: "separator",
Description: "The substring that delimits the result strings.",
Type: cty.String,
},
{
Name: "str",
Type: cty.String,
Name: "str",
Description: "The string to split.",
Type: cty.String,
},
},
Type: function.StaticReturnType(cty.List(cty.String)),
Type: function.StaticReturnType(cty.List(cty.String)),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
sep := args[0].AsString()
str := args[1].AsString()
@ -311,13 +364,15 @@ var SplitFunc = function.New(&function.Spec{
// ChompFunc is a function that removes newline characters at the end of a
// string.
var ChompFunc = function.New(&function.Spec{
Description: "Removes one or more newline characters from the end of the given string.",
Params: []function.Parameter{
{
Name: "str",
Type: cty.String,
},
},
Type: function.StaticReturnType(cty.String),
Type: function.StaticReturnType(cty.String),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
newlines := regexp.MustCompile(`(?:\r\n?|\n)*\z`)
return cty.StringVal(newlines.ReplaceAllString(args[0].AsString(), "")), nil
@ -327,17 +382,21 @@ var ChompFunc = function.New(&function.Spec{
// IndentFunc is a function that adds a given number of spaces to the
// beginnings of all but the first line in a given multi-line string.
var IndentFunc = function.New(&function.Spec{
Description: "Adds a given number of spaces after each newline character in the given string.",
Params: []function.Parameter{
{
Name: "spaces",
Type: cty.Number,
Name: "spaces",
Description: "Number of spaces to add after each newline character.",
Type: cty.Number,
},
{
Name: "str",
Type: cty.String,
Name: "str",
Description: "The string to transform.",
Type: cty.String,
},
},
Type: function.StaticReturnType(cty.String),
Type: function.StaticReturnType(cty.String),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
var spaces int
if err := gocty.FromCtyValue(args[0], &spaces); err != nil {
@ -352,13 +411,15 @@ var IndentFunc = function.New(&function.Spec{
// TitleFunc is a function that converts the first letter of each word in the
// given string to uppercase.
var TitleFunc = function.New(&function.Spec{
Description: "Replaces one letter after each non-letter and non-digit character with its uppercase equivalent.",
Params: []function.Parameter{
{
Name: "str",
Type: cty.String,
},
},
Type: function.StaticReturnType(cty.String),
Type: function.StaticReturnType(cty.String),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
return cty.StringVal(strings.Title(args[0].AsString())), nil
},
@ -367,13 +428,15 @@ var TitleFunc = function.New(&function.Spec{
// TrimSpaceFunc is a function that removes any space characters from the start
// and end of the given string.
var TrimSpaceFunc = function.New(&function.Spec{
Description: "Removes any consecutive space characters (as defined by Unicode) from the start and end of the given string.",
Params: []function.Parameter{
{
Name: "str",
Type: cty.String,
},
},
Type: function.StaticReturnType(cty.String),
Type: function.StaticReturnType(cty.String),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
return cty.StringVal(strings.TrimSpace(args[0].AsString())), nil
},
@ -382,20 +445,27 @@ var TrimSpaceFunc = function.New(&function.Spec{
// TrimFunc is a function that removes the specified characters from the start
// and end of the given string.
var TrimFunc = function.New(&function.Spec{
Description: "Removes consecutive sequences of characters in \"cutset\" from the start and end of the given string.",
Params: []function.Parameter{
{
Name: "str",
Type: cty.String,
Name: "str",
Description: "The string to trim.",
Type: cty.String,
},
{
Name: "cutset",
Type: cty.String,
Name: "cutset",
Description: "A string containing all of the characters to trim. Each character is taken separately, so the order of characters is insignificant.",
Type: cty.String,
},
},
Type: function.StaticReturnType(cty.String),
Type: function.StaticReturnType(cty.String),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
str := args[0].AsString()
cutset := args[1].AsString()
// NOTE: This doesn't properly handle any character that is encoded
// with multiple sequential code units, such as letters with
// combining diacritics and emoji modifier sequences.
return cty.StringVal(strings.Trim(str, cutset)), nil
},
})
@ -403,17 +473,21 @@ var TrimFunc = function.New(&function.Spec{
// TrimPrefixFunc is a function that removes the specified characters from the
// start of the given string.
var TrimPrefixFunc = function.New(&function.Spec{
Description: "Removes the given prefix from the start of the given string, if present.",
Params: []function.Parameter{
{
Name: "str",
Type: cty.String,
Name: "str",
Description: "The string to trim.",
Type: cty.String,
},
{
Name: "prefix",
Type: cty.String,
Name: "prefix",
Description: "The prefix to remove, if present.",
Type: cty.String,
},
},
Type: function.StaticReturnType(cty.String),
Type: function.StaticReturnType(cty.String),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
str := args[0].AsString()
prefix := args[1].AsString()
@ -424,17 +498,21 @@ var TrimPrefixFunc = function.New(&function.Spec{
// TrimSuffixFunc is a function that removes the specified characters from the
// end of the given string.
var TrimSuffixFunc = function.New(&function.Spec{
Description: "Removes the given suffix from the end of the given string, if present.",
Params: []function.Parameter{
{
Name: "str",
Type: cty.String,
Name: "str",
Description: "The string to trim.",
Type: cty.String,
},
{
Name: "suffix",
Type: cty.String,
Name: "suffix",
Description: "The suffix to remove, if present.",
Type: cty.String,
},
},
Type: function.StaticReturnType(cty.String),
Type: function.StaticReturnType(cty.String),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
str := args[0].AsString()
cutset := args[1].AsString()

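Strlen and Sort above can now return refined unknowns: a known string prefix gives a lower bound on the character count, and a known list length carries through sorting. A minimal caller-side sketch, assuming the exported StrlenFunc and SortFunc; the commented outputs are expectations.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	// Strlen: an unknown string with a known prefix yields an unknown number
	// whose lower bound is the prefix's grapheme cluster count.
	s := cty.UnknownVal(cty.String).Refine().StringPrefixFull("hello").NewValue()
	n, err := stdlib.StrlenFunc.Call([]cty.Value{s})
	if err != nil {
		panic(err)
	}
	low, _ := n.Range().NumberLowerBound()
	fmt.Println(n.IsKnown(), low.RawEquals(cty.NumberIntVal(5))) // expected: false true

	// Sort: unknown elements prevent ordering, but the known list length is
	// carried through as a collection length refinement.
	list := cty.ListVal([]cty.Value{cty.UnknownVal(cty.String), cty.StringVal("b")})
	sorted, err := stdlib.SortFunc.Call([]cty.Value{list})
	if err != nil {
		panic(err)
	}
	fmt.Println(sorted.Range().LengthLowerBound(), sorted.Range().LengthUpperBound()) // expected: 2 2
}
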
View File

@ -12,21 +12,26 @@ import (
// substring, and replaces each occurrence with a given replacement string.
// The substr argument is a simple string.
var ReplaceFunc = function.New(&function.Spec{
Description: `Replaces all instances of the given substring in the given string with the given replacement string.`,
Params: []function.Parameter{
{
Name: "str",
Type: cty.String,
Name: "str",
Description: `The string to search within.`,
Type: cty.String,
},
{
Name: "substr",
Type: cty.String,
Name: "substr",
Description: `The substring to search for.`,
Type: cty.String,
},
{
Name: "replace",
Type: cty.String,
Name: "replace",
Description: `The new substring to replace substr with.`,
Type: cty.String,
},
},
Type: function.StaticReturnType(cty.String),
Type: function.StaticReturnType(cty.String),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
str := args[0].AsString()
substr := args[1].AsString()
@ -40,13 +45,14 @@ var ReplaceFunc = function.New(&function.Spec{
// given substring, and replaces each occurrence with a given replacement
// string. The substr argument must be a valid regular expression.
var RegexReplaceFunc = function.New(&function.Spec{
Description: `Applies the given regular expression pattern to the given string and replaces all matches with the given replacement string.`,
Params: []function.Parameter{
{
Name: "str",
Type: cty.String,
},
{
Name: "substr",
Name: "pattern",
Type: cty.String,
},
{
@ -54,7 +60,8 @@ var RegexReplaceFunc = function.New(&function.Spec{
Type: cty.String,
},
},
Type: function.StaticReturnType(cty.String),
Type: function.StaticReturnType(cty.String),
RefineResult: refineNonNull,
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
str := args[0].AsString()
substr := args[1].AsString()

View File

@ -1,204 +0,0 @@
package cty
import (
"bytes"
"encoding/gob"
"errors"
"fmt"
"math/big"
"github.com/zclconf/go-cty/cty/set"
)
// GobEncode is an implementation of the gob.GobEncoder interface, which
// allows Values to be included in structures encoded with encoding/gob.
//
// Currently it is not possible to represent values of capsule types in gob,
// because the types themselves cannot be represented.
func (val Value) GobEncode() ([]byte, error) {
if val.IsMarked() {
return nil, errors.New("value is marked")
}
buf := &bytes.Buffer{}
enc := gob.NewEncoder(buf)
gv := gobValue{
Version: 0,
Ty: val.ty,
V: val.v,
}
err := enc.Encode(gv)
if err != nil {
return nil, fmt.Errorf("error encoding cty.Value: %s", err)
}
return buf.Bytes(), nil
}
// GobDecode is an implementation of the gob.GobDecoder interface, which
// inverts the operation performed by GobEncode. See the documentation of
// GobEncode for considerations when using cty.Value instances with gob.
func (val *Value) GobDecode(buf []byte) error {
r := bytes.NewReader(buf)
dec := gob.NewDecoder(r)
var gv gobValue
err := dec.Decode(&gv)
if err != nil {
return fmt.Errorf("error decoding cty.Value: %s", err)
}
if gv.Version != 0 {
return fmt.Errorf("unsupported cty.Value encoding version %d; only 0 is supported", gv.Version)
}
// Because big.Float.GobEncode is implemented with a pointer receiver,
// gob encoding of an interface{} containing a *big.Float value does not
// round-trip correctly, emerging instead as a non-pointer big.Float.
// The rest of cty expects all number values to be represented by
// *big.Float, so we'll fix that up here.
gv.V = gobDecodeFixNumberPtr(gv.V, gv.Ty)
val.ty = gv.Ty
val.v = gv.V
return nil
}
// GobEncode is an implementation of the gob.GobEncoder interface, which
// allows Types to be included in structures encoded with encoding/gob.
//
// Currently it is not possible to represent capsule types in gob.
func (t Type) GobEncode() ([]byte, error) {
buf := &bytes.Buffer{}
enc := gob.NewEncoder(buf)
gt := gobType{
Version: 0,
Impl: t.typeImpl,
}
err := enc.Encode(gt)
if err != nil {
return nil, fmt.Errorf("error encoding cty.Type: %s", err)
}
return buf.Bytes(), nil
}
// GobDecode is an implementation of the gob.GobDecoder interface, which
// reverses the encoding performed by GobEncode to allow types to be recovered
// from gob buffers.
func (t *Type) GobDecode(buf []byte) error {
r := bytes.NewReader(buf)
dec := gob.NewDecoder(r)
var gt gobType
err := dec.Decode(&gt)
if err != nil {
return fmt.Errorf("error decoding cty.Type: %s", err)
}
if gt.Version != 0 {
return fmt.Errorf("unsupported cty.Type encoding version %d; only 0 is supported", gt.Version)
}
t.typeImpl = gt.Impl
return nil
}
// Capsule types cannot currently be gob-encoded, because they rely on pointer
// equality and we have no way to recover the original pointer on decode.
func (t *capsuleType) GobEncode() ([]byte, error) {
return nil, fmt.Errorf("cannot gob-encode capsule type %q", t.FriendlyName(friendlyTypeName))
}
func (t *capsuleType) GobDecode() ([]byte, error) {
return nil, fmt.Errorf("cannot gob-decode capsule type %q", t.FriendlyName(friendlyTypeName))
}
type gobValue struct {
Version int
Ty Type
V interface{}
}
type gobType struct {
Version int
Impl typeImpl
}
type gobCapsuleTypeImpl struct {
}
// gobDecodeFixNumberPtr fixes an unfortunate quirk of round-tripping cty.Number
// values through gob: the big.Float.GobEncode method is implemented on a
// pointer receiver, and so it loses the "pointer-ness" of the value on
// encode, causing the values to emerge the other end as big.Float rather than
// *big.Float as we expect elsewhere in cty.
//
// The implementation of gobDecodeFixNumberPtr mutates the given raw value
// during its work, and may either return the same value mutated or a new
// value. Callers must no longer use whatever value they pass as "raw" after
// this function is called.
func gobDecodeFixNumberPtr(raw interface{}, ty Type) interface{} {
// Unfortunately we need to work recursively here because number values
// might be embedded in structural or collection type values.
switch {
case ty.Equals(Number):
if bf, ok := raw.(big.Float); ok {
return &bf // wrap in pointer
}
case ty.IsMapType() && ty.ElementType().Equals(Number):
if m, ok := raw.(map[string]interface{}); ok {
for k, v := range m {
m[k] = gobDecodeFixNumberPtr(v, ty.ElementType())
}
}
case ty.IsListType() && ty.ElementType().Equals(Number):
if s, ok := raw.([]interface{}); ok {
for i, v := range s {
s[i] = gobDecodeFixNumberPtr(v, ty.ElementType())
}
}
case ty.IsSetType() && ty.ElementType().Equals(Number):
if s, ok := raw.(set.Set); ok {
newS := set.NewSet(s.Rules())
for it := s.Iterator(); it.Next(); {
newV := gobDecodeFixNumberPtr(it.Value(), ty.ElementType())
newS.Add(newV)
}
return newS
}
case ty.IsObjectType():
if m, ok := raw.(map[string]interface{}); ok {
for k, v := range m {
aty := ty.AttributeType(k)
m[k] = gobDecodeFixNumberPtr(v, aty)
}
}
case ty.IsTupleType():
if s, ok := raw.([]interface{}); ok {
for i, v := range s {
ety := ty.TupleElementType(i)
s[i] = gobDecodeFixNumberPtr(v, ety)
}
}
}
return raw
}
// gobDecodeFixNumberPtrVal is a helper wrapper around gobDecodeFixNumberPtr
// that works with already-constructed values. This is primarily for testing,
// to fix up intentionally-invalid number values for the parts of the test
// code that need them to be valid, such as calling GoString on them.
func gobDecodeFixNumberPtrVal(v Value) Value {
raw := gobDecodeFixNumberPtr(v.v, v.ty)
return Value{
v: raw,
ty: v.ty,
}
}

View File

@ -11,7 +11,7 @@ import (
var valueType = reflect.TypeOf(cty.Value{})
var typeType = reflect.TypeOf(cty.Type{})
var setType = reflect.TypeOf(set.Set{})
var setType = reflect.TypeOf(set.Set[interface{}]{})
var bigFloatType = reflect.TypeOf(big.Float{})
var bigIntType = reflect.TypeOf(big.Int{})

View File

@ -268,7 +268,7 @@ func toCtySet(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error)
return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Type(), cty.Set(ety))
}
rawSet := val.Interface().(set.Set)
rawSet := val.Interface().(set.Set[interface{}])
inVals := rawSet.Values()
if len(inVals) == 0 {

View File

@ -8,7 +8,7 @@ import (
// unknowns, for operations that short-circuit to return unknown in that case.
func anyUnknown(values ...Value) bool {
for _, val := range values {
if val.v == unknown {
if _, unknown := val.v.(*unknownType); unknown {
return true
}
}
@ -39,7 +39,7 @@ func typeCheck(required Type, ret Type, values ...Value) (shortCircuit *Value, e
)
}
if val.v == unknown {
if _, unknown := val.v.(*unknownType); unknown {
hasUnknown = true
}
}

View File

@ -51,7 +51,7 @@ func (t typeMap) GoString() string {
return fmt.Sprintf("cty.Map(%#v)", t.ElementTypeT)
}
// IsMapType returns true if the given type is a list type, regardless of its
// IsMapType returns true if the given type is a map type, regardless of its
// element type.
func (t Type) IsMapType() bool {
_, ok := t.typeImpl.(typeMap)

View File

@ -190,6 +190,9 @@ func (val Value) HasSameMarks(other Value) bool {
// An application that never calls this method does not need to worry about
// handling marked values.
func (val Value) Mark(mark interface{}) Value {
if _, ok := mark.(ValueMarks); ok {
panic("cannot call Value.Mark with a ValueMarks value (use WithMarks instead)")
}
var newMarker marker
newMarker.realV = val.v
if mr, ok := val.v.(marker); ok {

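The new panic above catches a common mistake: passing a whole ValueMarks set to Mark, which expects a single mark value; WithMarks is the call for applying a set. A minimal sketch of the distinction, using the existing cty marking API; the commented outputs are expectations.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	v := cty.StringVal("secret")

	// Mark attaches a single mark value.
	a := v.Mark("sensitive")
	fmt.Println(a.HasMark("sensitive")) // expected: true

	// WithMarks is the right call when a whole ValueMarks set is in hand;
	// passing the set itself to Mark now panics with the message above.
	marks := cty.NewValueMarks("sensitive", "private")
	b := v.WithMarks(marks)
	fmt.Println(b.HasMark("private")) // expected: true
}
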
View File

@ -11,14 +11,14 @@ import (
// to talk about a subset of paths within a value that meet some criteria,
// without directly modifying the values at those paths.
type PathSet struct {
set set.Set
set set.Set[Path]
}
// NewPathSet creates and returns a PathSet, with initial contents optionally
// set by the given arguments.
func NewPathSet(paths ...Path) PathSet {
ret := PathSet{
set: set.NewSet(pathSetRules{}),
set: set.NewSet(set.Rules[Path](pathSetRules{})),
}
for _, path := range paths {
@ -61,7 +61,7 @@ func (s PathSet) List() []Path {
}
ret := make([]Path, 0, s.set.Length())
for it := s.set.Iterator(); it.Next(); {
ret = append(ret, it.Value().(Path))
ret = append(ret, it.Value())
}
return ret
}
@ -134,8 +134,7 @@ var indexStepPlaceholder = []byte("#")
type pathSetRules struct {
}
func (r pathSetRules) Hash(v interface{}) int {
path := v.(Path)
func (r pathSetRules) Hash(path Path) int {
hash := crc64.New(crc64Table)
for _, rawStep := range path {
@ -159,10 +158,7 @@ func (r pathSetRules) Hash(v interface{}) int {
return int(hash.Sum64())
}
func (r pathSetRules) Equivalent(a, b interface{}) bool {
aPath := a.(Path)
bPath := b.(Path)
func (r pathSetRules) Equivalent(aPath, bPath Path) bool {
if len(aPath) != len(bPath) {
return false
}
@ -198,7 +194,7 @@ func (r pathSetRules) Equivalent(a, b interface{}) bool {
}
// SameRules is true if both Rules instances are pathSetRules structs.
func (r pathSetRules) SameRules(other set.Rules) bool {
func (r pathSetRules) SameRules(other set.Rules[Path]) bool {
_, ok := other.(pathSetRules)
return ok
}
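
With the set package now generic, a Rules implementation is written directly against its element type, as pathSetRules is for Path above. A minimal sketch of the same shape for a different element type; intSetRules is hypothetical and not part of this change.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty/set"
)

// intSetRules is a hypothetical Rules[int] implementation, mirroring the
// shape of pathSetRules above with T = int.
type intSetRules struct{}

func (r intSetRules) Hash(v int) int           { return v }
func (r intSetRules) Equivalent(a, b int) bool { return a == b }
func (r intSetRules) SameRules(other set.Rules[int]) bool {
	_, ok := other.(intSetRules)
	return ok
}

func main() {
	s := set.NewSet(set.Rules[int](intSetRules{}))
	s.Add(1)
	s.Add(2)
	s.Add(1) // equivalent to an existing element, so ignored
	fmt.Println(s.Length()) // expected: 2
	for it := s.Iterator(); it.Next(); {
		fmt.Println(it.Value())
	}
}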

View File

@ -74,6 +74,8 @@ func rawNumberEqual(a, b *big.Float) bool {
return false
case a == nil: // b == nil too then, due to previous case
return true
case a.Sign() != b.Sign():
return false
default:
// This format and precision matches that used by cty/json.Marshal,
// and thus achieves our definition of "two numbers are equal if

View File

@ -1,76 +0,0 @@
package set
import (
"bytes"
"encoding/gob"
"fmt"
)
// GobEncode is an implementation of the interface gob.GobEncoder, allowing
// sets to be included in structures encoded via gob.
//
// The set rules are included in the serialized value, so the caller must
// register its concrete rules type with gob.Register before using a
// set in a gob, and possibly also implement GobEncode/GobDecode to customize
// how any parameters are persisted.
//
// The set elements are also included, so if they are of non-primitive types
// they too must be registered with gob.
//
// If the produced gob values will persist for a long time, the caller must
// ensure compatibility of the rules implementation. In particular, if the
// definition of element equivalence changes between encoding and decoding
// then two distinct stored elements may be considered equivalent on decoding,
// causing the recovered set to have fewer elements than when it was stored.
func (s Set) GobEncode() ([]byte, error) {
gs := gobSet{
Version: 0,
Rules: s.rules,
Values: s.Values(),
}
buf := &bytes.Buffer{}
enc := gob.NewEncoder(buf)
err := enc.Encode(gs)
if err != nil {
return nil, fmt.Errorf("error encoding set.Set: %s", err)
}
return buf.Bytes(), nil
}
// GobDecode is the opposite of GobEncode. See GobEncode for information
// on the requirements for and caveats of including set values in gobs.
func (s *Set) GobDecode(buf []byte) error {
r := bytes.NewReader(buf)
dec := gob.NewDecoder(r)
var gs gobSet
err := dec.Decode(&gs)
if err != nil {
return fmt.Errorf("error decoding set.Set: %s", err)
}
if gs.Version != 0 {
return fmt.Errorf("unsupported set.Set encoding version %d; need 0", gs.Version)
}
victim := NewSetFromSlice(gs.Rules, gs.Values)
s.vals = victim.vals
s.rules = victim.rules
return nil
}
type gobSet struct {
Version int
Rules Rules
// The bucket-based representation is for efficient in-memory access, but
// for serialization it's enough to just retain the values themselves,
// which we can re-bucket using the rules (which may have changed!) when
// we re-inflate.
Values []interface{}
}
func init() {
gob.Register([]interface{}(nil))
}

View File

@ -1,15 +1,15 @@
package set
type Iterator struct {
vals []interface{}
type Iterator[T any] struct {
vals []T
idx int
}
func (it *Iterator) Value() interface{} {
func (it *Iterator[T]) Value() T {
return it.vals[it.idx]
}
func (it *Iterator) Next() bool {
func (it *Iterator[T]) Next() bool {
it.idx++
return it.idx < len(it.vals)
}

View File

@ -7,10 +7,10 @@ import (
// Add inserts the given value into the receiving Set.
//
// This mutates the set in-place. This operation is not thread-safe.
func (s Set) Add(val interface{}) {
func (s Set[T]) Add(val T) {
hv := s.rules.Hash(val)
if _, ok := s.vals[hv]; !ok {
s.vals[hv] = make([]interface{}, 0, 1)
s.vals[hv] = make([]T, 0, 1)
}
bucket := s.vals[hv]
@ -26,7 +26,7 @@ func (s Set) Add(val interface{}) {
// Remove deletes the given value from the receiving set, if indeed it was
// there in the first place. If the value is not present, this is a no-op.
func (s Set) Remove(val interface{}) {
func (s Set[T]) Remove(val T) {
hv := s.rules.Hash(val)
bucket, ok := s.vals[hv]
if !ok {
@ -35,7 +35,7 @@ func (s Set) Remove(val interface{}) {
for i, ev := range bucket {
if s.rules.Equivalent(val, ev) {
newBucket := make([]interface{}, 0, len(bucket)-1)
newBucket := make([]T, 0, len(bucket)-1)
newBucket = append(newBucket, bucket[:i]...)
newBucket = append(newBucket, bucket[i+1:]...)
if len(newBucket) > 0 {
@ -50,7 +50,7 @@ func (s Set) Remove(val interface{}) {
// Has returns true if the given value is in the receiving set, or false if
// it is not.
func (s Set) Has(val interface{}) bool {
func (s Set[T]) Has(val T) bool {
hv := s.rules.Hash(val)
bucket, ok := s.vals[hv]
if !ok {
@ -67,7 +67,7 @@ func (s Set) Has(val interface{}) bool {
// Copy performs a shallow copy of the receiving set, returning a new set
// with the same rules and elements.
func (s Set) Copy() Set {
func (s Set[T]) Copy() Set[T] {
ret := NewSet(s.rules)
for k, v := range s.vals {
ret.vals[k] = v
@ -92,10 +92,10 @@ func (s Set) Copy() Set {
//
// Once an iterator has been created for a set, the set *must not* be mutated
// until the iterator is no longer in use.
func (s Set) Iterator() *Iterator {
func (s Set[T]) Iterator() *Iterator[T] {
vals := s.Values()
return &Iterator{
return &Iterator[T]{
vals: vals,
idx: -1,
}
@ -103,7 +103,7 @@ func (s Set) Iterator() *Iterator {
// EachValue calls the given callback once for each value in the set, in an
// undefined order that callers should not depend on.
func (s Set) EachValue(cb func(interface{})) {
func (s Set[T]) EachValue(cb func(T)) {
it := s.Iterator()
for it.Next() {
cb(it.Value())
@ -114,8 +114,8 @@ func (s Set) EachValue(cb func(interface{})) {
// an order then the result is in that order. If no order is provided or if
// it is not a total order then the result order is undefined, but consistent
// for a particular set value within a specific release of cty.
func (s Set) Values() []interface{} {
var ret []interface{}
func (s Set[T]) Values() []T {
var ret []T
// Sort the bucketIDs to ensure that we always traverse in a
// consistent order.
bucketIDs := make([]int, 0, len(s.vals))
@ -128,7 +128,7 @@ func (s Set) Values() []interface{} {
ret = append(ret, s.vals[bucketID]...)
}
if orderRules, ok := s.rules.(OrderedRules); ok {
if orderRules, ok := s.rules.(OrderedRules[T]); ok {
sort.SliceStable(ret, func(i, j int) bool {
return orderRules.Less(ret[i], ret[j])
})
@ -138,7 +138,7 @@ func (s Set) Values() []interface{} {
}
// Length returns the number of values in the set.
func (s Set) Length() int {
func (s Set[T]) Length() int {
var count int
for _, bucket := range s.vals {
count = count + len(bucket)
@ -149,13 +149,13 @@ func (s Set) Length() int {
// Union returns a new set that contains all of the members of both the
// receiving set and the given set. Both sets must have the same rules, or
// else this function will panic.
func (s1 Set) Union(s2 Set) Set {
func (s1 Set[T]) Union(s2 Set[T]) Set[T] {
mustHaveSameRules(s1, s2)
rs := NewSet(s1.rules)
s1.EachValue(func(v interface{}) {
s1.EachValue(func(v T) {
rs.Add(v)
})
s2.EachValue(func(v interface{}) {
s2.EachValue(func(v T) {
rs.Add(v)
})
return rs
@ -164,10 +164,10 @@ func (s1 Set) Union(s2 Set) Set {
// Intersection returns a new set that contains the values that both the
// receiver and given sets have in common. Both sets must have the same rules,
// or else this function will panic.
func (s1 Set) Intersection(s2 Set) Set {
func (s1 Set[T]) Intersection(s2 Set[T]) Set[T] {
mustHaveSameRules(s1, s2)
rs := NewSet(s1.rules)
s1.EachValue(func(v interface{}) {
s1.EachValue(func(v T) {
if s2.Has(v) {
rs.Add(v)
}
@ -178,10 +178,10 @@ func (s1 Set) Intersection(s2 Set) Set {
// Subtract returns a new set that contains all of the values from the receiver
// that are not also in the given set. Both sets must have the same rules,
// or else this function will panic.
func (s1 Set) Subtract(s2 Set) Set {
func (s1 Set[T]) Subtract(s2 Set[T]) Set[T] {
mustHaveSameRules(s1, s2)
rs := NewSet(s1.rules)
s1.EachValue(func(v interface{}) {
s1.EachValue(func(v T) {
if !s2.Has(v) {
rs.Add(v)
}
@ -193,15 +193,15 @@ func (s1 Set) Subtract(s2 Set) Set {
// both the receiver and given sets, except those that both sets have in
// common. Both sets must have the same rules, or else this function will
// panic.
func (s1 Set) SymmetricDifference(s2 Set) Set {
func (s1 Set[T]) SymmetricDifference(s2 Set[T]) Set[T] {
mustHaveSameRules(s1, s2)
rs := NewSet(s1.rules)
s1.EachValue(func(v interface{}) {
s1.EachValue(func(v T) {
if !s2.Has(v) {
rs.Add(v)
}
})
s2.EachValue(func(v interface{}) {
s2.EachValue(func(v T) {
if !s1.Has(v) {
rs.Add(v)
}

View File

@ -4,13 +4,13 @@ package set
//
// Each Set has a Rules instance, whose methods must satisfy the interface
// contracts given below for any value that will be added to the set.
type Rules interface {
type Rules[T any] interface {
// Hash returns an int that somewhat-uniquely identifies the given value.
//
// A good hash function will minimize collisions for values that will be
// added to the set, though collisions *are* permitted. Collisions will
// simply reduce the efficiency of operations on the set.
Hash(interface{}) int
Hash(T) int
// Equivalent returns true if and only if the two values are considered
// equivalent for the sake of set membership. Two values that are
@ -21,11 +21,11 @@ type Rules interface {
// Two values that are equivalent *must* result in the same hash value,
// though it is *not* required that two values with the same hash value
// be equivalent.
Equivalent(interface{}, interface{}) bool
Equivalent(T, T) bool
// SameRules returns true if the instance is equivalent to another Rules
// instance.
SameRules(Rules) bool
// instance over the same element type.
SameRules(Rules[T]) bool
}
// OrderedRules is an extension of Rules that can apply a partial order to
@ -37,8 +37,8 @@ type Rules interface {
// is undefined but consistent for a particular version of cty. The exact
// order in that case is not part of the contract and is subject to change
// between versions.
type OrderedRules interface {
Rules
type OrderedRules[T any] interface {
Rules[T]
// Less returns true if and only if the first argument should sort before
// the second argument. If the second argument should sort before the first

View File

@ -19,20 +19,20 @@ import (
// Set operations are not optimized to minimize memory pressure. Mutating
// a set will generally create garbage and so should perhaps be avoided in
// tight loops where memory pressure is a concern.
type Set struct {
vals map[int][]interface{}
rules Rules
type Set[T any] struct {
vals map[int][]T
rules Rules[T]
}
// NewSet returns an empty set with the membership rules given.
func NewSet(rules Rules) Set {
return Set{
vals: map[int][]interface{}{},
func NewSet[T any](rules Rules[T]) Set[T] {
return Set[T]{
vals: map[int][]T{},
rules: rules,
}
}
func NewSetFromSlice(rules Rules, vals []interface{}) Set {
func NewSetFromSlice[T any](rules Rules[T], vals []T) Set[T] {
s := NewSet(rules)
for _, v := range vals {
s.Add(v)
@ -40,11 +40,11 @@ func NewSetFromSlice(rules Rules, vals []interface{}) Set {
return s
}
func sameRules(s1 Set, s2 Set) bool {
func sameRules[T any](s1 Set[T], s2 Set[T]) bool {
return s1.rules.SameRules(s2.rules)
}
func mustHaveSameRules(s1 Set, s2 Set) {
func mustHaveSameRules[T any](s1 Set[T], s2 Set[T]) {
if !sameRules(s1, s2) {
panic(fmt.Errorf("incompatible set rules: %#v, %#v", s1.rules, s2.rules))
}
@ -52,11 +52,11 @@ func mustHaveSameRules(s1 Set, s2 Set) {
// HasRules returns true if and only if the receiving set has the given rules
// instance as its rules.
func (s Set) HasRules(rules Rules) bool {
func (s Set[T]) HasRules(rules Rules[T]) bool {
return s.rules.SameRules(rules)
}
// Rules returns the receiving set's rules instance.
func (s Set) Rules() Rules {
func (s Set[T]) Rules() Rules[T] {
return s.rules
}
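// Illustrative sketch (not part of the vendored source): a minimal Rules
// implementation for string elements, showing how the generic Set, Rules, and
// Iterator types above fit together. The "stringRules" type and the example
// function are hypothetical.
//
//	type stringRules struct{}
//
//	func (stringRules) Hash(s string) int {
//		// A deliberately simple hash; collisions only reduce efficiency.
//		h := 0
//		for _, r := range s {
//			h = h*31 + int(r)
//		}
//		return h
//	}
//
//	func (stringRules) Equivalent(a, b string) bool { return a == b }
//
//	func (stringRules) SameRules(other Rules[string]) bool {
//		_, ok := other.(stringRules)
//		return ok
//	}
//
//	func exampleStringSet() {
//		s := NewSet[string](stringRules{})
//		s.Add("a")
//		s.Add("b")
//		s.Add("a") // equivalent to an existing element, so the set keeps two values
//		for it := s.Iterator(); it.Next(); {
//			_ = it.Value() // "a" and "b", in an undefined order
//		}
//	}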

View File

@ -21,15 +21,15 @@ type ValueSet struct {
// ValueSet is just a thin wrapper around a set.Set with our value-oriented
// "rules" applied. We do this so that the caller can work in terms of
// cty.Value objects even though the set internals use the raw values.
s set.Set
s set.Set[interface{}]
}
// NewValueSet creates and returns a new ValueSet with the given element type.
func NewValueSet(ety Type) ValueSet {
return newValueSet(set.NewSet(setRules{Type: ety}))
return newValueSet(set.NewSet(newSetRules(ety)))
}
func newValueSet(s set.Set) ValueSet {
func newValueSet(s set.Set[interface{}]) ValueSet {
return ValueSet{
s: s,
}

View File

@ -21,7 +21,11 @@ type setRules struct {
Type Type
}
var _ set.OrderedRules = setRules{}
var _ set.OrderedRules[interface{}] = setRules{}
func newSetRules(ety Type) set.Rules[interface{}] {
return setRules{ety}
}
// Hash returns a hash value for the receiver that can be used for equality
// checks where some inaccuracy is tolerable.
@ -67,7 +71,7 @@ func (r setRules) Equivalent(v1 interface{}, v2 interface{}) bool {
// SameRules is only true if the other Rules instance is also a setRules struct,
// and the types are considered equal.
func (r setRules) SameRules(other set.Rules) bool {
func (r setRules) SameRules(other set.Rules[interface{}]) bool {
rules, ok := other.(setRules)
if !ok {
return false
@ -250,6 +254,25 @@ func appendSetHashBytes(val Value, buf *bytes.Buffer, marks ValueMarks) {
return
}
if val.ty.IsCapsuleType() {
buf.WriteRune('«')
ops := val.ty.CapsuleOps()
if ops != nil && ops.HashKey != nil {
key := ops.HashKey(val.EncapsulatedValue())
buf.WriteString(fmt.Sprintf("%q", key))
} else {
// If there isn't an explicit hash implementation then we'll
// just generate the same hash value for every value of this
// type, which is logically fine but less efficient for
// larger sets because we'll have to bucket all values
// together and scan over them with Equals to determine
// set membership.
buf.WriteRune('?')
}
buf.WriteRune('»')
return
}
// should never get down here
panic("unsupported type in set hash")
panic(fmt.Sprintf("unsupported type %#v in set hash", val.ty))
}
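// Illustrative sketch (not part of the vendored source): a capsule type can
// participate efficiently in sets by providing the HashKey operation consulted
// by the hashing code above. The "credential" type and its choice of key are
// assumptions for illustration only.
//
//	type credential struct{ ID string }
//
//	var credentialType = cty.CapsuleWithOps("credential", reflect.TypeOf(credential{}), &cty.CapsuleOps{
//		HashKey: func(v interface{}) string {
//			// Any two values that compare as equal must return the same key.
//			return v.(*credential).ID
//		},
//	})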

View File

@ -1,57 +0,0 @@
package cty
import (
"encoding/gob"
"fmt"
"math/big"
"strings"
"github.com/zclconf/go-cty/cty/set"
)
// InternalTypesToRegister is a slice of values that covers all of the
// internal types used in the representation of cty.Type and cty.Value
// across all cty Types.
//
// This is intended to be used to register these types with encoding
// packages that require registration of types used in interfaces, such as
// encoding/gob, thus allowing cty types and values to be included in streams
// created from those packages. However, registering with gob is not necessary
// since that is done automatically as a side-effect of importing this package.
//
// Callers should not do anything with the values here except pass them on
// verbatim to a registration function.
//
// If the calling application uses Capsule types that wrap local structs either
// directly or indirectly, these structs may also need to be registered in
// order to support encoding and decoding of values of these types. That is the
// responsibility of the calling application.
var InternalTypesToRegister []interface{}
func init() {
InternalTypesToRegister = []interface{}{
primitiveType{},
typeList{},
typeMap{},
typeObject{},
typeSet{},
setRules{},
set.Set{},
typeTuple{},
big.Float{},
capsuleType{},
[]interface{}(nil),
map[string]interface{}(nil),
}
// Register these with gob here, rather than in gob.go, to ensure
// that this will always happen after we build the above.
for _, tv := range InternalTypesToRegister {
typeName := fmt.Sprintf("%T", tv)
if strings.HasPrefix(typeName, "cty.") {
gob.RegisterName(fmt.Sprintf("github.com/zclconf/go-cty/%s", typeName), tv)
} else {
gob.Register(tv)
}
}
}

View File

@ -3,11 +3,19 @@ package cty
// unknownType is the placeholder type used for the sigil value representing
// "Unknown", to make it unambigiously distinct from any other possible value.
type unknownType struct {
// refinement is an optional object which, if present, describes some
// additional constraints we know about the range of real values this
// unknown value could be a placeholder for.
refinement unknownValRefinement
}
// unknown is a special value that can be used as the internal value of a
// Value to create a placeholder for a value that isn't yet known.
var unknown interface{} = &unknownType{}
// totallyUnknown is the representation of a value we know nothing about at
// all. Subsequent refinements of an unknown value will cause creation of
// other values of unknownType that can represent additional constraints
// on the unknown value, but all unknown values start as totally unknown
// and we will also typically lose all unknown value refinements when
// round-tripping through serialization formats.
var totallyUnknown interface{} = &unknownType{}
// UnknownVal returns an Value that represents an unknown value of the given
// type. Unknown values can be used to represent a value that is
@ -19,7 +27,7 @@ var unknown interface{} = &unknownType{}
func UnknownVal(t Type) Value {
return Value{
ty: t,
v: unknown,
v: totallyUnknown,
}
}
@ -80,6 +88,6 @@ func init() {
}
DynamicVal = Value{
ty: DynamicPseudoType,
v: unknown,
v: totallyUnknown,
}
}

View File

@ -0,0 +1,747 @@
package cty
import (
"fmt"
"math"
"strings"
"github.com/zclconf/go-cty/cty/ctystrings"
)
// Refine creates a [RefinementBuilder] with which to annotate the receiver
// with zero or more additional refinements that constrain the range of
// the value.
//
// Calling methods on a RefinementBuilder for a known value essentially just
// serves as assertions about the range of that value, leading to panics if
// those assertions don't hold in practice. This is mainly supported just to
// make programs that rely on refinements automatically self-check by using
// the refinement codepath unconditionally on both placeholders and final
// values for those placeholders. It's always a bug to refine the range of
// an unknown value and then later substitute an exact value outside of the
// refined range.
//
// Calling methods on a RefinementBuilder for an unknown value is perhaps
// more useful because the newly-refined value will then be a placeholder for
// a smaller range of values and so it may be possible for other operations
// on the unknown value to return a known result despite the exact value not
// yet being known.
//
// It is never valid to refine [DynamicVal], because that value is a
// placeholder for a value about which we know absolutely nothing. A value
// must at least have a known root type before it can support further
// refinement.
func (v Value) Refine() *RefinementBuilder {
v, marks := v.Unmark()
if unk, isUnk := v.v.(*unknownType); isUnk && unk.refinement != nil {
// We're refining a value that's already been refined before, so
// we'll start from a copy of its existing refinements.
wip := unk.refinement.copy()
return &RefinementBuilder{v, marks, wip}
}
ty := v.Type()
var wip unknownValRefinement
switch {
case ty == DynamicPseudoType && !v.IsKnown():
panic("cannot refine an unknown value of an unknown type")
case ty == String:
wip = &refinementString{}
case ty == Number:
wip = &refinementNumber{}
case ty.IsCollectionType():
wip = &refinementCollection{
// A collection can never have a negative length, so we'll
// start with that already constrained.
minLen: 0,
maxLen: math.MaxInt,
}
case ty == Bool || ty.IsObjectType() || ty.IsTupleType() || ty.IsCapsuleType():
// For other known types we'll just track nullability
wip = &refinementNullable{}
case ty == DynamicPseudoType && v.IsNull():
// It's okay in principle to refine a null value of unknown type,
// although all we can refine about it is that it's definitely null and
// so this is pretty pointless and only supported to avoid callers
// always needing to treat this situation as a special case to avoid
// panic.
wip = &refinementNullable{
isNull: tristateTrue,
}
default:
// we leave "wip" as nil for all other types, representing that
// they don't support refinements at all and so any call on the
// RefinementBuilder should fail.
// NOTE: We intentionally don't allow any refinements for
// cty.DynamicVal here, even though it could be nice in principle
// to at least track non-nullness for those, because it's historically
// been valid to directly compare values with cty.DynamicVal using
// the Go "==" operator and recording a refinement for an untyped
// unknown value would break existing code relying on that.
}
return &RefinementBuilder{v, marks, wip}
}
// RefineWith is a variant of Refine which uses callback functions instead of
// the builder pattern.
//
// The result is equivalent to passing the return value of [Value.Refine] to the
// first callback, and then continue passing the builder through any other
// callbacks in turn, and then calling [RefinementBuilder.NewValue] on the
// final result.
//
// The builder pattern approach of [Value.Refine] is more convenient for inline
// annotation of refinements when constructing a value, but this alternative
// approach may be more convenient when applying pre-defined collections of
// refinements, or when refinements are defined separately from the values
// they will apply to.
//
// Each refiner callback should return the same pointer that it was given,
// typically after having mutated it using the [RefinementBuilder] methods.
// It's invalid to return a different builder.
func (v Value) RefineWith(refiners ...func(*RefinementBuilder) *RefinementBuilder) Value {
if len(refiners) == 0 {
return v
}
origBuilder := v.Refine()
builder := origBuilder
for _, refiner := range refiners {
builder = refiner(builder)
if builder != origBuilder {
panic("refiner callback returned a different builder")
}
}
return builder.NewValue()
}
// RefineNotNull is a shorthand for Value.Refine().NotNull().NewValue(), because
// declaring that an unknown value isn't null is by far the most common use of
// refinements.
func (v Value) RefineNotNull() Value {
return v.Refine().NotNull().NewValue()
}
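// Illustrative usage sketch (not part of the vendored source): refinements are
// typically chained from Refine and sealed with NewValue, while RefineNotNull
// covers the common single-refinement case. The example function name is
// hypothetical.
//
//	func exampleRefine() {
//		// An unknown number known to be non-null and between 1 and 10 inclusive.
//		n := cty.UnknownVal(cty.Number).Refine().
//			NotNull().
//			NumberRangeInclusive(cty.NumberIntVal(1), cty.NumberIntVal(10)).
//			NewValue()
//
//		// Shorthand when non-nullness is the only refinement needed.
//		s := cty.UnknownVal(cty.String).RefineNotNull()
//
//		_, _ = n, s
//	}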
// RefinementBuilder is a supporting type for the [Value.Refine] method,
// using the builder pattern to apply zero or more constraints before
// constructing a new value with all of those constraints applied.
//
// Most of the methods of this type return the same receiver to allow
// for method call chaining. End call chains with a call to
// [RefinementBuilder.NewValue] to obtain the newly-refined value.
type RefinementBuilder struct {
orig Value
marks ValueMarks
wip unknownValRefinement
}
func (b *RefinementBuilder) assertRefineable() {
if b.wip == nil {
panic(fmt.Sprintf("cannot refine a %#v value", b.orig.Type()))
}
}
// NotNull constrains the value as definitely not being null.
//
// NotNull is valid when refining values of the following types:
// - number, boolean, and string values
// - list, set, or map types of any element type
// - values of object types
// - values of collection types
// - values of capsule types
//
// When refining any other type this function will panic.
//
// In particular note that it is not valid to constrain an untyped value
// -- a value whose type is `cty.DynamicPseudoType` -- as being non-null.
// An unknown value of an unknown type is always completely unconstrained.
func (b *RefinementBuilder) NotNull() *RefinementBuilder {
b.assertRefineable()
if b.orig.IsKnown() && b.orig.IsNull() {
panic("refining null value as non-null")
}
if b.wip.null() == tristateTrue {
panic("refining null value as non-null")
}
b.wip.setNull(tristateFalse)
return b
}
// Null constrains the value as definitely null.
//
// Null is valid for the same types as [RefinementBuilder.NotNull].
// When refining any other type this function will panic.
//
// Explicitly constraining a value to be null is strange because that suggests
// that the caller does actually know the value -- there is only one null
// value for each type constraint -- but this is here for symmetry with the
// fact that a [ValueRange] can also represent that a value is definitely null.
func (b *RefinementBuilder) Null() *RefinementBuilder {
b.assertRefineable()
if b.orig.IsKnown() && !b.orig.IsNull() {
panic("refining non-null value as null")
}
if b.wip.null() == tristateFalse {
panic("refining non-null value as null")
}
b.wip.setNull(tristateTrue)
return b
}
// NumberRangeInclusive constrains the lower and/or upper bounds of a number value,
// or panics if this builder is not refining a number value.
//
// The two given values are interpreted as inclusive bounds and either one
// may be an unknown number if only one of the two bounds is currently known.
// If either of the given values is not a non-null number value then this
// function will panic.
func (b *RefinementBuilder) NumberRangeInclusive(min, max Value) *RefinementBuilder {
return b.NumberRangeLowerBound(min, true).NumberRangeUpperBound(max, true)
}
// NumberRangeLowerBound constrains the lower bound of a number value, or
// panics if this builder is not refining a number value.
func (b *RefinementBuilder) NumberRangeLowerBound(min Value, inclusive bool) *RefinementBuilder {
b.assertRefineable()
wip, ok := b.wip.(*refinementNumber)
if !ok {
panic(fmt.Sprintf("cannot refine numeric bounds for a %#v value", b.orig.Type()))
}
if !min.IsKnown() {
// Nothing to do if the lower bound is unknown.
return b
}
if min.IsNull() {
panic("number range lower bound must not be null")
}
if inclusive {
if gt := min.GreaterThan(b.orig); gt.IsKnown() && gt.True() {
panic(fmt.Sprintf("refining %#v to be >= %#v", b.orig, min))
}
} else {
if gt := min.GreaterThanOrEqualTo(b.orig); gt.IsKnown() && gt.True() {
panic(fmt.Sprintf("refining %#v to be > %#v", b.orig, min))
}
}
if wip.min != NilVal {
var ok Value
if inclusive && !wip.minInc {
ok = min.GreaterThan(wip.min)
} else {
ok = min.GreaterThanOrEqualTo(wip.min)
}
if ok.IsKnown() && ok.False() {
return b // Our existing refinement is more constrained
}
}
if min != NegativeInfinity {
wip.min = min
wip.minInc = inclusive
}
wip.assertConsistentBounds()
return b
}
// NumberRangeUpperBound constrains the upper bound of a number value, or
// panics if this builder is not refining a number value.
func (b *RefinementBuilder) NumberRangeUpperBound(max Value, inclusive bool) *RefinementBuilder {
b.assertRefineable()
wip, ok := b.wip.(*refinementNumber)
if !ok {
panic(fmt.Sprintf("cannot refine numeric bounds for a %#v value", b.orig.Type()))
}
if !max.IsKnown() {
// Nothing to do if the upper bound is unknown.
return b
}
if max.IsNull() {
panic("number range upper bound must not be null")
}
if inclusive {
if lt := max.LessThan(b.orig); lt.IsKnown() && lt.True() {
panic(fmt.Sprintf("refining %#v to be <= %#v", b.orig, max))
}
} else {
if lt := max.LessThanOrEqualTo(b.orig); lt.IsKnown() && lt.True() {
panic(fmt.Sprintf("refining %#v to be < %#v", b.orig, max))
}
}
if wip.max != NilVal {
var ok Value
if inclusive && !wip.maxInc {
ok = max.LessThan(wip.max)
} else {
ok = max.LessThanOrEqualTo(wip.max)
}
if ok.IsKnown() && ok.False() {
return b // Our existing refinement is more constrained
}
}
if max != PositiveInfinity {
wip.max = max
wip.maxInc = inclusive
}
wip.assertConsistentBounds()
return b
}
// CollectionLengthLowerBound constrains the lower bound of the length of a
// collection value, or panics if this builder is not refining a collection
// value.
func (b *RefinementBuilder) CollectionLengthLowerBound(min int) *RefinementBuilder {
b.assertRefineable()
wip, ok := b.wip.(*refinementCollection)
if !ok {
panic(fmt.Sprintf("cannot refine collection length bounds for a %#v value", b.orig.Type()))
}
minVal := NumberIntVal(int64(min))
if b.orig.IsKnown() {
realLen := b.orig.Length()
if gt := minVal.GreaterThan(realLen); gt.IsKnown() && gt.True() {
panic(fmt.Sprintf("refining collection of length %#v with lower bound %#v", realLen, min))
}
}
if wip.minLen > min {
return b // Our existing refinement is more constrained
}
wip.minLen = min
wip.assertConsistentLengthBounds()
return b
}
// CollectionLengthUpperBound constrains the upper bound of the length of a
// collection value, or panics if this builder is not refining a collection
// value.
//
// The given upper bound must not be less than the collection's known length
// or its previously-refined lower bound, or this function will panic.
func (b *RefinementBuilder) CollectionLengthUpperBound(max int) *RefinementBuilder {
b.assertRefineable()
wip, ok := b.wip.(*refinementCollection)
if !ok {
panic(fmt.Sprintf("cannot refine collection length bounds for a %#v value", b.orig.Type()))
}
if b.orig.IsKnown() {
maxVal := NumberIntVal(int64(max))
realLen := b.orig.Length()
if lt := maxVal.LessThan(realLen); lt.IsKnown() && lt.True() {
panic(fmt.Sprintf("refining collection of length %#v with upper bound %#v", realLen, max))
}
}
if wip.maxLen < max {
return b // Our existing refinement is more constrained
}
wip.maxLen = max
wip.assertConsistentLengthBounds()
return b
}
// CollectionLength is a shorthand for passing the same length to both
// [CollectionLengthLowerBound] and [CollectionLengthUpperBound].
//
// A collection whose refined length has equal lower and upper bounds can sometimes collapse
// to a known value. Refining to length zero always produces a known value.
// The behavior for other lengths varies by collection type kind.
//
// If the unknown value is of a set type, it's only valid to use this method
// if the caller knows that there will be the given number of _unique_ values
// in the set. If any values might potentially coalesce together once known,
// use [CollectionLengthUpperBound] instead.
func (b *RefinementBuilder) CollectionLength(length int) *RefinementBuilder {
return b.CollectionLengthLowerBound(length).CollectionLengthUpperBound(length)
}
// StringPrefix constrains the prefix of a string value, or panics if this
// builder is not refining a string value.
//
// The given prefix will be Unicode normalized in the same way that a
// cty.StringVal would be.
//
// Due to Unicode normalization and grapheme cluster rules, appending new
// characters to a string can change the meaning of earlier characters.
// StringPrefix may discard one or more characters from the end of the given
// prefix to avoid that problem.
//
// Although cty cannot check this automatically, applications should avoid
// relying on the discarding of the suffix for correctness. For example, if the
// prefix ends with an emoji base character then StringPrefix will discard it
// in case subsequent characters include emoji modifiers, but it's still
// incorrect for the final string to use an entirely different base character.
//
// Applications which fully control the final result and can guarantee the
// subsequent characters will not combine with the prefix may be able to use
// [RefinementBuilder.StringPrefixFull] instead, after carefully reviewing
// the constraints described in its documentation.
func (b *RefinementBuilder) StringPrefix(prefix string) *RefinementBuilder {
return b.StringPrefixFull(ctystrings.SafeKnownPrefix(prefix))
}
// StringPrefixFull is a variant of StringPrefix that will never shorten the
// given prefix to take into account the possibility of the next character
// combining with the end of the prefix.
//
// Applications which fully control the subsequent characters can use this
// as long as they guarantee that the characters added later cannot possibly
// combine with characters at the end of the prefix to form a single grapheme
// cluster. For example, it would be unsafe to use the full prefix "hello" if
// there is any chance that the final string will add a combining diacritic
// character after the "o", because that would then change the final character.
//
// Use [RefinementBuilder.StringPrefix] instead if an application cannot fully
// control the final result to avoid violating this rule.
func (b *RefinementBuilder) StringPrefixFull(prefix string) *RefinementBuilder {
b.assertRefineable()
wip, ok := b.wip.(*refinementString)
if !ok {
panic(fmt.Sprintf("cannot refine string prefix for a %#v value", b.orig.Type()))
}
// We must apply the same Unicode processing we'd normally use for a
// cty string so that the prefix will be comparable.
prefix = NormalizeString(prefix)
// If we have a known string value then the given prefix must actually
// match it.
if b.orig.IsKnown() && !b.orig.IsNull() {
have := b.orig.AsString()
matchLen := len(have)
if l := len(prefix); l < matchLen {
matchLen = l
}
have = have[:matchLen]
new := prefix[:matchLen]
if have != new {
panic("refined prefix is inconsistent with known value")
}
}
// If we already have a refined prefix then the overlapping parts of that
// and the new prefix must match.
{
matchLen := len(wip.prefix)
if l := len(prefix); l < matchLen {
matchLen = l
}
have := wip.prefix[:matchLen]
new := prefix[:matchLen]
if have != new {
panic("refined prefix is inconsistent with previous refined prefix")
}
}
// We'll only save the new prefix if it's longer than the one we already
// had.
if len(prefix) > len(wip.prefix) {
wip.prefix = prefix
}
return b
}
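// Illustrative sketch (not part of the vendored source): StringPrefix is the
// safer entry point because it may shorten the given prefix to avoid grapheme
// cluster combination problems, whereas StringPrefixFull keeps it verbatim and
// is only appropriate when the caller controls the characters that follow.
//
//	func exampleStringPrefix() {
//		// The retained prefix may be shorter than the string given here,
//		// because its final character could in principle combine with
//		// whatever follows it in the eventual known value.
//		u := cty.UnknownVal(cty.String).Refine().
//			NotNull().
//			StringPrefix("https://").
//			NewValue()
//		_ = u
//	}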
// NewValue completes the refinement process by constructing a new value
// that is guaranteed to meet all of the previously-specified refinements.
//
// If the original value being refined was known then the result is exactly
// that value, because otherwise the previous refinement calls would have
// panicked reporting the refinements as invalid for the value.
//
// If the original value was unknown then the result is typically also unknown
// but may have additional refinements compared to the original. If the applied
// refinements have reduced the range to a single exact value then the result
// might be that known value.
func (b *RefinementBuilder) NewValue() (ret Value) {
defer func() {
// Regardless of how we return, the new value should have the same
// marks as our original value.
ret = ret.WithMarks(b.marks)
}()
if b.orig.IsKnown() {
return b.orig
}
// We have a few cases where the value has been refined enough that we now
// know exactly what the value is, or at least we can produce a more
// detailed approximation of it.
switch b.wip.null() {
case tristateTrue:
// There is only one null value of each type so this is now known.
return NullVal(b.orig.Type())
case tristateFalse:
// If we know it's definitely not null then we might have enough
// information to construct a known, non-null value.
if rfn, ok := b.wip.(*refinementNumber); ok {
// If both bounds are inclusive and equal then our value can
// only be the same number as the bounds.
if rfn.maxInc && rfn.minInc {
if rfn.min != NilVal && rfn.max != NilVal {
eq := rfn.min.Equals(rfn.max)
if eq.IsKnown() && eq.True() {
return rfn.min
}
}
}
} else if rfn, ok := b.wip.(*refinementCollection); ok {
// If both of the bounds are equal then we know the length is
// the same number as the bounds.
if rfn.minLen == rfn.maxLen {
knownLen := rfn.minLen
ty := b.orig.Type()
if knownLen == 0 {
// If we know the length is zero then we can construct
// a known value of any collection kind.
switch {
case ty.IsListType():
return ListValEmpty(ty.ElementType())
case ty.IsSetType():
return SetValEmpty(ty.ElementType())
case ty.IsMapType():
return MapValEmpty(ty.ElementType())
}
} else if ty.IsListType() {
// If we know the length of the list then we can
// create a known list with unknown elements instead
// of a wholly-unknown list.
elems := make([]Value, knownLen)
unk := UnknownVal(ty.ElementType())
for i := range elems {
elems[i] = unk
}
return ListVal(elems)
} else if ty.IsSetType() && knownLen == 1 {
// If we know we have a one-element set then we
// know the one element can't possibly coalesce with
// anything else and so we can create a known set with
// an unknown element.
return SetVal([]Value{UnknownVal(ty.ElementType())})
}
}
}
}
return Value{
ty: b.orig.ty,
v: &unknownType{refinement: b.wip},
}
}
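// Illustrative sketch (not part of the vendored source): when the refinements
// constrain the range tightly enough, NewValue returns something more precise
// than a wholly-unknown value, as in the known-length list case handled above.
//
//	func exampleCollapse() {
//		v := cty.UnknownVal(cty.List(cty.String)).Refine().
//			NotNull().
//			CollectionLength(2).
//			NewValue()
//		// v is now a known two-element list whose elements are unknown
//		// strings, equivalent to:
//		//   cty.ListVal([]cty.Value{cty.UnknownVal(cty.String), cty.UnknownVal(cty.String)})
//		_ = v
//	}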
// unknownValRefinement is an interface pretending to be a sum type representing
// the different kinds of unknown value refinements we support for different
// types of value.
type unknownValRefinement interface {
unknownValRefinementSigil()
copy() unknownValRefinement
null() tristateBool
setNull(tristateBool)
rawEqual(other unknownValRefinement) bool
GoString() string
}
type refinementString struct {
refinementNullable
prefix string
}
func (r *refinementString) unknownValRefinementSigil() {}
func (r *refinementString) copy() unknownValRefinement {
ret := *r
// Everything in refinementString is immutable, so a shallow copy is sufficient.
return &ret
}
func (r *refinementString) rawEqual(other unknownValRefinement) bool {
{
other, ok := other.(*refinementString)
if !ok {
return false
}
return (r.refinementNullable.rawEqual(&other.refinementNullable) &&
r.prefix == other.prefix)
}
}
func (r *refinementString) GoString() string {
var b strings.Builder
b.WriteString(r.refinementNullable.GoString())
if r.prefix != "" {
fmt.Fprintf(&b, ".StringPrefixFull(%q)", r.prefix)
}
return b.String()
}
type refinementNumber struct {
refinementNullable
min, max Value
minInc, maxInc bool
}
func (r *refinementNumber) unknownValRefinementSigil() {}
func (r *refinementNumber) copy() unknownValRefinement {
ret := *r
// Everything in refinementNumber is immutable, so a shallow copy is sufficient.
return &ret
}
func (r *refinementNumber) rawEqual(other unknownValRefinement) bool {
{
other, ok := other.(*refinementNumber)
if !ok {
return false
}
return (r.refinementNullable.rawEqual(&other.refinementNullable) &&
r.min.RawEquals(other.min) &&
r.max.RawEquals(other.max) &&
r.minInc == other.minInc &&
r.maxInc == other.maxInc)
}
}
func (r *refinementNumber) GoString() string {
var b strings.Builder
b.WriteString(r.refinementNullable.GoString())
if r.min != NilVal && r.min != NegativeInfinity {
fmt.Fprintf(&b, ".NumberLowerBound(%#v, %t)", r.min, r.minInc)
}
if r.max != NilVal && r.max != PositiveInfinity {
fmt.Fprintf(&b, ".NumberUpperBound(%#v, %t)", r.max, r.maxInc)
}
return b.String()
}
func (r *refinementNumber) assertConsistentBounds() {
if r.min == NilVal || r.max == NilVal {
return // If only one bound is constrained then there's nothing to be inconsistent with
}
var ok Value
if r.minInc != r.maxInc {
ok = r.min.LessThan(r.max)
} else {
ok = r.min.LessThanOrEqualTo(r.max)
}
if ok.IsKnown() && ok.False() {
panic(fmt.Sprintf("number lower bound %#v is greater than upper bound %#v", r.min, r.max))
}
}
type refinementCollection struct {
refinementNullable
minLen, maxLen int
}
func (r *refinementCollection) unknownValRefinementSigil() {}
func (r *refinementCollection) copy() unknownValRefinement {
ret := *r
// Everything in refinementCollection is immutable, so a shallow copy is sufficient.
return &ret
}
func (r *refinementCollection) rawEqual(other unknownValRefinement) bool {
{
other, ok := other.(*refinementCollection)
if !ok {
return false
}
return (r.refinementNullable.rawEqual(&other.refinementNullable) &&
r.minLen == other.minLen &&
r.maxLen == other.maxLen)
}
}
func (r *refinementCollection) GoString() string {
var b strings.Builder
b.WriteString(r.refinementNullable.GoString())
if r.minLen != 0 {
fmt.Fprintf(&b, ".CollectionLengthLowerBound(%d)", r.minLen)
}
if r.maxLen != math.MaxInt {
fmt.Fprintf(&b, ".CollectionLengthUpperBound(%d)", r.maxLen)
}
return b.String()
}
func (r *refinementCollection) assertConsistentLengthBounds() {
if r.maxLen < r.minLen {
panic(fmt.Sprintf("collection length upper bound %d is less than lower bound %d", r.maxLen, r.minLen))
}
}
type refinementNullable struct {
isNull tristateBool
}
func (r *refinementNullable) unknownValRefinementSigil() {}
func (r *refinementNullable) copy() unknownValRefinement {
ret := *r
// Everything in refinementNullable is immutable, so a shallow copy is sufficient.
return &ret
}
func (r *refinementNullable) null() tristateBool {
return r.isNull
}
func (r *refinementNullable) setNull(v tristateBool) {
r.isNull = v
}
func (r *refinementNullable) rawEqual(other unknownValRefinement) bool {
{
other, ok := other.(*refinementNullable)
if !ok {
return false
}
return r.isNull == other.isNull
}
}
func (r *refinementNullable) GoString() string {
switch r.isNull {
case tristateFalse:
return ".NotNull()"
case tristateTrue:
return ".Null()"
default:
return ""
}
}
type tristateBool rune
const tristateTrue tristateBool = 'T'
const tristateFalse tristateBool = 'F'
const tristateUnknown tristateBool = 0

View File

@ -48,7 +48,8 @@ func (val Value) IsKnown() bool {
if val.IsMarked() {
return val.unmarkForce().IsKnown()
}
return val.v != unknown
_, unknown := val.v.(*unknownType)
return !unknown
}
// IsNull returns true if the value is null. Values of any type can be

View File

@ -5,8 +5,7 @@ import (
"math/big"
"reflect"
"golang.org/x/text/unicode/norm"
"github.com/zclconf/go-cty/cty/ctystrings"
"github.com/zclconf/go-cty/cty/set"
)
@ -107,7 +106,7 @@ func StringVal(v string) Value {
// A return value from this function can be meaningfully compared byte-for-byte
// with a Value.AsString result.
func NormalizeString(s string) string {
return norm.NFC.String(s)
return ctystrings.Normalize(s)
}
// ObjectVal returns a Value of an object type whose structure is defined
@ -287,7 +286,7 @@ func SetVal(vals []Value) Value {
rawList[i] = val.v
}
rawVal := set.NewSetFromSlice(setRules{elementType}, rawList)
rawVal := set.NewSetFromSlice(set.Rules[interface{}](setRules{elementType}), rawList)
return Value{
ty: Set(elementType),
@ -334,7 +333,7 @@ func SetValFromValueSet(s ValueSet) Value {
func SetValEmpty(element Type) Value {
return Value{
ty: Set(element),
v: set.NewSet(setRules{element}),
v: set.NewSet(set.Rules[interface{}](setRules{element})),
}
}

View File

@ -33,7 +33,17 @@ func (val Value) GoString() string {
return "cty.DynamicVal"
}
if !val.IsKnown() {
return fmt.Sprintf("cty.UnknownVal(%#v)", val.ty)
rfn := val.v.(*unknownType).refinement
var suffix string
if rfn != nil {
calls := rfn.GoString()
if calls == ".NotNull()" {
suffix = ".RefineNotNull()"
} else {
suffix = ".Refine()" + rfn.GoString() + ".NewValue()"
}
}
return fmt.Sprintf("cty.UnknownVal(%#v)%s", val.ty, suffix)
}
// By the time we reach here we've dealt with all of the exceptions around
@ -47,6 +57,9 @@ func (val Value) GoString() string {
}
return "cty.False"
case Number:
if f, ok := val.v.(big.Float); ok {
panic(fmt.Sprintf("number value contains big.Float value %s, rather than pointer to big.Float", f.Text('g', -1)))
}
fv := val.v.(*big.Float)
// We'll try to use NumberIntVal or NumberFloatVal if we can, since
// the fully-general initializer call is pretty ugly-looking.
@ -122,13 +135,38 @@ func (val Value) Equals(other Value) Value {
return val.Equals(other).WithMarks(valMarks, otherMarks)
}
// Start by handling Unknown values before considering types.
// This needs to be done since Null values are always equal regardless of
// type.
// Some easy cases with comparisons to null.
switch {
case val.IsNull() && definitelyNotNull(other):
return False
case other.IsNull() && definitelyNotNull(val):
return False
}
// If we have one known value and one unknown value then we may be
// able to quickly disqualify equality based on the range of the unknown
// value.
if val.IsKnown() && !other.IsKnown() {
otherRng := other.Range()
if ok := otherRng.Includes(val); ok.IsKnown() && ok.False() {
return False
}
} else if other.IsKnown() && !val.IsKnown() {
valRng := val.Range()
if ok := valRng.Includes(other); ok.IsKnown() && ok.False() {
return False
}
}
// We need to deal with unknown values before anything else with nulls
// because any unknown value that hasn't yet been refined as non-null
// could become null, and nulls of any types are equal to one another.
unknownResult := func() Value {
return UnknownVal(Bool).Refine().NotNull().NewValue()
}
switch {
case !val.IsKnown() && !other.IsKnown():
// both unknown
return UnknownVal(Bool)
return unknownResult()
case val.IsKnown() && !other.IsKnown():
switch {
case val.IsNull(), other.ty.HasDynamicTypes():
@ -136,13 +174,13 @@ func (val Value) Equals(other Value) Value {
// nulls of any type are equal.
// An unknown with a dynamic type compares as unknown, which we need
// to check before the type comparison below.
return UnknownVal(Bool)
return unknownResult()
case !val.ty.Equals(other.ty):
// There is no null comparison or dynamic types, so unequal types
// will never be equal.
return False
default:
return UnknownVal(Bool)
return unknownResult()
}
case other.IsKnown() && !val.IsKnown():
switch {
@ -151,13 +189,13 @@ func (val Value) Equals(other Value) Value {
// nulls of any type are equal.
// An unknown with a dynamic type compares as unknown, which we need
// to check before the type comparison below.
return UnknownVal(Bool)
return unknownResult()
case !other.ty.Equals(val.ty):
// There's no null comparison or dynamic types, so unequal types
// will never be equal.
return False
default:
return UnknownVal(Bool)
return unknownResult()
}
}
@ -179,7 +217,7 @@ func (val Value) Equals(other Value) Value {
return BoolVal(false)
}
return UnknownVal(Bool)
return unknownResult()
}
if !val.ty.Equals(other.ty) {
@ -213,7 +251,7 @@ func (val Value) Equals(other Value) Value {
}
eq := lhs.Equals(rhs)
if !eq.IsKnown() {
return UnknownVal(Bool)
return unknownResult()
}
if eq.False() {
result = false
@ -234,7 +272,7 @@ func (val Value) Equals(other Value) Value {
}
eq := lhs.Equals(rhs)
if !eq.IsKnown() {
return UnknownVal(Bool)
return unknownResult()
}
if eq.False() {
result = false
@ -256,7 +294,7 @@ func (val Value) Equals(other Value) Value {
}
eq := lhs.Equals(rhs)
if !eq.IsKnown() {
return UnknownVal(Bool)
return unknownResult()
}
if eq.False() {
result = false
@ -265,16 +303,16 @@ func (val Value) Equals(other Value) Value {
}
}
case ty.IsSetType():
s1 := val.v.(set.Set)
s2 := other.v.(set.Set)
s1 := val.v.(set.Set[interface{}])
s2 := other.v.(set.Set[interface{}])
equal := true
// Two sets are equal if all of their values are known and all values
// in one are also in the other.
for it := s1.Iterator(); it.Next(); {
rv := it.Value()
if rv == unknown { // "unknown" is the internal representation of unknown-ness
return UnknownVal(Bool)
if _, unknown := rv.(*unknownType); unknown { // "*unknownType" is the internal representation of unknown-ness
return unknownResult()
}
if !s2.Has(rv) {
equal = false
@ -282,8 +320,8 @@ func (val Value) Equals(other Value) Value {
}
for it := s2.Iterator(); it.Next(); {
rv := it.Value()
if rv == unknown { // "unknown" is the internal representation of unknown-ness
return UnknownVal(Bool)
if _, unknown := rv.(*unknownType); unknown { // "*unknownType" is the internal representation of unknown-ness
return unknownResult()
}
if !s1.Has(rv) {
equal = false
@ -310,7 +348,7 @@ func (val Value) Equals(other Value) Value {
}
eq := lhs.Equals(rhs)
if !eq.IsKnown() {
return UnknownVal(Bool)
return unknownResult()
}
if eq.False() {
result = false
@ -390,7 +428,17 @@ func (val Value) RawEquals(other Value) bool {
other = other.unmarkForce()
if (!val.IsKnown()) && (!other.IsKnown()) {
return true
// If either unknown value has refinements then they must match.
valRfn := val.v.(*unknownType).refinement
otherRfn := other.v.(*unknownType).refinement
switch {
case (valRfn == nil) != (otherRfn == nil):
return false
case valRfn != nil:
return valRfn.rawEqual(otherRfn)
default:
return true
}
}
if (val.IsKnown() && !other.IsKnown()) || (other.IsKnown() && !val.IsKnown()) {
return false
@ -545,7 +593,8 @@ func (val Value) Add(other Value) Value {
if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil {
shortCircuit = forceShortCircuitType(shortCircuit, Number)
return *shortCircuit
ret := shortCircuit.RefineWith(numericRangeArithmetic(Value.Add, val.Range(), other.Range()))
return ret.RefineNotNull()
}
ret := new(big.Float)
@ -564,7 +613,8 @@ func (val Value) Subtract(other Value) Value {
if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil {
shortCircuit = forceShortCircuitType(shortCircuit, Number)
return *shortCircuit
ret := shortCircuit.RefineWith(numericRangeArithmetic(Value.Subtract, val.Range(), other.Range()))
return ret.RefineNotNull()
}
return val.Add(other.Negate())
@ -580,7 +630,7 @@ func (val Value) Negate() Value {
if shortCircuit := mustTypeCheck(Number, Number, val); shortCircuit != nil {
shortCircuit = forceShortCircuitType(shortCircuit, Number)
return *shortCircuit
return (*shortCircuit).RefineNotNull()
}
ret := new(big.Float).Neg(val.v.(*big.Float))
@ -597,8 +647,14 @@ func (val Value) Multiply(other Value) Value {
}
if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil {
// If either value is exactly zero then the result must either be
// zero or an error.
if val == Zero || other == Zero {
return Zero
}
shortCircuit = forceShortCircuitType(shortCircuit, Number)
return *shortCircuit
ret := shortCircuit.RefineWith(numericRangeArithmetic(Value.Multiply, val.Range(), other.Range()))
return ret.RefineNotNull()
}
// find the larger precision of the arguments
@ -643,7 +699,10 @@ func (val Value) Divide(other Value) Value {
if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil {
shortCircuit = forceShortCircuitType(shortCircuit, Number)
return *shortCircuit
// TODO: We could potentially refine the range of the result here, but
// we don't right now because our division operation is not monotone
// if the denominator could potentially be zero.
return (*shortCircuit).RefineNotNull()
}
ret := new(big.Float)
@ -675,7 +734,7 @@ func (val Value) Modulo(other Value) Value {
if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil {
shortCircuit = forceShortCircuitType(shortCircuit, Number)
return *shortCircuit
return (*shortCircuit).RefineNotNull()
}
// We cheat a bit here with infinities, just abusing the Multiply operation
@ -713,7 +772,7 @@ func (val Value) Absolute() Value {
if shortCircuit := mustTypeCheck(Number, Number, val); shortCircuit != nil {
shortCircuit = forceShortCircuitType(shortCircuit, Number)
return *shortCircuit
return (*shortCircuit).Refine().NotNull().NumberRangeInclusive(Zero, UnknownVal(Number)).NewValue()
}
ret := (&big.Float{}).Abs(val.v.(*big.Float))
@ -886,23 +945,23 @@ func (val Value) HasIndex(key Value) Value {
}
if val.ty == DynamicPseudoType {
return UnknownVal(Bool)
return UnknownVal(Bool).RefineNotNull()
}
switch {
case val.Type().IsListType():
if key.Type() == DynamicPseudoType {
return UnknownVal(Bool)
return UnknownVal(Bool).RefineNotNull()
}
if key.Type() != Number {
return False
}
if !key.IsKnown() {
return UnknownVal(Bool)
return UnknownVal(Bool).RefineNotNull()
}
if !val.IsKnown() {
return UnknownVal(Bool)
return UnknownVal(Bool).RefineNotNull()
}
index, accuracy := key.v.(*big.Float).Int64()
@ -913,17 +972,17 @@ func (val Value) HasIndex(key Value) Value {
return BoolVal(int(index) < len(val.v.([]interface{})) && index >= 0)
case val.Type().IsMapType():
if key.Type() == DynamicPseudoType {
return UnknownVal(Bool)
return UnknownVal(Bool).RefineNotNull()
}
if key.Type() != String {
return False
}
if !key.IsKnown() {
return UnknownVal(Bool)
return UnknownVal(Bool).RefineNotNull()
}
if !val.IsKnown() {
return UnknownVal(Bool)
return UnknownVal(Bool).RefineNotNull()
}
keyStr := key.v.(string)
@ -932,14 +991,14 @@ func (val Value) HasIndex(key Value) Value {
return BoolVal(exists)
case val.Type().IsTupleType():
if key.Type() == DynamicPseudoType {
return UnknownVal(Bool)
return UnknownVal(Bool).RefineNotNull()
}
if key.Type() != Number {
return False
}
if !key.IsKnown() {
return UnknownVal(Bool)
return UnknownVal(Bool).RefineNotNull()
}
index, accuracy := key.v.(*big.Float).Int64()
@ -974,16 +1033,16 @@ func (val Value) HasElement(elem Value) Value {
panic("not a set type")
}
if !val.IsKnown() || !elem.IsKnown() {
return UnknownVal(Bool)
return UnknownVal(Bool).RefineNotNull()
}
if val.IsNull() {
panic("can't call HasElement on a nil value")
panic("can't call HasElement on a null value")
}
if !ty.ElementType().Equals(elem.Type()) {
return False
}
s := val.v.(set.Set)
s := val.v.(set.Set[interface{}])
return BoolVal(s.Has(elem.v))
}
@ -1009,7 +1068,10 @@ func (val Value) Length() Value {
}
if !val.IsKnown() {
return UnknownVal(Number)
// If the whole collection isn't known then the length isn't known
// either, but we can still put some bounds on the range of the result.
rng := val.Range()
return UnknownVal(Number).RefineWith(valueRefineLengthResult(rng))
}
if val.Type().IsSetType() {
// The Length rules are a little different for sets because if any
@ -1017,7 +1079,7 @@ func (val Value) Length() Value {
// may or may not be equal to other elements in the set, and thus they
// may or may not coalesce with other elements and produce fewer
// items in the resulting set.
storeLength := int64(val.v.(set.Set).Length())
storeLength := int64(val.v.(set.Set[interface{}]).Length())
if storeLength == 1 || val.IsWhollyKnown() {
// If our set is wholly known then we know its length.
//
@ -1027,13 +1089,26 @@ func (val Value) Length() Value {
// unknown value cannot represent more than one known value.
return NumberIntVal(storeLength)
}
// Otherwise, we cannot predict the length.
return UnknownVal(Number)
// Otherwise, we cannot predict the length exactly but we can at
// least constrain both bounds of its range, because value coalescing
// can only ever reduce the number of elements in the set.
return UnknownVal(Number).Refine().NotNull().NumberRangeInclusive(NumberIntVal(1), NumberIntVal(storeLength)).NewValue()
}
return NumberIntVal(int64(val.LengthInt()))
}
func valueRefineLengthResult(collRng ValueRange) func(*RefinementBuilder) *RefinementBuilder {
return func(b *RefinementBuilder) *RefinementBuilder {
return b.
NotNull().
NumberRangeInclusive(
NumberIntVal(int64(collRng.LengthLowerBound())),
NumberIntVal(int64(collRng.LengthUpperBound())),
)
}
}
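// Illustrative sketch (not part of the vendored source): length refinements on
// an unknown collection propagate into the result of Length, so callers can
// still reason about bounds even before the value is known.
//
//	func exampleLength() {
//		coll := cty.UnknownVal(cty.List(cty.String)).Refine().
//			NotNull().
//			CollectionLengthUpperBound(3).
//			NewValue()
//		n := coll.Length()
//		// n is an unknown, non-null number refined to the inclusive
//		// range [0, 3] rather than a wholly-unknown number.
//		_ = n
//	}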
// LengthInt is like Length except it returns an int. It has the same behavior
// as Length except that it will panic if the receiver is unknown.
//
@ -1078,7 +1153,7 @@ func (val Value) LengthInt() int {
// compatibility with callers that were relying on LengthInt rather
// than calling Length. Instead of panicking when a set contains an
// unknown value, LengthInt returns the largest possible length.
return val.v.(set.Set).Length()
return val.v.(set.Set[interface{}]).Length()
case val.ty.IsMapType():
return len(val.v.(map[string]interface{}))
@ -1164,7 +1239,7 @@ func (val Value) Not() Value {
if shortCircuit := mustTypeCheck(Bool, Bool, val); shortCircuit != nil {
shortCircuit = forceShortCircuitType(shortCircuit, Bool)
return *shortCircuit
return (*shortCircuit).RefineNotNull()
}
return BoolVal(!val.v.(bool))
@ -1180,8 +1255,14 @@ func (val Value) And(other Value) Value {
}
if shortCircuit := mustTypeCheck(Bool, Bool, val, other); shortCircuit != nil {
// If either value is known to be exactly False then it doesn't
// matter what the other value is, because the final result must
// either be False or an error.
if val == False || other == False {
return False
}
shortCircuit = forceShortCircuitType(shortCircuit, Bool)
return *shortCircuit
return (*shortCircuit).RefineNotNull()
}
return BoolVal(val.v.(bool) && other.v.(bool))
@ -1197,8 +1278,14 @@ func (val Value) Or(other Value) Value {
}
if shortCircuit := mustTypeCheck(Bool, Bool, val, other); shortCircuit != nil {
// If either value is known to be exactly True then it doesn't
// matter what the other value is, because the final result must
// either be True or an error.
if val == True || other == True {
return True
}
shortCircuit = forceShortCircuitType(shortCircuit, Bool)
return *shortCircuit
return (*shortCircuit).RefineNotNull()
}
return BoolVal(val.v.(bool) || other.v.(bool))
@ -1214,8 +1301,30 @@ func (val Value) LessThan(other Value) Value {
}
if shortCircuit := mustTypeCheck(Number, Bool, val, other); shortCircuit != nil {
// We might be able to return a known answer even with unknown inputs.
// FIXME: This is more conservative than it needs to be, because it
// treats all bounds as exclusive bounds.
valRng := val.Range()
otherRng := other.Range()
if valRng.TypeConstraint() == Number && other.Range().TypeConstraint() == Number {
valMax, _ := valRng.NumberUpperBound()
otherMin, _ := otherRng.NumberLowerBound()
if valMax.IsKnown() && otherMin.IsKnown() {
if r := valMax.LessThan(otherMin); r.True() {
return True
}
}
valMin, _ := valRng.NumberLowerBound()
otherMax, _ := otherRng.NumberUpperBound()
if valMin.IsKnown() && otherMax.IsKnown() {
if r := valMin.GreaterThan(otherMax); r.True() {
return False
}
}
}
shortCircuit = forceShortCircuitType(shortCircuit, Bool)
return *shortCircuit
return (*shortCircuit).RefineNotNull()
}
return BoolVal(val.v.(*big.Float).Cmp(other.v.(*big.Float)) < 0)
@ -1231,8 +1340,30 @@ func (val Value) GreaterThan(other Value) Value {
}
if shortCircuit := mustTypeCheck(Number, Bool, val, other); shortCircuit != nil {
// We might be able to return a known answer even with unknown inputs.
// FIXME: This is more conservative than it needs to be, because it
// treats all bounds as exclusive bounds.
valRng := val.Range()
otherRng := other.Range()
if valRng.TypeConstraint() == Number && other.Range().TypeConstraint() == Number {
valMin, _ := valRng.NumberLowerBound()
otherMax, _ := otherRng.NumberUpperBound()
if valMin.IsKnown() && otherMax.IsKnown() {
if r := valMin.GreaterThan(otherMax); r.True() {
return True
}
}
valMax, _ := valRng.NumberUpperBound()
otherMin, _ := otherRng.NumberLowerBound()
if valMax.IsKnown() && otherMin.IsKnown() {
if r := valMax.LessThan(otherMin); r.True() {
return False
}
}
}
shortCircuit = forceShortCircuitType(shortCircuit, Bool)
return *shortCircuit
return (*shortCircuit).RefineNotNull()
}
return BoolVal(val.v.(*big.Float).Cmp(other.v.(*big.Float)) > 0)

408
vendor/github.com/zclconf/go-cty/cty/value_range.go generated vendored Normal file
View File

@ -0,0 +1,408 @@
package cty
import (
"fmt"
"math"
"strings"
)
// Range returns an object that offers partial information about the range
// of the receiver.
//
// This is most relevant for unknown values, because it gives access to any
// optional additional constraints on the final value (specified by the source
// of the value using "refinements") beyond what we can assume from the value's
// type.
//
// Calling Range for a known value is a little strange, but it's supported by
// returning a [ValueRange] object that describes the exact value as closely
// as possible. Typically a caller should work directly with the exact value
// in that case, but some purposes might only need the level of detail
// offered by ranges and so can share code between both known and unknown
// values.
func (v Value) Range() ValueRange {
// For an unknown value we just use its own refinements.
if unk, isUnk := v.v.(*unknownType); isUnk {
refinement := unk.refinement
if refinement == nil {
// We'll generate an unconstrained refinement, just to
// simplify the code in ValueRange methods which can
// therefore assume that there's always a refinement.
refinement = &refinementNullable{isNull: tristateUnknown}
}
return ValueRange{v.Type(), refinement}
}
if v.IsNull() {
// If we know a value is null then we'll just report that,
// since no other refinements make sense for a definitely-null value.
return ValueRange{
v.Type(),
&refinementNullable{isNull: tristateTrue},
}
}
// For a known value we construct synthetic refinements that match
// the value, just as a convenience for callers that want to share
// codepaths between both known and unknown values.
ty := v.Type()
var synth unknownValRefinement
switch {
case ty == String:
synth = &refinementString{
prefix: v.AsString(),
}
case ty == Number:
synth = &refinementNumber{
min: v,
max: v,
minInc: true,
maxInc: true,
}
case ty.IsCollectionType():
if lenVal := v.Length(); lenVal.IsKnown() {
l, _ := lenVal.AsBigFloat().Int64()
synth = &refinementCollection{
minLen: int(l),
maxLen: int(l),
}
} else {
synth = &refinementCollection{
minLen: 0,
maxLen: math.MaxInt,
}
}
default:
// If we don't have anything else to say then we can at least
// guarantee that the value isn't null.
synth = &refinementNullable{}
}
// If we get down here then the value is definitely not null
synth.setNull(tristateFalse)
return ValueRange{ty, synth}
}
// ValueRange offers partial information about the range of a value.
//
// This is primarily interesting for unknown values, because it provides access
// to any additional known constraints (specified using "refinements") on the
// range of the value beyond what is represented by the value's type.
type ValueRange struct {
ty Type
raw unknownValRefinement
}
// TypeConstraint returns a type constraint describing the value's type as
// precisely as possible with the available information.
func (r ValueRange) TypeConstraint() Type {
return r.ty
}
// CouldBeNull returns true unless the value being described is definitely
// known to represent a non-null value.
func (r ValueRange) CouldBeNull() bool {
if r.raw == nil {
// A totally-unconstrained unknown value could be null
return true
}
return r.raw.null() != tristateFalse
}
// DefinitelyNotNull returns true if there are no null values in the range.
func (r ValueRange) DefinitelyNotNull() bool {
if r.raw == nil {
// A totally-unconstrained unknown value could be null
return false
}
return r.raw.null() == tristateFalse
}
// NumberLowerBound returns information about the lower bound of the range of
// a number value, or panics if the value is definitely not a number.
//
// If the value is nullable then the result represents the range of the number
// only if it turns out not to be null.
//
// The resulting value might itself be an unknown number if there is no
// known lower bound. In that case the "inclusive" flag is meaningless.
func (r ValueRange) NumberLowerBound() (min Value, inclusive bool) {
if r.ty == DynamicPseudoType {
// We don't even know if this is a number yet.
return UnknownVal(Number), false
}
if r.ty != Number {
panic(fmt.Sprintf("NumberLowerBound for %#v", r.ty))
}
if rfn, ok := r.raw.(*refinementNumber); ok && rfn.min != NilVal {
if !rfn.min.IsKnown() {
return NegativeInfinity, true
}
return rfn.min, rfn.minInc
}
return NegativeInfinity, false
}
// NumberUpperBound returns information about the upper bound of the range of
// a number value, or panics if the value is definitely not a number.
//
// If the value is nullable then the result represents the range of the number
// only if it turns out not to be null.
//
// If there is no known upper bound then the result is PositiveInfinity, or
// an unknown number if the value's type isn't yet known. In either case the
// "inclusive" flag is meaningless.
func (r ValueRange) NumberUpperBound() (max Value, inclusive bool) {
if r.ty == DynamicPseudoType {
// We don't even know if this is a number yet.
return UnknownVal(Number), false
}
if r.ty != Number {
panic(fmt.Sprintf("NumberUpperBound for %#v", r.ty))
}
if rfn, ok := r.raw.(*refinementNumber); ok && rfn.max != NilVal {
if !rfn.max.IsKnown() {
return PositiveInfinity, true
}
return rfn.max, rfn.maxInc
}
return PositiveInfinity, false
}
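
// exampleNumberBounds is an illustrative sketch (not part of the upstream
// file): an unrefined unknown number reports the infinities as its bounds,
// while a refined one reports whatever bounds were recorded on it.
func exampleNumberBounds() {
	vague := UnknownVal(Number).Range()
	min, _ := vague.NumberLowerBound() // NegativeInfinity
	max, _ := vague.NumberUpperBound() // PositiveInfinity

	bounded := UnknownVal(Number).Refine().
		NumberRangeLowerBound(NumberIntVal(0), true).
		NumberRangeUpperBound(NumberIntVal(100), false).
		NewValue().Range()
	lo, loInc := bounded.NumberLowerBound() // 0, inclusive
	hi, hiInc := bounded.NumberUpperBound() // 100, exclusive

	fmt.Println(min, max, lo, loInc, hi, hiInc)
}
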
// StringPrefix returns a string that is guaranteed to be the prefix of
// the string value being described, or panics if the value is definitely not
// a string.
//
// If the value is nullable then the result represents the prefix of the string
// only if it turns out to not be null.
//
// A zero-length result means either that the string has no known prefix or
// that the value isn't yet even known to be a string.
//
// cty.String values always contain normalized UTF-8 sequences; the result is
// also guaranteed to be a normalized UTF-8 sequence so the result also
// represents the exact bytes of the string value's prefix.
func (r ValueRange) StringPrefix() string {
if r.ty == DynamicPseudoType {
// We don't even know if this is a string yet.
return ""
}
if r.ty != String {
panic(fmt.Sprintf("StringPrefix for %#v", r.ty))
}
if rfn, ok := r.raw.(*refinementString); ok {
return rfn.prefix
}
return ""
}
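
// exampleStringPrefix is an illustrative sketch (not part of the upstream
// file). It assumes the RefinementBuilder's StringPrefixFull method for
// recording a known prefix, which StringPrefix then reads back.
func exampleStringPrefix() {
	v := UnknownVal(String).Refine().
		StringPrefixFull("https://").
		NewValue()

	fmt.Printf("prefix: %q\n", v.Range().StringPrefix()) // "https://"
}
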
// LengthLowerBound returns information about the lower bound of the length of
// a collection-typed value, or panics if the value is definitely not a
// collection.
//
// If the value is nullable then the result represents the range of the length
// only if the value turns out not to be null.
func (r ValueRange) LengthLowerBound() int {
if r.ty == DynamicPseudoType {
// We don't even know if this is a collection yet.
return 0
}
if !r.ty.IsCollectionType() {
panic(fmt.Sprintf("LengthLowerBound for %#v", r.ty))
}
if rfn, ok := r.raw.(*refinementCollection); ok {
return rfn.minLen
}
return 0
}
// LengthUpperBound returns information about the upper bound of the length of
// a collection-typed value, or panics if the value is definitely not a
// collection.
//
// If the value is nullable then the result represents the range of the length
// only if the value turns out not to be null.
//
// If there is no known upper bound then the result is math.MaxInt.
func (r ValueRange) LengthUpperBound() int {
if r.ty == DynamicPseudoType {
// We don't even know if this is a collection yet.
return math.MaxInt
}
if !r.ty.IsCollectionType() {
panic(fmt.Sprintf("LengthUpperBound for %#v", r.ty))
}
if rfn, ok := r.raw.(*refinementCollection); ok {
return rfn.maxLen
}
return math.MaxInt
}
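
// exampleLengthBounds is an illustrative sketch (not part of the upstream
// file): length refinements recorded on an unknown collection become the
// bounds reported by LengthLowerBound and LengthUpperBound.
func exampleLengthBounds() {
	v := UnknownVal(List(String)).Refine().
		CollectionLengthLowerBound(1).
		CollectionLengthUpperBound(5).
		NewValue()

	r := v.Range()
	fmt.Printf("len in [%d, %d]\n", r.LengthLowerBound(), r.LengthUpperBound()) // [1, 5]
}
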
// Includes determines whether the given value is in the receiving range.
//
// It can return only three possible values:
// - [cty.True] if the range definitely includes the value
// - [cty.False] if the range definitely does not include the value
// - An unknown value of [cty.Bool] if there isn't enough information to decide.
//
// This function is not fully comprehensive: it may return an unknown value
// in some cases where a definitive value could be computed in principle, and
// those same situations may begin returning known values in later releases as
// the rules are refined to be more complete. Currently the rules focus mainly
// on answering [cty.False], because disproving membership tends to be more
// useful than proving membership.
func (r ValueRange) Includes(v Value) Value {
unknownResult := UnknownVal(Bool).RefineNotNull()
if r.raw.null() == tristateTrue {
if v.IsNull() {
return True
} else {
return False
}
}
if r.raw.null() == tristateFalse {
if v.IsNull() {
return False
}
// A definitely-not-null value could potentially match
// but we won't know until we do some more checks below.
}
// If our range includes both null and non-null values and the value is
// null then it's definitely in range.
if v.IsNull() {
return True
}
if len(v.Type().TestConformance(r.TypeConstraint())) != 0 {
// If the value doesn't conform to the type constraint then it's
// definitely not in the range.
return False
}
if v.Type() == DynamicPseudoType {
// If it's an unknown value of an unknown type then there are no
// further tests we can make.
return unknownResult
}
switch r.raw.(type) {
case *refinementString:
if v.IsKnown() {
prefix := r.StringPrefix()
got := v.AsString()
if !strings.HasPrefix(got, prefix) {
return False
}
}
case *refinementCollection:
lenVal := v.Length()
minLen := NumberIntVal(int64(r.LengthLowerBound()))
maxLen := NumberIntVal(int64(r.LengthUpperBound()))
if minOk := lenVal.GreaterThanOrEqualTo(minLen); minOk.IsKnown() && minOk.False() {
return False
}
if maxOk := lenVal.LessThanOrEqualTo(maxLen); maxOk.IsKnown() && maxOk.False() {
return False
}
case *refinementNumber:
minVal, minInc := r.NumberLowerBound()
maxVal, maxInc := r.NumberUpperBound()
var minOk, maxOk Value
if minInc {
minOk = v.GreaterThanOrEqualTo(minVal)
} else {
minOk = v.GreaterThan(minVal)
}
if maxInc {
maxOk = v.LessThanOrEqualTo(maxVal)
} else {
maxOk = v.LessThan(maxVal)
}
if minOk.IsKnown() && minOk.False() {
return False
}
if maxOk.IsKnown() && maxOk.False() {
return False
}
}
// If we fall out here then we don't have enough information to decide.
return unknownResult
}
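
// exampleIncludes is an illustrative sketch (not part of the upstream file)
// of the behavior documented above: Includes returns False when it can
// disprove membership and an unknown bool when it cannot decide.
func exampleIncludes() {
	r := UnknownVal(List(String)).Refine().
		NotNull().
		CollectionLengthUpperBound(2).
		NewValue().Range()

	tooLong := ListVal([]Value{StringVal("a"), StringVal("b"), StringVal("c")})
	fmt.Println(r.Includes(tooLong))                        // False: length 3 exceeds the bound
	fmt.Println(r.Includes(NullVal(List(String))))          // False: the range excludes null
	fmt.Println(r.Includes(ListValEmpty(String)).IsKnown()) // false: membership can't be proven
}
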
// numericRangeArithmetic is a helper we use to calculate derived numeric ranges
// for arithmetic on refined numeric values.
//
// op must be a monotone operation. numericRangeArithmetic adapts that operation
// into the equivalent interval arithmetic operation.
//
// The result is a superset of the range of the given operation against the
// given input ranges, if it's possible to calculate that without encountering
// an invalid operation. Currently the result is inexact due to ignoring
// the inclusiveness of the input bounds and just always returning inclusive
// bounds.
func numericRangeArithmetic(op func(a, b Value) Value, a, b ValueRange) func(*RefinementBuilder) *RefinementBuilder {
wrapOp := func(a, b Value) (ret Value) {
// Our functions have various panicking edge cases involving incompatible
// uses of infinities. To keep things simple here we'll catch those
// and just return an unconstrained number.
defer func() {
if v := recover(); v != nil {
ret = UnknownVal(Number)
}
}()
return op(a, b)
}
return func(builder *RefinementBuilder) *RefinementBuilder {
aMin, _ := a.NumberLowerBound()
aMax, _ := a.NumberUpperBound()
bMin, _ := b.NumberLowerBound()
bMax, _ := b.NumberUpperBound()
v1 := wrapOp(aMin, bMin)
v2 := wrapOp(aMin, bMax)
v3 := wrapOp(aMax, bMin)
v4 := wrapOp(aMax, bMax)
newMin := mostNumberValue(Value.LessThan, v1, v2, v3, v4)
newMax := mostNumberValue(Value.GreaterThan, v1, v2, v3, v4)
if isInf := newMin.Equals(NegativeInfinity); isInf.IsKnown() && isInf.False() {
builder = builder.NumberRangeLowerBound(newMin, true)
}
if isInf := newMax.Equals(PositiveInfinity); isInf.IsKnown() && isInf.False() {
builder = builder.NumberRangeUpperBound(newMax, true)
}
return builder
}
}
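
// exampleIntervalAdd is an illustrative sketch (not part of the upstream
// file) of the interval arithmetic above, using Value.Add as the monotone
// operation: combining the ranges [1, 5] and [2, 3] yields an unknown
// number refined to [3, 8].
func exampleIntervalAdd() Value {
	a := UnknownVal(Number).Refine().
		NumberRangeLowerBound(NumberIntVal(1), true).
		NumberRangeUpperBound(NumberIntVal(5), true).
		NewValue()
	b := UnknownVal(Number).Refine().
		NumberRangeLowerBound(NumberIntVal(2), true).
		NumberRangeUpperBound(NumberIntVal(3), true).
		NewValue()

	refine := numericRangeArithmetic(Value.Add, a.Range(), b.Range())
	return refine(UnknownVal(Number).Refine()).NewValue()
}
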
func mostNumberValue(op func(i, j Value) Value, v1 Value, vN ...Value) Value {
r := v1
for _, v := range vN {
more := op(v, r)
if !more.IsKnown() {
return UnknownVal(Number)
}
if more.True() {
r = v
}
}
return r
}
// definitelyNotNull is a convenient helper for the common situation of checking
// whether a value could possibly be null.
//
// Returns true if the given value is either a known value that isn't null
// or an unknown value that has been refined to exclude null values from its
// range.
func definitelyNotNull(v Value) bool {
if v.IsKnown() {
return !v.IsNull()
}
return v.Range().DefinitelyNotNull()
}
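
// exampleDefinitelyNotNull is an illustrative sketch (not part of the
// upstream file) of the cases this helper distinguishes.
func exampleDefinitelyNotNull() {
	fmt.Println(definitelyNotNull(StringVal("x")))                     // true: known and non-null
	fmt.Println(definitelyNotNull(UnknownVal(String)))                 // false: could still be null
	fmt.Println(definitelyNotNull(UnknownVal(String).RefineNotNull())) // true: refined to exclude null
}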