vendor: github.com/aws/aws-sdk-go-v2/config v1.26.6

vendor github.com/aws/aws-sdk-go-v2/config v1.26.6 and related dependencies.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Sebastiaan van Stijn
2024-02-05 18:08:03 +01:00
parent 089982153f
commit 43ed470208
190 changed files with 12340 additions and 13837 deletions

@@ -1,3 +1,98 @@
# v1.7.3 (2024-01-22)
* **Bug Fix**: Remove invalid escaping of shared config values. All values in the shared config file will now be interpreted literally, save for fully-quoted strings which are unwrapped for legacy reasons.
# v1.7.2 (2023-12-08)
* **Bug Fix**: Correct loading of [services *] sections into shared config.
# v1.7.1 (2023-11-16)
* **Bug Fix**: Fix recognition of trailing comments in shared config properties. # or ; separators that aren't preceded by whitespace at the end of a property value should be considered part of it.
# v1.7.0 (2023-11-13)
* **Feature**: Replace the legacy config parser with a modern, less-strict implementation. Parsing failures within a section will now simply ignore the invalid line rather than silently drop the entire section.
# v1.6.0 (2023-11-09.2)
* **Feature**: BREAKFIX: In order to support subproperty parsing, invalid property definitions must not be ignored
# v1.5.2 (2023-11-09)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.5.1 (2023-11-07)
* **Bug Fix**: Fix subproperty performance regression
# v1.5.0 (2023-11-01)
* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file.
* **Dependency Update**: Updated to the latest SDK module versions
# v1.4.0 (2023-10-31)
* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.45 (2023-10-12)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.44 (2023-10-06)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.43 (2023-09-22)
* **Bug Fix**: Fixed a bug where merging `max_attempts` or `duration_seconds` fields across shared config files with invalid values would silently default them to 0.
* **Bug Fix**: Move type assertion of config values out of the parsing stage, which resolves an issue where the contents of a profile would silently be dropped with certain numeric formats.
# v1.3.42 (2023-08-21)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.41 (2023-08-18)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.40 (2023-08-17)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.39 (2023-08-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.38 (2023-07-31)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.37 (2023-07-28)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.36 (2023-07-13)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.35 (2023-06-13)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.34 (2023-04-24)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.33 (2023-04-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.32 (2023-03-21)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.31 (2023-03-10)
* **Dependency Update**: Updated to the latest SDK module versions
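Taken together, the v1.5.x through v1.7.x entries above change how property values in the shared config file are read: values are interpreted literally, a trailing comment only starts at whitespace followed by '#' or ';', and only a fully wrapping pair of quotes is stripped. A minimal sketch of that behavior, written as if it were a test inside the internal ini package (the package cannot be imported from outside aws-sdk-go-v2); the profile and key names are made up:

package ini

import (
	"strings"
	"testing"
)

func TestChangelogBehaviorSketch(t *testing.T) {
	src := strings.Join([]string{
		"[profile dev]",
		"region = us-west-2 # stripped: '#' is preceded by whitespace",
		"endpoint_url = http://localhost:4566#not-a-comment", // '#' not preceded by whitespace, so it stays
		`quoted = "  spaces preserved  "`,                    // fully wrapping quotes are removed
	}, "\n")

	sections, err := Parse(strings.NewReader(src), "example")
	if err != nil {
		t.Fatal(err)
	}
	section, _ := sections.GetSection("profile dev")
	_ = section.String("region")       // "us-west-2"
	_ = section.String("endpoint_url") // "http://localhost:4566#not-a-comment"
	_ = section.String("quoted")       // "  spaces preserved  "
}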

@@ -1,120 +0,0 @@
package ini
// ASTKind represents different states in the parse table
// and the type of AST that is being constructed
type ASTKind int
// ASTKind* is used in the parse table to transition between
// the different states
const (
ASTKindNone = ASTKind(iota)
ASTKindStart
ASTKindExpr
ASTKindEqualExpr
ASTKindStatement
ASTKindSkipStatement
ASTKindExprStatement
ASTKindSectionStatement
ASTKindNestedSectionStatement
ASTKindCompletedNestedSectionStatement
ASTKindCommentStatement
ASTKindCompletedSectionStatement
)
func (k ASTKind) String() string {
switch k {
case ASTKindNone:
return "none"
case ASTKindStart:
return "start"
case ASTKindExpr:
return "expr"
case ASTKindStatement:
return "stmt"
case ASTKindSectionStatement:
return "section_stmt"
case ASTKindExprStatement:
return "expr_stmt"
case ASTKindCommentStatement:
return "comment"
case ASTKindNestedSectionStatement:
return "nested_section_stmt"
case ASTKindCompletedSectionStatement:
return "completed_stmt"
case ASTKindSkipStatement:
return "skip"
default:
return ""
}
}
// AST allows us to determine what kind of node we
// are on, so casting may not be necessary.
//
// The root is always the first node in Children
type AST struct {
Kind ASTKind
Root Token
RootToken bool
Children []AST
}
func newAST(kind ASTKind, root AST, children ...AST) AST {
return AST{
Kind: kind,
Children: append([]AST{root}, children...),
}
}
func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST {
return AST{
Kind: kind,
Root: root,
RootToken: true,
Children: children,
}
}
// AppendChild will append to the list of children an AST has.
func (a *AST) AppendChild(child AST) {
a.Children = append(a.Children, child)
}
// GetRoot will return the root AST which can be the first entry
// in the children list or a token.
func (a *AST) GetRoot() AST {
if a.RootToken {
return *a
}
if len(a.Children) == 0 {
return AST{}
}
return a.Children[0]
}
// GetChildren will return the current AST's list of children
func (a *AST) GetChildren() []AST {
if len(a.Children) == 0 {
return []AST{}
}
if a.RootToken {
return a.Children
}
return a.Children[1:]
}
// SetChildren will set and override all children of the AST.
func (a *AST) SetChildren(children []AST) {
if a.RootToken {
a.Children = children
} else {
a.Children = append(a.Children[:1], children...)
}
}
// Start is used to indicate the starting state of the parse table.
var Start = newAST(ASTKindStart, AST{})
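For contrast with the line-oriented parser added later in this commit, the removed AST helpers above composed nodes roughly like this (an in-package sketch, not part of the actual diff):

package ini

// astSketch shows how a `key = value` pair was modeled with the removed helpers.
func astSketch() {
	key := newExpression(newToken(TokenLit, []rune("region"), StringType))
	val := newExpression(newToken(TokenLit, []rune("us-west-2"), StringType))

	// An equal expression uses the '=' token as its root and keeps the
	// key and value expressions as children.
	eq := newEqualExpr(key, newToken(TokenOp, []rune("="), NoneType))
	eq.AppendChild(val)

	_ = EqualExprKey(eq) // "region"
	_ = eq.GetChildren() // [key expr, value expr]
}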

@@ -1,11 +0,0 @@
package ini
var commaRunes = []rune(",")
func isComma(b rune) bool {
return b == ','
}
func newCommaToken() Token {
return newToken(TokenComma, commaRunes, NoneType)
}

@@ -1,35 +0,0 @@
package ini
// isComment will return whether or not the next byte(s) is a
// comment.
func isComment(b []rune) bool {
if len(b) == 0 {
return false
}
switch b[0] {
case ';':
return true
case '#':
return true
}
return false
}
// newCommentToken will create a comment token and
// return how many bytes were read.
func newCommentToken(b []rune) (Token, int, error) {
i := 0
for ; i < len(b); i++ {
if b[i] == '\n' {
break
}
if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' {
break
}
}
return newToken(TokenComment, b[:i], NoneType), i, nil
}

@@ -1,6 +0,0 @@
package ini
import (
// internal/ini module was carved out of this module
_ "github.com/aws/aws-sdk-go-v2"
)

@@ -1,43 +0,0 @@
// Package ini is an LL(1) parser for configuration files.
//
// Example:
// sections, err := ini.OpenFile("/path/to/file")
// if err != nil {
// panic(err)
// }
//
// profile := "foo"
// section, ok := sections.GetSection(profile)
// if !ok {
// fmt.Printf("section %q could not be found", profile)
// }
//
// Below is the BNF that describes this parser
//
// Grammar:
// stmt -> section | stmt'
// stmt' -> epsilon | expr
// expr -> value (stmt)* | equal_expr (stmt)*
// equal_expr -> value ( ':' | '=' ) equal_expr'
// equal_expr' -> number | string | quoted_string
// quoted_string -> " quoted_string'
// quoted_string' -> string quoted_string_end
// quoted_string_end -> "
//
// section -> [ section'
// section' -> section_value section_close
// section_value -> number | string_subset | boolean | quoted_string_subset
// quoted_string_subset -> " quoted_string_subset'
// quoted_string_subset' -> string_subset quoted_string_end
// quoted_string_subset -> "
// section_close -> ]
//
// value -> number | string_subset | boolean
// string -> ? UTF-8 Code-Points except '\n' (U+000A) and '\r\n' (U+000D U+000A) ?
// string_subset -> ? Code-points excepted by <string> grammar except ':' (U+003A), '=' (U+003D), '[' (U+005B), and ']' (U+005D) ?
//
// SkipState will skip (NL WS)+
//
// comment -> # comment' | ; comment'
// comment' -> epsilon | value
package ini

@@ -1,4 +0,0 @@
package ini
// emptyToken is used to satisfy the Token interface
var emptyToken = newToken(TokenNone, []rune{}, NoneType)

@@ -1,24 +0,0 @@
package ini
// newExpression will return an expression AST.
// Expr represents an expression
//
// grammar:
// expr -> string | number
func newExpression(tok Token) AST {
return newASTWithRootToken(ASTKindExpr, tok)
}
func newEqualExpr(left AST, tok Token) AST {
return newASTWithRootToken(ASTKindEqualExpr, tok, left)
}
// EqualExprKey will return a LHS value in the equal expr
func EqualExprKey(ast AST) string {
children := ast.GetChildren()
if len(children) == 0 || ast.Kind != ASTKindEqualExpr {
return ""
}
return string(children[0].Root.Raw())
}

@@ -3,4 +3,4 @@
package ini
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.3.31"
const goModuleVersion = "1.7.3"

@@ -1,13 +1,26 @@
// Package ini implements parsing of the AWS shared config file.
//
// Example:
// sections, err := ini.OpenFile("/path/to/file")
// if err != nil {
// panic(err)
// }
//
// profile := "foo"
// section, ok := sections.GetSection(profile)
// if !ok {
// fmt.Printf("section %q could not be found", profile)
// }
package ini
import (
"fmt"
"io"
"os"
"strings"
)
-// OpenFile takes a path to a given file, and will open and parse
-// that file.
+// OpenFile parses shared config from the given file path.
func OpenFile(path string) (sections Sections, err error) {
f, oerr := os.Open(path)
if oerr != nil {
@@ -26,33 +39,18 @@ func OpenFile(path string) (sections Sections, err error) {
return Parse(f, path)
}
-// Parse will parse the given file using the shared config
-// visitor.
-func Parse(f io.Reader, path string) (Sections, error) {
-tree, err := ParseAST(f)
+// Parse parses shared config from the given reader.
+func Parse(r io.Reader, path string) (Sections, error) {
+contents, err := io.ReadAll(r)
if err != nil {
-return Sections{}, err
+return Sections{}, fmt.Errorf("read all: %v", err)
}
-v := NewDefaultVisitor(path)
-if err = Walk(tree, v); err != nil {
-return Sections{}, err
-}
-return v.Sections, nil
-}
-// ParseBytes will parse the given bytes and return the parsed sections.
-func ParseBytes(b []byte) (Sections, error) {
-tree, err := ParseASTBytes(b)
+lines := strings.Split(string(contents), "\n")
+tokens, err := tokenize(lines)
if err != nil {
-return Sections{}, err
+return Sections{}, fmt.Errorf("tokenize: %v", err)
}
-v := NewDefaultVisitor("")
-if err = Walk(tree, v); err != nil {
-return Sections{}, err
-}
-return v.Sections, nil
+return parse(tokens, path), nil
}

@@ -1,157 +0,0 @@
package ini
import (
"bytes"
"io"
"io/ioutil"
)
// TokenType represents the various token types
type TokenType int
func (t TokenType) String() string {
switch t {
case TokenNone:
return "none"
case TokenLit:
return "literal"
case TokenSep:
return "sep"
case TokenOp:
return "op"
case TokenWS:
return "ws"
case TokenNL:
return "newline"
case TokenComment:
return "comment"
case TokenComma:
return "comma"
default:
return ""
}
}
// TokenType enums
const (
TokenNone = TokenType(iota)
TokenLit
TokenSep
TokenComma
TokenOp
TokenWS
TokenNL
TokenComment
)
type iniLexer struct{}
// Tokenize will return a list of tokens during lexical analysis of the
// io.Reader.
func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) {
b, err := ioutil.ReadAll(r)
if err != nil {
return nil, &UnableToReadFile{Err: err}
}
return l.tokenize(b)
}
func (l *iniLexer) tokenize(b []byte) ([]Token, error) {
runes := bytes.Runes(b)
var err error
n := 0
tokenAmount := countTokens(runes)
tokens := make([]Token, tokenAmount)
count := 0
for len(runes) > 0 && count < tokenAmount {
switch {
case isWhitespace(runes[0]):
tokens[count], n, err = newWSToken(runes)
case isComma(runes[0]):
tokens[count], n = newCommaToken(), 1
case isComment(runes):
tokens[count], n, err = newCommentToken(runes)
case isNewline(runes):
tokens[count], n, err = newNewlineToken(runes)
case isSep(runes):
tokens[count], n, err = newSepToken(runes)
case isOp(runes):
tokens[count], n, err = newOpToken(runes)
default:
tokens[count], n, err = newLitToken(runes)
}
if err != nil {
return nil, err
}
count++
runes = runes[n:]
}
return tokens[:count], nil
}
func countTokens(runes []rune) int {
count, n := 0, 0
var err error
for len(runes) > 0 {
switch {
case isWhitespace(runes[0]):
_, n, err = newWSToken(runes)
case isComma(runes[0]):
_, n = newCommaToken(), 1
case isComment(runes):
_, n, err = newCommentToken(runes)
case isNewline(runes):
_, n, err = newNewlineToken(runes)
case isSep(runes):
_, n, err = newSepToken(runes)
case isOp(runes):
_, n, err = newOpToken(runes)
default:
_, n, err = newLitToken(runes)
}
if err != nil {
return 0
}
count++
runes = runes[n:]
}
return count + 1
}
// Token holds metadata about a given value.
type Token struct {
t TokenType
ValueType ValueType
base int
raw []rune
}
var emptyValue = Value{}
func newToken(t TokenType, raw []rune, v ValueType) Token {
return Token{
t: t,
raw: raw,
ValueType: v,
}
}
// Raw returns the raw runes that were consumed
func (tok Token) Raw() []rune {
return tok.raw
}
// Type returns the token type
func (tok Token) Type() TokenType {
return tok.t
}

@@ -1,349 +0,0 @@
package ini
import (
"fmt"
"io"
)
// ParseState represents the current state of the parser.
type ParseState uint
// State enums for the parse table
const (
InvalidState ParseState = iota
// stmt -> value stmt'
StatementState
// stmt' -> MarkComplete | op stmt
StatementPrimeState
// value -> number | string | boolean | quoted_string
ValueState
// section -> [ section'
OpenScopeState
// section' -> value section_close
SectionState
// section_close -> ]
CloseScopeState
// SkipState will skip (NL WS)+
SkipState
// SkipTokenState will skip any token and push the previous
// state onto the stack.
SkipTokenState
// comment -> # comment' | ; comment'
// comment' -> MarkComplete | value
CommentState
// MarkComplete state will complete statements and move that
// to the completed AST list
MarkCompleteState
// TerminalState signifies that the tokens have been fully parsed
TerminalState
)
// parseTable is a state machine to dictate the grammar above.
var parseTable = map[ASTKind]map[TokenType]ParseState{
ASTKindStart: {
TokenLit: StatementState,
TokenSep: OpenScopeState,
TokenWS: SkipTokenState,
TokenNL: SkipTokenState,
TokenComment: CommentState,
TokenNone: TerminalState,
},
ASTKindCommentStatement: {
TokenLit: StatementState,
TokenSep: OpenScopeState,
TokenWS: SkipTokenState,
TokenNL: SkipTokenState,
TokenComment: CommentState,
TokenNone: MarkCompleteState,
},
ASTKindExpr: {
TokenOp: StatementPrimeState,
TokenLit: ValueState,
TokenSep: OpenScopeState,
TokenWS: ValueState,
TokenNL: SkipState,
TokenComment: CommentState,
TokenNone: MarkCompleteState,
},
ASTKindEqualExpr: {
TokenLit: ValueState,
TokenSep: ValueState,
TokenOp: ValueState,
TokenWS: SkipTokenState,
TokenNL: SkipState,
},
ASTKindStatement: {
TokenLit: SectionState,
TokenSep: CloseScopeState,
TokenWS: SkipTokenState,
TokenNL: SkipTokenState,
TokenComment: CommentState,
TokenNone: MarkCompleteState,
},
ASTKindExprStatement: {
TokenLit: ValueState,
TokenSep: ValueState,
TokenOp: ValueState,
TokenWS: ValueState,
TokenNL: MarkCompleteState,
TokenComment: CommentState,
TokenNone: TerminalState,
TokenComma: SkipState,
},
ASTKindSectionStatement: {
TokenLit: SectionState,
TokenOp: SectionState,
TokenSep: CloseScopeState,
TokenWS: SectionState,
TokenNL: SkipTokenState,
},
ASTKindCompletedSectionStatement: {
TokenWS: SkipTokenState,
TokenNL: SkipTokenState,
TokenLit: StatementState,
TokenSep: OpenScopeState,
TokenComment: CommentState,
TokenNone: MarkCompleteState,
},
ASTKindSkipStatement: {
TokenLit: StatementState,
TokenSep: OpenScopeState,
TokenWS: SkipTokenState,
TokenNL: SkipTokenState,
TokenComment: CommentState,
TokenNone: TerminalState,
},
}
// ParseAST will parse input from an io.Reader using
// an LL(1) parser.
func ParseAST(r io.Reader) ([]AST, error) {
lexer := iniLexer{}
tokens, err := lexer.Tokenize(r)
if err != nil {
return []AST{}, err
}
return parse(tokens)
}
// ParseASTBytes will parse input from a byte slice using
// an LL(1) parser.
func ParseASTBytes(b []byte) ([]AST, error) {
lexer := iniLexer{}
tokens, err := lexer.tokenize(b)
if err != nil {
return []AST{}, err
}
return parse(tokens)
}
func parse(tokens []Token) ([]AST, error) {
start := Start
stack := newParseStack(3, len(tokens))
stack.Push(start)
s := newSkipper()
loop:
for stack.Len() > 0 {
k := stack.Pop()
var tok Token
if len(tokens) == 0 {
// this occurs when all the tokens have been processed
// but reduction of what's left on the stack needs to
// occur.
tok = emptyToken
} else {
tok = tokens[0]
}
step := parseTable[k.Kind][tok.Type()]
if s.ShouldSkip(tok) {
// being in a skip state with no tokens will break out of
// the parse loop since there is nothing left to process.
if len(tokens) == 0 {
break loop
}
// if should skip is true, we skip the tokens until should skip is set to false.
step = SkipTokenState
}
switch step {
case TerminalState:
// Finished parsing. Push what should be the last
// statement to the stack. If there is anything left
// on the stack, an error in parsing has occurred.
if k.Kind != ASTKindStart {
stack.MarkComplete(k)
}
break loop
case SkipTokenState:
// When skipping a token, the previous state was popped off the stack.
// To maintain the correct state, the previous state will be pushed
// onto the stack.
stack.Push(k)
case StatementState:
if k.Kind != ASTKindStart {
stack.MarkComplete(k)
}
expr := newExpression(tok)
stack.Push(expr)
case StatementPrimeState:
if tok.Type() != TokenOp {
stack.MarkComplete(k)
continue
}
if k.Kind != ASTKindExpr {
return nil, NewParseError(
fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k),
)
}
k = trimSpaces(k)
expr := newEqualExpr(k, tok)
stack.Push(expr)
case ValueState:
// ValueState requires the previous state to either be an equal expression
// or an expression statement.
switch k.Kind {
case ASTKindEqualExpr:
// assigning a value to some key
k.AppendChild(newExpression(tok))
stack.Push(newExprStatement(k))
case ASTKindExpr:
k.Root.raw = append(k.Root.raw, tok.Raw()...)
stack.Push(k)
case ASTKindExprStatement:
root := k.GetRoot()
children := root.GetChildren()
if len(children) == 0 {
return nil, NewParseError(
fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind),
)
}
rhs := children[len(children)-1]
if rhs.Root.ValueType != QuotedStringType {
rhs.Root.ValueType = StringType
rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...)
}
children[len(children)-1] = rhs
root.SetChildren(children)
stack.Push(k)
}
case OpenScopeState:
if !runeCompare(tok.Raw(), openBrace) {
return nil, NewParseError("expected '['")
}
// If OpenScopeState is not at the start, we must mark the previous ast as complete
//
// for example: if previous ast was a skip statement;
// we should mark it as complete before we create a new statement
if k.Kind != ASTKindStart {
stack.MarkComplete(k)
}
stmt := newStatement()
stack.Push(stmt)
case CloseScopeState:
if !runeCompare(tok.Raw(), closeBrace) {
return nil, NewParseError("expected ']'")
}
k = trimSpaces(k)
stack.Push(newCompletedSectionStatement(k))
case SectionState:
var stmt AST
switch k.Kind {
case ASTKindStatement:
// If there are multiple literals inside of a scope declaration,
// then the current token's raw value will be appended to the Name.
//
// This handles cases like [ profile default ]
//
// k will represent a SectionStatement with the children representing
// the label of the section
stmt = newSectionStatement(tok)
case ASTKindSectionStatement:
k.Root.raw = append(k.Root.raw, tok.Raw()...)
stmt = k
default:
return nil, NewParseError(
fmt.Sprintf("invalid statement: expected statement: %v", k.Kind),
)
}
stack.Push(stmt)
case MarkCompleteState:
if k.Kind != ASTKindStart {
stack.MarkComplete(k)
}
if stack.Len() == 0 {
stack.Push(start)
}
case SkipState:
stack.Push(newSkipStatement(k))
s.Skip()
case CommentState:
if k.Kind == ASTKindStart {
stack.Push(k)
} else {
stack.MarkComplete(k)
}
stmt := newCommentStatement(tok)
stack.Push(stmt)
default:
return nil, NewParseError(
fmt.Sprintf("invalid state with ASTKind %v and TokenType %v",
k.Kind, tok.Type()))
}
if len(tokens) > 0 {
tokens = tokens[1:]
}
}
// this occurs when a statement has not been completed
if stack.top > 1 {
return nil, NewParseError(fmt.Sprintf("incomplete ini expression"))
}
// returns a sublist which excludes the start symbol
return stack.List(), nil
}
// trimSpaces will trim spaces on the left and right hand side of
// the literal.
func trimSpaces(k AST) AST {
// trim left hand side of spaces
for i := 0; i < len(k.Root.raw); i++ {
if !isWhitespace(k.Root.raw[i]) {
break
}
k.Root.raw = k.Root.raw[1:]
i--
}
// trim right hand side of spaces
for i := len(k.Root.raw) - 1; i >= 0; i-- {
if !isWhitespace(k.Root.raw[i]) {
break
}
k.Root.raw = k.Root.raw[:len(k.Root.raw)-1]
}
return k
}
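Everything deleted above (lexer, parse table, AST stack) formed the old pipeline, which callers drove roughly as follows; a sketch using only the removed exported functions, with an illustrative file path:

package ini

import "strings"

// oldPipelineSketch: lexer -> ParseAST -> Walk with a DefaultVisitor that
// accumulates Sections. This is what the new tokenize/parse pair replaces.
func oldPipelineSketch(src string) (Sections, error) {
	tree, err := ParseAST(strings.NewReader(src))
	if err != nil {
		return Sections{}, err
	}
	v := NewDefaultVisitor("/path/to/config") // path is recorded as each property's source file
	if err := Walk(tree, v); err != nil {
		return Sections{}, err
	}
	return v.Sections, nil
}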

@@ -1,336 +0,0 @@
package ini
import (
"fmt"
"strconv"
"strings"
"unicode"
)
var (
runesTrue = []rune("true")
runesFalse = []rune("false")
)
var literalValues = [][]rune{
runesTrue,
runesFalse,
}
func isBoolValue(b []rune) bool {
for _, lv := range literalValues {
if isCaselessLitValue(lv, b) {
return true
}
}
return false
}
func isLitValue(want, have []rune) bool {
if len(have) < len(want) {
return false
}
for i := 0; i < len(want); i++ {
if want[i] != have[i] {
return false
}
}
return true
}
// isCaselessLitValue is a caseless value comparison, assumes want is already lower-cased for efficiency.
func isCaselessLitValue(want, have []rune) bool {
if len(have) < len(want) {
return false
}
for i := 0; i < len(want); i++ {
if want[i] != unicode.ToLower(have[i]) {
return false
}
}
return true
}
// isNumberValue will return whether or not the leading characters in
// a byte slice are a number. A number is delimited by whitespace or
// the newline token.
//
// A number is defined to be in a binary, octal, decimal (int | float), hex format,
// or in scientific notation.
func isNumberValue(b []rune) bool {
negativeIndex := 0
helper := numberHelper{}
needDigit := false
for i := 0; i < len(b); i++ {
negativeIndex++
switch b[i] {
case '-':
if helper.IsNegative() || negativeIndex != 1 {
return false
}
helper.Determine(b[i])
needDigit = true
continue
case 'e', 'E':
if err := helper.Determine(b[i]); err != nil {
return false
}
negativeIndex = 0
needDigit = true
continue
case 'b':
if helper.numberFormat == hex {
break
}
fallthrough
case 'o', 'x':
needDigit = true
if i == 0 {
return false
}
fallthrough
case '.':
if err := helper.Determine(b[i]); err != nil {
return false
}
needDigit = true
continue
}
if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) {
return !needDigit
}
if !helper.CorrectByte(b[i]) {
return false
}
needDigit = false
}
return !needDigit
}
func isValid(b []rune) (bool, int, error) {
if len(b) == 0 {
// TODO: should probably return an error
return false, 0, nil
}
return isValidRune(b[0]), 1, nil
}
func isValidRune(r rune) bool {
return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n'
}
// ValueType is an enum that will signify what type
// the Value is
type ValueType int
func (v ValueType) String() string {
switch v {
case NoneType:
return "NONE"
case DecimalType:
return "FLOAT"
case IntegerType:
return "INT"
case StringType:
return "STRING"
case BoolType:
return "BOOL"
}
return ""
}
// ValueType enums
const (
NoneType = ValueType(iota)
DecimalType
IntegerType
StringType
QuotedStringType
BoolType
)
// Value is a union container
type Value struct {
Type ValueType
raw []rune
integer int64
decimal float64
boolean bool
str string
}
func newValue(t ValueType, base int, raw []rune) (Value, error) {
v := Value{
Type: t,
raw: raw,
}
var err error
switch t {
case DecimalType:
v.decimal, err = strconv.ParseFloat(string(raw), 64)
case IntegerType:
if base != 10 {
raw = raw[2:]
}
v.integer, err = strconv.ParseInt(string(raw), base, 64)
case StringType:
v.str = string(raw)
case QuotedStringType:
v.str = string(raw[1 : len(raw)-1])
case BoolType:
v.boolean = isCaselessLitValue(runesTrue, v.raw)
}
// issue 2253
//
// if the value trying to be parsed is too large, then we will use
// the 'StringType' and raw value instead.
if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange {
v.Type = StringType
v.str = string(raw)
err = nil
}
return v, err
}
// NewStringValue returns a Value type generated using a string input.
func NewStringValue(str string) (Value, error) {
return newValue(StringType, 10, []rune(str))
}
// NewIntValue returns a Value type generated using an int64 input.
func NewIntValue(i int64) (Value, error) {
v := strconv.FormatInt(i, 10)
return newValue(IntegerType, 10, []rune(v))
}
func (v Value) String() string {
switch v.Type {
case DecimalType:
return fmt.Sprintf("decimal: %f", v.decimal)
case IntegerType:
return fmt.Sprintf("integer: %d", v.integer)
case StringType:
return fmt.Sprintf("string: %s", string(v.raw))
case QuotedStringType:
return fmt.Sprintf("quoted string: %s", string(v.raw))
case BoolType:
return fmt.Sprintf("bool: %t", v.boolean)
default:
return "union not set"
}
}
func newLitToken(b []rune) (Token, int, error) {
n := 0
var err error
token := Token{}
if b[0] == '"' {
n, err = getStringValue(b)
if err != nil {
return token, n, err
}
token = newToken(TokenLit, b[:n], QuotedStringType)
} else if isNumberValue(b) {
var base int
base, n, err = getNumericalValue(b)
if err != nil {
return token, 0, err
}
value := b[:n]
vType := IntegerType
if contains(value, '.') || hasExponent(value) {
vType = DecimalType
}
token = newToken(TokenLit, value, vType)
token.base = base
} else if isBoolValue(b) {
n, err = getBoolValue(b)
token = newToken(TokenLit, b[:n], BoolType)
} else {
n, err = getValue(b)
token = newToken(TokenLit, b[:n], StringType)
}
return token, n, err
}
// IntValue returns an integer value
func (v Value) IntValue() int64 {
return v.integer
}
// FloatValue returns a float value
func (v Value) FloatValue() float64 {
return v.decimal
}
// BoolValue returns a bool value
func (v Value) BoolValue() bool {
return v.boolean
}
func isTrimmable(r rune) bool {
switch r {
case '\n', ' ':
return true
}
return false
}
// StringValue returns the string value
func (v Value) StringValue() string {
switch v.Type {
case StringType:
return strings.TrimFunc(string(v.raw), isTrimmable)
case QuotedStringType:
// preserve all characters in the quotes
return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1]))
default:
return strings.TrimFunc(string(v.raw), isTrimmable)
}
}
func contains(runes []rune, c rune) bool {
for i := 0; i < len(runes); i++ {
if runes[i] == c {
return true
}
}
return false
}
func runeCompare(v1 []rune, v2 []rune) bool {
if len(v1) != len(v2) {
return false
}
for i := 0; i < len(v1); i++ {
if v1[i] != v2[i] {
return false
}
}
return true
}
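One behavior of the removed Value type worth noting: values were converted eagerly when literal tokens were built, and, per the issue 2253 comment above, integers too large for int64 quietly degraded to StringType. A sketch against the removed constructors:

package ini

// oldValueSketch illustrates the removed eager conversion and its fallback.
func oldValueSketch() {
	v, _ := newValue(IntegerType, 10, []rune("42"))
	_ = v.IntValue() // 42

	// Out of range for int64: the error is swallowed and the value is kept as a string.
	big, _ := newValue(IntegerType, 10, []rune("99999999999999999999"))
	_ = big.Type          // StringType
	_ = big.StringValue() // "99999999999999999999"
}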

@@ -1,30 +0,0 @@
package ini
func isNewline(b []rune) bool {
if len(b) == 0 {
return false
}
if b[0] == '\n' {
return true
}
if len(b) < 2 {
return false
}
return b[0] == '\r' && b[1] == '\n'
}
func newNewlineToken(b []rune) (Token, int, error) {
i := 1
if b[0] == '\r' && isNewline(b[1:]) {
i++
}
if !isNewline([]rune(b[:i])) {
return emptyToken, 0, NewParseError("invalid new line token")
}
return newToken(TokenNL, b[:i], NoneType), i, nil
}

@@ -1,152 +0,0 @@
package ini
import (
"bytes"
"fmt"
"strconv"
)
const (
none = numberFormat(iota)
binary
octal
decimal
hex
exponent
)
type numberFormat int
// numberHelper is used to dictate what format a number is in
// and what to do for negative values. Since -1e-4 is a valid
// number, we cannot simply check for duplicate negatives.
type numberHelper struct {
numberFormat numberFormat
negative bool
negativeExponent bool
}
func (b numberHelper) Exists() bool {
return b.numberFormat != none
}
func (b numberHelper) IsNegative() bool {
return b.negative || b.negativeExponent
}
func (b *numberHelper) Determine(c rune) error {
if b.Exists() {
return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c)))
}
switch c {
case 'b':
b.numberFormat = binary
case 'o':
b.numberFormat = octal
case 'x':
b.numberFormat = hex
case 'e', 'E':
b.numberFormat = exponent
case '-':
if b.numberFormat != exponent {
b.negative = true
} else {
b.negativeExponent = true
}
case '.':
b.numberFormat = decimal
default:
return NewParseError(fmt.Sprintf("invalid number character: %v", string(c)))
}
return nil
}
func (b numberHelper) CorrectByte(c rune) bool {
switch {
case b.numberFormat == binary:
if !isBinaryByte(c) {
return false
}
case b.numberFormat == octal:
if !isOctalByte(c) {
return false
}
case b.numberFormat == hex:
if !isHexByte(c) {
return false
}
case b.numberFormat == decimal:
if !isDigit(c) {
return false
}
case b.numberFormat == exponent:
if !isDigit(c) {
return false
}
case b.negativeExponent:
if !isDigit(c) {
return false
}
case b.negative:
if !isDigit(c) {
return false
}
default:
if !isDigit(c) {
return false
}
}
return true
}
func (b numberHelper) Base() int {
switch b.numberFormat {
case binary:
return 2
case octal:
return 8
case hex:
return 16
default:
return 10
}
}
func (b numberHelper) String() string {
buf := bytes.Buffer{}
i := 0
switch b.numberFormat {
case binary:
i++
buf.WriteString(strconv.Itoa(i) + ": binary format\n")
case octal:
i++
buf.WriteString(strconv.Itoa(i) + ": octal format\n")
case hex:
i++
buf.WriteString(strconv.Itoa(i) + ": hex format\n")
case exponent:
i++
buf.WriteString(strconv.Itoa(i) + ": exponent format\n")
default:
i++
buf.WriteString(strconv.Itoa(i) + ": integer format\n")
}
if b.negative {
i++
buf.WriteString(strconv.Itoa(i) + ": negative format\n")
}
if b.negativeExponent {
i++
buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n")
}
return buf.String()
}
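numberHelper existed so the removed lexer could validate each character of a numeric literal and pick the base to parse it with; roughly, for a hex literal (sketch against the removed API):

package ini

// numberHelperSketch walks the format tracking for a literal like "0x1F".
func numberHelperSketch() {
	h := numberHelper{}
	_ = h.Determine('x')   // records hex format (the caller has already seen the leading "0")
	_ = h.CorrectByte('1') // true
	_ = h.CorrectByte('F') // true, hex digits are allowed
	_ = h.CorrectByte('G') // false, not a hex digit
	_ = h.Base()           // 16, later handed to strconv.ParseInt
}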

@@ -1,39 +0,0 @@
package ini
import (
"fmt"
)
var (
equalOp = []rune("=")
equalColonOp = []rune(":")
)
func isOp(b []rune) bool {
if len(b) == 0 {
return false
}
switch b[0] {
case '=':
return true
case ':':
return true
default:
return false
}
}
func newOpToken(b []rune) (Token, int, error) {
tok := Token{}
switch b[0] {
case '=':
tok = newToken(TokenOp, equalOp, NoneType)
case ':':
tok = newToken(TokenOp, equalColonOp, NoneType)
default:
return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0]))
}
return tok, 1, nil
}

@@ -0,0 +1,109 @@
package ini
import (
"fmt"
"strings"
)
func parse(tokens []lineToken, path string) Sections {
parser := &parser{
path: path,
sections: NewSections(),
}
parser.parse(tokens)
return parser.sections
}
type parser struct {
csection, ckey string // current state
path string // source file path
sections Sections // parse result
}
func (p *parser) parse(tokens []lineToken) {
for _, otok := range tokens {
switch tok := otok.(type) {
case *lineTokenProfile:
p.handleProfile(tok)
case *lineTokenProperty:
p.handleProperty(tok)
case *lineTokenSubProperty:
p.handleSubProperty(tok)
case *lineTokenContinuation:
p.handleContinuation(tok)
}
}
}
func (p *parser) handleProfile(tok *lineTokenProfile) {
name := tok.Name
if tok.Type != "" {
name = fmt.Sprintf("%s %s", tok.Type, tok.Name)
}
p.ckey = ""
p.csection = name
if _, ok := p.sections.container[name]; !ok {
p.sections.container[name] = NewSection(name)
}
}
func (p *parser) handleProperty(tok *lineTokenProperty) {
if p.csection == "" {
return // LEGACY: don't error on "global" properties
}
p.ckey = tok.Key
if _, ok := p.sections.container[p.csection].values[tok.Key]; ok {
section := p.sections.container[p.csection]
section.Logs = append(p.sections.container[p.csection].Logs,
fmt.Sprintf(
"For profile: %v, overriding %v value, with a %v value found in a duplicate profile defined later in the same file %v. \n",
p.csection, tok.Key, tok.Key, p.path,
),
)
p.sections.container[p.csection] = section
}
p.sections.container[p.csection].values[tok.Key] = Value{
str: tok.Value,
}
p.sections.container[p.csection].SourceFile[tok.Key] = p.path
}
func (p *parser) handleSubProperty(tok *lineTokenSubProperty) {
if p.csection == "" {
return // LEGACY: don't error on "global" properties
}
if p.ckey == "" || p.sections.container[p.csection].values[p.ckey].str != "" {
// This is an "orphaned" subproperty, either because it's at
// the beginning of a section or because the last property's
// value isn't empty. Either way we're lenient here and
// "promote" this to a normal property.
p.handleProperty(&lineTokenProperty{
Key: tok.Key,
Value: strings.TrimSpace(trimPropertyComment(tok.Value)),
})
return
}
if p.sections.container[p.csection].values[p.ckey].mp == nil {
p.sections.container[p.csection].values[p.ckey] = Value{
mp: map[string]string{},
}
}
p.sections.container[p.csection].values[p.ckey].mp[tok.Key] = tok.Value
}
func (p *parser) handleContinuation(tok *lineTokenContinuation) {
if p.ckey == "" {
return
}
value, _ := p.sections.container[p.csection].values[p.ckey]
if value.str != "" && value.mp == nil {
value.str = fmt.Sprintf("%s\n%s", value.str, tok.Value)
}
p.sections.container[p.csection].values[p.ckey] = value
}
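The parser above is a small state machine over line tokens: a profile line switches the current section, a property line sets the current key, an indented sub-property fills a map value under that key, and an indented continuation appends to a non-empty value. A sketch of that flow as an in-package test (tokenize and parse are unexported); the section and key names are illustrative:

package ini

import "testing"

func TestParserFlowSketch(t *testing.T) {
	lines := []string{
		"[services local]",
		"s3 =",                                   // current key, empty value...
		"  endpoint_url = http://localhost:4566", // ...so this indented line becomes a sub-property
		"[profile dev]",
		"ca_bundle = /tmp/bundle.pem",
		"  appended as a continuation line", // previous value is non-empty, so this is a continuation
	}

	tokens, err := tokenize(lines)
	if err != nil {
		t.Fatal(err)
	}
	sections := parse(tokens, "sketch")

	svc, _ := sections.GetSection("services local")
	_ = svc.Map("s3") // map[endpoint_url:http://localhost:4566]

	dev, _ := sections.GetSection("profile dev")
	_ = dev.String("ca_bundle") // "/tmp/bundle.pem\nappended as a continuation line"
}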

@@ -1,19 +0,0 @@
package ini
// ParseError is an error which is returned during any part of
// the parsing process.
type ParseError struct {
msg string
}
// NewParseError will return a new ParseError where message
// is the description of the error.
func NewParseError(message string) *ParseError {
return &ParseError{
msg: message,
}
}
func (err *ParseError) Error() string {
return err.msg
}

@@ -1,60 +0,0 @@
package ini
import (
"bytes"
"fmt"
)
// ParseStack is a stack that contains a container (the stack portion)
// and a list of the ASTs that have been successfully
// parsed.
type ParseStack struct {
top int
container []AST
list []AST
index int
}
func newParseStack(sizeContainer, sizeList int) ParseStack {
return ParseStack{
container: make([]AST, sizeContainer),
list: make([]AST, sizeList),
}
}
// Pop will return and truncate the last container element.
func (s *ParseStack) Pop() AST {
s.top--
return s.container[s.top]
}
// Push will add the new AST to the container
func (s *ParseStack) Push(ast AST) {
s.container[s.top] = ast
s.top++
}
// MarkComplete will append the AST to the list of completed statements
func (s *ParseStack) MarkComplete(ast AST) {
s.list[s.index] = ast
s.index++
}
// List will return the completed statements
func (s ParseStack) List() []AST {
return s.list[:s.index]
}
// Len will return the length of the container
func (s *ParseStack) Len() int {
return s.top
}
func (s ParseStack) String() string {
buf := bytes.Buffer{}
for i, node := range s.list {
buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node))
}
return buf.String()
}

@@ -0,0 +1,157 @@
package ini
import (
"sort"
)
// Sections is a map of Section structures that represent
// a configuration.
type Sections struct {
container map[string]Section
}
// NewSections returns empty ini Sections
func NewSections() Sections {
return Sections{
container: make(map[string]Section, 0),
}
}
// GetSection will return section p. If section p does not exist,
// false will be returned in the second parameter.
func (t Sections) GetSection(p string) (Section, bool) {
v, ok := t.container[p]
return v, ok
}
// HasSection denotes if Sections consist of a section with
// provided name.
func (t Sections) HasSection(p string) bool {
_, ok := t.container[p]
return ok
}
// SetSection sets a section value for provided section name.
func (t Sections) SetSection(p string, v Section) Sections {
t.container[p] = v
return t
}
// DeleteSection deletes a section entry/value for the provided section name.
func (t Sections) DeleteSection(p string) {
delete(t.container, p)
}
// values represents a map of union values.
type values map[string]Value
// List will return a list of all sections that were successfully
// parsed.
func (t Sections) List() []string {
keys := make([]string, len(t.container))
i := 0
for k := range t.container {
keys[i] = k
i++
}
sort.Strings(keys)
return keys
}
// Section contains a name and values. This represents
// a sectioned entry in a configuration file.
type Section struct {
// Name is the Section profile name
Name string
// values are the values within parsed profile
values values
// Errors is the list of errors
Errors []error
// Logs is the list of logs
Logs []string
// SourceFile is the INI Source file from where this section
// was retrieved. The key is the property; the value is the
// source file the property was retrieved from.
SourceFile map[string]string
}
// NewSection returns an initialized section for the given name
func NewSection(name string) Section {
return Section{
Name: name,
values: values{},
SourceFile: map[string]string{},
}
}
// List will return a list of all
// keys in the section's values
func (t Section) List() []string {
keys := make([]string, len(t.values))
i := 0
for k := range t.values {
keys[i] = k
i++
}
sort.Strings(keys)
return keys
}
// UpdateSourceFile updates source file for a property to provided filepath.
func (t Section) UpdateSourceFile(property string, filepath string) {
t.SourceFile[property] = filepath
}
// UpdateValue updates value for a provided key with provided value
func (t Section) UpdateValue(k string, v Value) error {
t.values[k] = v
return nil
}
// Has will return whether or not an entry exists in a given section
func (t Section) Has(k string) bool {
_, ok := t.values[k]
return ok
}
// ValueType will return what type the union is set to. If
// k was not found, the NoneType will be returned.
func (t Section) ValueType(k string) (ValueType, bool) {
v, ok := t.values[k]
return v.Type, ok
}
// Bool returns a bool value at k
func (t Section) Bool(k string) (bool, bool) {
return t.values[k].BoolValue()
}
// Int returns an integer value at k
func (t Section) Int(k string) (int64, bool) {
return t.values[k].IntValue()
}
// Map returns a map value at k
func (t Section) Map(k string) map[string]string {
return t.values[k].MapValue()
}
// Float64 returns a float value at k
func (t Section) Float64(k string) (float64, bool) {
return t.values[k].FloatValue()
}
// String returns the string value at k
func (t Section) String(k string) string {
_, ok := t.values[k]
if !ok {
return ""
}
return t.values[k].StringValue()
}
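Compared with the visitor-era copy of this file further down in the diff, the accessors here no longer trust an eagerly typed Value: Bool, Int and Float64 now return an ok flag and convert on demand. A small in-package sketch; the key names are illustrative:

package ini

// sectionAccessSketch assumes sections came from Parse or OpenFile.
func sectionAccessSketch(sections Sections) {
	if !sections.HasSection("profile dev") {
		return
	}
	section, _ := sections.GetSection("profile dev")

	if attempts, ok := section.Int("max_attempts"); ok {
		_ = attempts // only set when the raw string parses as an integer
	}
	if useFIPS, ok := section.Bool("use_fips_endpoint"); ok {
		_ = useFIPS // only "true"/"false" (any case) are recognized
	}
	_ = section.String("region") // raw string, "" when the key is missing
	_ = section.Map("s3")        // sub-property map, nil when the key has none
	_ = section.List()           // sorted list of keys in the section
}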

@@ -1,41 +0,0 @@
package ini
import (
"fmt"
)
var (
emptyRunes = []rune{}
)
func isSep(b []rune) bool {
if len(b) == 0 {
return false
}
switch b[0] {
case '[', ']':
return true
default:
return false
}
}
var (
openBrace = []rune("[")
closeBrace = []rune("]")
)
func newSepToken(b []rune) (Token, int, error) {
tok := Token{}
switch b[0] {
case '[':
tok = newToken(TokenSep, openBrace, NoneType)
case ']':
tok = newToken(TokenSep, closeBrace, NoneType)
default:
return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0]))
}
return tok, 1, nil
}

@@ -1,45 +0,0 @@
package ini
// skipper is used to skip certain blocks of an ini file.
// Currently skipper is used to skip nested blocks of ini
// files. See example below
//
// [ foo ]
// nested = ; this section will be skipped
// a=b
// c=d
// bar=baz ; this will be included
type skipper struct {
shouldSkip bool
TokenSet bool
prevTok Token
}
func newSkipper() skipper {
return skipper{
prevTok: emptyToken,
}
}
func (s *skipper) ShouldSkip(tok Token) bool {
// should skip state will be modified only if previous token was new line (NL);
// and the current token is not WhiteSpace (WS).
if s.shouldSkip &&
s.prevTok.Type() == TokenNL &&
tok.Type() != TokenWS {
s.Continue()
return false
}
s.prevTok = tok
return s.shouldSkip
}
func (s *skipper) Skip() {
s.shouldSkip = true
}
func (s *skipper) Continue() {
s.shouldSkip = false
s.prevTok = emptyToken
}

@@ -1,35 +0,0 @@
package ini
// Statement is an empty AST mostly used for transitioning states.
func newStatement() AST {
return newAST(ASTKindStatement, AST{})
}
// SectionStatement represents a section AST
func newSectionStatement(tok Token) AST {
return newASTWithRootToken(ASTKindSectionStatement, tok)
}
// ExprStatement represents a completed expression AST
func newExprStatement(ast AST) AST {
return newAST(ASTKindExprStatement, ast)
}
// CommentStatement represents a comment in the ini definition.
//
// grammar:
// comment -> #comment' | ;comment'
// comment' -> epsilon | value
func newCommentStatement(tok Token) AST {
return newAST(ASTKindCommentStatement, newExpression(tok))
}
// CompletedSectionStatement represents a completed section
func newCompletedSectionStatement(ast AST) AST {
return newAST(ASTKindCompletedSectionStatement, ast)
}
// SkipStatement is used to skip whole statements
func newSkipStatement(ast AST) AST {
return newAST(ASTKindSkipStatement, ast)
}

@@ -0,0 +1,89 @@
package ini
import (
"strings"
)
func trimProfileComment(s string) string {
r, _, _ := strings.Cut(s, "#")
r, _, _ = strings.Cut(r, ";")
return r
}
func trimPropertyComment(s string) string {
r, _, _ := strings.Cut(s, " #")
r, _, _ = strings.Cut(r, " ;")
r, _, _ = strings.Cut(r, "\t#")
r, _, _ = strings.Cut(r, "\t;")
return r
}
// assumes no surrounding comment
func splitProperty(s string) (string, string, bool) {
equalsi := strings.Index(s, "=")
coloni := strings.Index(s, ":") // LEGACY: also supported for property assignment
sep := "="
if equalsi == -1 || coloni != -1 && coloni < equalsi {
sep = ":"
}
k, v, ok := strings.Cut(s, sep)
if !ok {
return "", "", false
}
return strings.TrimSpace(k), strings.TrimSpace(v), true
}
// assumes no surrounding comment, whitespace, or profile brackets
func splitProfile(s string) (string, string) {
var first int
for i, r := range s {
if isLineSpace(r) {
if first == 0 {
first = i
}
} else {
if first != 0 {
return s[:first], s[i:]
}
}
}
if first == 0 {
return "", s // type component is effectively blank
}
return "", ""
}
func isLineSpace(r rune) bool {
return r == ' ' || r == '\t'
}
func unquote(s string) string {
if isSingleQuoted(s) || isDoubleQuoted(s) {
return s[1 : len(s)-1]
}
return s
}
// applies various legacy conversions to property values:
// - remove wrapping single/double quotes
func legacyStrconv(s string) string {
s = unquote(s)
return s
}
func isSingleQuoted(s string) bool {
return hasAffixes(s, "'", "'")
}
func isDoubleQuoted(s string) bool {
return hasAffixes(s, `"`, `"`)
}
func isBracketed(s string) bool {
return hasAffixes(s, "[", "]")
}
func hasAffixes(s, left, right string) bool {
return strings.HasPrefix(s, left) && strings.HasSuffix(s, right)
}
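These helpers carry the legacy splitting rules: a value is cut at the first whitespace-preceded '#' or ';', the earlier of '=' and ':' wins as the assignment separator, and only a fully wrapping pair of quotes is removed. A few concrete inputs (in-package sketch, since the helpers are unexported):

package ini

// stringHelpersSketch records what each helper returns for sample inputs.
func stringHelpersSketch() {
	_ = trimPropertyComment("us-west-2 # dev region") // "us-west-2"
	_ = trimPropertyComment("http://host#fragment")   // unchanged, '#' is not preceded by whitespace

	k, v, _ := splitProperty("endpoint_url = http://localhost:4566")
	_, _ = k, v // "endpoint_url", "http://localhost:4566" (the '=' comes before the ':' in the URL)

	_ = legacyStrconv(`"  keep spaces  "`) // "  keep spaces  " with the wrapping quotes removed
	_ = legacyStrconv("it's fine")         // unchanged, quotes must wrap the whole value
}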

@@ -0,0 +1,32 @@
package ini
type lineToken interface {
isLineToken()
}
type lineTokenProfile struct {
Type string
Name string
}
func (*lineTokenProfile) isLineToken() {}
type lineTokenProperty struct {
Key string
Value string
}
func (*lineTokenProperty) isLineToken() {}
type lineTokenContinuation struct {
Value string
}
func (*lineTokenContinuation) isLineToken() {}
type lineTokenSubProperty struct {
Key string
Value string
}
func (*lineTokenSubProperty) isLineToken() {}

@@ -0,0 +1,92 @@
package ini
import (
"strings"
)
func tokenize(lines []string) ([]lineToken, error) {
tokens := make([]lineToken, 0, len(lines))
for _, line := range lines {
if len(strings.TrimSpace(line)) == 0 || isLineComment(line) {
continue
}
if tok := asProfile(line); tok != nil {
tokens = append(tokens, tok)
} else if tok := asProperty(line); tok != nil {
tokens = append(tokens, tok)
} else if tok := asSubProperty(line); tok != nil {
tokens = append(tokens, tok)
} else if tok := asContinuation(line); tok != nil {
tokens = append(tokens, tok)
} // unrecognized tokens are effectively ignored
}
return tokens, nil
}
func isLineComment(line string) bool {
trimmed := strings.TrimLeft(line, " \t")
return strings.HasPrefix(trimmed, "#") || strings.HasPrefix(trimmed, ";")
}
func asProfile(line string) *lineTokenProfile { // " [ type name ] ; comment"
trimmed := strings.TrimSpace(trimProfileComment(line)) // "[ type name ]"
if !isBracketed(trimmed) {
return nil
}
trimmed = trimmed[1 : len(trimmed)-1] // " type name " (or just " name ")
trimmed = strings.TrimSpace(trimmed) // "type name" / "name"
typ, name := splitProfile(trimmed)
return &lineTokenProfile{
Type: typ,
Name: name,
}
}
func asProperty(line string) *lineTokenProperty {
if isLineSpace(rune(line[0])) {
return nil
}
trimmed := trimPropertyComment(line)
trimmed = strings.TrimRight(trimmed, " \t")
k, v, ok := splitProperty(trimmed)
if !ok {
return nil
}
return &lineTokenProperty{
Key: strings.ToLower(k), // LEGACY: normalize key case
Value: legacyStrconv(v), // LEGACY: see func docs
}
}
func asSubProperty(line string) *lineTokenSubProperty {
if !isLineSpace(rune(line[0])) {
return nil
}
// comments on sub-properties are included in the value
trimmed := strings.TrimLeft(line, " \t")
k, v, ok := splitProperty(trimmed)
if !ok {
return nil
}
return &lineTokenSubProperty{ // same LEGACY constraints as in normal property
Key: strings.ToLower(k),
Value: legacyStrconv(v),
}
}
func asContinuation(line string) *lineTokenContinuation {
if !isLineSpace(rune(line[0])) {
return nil
}
// includes comments like sub-properties
trimmed := strings.TrimLeft(line, " \t")
return &lineTokenContinuation{
Value: trimmed,
}
}
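The tokenizer classifies whole lines and silently drops anything it does not recognize, which is the lenient behavior described by the v1.7.0 changelog entry at the top of this diff. A sketch of which lineToken each kind of input produces (in-package, the types are unexported; the inputs are made up):

package ini

// tokenizeSketch: one example line per token kind.
func tokenizeSketch() {
	lines := []string{
		"# a full-line comment",         // skipped entirely
		"[profile dev]",                 // *lineTokenProfile{Type: "profile", Name: "dev"}
		"region = us-west-2",            // *lineTokenProperty{Key: "region", Value: "us-west-2"}
		"  endpoint_url = http://x",     // *lineTokenSubProperty (leading whitespace plus a separator)
		"  just more text",              // *lineTokenContinuation (indented, no separator)
		"!!! not recognized at all !!!", // ignored, produces no token at all
	}
	tokens, _ := tokenize(lines)
	_ = tokens // four tokens: profile, property, sub-property, continuation
}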

@@ -0,0 +1,93 @@
package ini
import (
"fmt"
"strconv"
"strings"
)
// ValueType is an enum that will signify what type
// the Value is
type ValueType int
func (v ValueType) String() string {
switch v {
case NoneType:
return "NONE"
case StringType:
return "STRING"
}
return ""
}
// ValueType enums
const (
NoneType = ValueType(iota)
StringType
QuotedStringType
)
// Value is a union container
type Value struct {
Type ValueType
str string
mp map[string]string
}
// NewStringValue returns a Value type generated using a string input.
func NewStringValue(str string) (Value, error) {
return Value{str: str}, nil
}
func (v Value) String() string {
switch v.Type {
case StringType:
return fmt.Sprintf("string: %s", string(v.str))
case QuotedStringType:
return fmt.Sprintf("quoted string: %s", string(v.str))
default:
return "union not set"
}
}
// MapValue returns a map value for sub properties
func (v Value) MapValue() map[string]string {
return v.mp
}
// IntValue returns an integer value
func (v Value) IntValue() (int64, bool) {
i, err := strconv.ParseInt(string(v.str), 0, 64)
if err != nil {
return 0, false
}
return i, true
}
// FloatValue returns a float value
func (v Value) FloatValue() (float64, bool) {
f, err := strconv.ParseFloat(string(v.str), 64)
if err != nil {
return 0, false
}
return f, true
}
// BoolValue returns a bool value
func (v Value) BoolValue() (bool, bool) {
// we don't use ParseBool as it recognizes more than what we've
// historically supported
if strings.EqualFold(v.str, "true") {
return true, true
} else if strings.EqualFold(v.str, "false") {
return false, true
}
return false, false
}
// StringValue returns the string value
func (v Value) StringValue() string {
return v.str
}
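Value now keeps only the raw string (plus an optional sub-property map) and converts lazily. IntValue parses with base 0, so 0x, 0o and 0b prefixes still work, and BoolValue deliberately accepts nothing beyond true/false. An in-package sketch:

package ini

// valueConversionSketch shows the lazy, best-effort conversions.
func valueConversionSketch() {
	v, _ := NewStringValue("0x1F")
	_, _ = v.IntValue()   // 31, true (base-0 parsing understands the 0x prefix)
	_, _ = v.FloatValue() // 0, false ("0x1F" is not a valid float literal)
	_, _ = v.BoolValue()  // false, false

	b, _ := NewStringValue("TRUE")
	_, _ = b.BoolValue() // true, true (case-insensitive, but only "true"/"false")

	_ = v.StringValue() // always the raw string, "0x1F"
}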

@@ -1,284 +0,0 @@
package ini
import (
"fmt"
)
// getStringValue will return a quoted string and the amount
// of bytes read
//
// an error will be returned if the string is not properly formatted
func getStringValue(b []rune) (int, error) {
if b[0] != '"' {
return 0, NewParseError("strings must start with '\"'")
}
endQuote := false
i := 1
for ; i < len(b) && !endQuote; i++ {
if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped {
endQuote = true
break
} else if escaped {
/*c, err := getEscapedByte(b[i])
if err != nil {
return 0, err
}
b[i-1] = c
b = append(b[:i], b[i+1:]...)
i--*/
continue
}
}
if !endQuote {
return 0, NewParseError("missing '\"' in string value")
}
return i + 1, nil
}
// getBoolValue will return a boolean and the amount
// of bytes read
//
// an error will be returned if the boolean is not of a correct
// value
func getBoolValue(b []rune) (int, error) {
if len(b) < 4 {
return 0, NewParseError("invalid boolean value")
}
n := 0
for _, lv := range literalValues {
if len(lv) > len(b) {
continue
}
if isCaselessLitValue(lv, b) {
n = len(lv)
}
}
if n == 0 {
return 0, NewParseError("invalid boolean value")
}
return n, nil
}
// getNumericalValue will return a numerical string, the amount
// of bytes read, and the base of the number
//
// an error will be returned if the number is not of a correct
// value
func getNumericalValue(b []rune) (int, int, error) {
if !isDigit(b[0]) {
return 0, 0, NewParseError("invalid digit value")
}
i := 0
helper := numberHelper{}
loop:
for negativeIndex := 0; i < len(b); i++ {
negativeIndex++
if !isDigit(b[i]) {
switch b[i] {
case '-':
if helper.IsNegative() || negativeIndex != 1 {
return 0, 0, NewParseError("parse error '-'")
}
n := getNegativeNumber(b[i:])
i += (n - 1)
helper.Determine(b[i])
continue
case '.':
if err := helper.Determine(b[i]); err != nil {
return 0, 0, err
}
case 'e', 'E':
if err := helper.Determine(b[i]); err != nil {
return 0, 0, err
}
negativeIndex = 0
case 'b':
if helper.numberFormat == hex {
break
}
fallthrough
case 'o', 'x':
if i == 0 && b[i] != '0' {
return 0, 0, NewParseError("incorrect base format, expected leading '0'")
}
if i != 1 {
return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i))
}
if err := helper.Determine(b[i]); err != nil {
return 0, 0, err
}
default:
if isWhitespace(b[i]) {
break loop
}
if isNewline(b[i:]) {
break loop
}
if !(helper.numberFormat == hex && isHexByte(b[i])) {
if i+2 < len(b) && !isNewline(b[i:i+2]) {
return 0, 0, NewParseError("invalid numerical character")
} else if !isNewline([]rune{b[i]}) {
return 0, 0, NewParseError("invalid numerical character")
}
break loop
}
}
}
}
return helper.Base(), i, nil
}
// isDigit will return whether or not something is an integer
func isDigit(b rune) bool {
return b >= '0' && b <= '9'
}
func hasExponent(v []rune) bool {
return contains(v, 'e') || contains(v, 'E')
}
func isBinaryByte(b rune) bool {
switch b {
case '0', '1':
return true
default:
return false
}
}
func isOctalByte(b rune) bool {
switch b {
case '0', '1', '2', '3', '4', '5', '6', '7':
return true
default:
return false
}
}
func isHexByte(b rune) bool {
if isDigit(b) {
return true
}
return (b >= 'A' && b <= 'F') ||
(b >= 'a' && b <= 'f')
}
func getValue(b []rune) (int, error) {
i := 0
for i < len(b) {
if isNewline(b[i:]) {
break
}
if isOp(b[i:]) {
break
}
valid, n, err := isValid(b[i:])
if err != nil {
return 0, err
}
if !valid {
break
}
i += n
}
return i, nil
}
// getNegativeNumber will return a negative number from a
// byte slice. This will iterate through all characters until
// a non-digit has been found.
func getNegativeNumber(b []rune) int {
if b[0] != '-' {
return 0
}
i := 1
for ; i < len(b); i++ {
if !isDigit(b[i]) {
return i
}
}
return i
}
// isEscaped will return whether or not the character is an escaped
// character.
func isEscaped(value []rune, b rune) bool {
if len(value) == 0 {
return false
}
switch b {
case '\'': // single quote
case '"': // quote
case 'n': // newline
case 't': // tab
case '\\': // backslash
default:
return false
}
return value[len(value)-1] == '\\'
}
func getEscapedByte(b rune) (rune, error) {
switch b {
case '\'': // single quote
return '\'', nil
case '"': // quote
return '"', nil
case 'n': // newline
return '\n', nil
case 't': // tab
return '\t', nil
case '\\': // backslash
return '\\', nil
default:
return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b))
}
}
func removeEscapedCharacters(b []rune) []rune {
for i := 0; i < len(b); i++ {
if isEscaped(b[:i], b[i]) {
c, err := getEscapedByte(b[i])
if err != nil {
return b
}
b[i-1] = c
b = append(b[:i], b[i+1:]...)
i--
}
}
return b
}

@@ -1,269 +0,0 @@
package ini
import (
"fmt"
"sort"
"strings"
)
// Visitor is an interface used by walkers that will
// traverse an array of ASTs.
type Visitor interface {
VisitExpr(AST) error
VisitStatement(AST) error
}
// DefaultVisitor is used to visit statements and expressions
// and ensure that they are both of the correct format.
// In addition, upon visiting this will build sections and populate
// the Sections field which can be used to retrieve profile
// configuration.
type DefaultVisitor struct {
// scope is the profile which is being visited
scope string
// path is the file path which the visitor is visiting
path string
// Sections defines list of the profile section
Sections Sections
}
// NewDefaultVisitor returns a DefaultVisitor. It takes in a filepath
// which points to the file it is visiting.
func NewDefaultVisitor(filepath string) *DefaultVisitor {
return &DefaultVisitor{
Sections: Sections{
container: map[string]Section{},
},
path: filepath,
}
}
// VisitExpr visits expressions...
func (v *DefaultVisitor) VisitExpr(expr AST) error {
t := v.Sections.container[v.scope]
if t.values == nil {
t.values = values{}
}
if t.SourceFile == nil {
t.SourceFile = make(map[string]string, 0)
}
switch expr.Kind {
case ASTKindExprStatement:
opExpr := expr.GetRoot()
switch opExpr.Kind {
case ASTKindEqualExpr:
children := opExpr.GetChildren()
if len(children) <= 1 {
return NewParseError("unexpected token type")
}
rhs := children[1]
// The right-hand side of the equality expression is allowed to contain '[', ']', ':', '=' in the values.
// If the token is neither a literal nor one of the token types that identify those four additional
// tokens, then error.
if !(rhs.Root.Type() == TokenLit || rhs.Root.Type() == TokenOp || rhs.Root.Type() == TokenSep) {
return NewParseError("unexpected token type")
}
key := EqualExprKey(opExpr)
val, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw())
if err != nil {
return err
}
// lower case key to standardize
k := strings.ToLower(key)
// identify if the section already had this key, append log on section
if t.Has(k) {
t.Logs = append(t.Logs,
fmt.Sprintf("For profile: %v, overriding %v value, "+
"with a %v value found in a duplicate profile defined later in the same file %v. \n",
t.Name, k, k, v.path))
}
// assign the value
t.values[k] = val
// update the source file path for region
t.SourceFile[k] = v.path
default:
return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
}
default:
return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
}
v.Sections.container[v.scope] = t
return nil
}
// VisitStatement visits statements...
func (v *DefaultVisitor) VisitStatement(stmt AST) error {
switch stmt.Kind {
case ASTKindCompletedSectionStatement:
child := stmt.GetRoot()
if child.Kind != ASTKindSectionStatement {
return NewParseError(fmt.Sprintf("unsupported child statement: %T", child))
}
name := string(child.Root.Raw())
// trim start and end space
name = strings.TrimSpace(name)
// if has prefix "profile " + [ws+] + "profile-name",
// we standardize by removing the [ws+] between prefix and profile-name.
if strings.HasPrefix(name, "profile ") {
names := strings.SplitN(name, " ", 2)
name = names[0] + " " + strings.TrimLeft(names[1], " ")
}
// attach profile name on section
if !v.Sections.HasSection(name) {
v.Sections.container[name] = NewSection(name)
}
v.scope = name
default:
return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind))
}
return nil
}
// Sections is a map of Section structures that represent
// a configuration.
type Sections struct {
container map[string]Section
}
// NewSections returns empty ini Sections
func NewSections() Sections {
return Sections{
container: make(map[string]Section, 0),
}
}
// GetSection will return section p. If section p does not exist,
// false will be returned in the second parameter.
func (t Sections) GetSection(p string) (Section, bool) {
v, ok := t.container[p]
return v, ok
}
// HasSection denotes if Sections consist of a section with
// provided name.
func (t Sections) HasSection(p string) bool {
_, ok := t.container[p]
return ok
}
// SetSection sets a section value for provided section name.
func (t Sections) SetSection(p string, v Section) Sections {
t.container[p] = v
return t
}
// DeleteSection deletes a section entry/value for the provided section name.
func (t Sections) DeleteSection(p string) {
delete(t.container, p)
}
// values represents a map of union values.
type values map[string]Value
// List will return a list of all sections that were successfully
// parsed.
func (t Sections) List() []string {
keys := make([]string, len(t.container))
i := 0
for k := range t.container {
keys[i] = k
i++
}
sort.Strings(keys)
return keys
}
// Section contains a name and values. This represents
// a sectioned entry in a configuration file.
type Section struct {
// Name is the Section profile name
Name string
// values are the values within parsed profile
values values
// Errors is the list of errors
Errors []error
// Logs is the list of logs
Logs []string
// SourceFile is the INI Source file from where this section
// was retrieved. The key is the property; the value is the
// source file the property was retrieved from.
SourceFile map[string]string
}
// NewSection returns an initialized section for the given name
func NewSection(name string) Section {
return Section{
Name: name,
values: values{},
SourceFile: map[string]string{},
}
}
// UpdateSourceFile updates source file for a property to provided filepath.
func (t Section) UpdateSourceFile(property string, filepath string) {
t.SourceFile[property] = filepath
}
// UpdateValue updates value for a provided key with provided value
func (t Section) UpdateValue(k string, v Value) error {
t.values[k] = v
return nil
}
// Has will return whether or not an entry exists in a given section
func (t Section) Has(k string) bool {
_, ok := t.values[k]
return ok
}
// ValueType will return what type the union is set to. If
// k was not found, the NoneType will be returned.
func (t Section) ValueType(k string) (ValueType, bool) {
v, ok := t.values[k]
return v.Type, ok
}
// Bool returns a bool value at k
func (t Section) Bool(k string) bool {
return t.values[k].BoolValue()
}
// Int returns an integer value at k
func (t Section) Int(k string) int64 {
return t.values[k].IntValue()
}
// Float64 returns a float value at k
func (t Section) Float64(k string) float64 {
return t.values[k].FloatValue()
}
// String returns the string value at k
func (t Section) String(k string) string {
_, ok := t.values[k]
if !ok {
return ""
}
return t.values[k].StringValue()
}

@@ -1,25 +0,0 @@
package ini
// Walk will traverse the AST using the v, the Visitor.
func Walk(tree []AST, v Visitor) error {
for _, node := range tree {
switch node.Kind {
case ASTKindExpr,
ASTKindExprStatement:
if err := v.VisitExpr(node); err != nil {
return err
}
case ASTKindStatement,
ASTKindCompletedSectionStatement,
ASTKindNestedSectionStatement,
ASTKindCompletedNestedSectionStatement:
if err := v.VisitStatement(node); err != nil {
return err
}
}
}
return nil
}

@@ -1,24 +0,0 @@
package ini
import (
"unicode"
)
// isWhitespace will return whether or not the character is
// a whitespace character.
//
// Whitespace is defined as a space or tab.
func isWhitespace(c rune) bool {
return unicode.IsSpace(c) && c != '\n' && c != '\r'
}
func newWSToken(b []rune) (Token, int, error) {
i := 0
for ; i < len(b); i++ {
if !isWhitespace(b[i]) {
break
}
}
return newToken(TokenWS, b[:i], NoneType), i, nil
}