mirror of
				https://gitea.com/Lydanne/buildx.git
				synced 2025-11-01 00:23:56 +08:00 
			
		
		
		
	Use compose-spec parser
Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
This commit is contained in:
		| @@ -6,22 +6,25 @@ import ( | ||||
| 	"reflect" | ||||
| 	"strings" | ||||
|  | ||||
| 	"github.com/docker/cli/cli/compose/loader" | ||||
| 	composetypes "github.com/docker/cli/cli/compose/types" | ||||
| 	"github.com/compose-spec/compose-go/loader" | ||||
| 	compose "github.com/compose-spec/compose-go/types" | ||||
| ) | ||||
|  | ||||
| func parseCompose(dt []byte) (*composetypes.Config, error) { | ||||
| 	parsed, err := loader.ParseYAML([]byte(dt)) | ||||
| func parseCompose(dt []byte) (*compose.Project, error) { | ||||
| 	config, err := loader.ParseYAML(dt) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return loader.Load(composetypes.ConfigDetails{ | ||||
| 		ConfigFiles: []composetypes.ConfigFile{ | ||||
|  | ||||
| 	return loader.Load(compose.ConfigDetails{ | ||||
| 		ConfigFiles: []compose.ConfigFile{ | ||||
| 			{ | ||||
| 				Config: parsed, | ||||
| 				Config: config, | ||||
| 			}, | ||||
| 		}, | ||||
| 		Environment: envMap(os.Environ()), | ||||
| 	}, func(options *loader.Options) { | ||||
| 		options.SkipNormalization = true | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| @@ -44,7 +47,7 @@ func ParseCompose(dt []byte) (*Config, error) { | ||||
| 	} | ||||
|  | ||||
| 	var c Config | ||||
| 	var zeroBuildConfig composetypes.BuildConfig | ||||
| 	var zeroBuildConfig compose.BuildConfig | ||||
| 	if len(cfg.Services) > 0 { | ||||
| 		c.Groups = []*Group{} | ||||
| 		c.Targets = []*Target{} | ||||
| @@ -53,7 +56,7 @@ func ParseCompose(dt []byte) (*Config, error) { | ||||
|  | ||||
| 		for _, s := range cfg.Services { | ||||
|  | ||||
| 			if reflect.DeepEqual(s.Build, zeroBuildConfig) { | ||||
| 			if s.Build == nil || reflect.DeepEqual(s.Build, zeroBuildConfig) { | ||||
| 				// if not make sure they're setting an image or it's invalid d-c.yml | ||||
| 				if s.Image == "" { | ||||
| 					return nil, fmt.Errorf("compose file invalid: service %s has neither an image nor a build context specified. At least one must be provided", s.Name) | ||||
| @@ -97,7 +100,7 @@ func ParseCompose(dt []byte) (*Config, error) { | ||||
| 	return &c, nil | ||||
| } | ||||
|  | ||||
| func toMap(in composetypes.MappingWithEquals) map[string]string { | ||||
| func toMap(in compose.MappingWithEquals) map[string]string { | ||||
| 	m := map[string]string{} | ||||
| 	for k, v := range in { | ||||
| 		if v != nil { | ||||
|   | ||||
| @@ -9,8 +9,6 @@ import ( | ||||
|  | ||||
| func TestParseCompose(t *testing.T) { | ||||
| 	var dt = []byte(` | ||||
| version: "3" | ||||
|  | ||||
| services: | ||||
|   db: | ||||
|     build: ./db | ||||
| @@ -48,8 +46,6 @@ services: | ||||
|  | ||||
| func TestNoBuildOutOfTreeService(t *testing.T) { | ||||
| 	var dt = []byte(` | ||||
| version: "3.7" | ||||
|  | ||||
| services: | ||||
|     external: | ||||
|         image: "verycooldb:1337" | ||||
| @@ -63,8 +59,6 @@ services: | ||||
|  | ||||
| func TestParseComposeTarget(t *testing.T) { | ||||
| 	var dt = []byte(` | ||||
| version: "3.7" | ||||
|  | ||||
| services: | ||||
|   db: | ||||
|     build: | ||||
| @@ -91,8 +85,6 @@ services: | ||||
|  | ||||
| func TestComposeBuildWithoutContext(t *testing.T) { | ||||
| 	var dt = []byte(` | ||||
| version: "3.7" | ||||
|  | ||||
| services: | ||||
|   db: | ||||
|     build: | ||||
| @@ -117,8 +109,6 @@ services: | ||||
|  | ||||
| func TestBogusCompose(t *testing.T) { | ||||
| 	var dt = []byte(` | ||||
| version: "3.7" | ||||
|  | ||||
| services: | ||||
|   db: | ||||
|     labels: | ||||
| @@ -131,5 +121,66 @@ services: | ||||
|  | ||||
| 	_, err := ParseCompose(dt) | ||||
| 	require.Error(t, err) | ||||
| 	require.Contains(t, err.Error(), "has neither an image nor a build context specified. At least one must be provided") | ||||
| 	require.Contains(t, err.Error(), "has neither an image nor a build context specified: invalid compose project") | ||||
| } | ||||
|  | ||||
| func TestAdvancedNetwork(t *testing.T) { | ||||
| 	var dt = []byte(` | ||||
| services: | ||||
|   db: | ||||
|     networks: | ||||
|       - example.com | ||||
|     build: | ||||
|       context: ./db | ||||
|       target: db | ||||
|  | ||||
| networks: | ||||
|   example.com: | ||||
|     name: example.com | ||||
|     driver: bridge | ||||
|     ipam: | ||||
|       config: | ||||
|         - subnet: 10.5.0.0/24 | ||||
|           ip_range: 10.5.0.0/24 | ||||
|           gateway: 10.5.0.254 | ||||
| `) | ||||
|  | ||||
| 	_, err := ParseCompose(dt) | ||||
| 	require.NoError(t, err) | ||||
| } | ||||
|  | ||||
| func TestDependsOnList(t *testing.T) { | ||||
| 	var dt = []byte(` | ||||
| version: "3.8" | ||||
|  | ||||
| services: | ||||
|   example-container: | ||||
|     image: example/fails:latest | ||||
|     build: | ||||
|       context: . | ||||
|       dockerfile: Dockerfile | ||||
|     depends_on: | ||||
|       other-container: | ||||
|         condition: service_healthy | ||||
|     networks: | ||||
|       default: | ||||
|         aliases: | ||||
|           - integration-tests | ||||
|  | ||||
|   other-container: | ||||
|     image: example/other:latest | ||||
|     healthcheck: | ||||
|       test: ["CMD", "echo", "success"] | ||||
|       retries: 5 | ||||
|       interval: 5s | ||||
|       timeout: 10s | ||||
|       start_period: 5s | ||||
|  | ||||
| networks: | ||||
|   default: | ||||
|     name: test-net | ||||
| `) | ||||
|  | ||||
| 	_, err := ParseCompose(dt) | ||||
| 	require.NoError(t, err) | ||||
| } | ||||
|   | ||||
							
								
								
									
										7
									
								
								go.mod
									
									
									
									
									
								
							
							
						
						
									
										7
									
								
								go.mod
									
									
									
									
									
								
							| @@ -9,6 +9,7 @@ require ( | ||||
| 	github.com/bugsnag/panicwrap v1.2.0 // indirect | ||||
| 	github.com/cenkalti/backoff v2.1.1+incompatible // indirect | ||||
| 	github.com/cloudflare/cfssl v0.0.0-20181213083726-b94e044bb51e // indirect | ||||
| 	github.com/compose-spec/compose-go v0.0.0-20210706130854-69459d4976b5 | ||||
| 	github.com/containerd/console v1.0.1 | ||||
| 	github.com/containerd/containerd v1.5.0-beta.4 | ||||
| 	github.com/denisenkom/go-mssqldb v0.0.0-20190315220205-a8ed825ac853 // indirect | ||||
| @@ -23,7 +24,6 @@ require ( | ||||
| 	github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484 // indirect | ||||
| 	github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 // indirect | ||||
| 	github.com/fvbommel/sortorder v1.0.1 // indirect | ||||
| 	github.com/go-sql-driver/mysql v1.5.0 // indirect | ||||
| 	github.com/gofrs/flock v0.7.3 | ||||
| 	github.com/gofrs/uuid v3.3.0+incompatible // indirect | ||||
| 	github.com/google/certificate-transparency-go v1.0.21 // indirect | ||||
| @@ -42,15 +42,14 @@ require ( | ||||
| 	github.com/opencontainers/image-spec v1.0.1 | ||||
| 	github.com/pkg/errors v0.9.1 | ||||
| 	github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 | ||||
| 	github.com/sirupsen/logrus v1.7.0 | ||||
| 	github.com/sirupsen/logrus v1.8.1 | ||||
| 	github.com/spf13/cobra v1.1.1 | ||||
| 	github.com/spf13/pflag v1.0.5 | ||||
| 	github.com/stretchr/testify v1.7.0 | ||||
| 	github.com/theupdateframework/notary v0.6.1 // indirect | ||||
| 	github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea | ||||
| 	github.com/xeipuuv/gojsonschema v1.2.0 // indirect | ||||
| 	github.com/zclconf/go-cty v1.7.1 | ||||
| 	golang.org/x/sync v0.0.0-20201207232520-09787c993a3a | ||||
| 	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c | ||||
| 	gopkg.in/dancannon/gorethink.v3 v3.0.5 // indirect | ||||
| 	gopkg.in/fatih/pool.v2 v2.0.0 // indirect | ||||
| 	gopkg.in/gorethink/gorethink.v3 v3.0.5 // indirect | ||||
|   | ||||
							
								
								
									
										30
									
								
								go.sum
									
									
									
									
									
								
							
							
						
						
									
										30
									
								
								go.sum
									
									
									
									
									
								
							| @@ -82,6 +82,7 @@ github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV | ||||
| github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= | ||||
| github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= | ||||
| github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= | ||||
| github.com/aws/aws-sdk-go v1.34.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= | ||||
| github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= | ||||
| github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= | ||||
| github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= | ||||
| @@ -99,6 +100,7 @@ github.com/bmatcuk/doublestar v1.1.5/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9 | ||||
| github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= | ||||
| github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= | ||||
| github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= | ||||
| github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= | ||||
| github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= | ||||
| github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= | ||||
| github.com/bugsnag/bugsnag-go v1.4.1 h1:TT3P9AX69w8mbSGE8L7IJOO2KBlPN0iQtYD0dUlrWHc= | ||||
| @@ -126,6 +128,8 @@ github.com/cloudflare/cfssl v0.0.0-20181213083726-b94e044bb51e/go.mod h1:yMWuSON | ||||
| github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= | ||||
| github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= | ||||
| github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= | ||||
| github.com/compose-spec/compose-go v0.0.0-20210706130854-69459d4976b5 h1:PpI72CT1bcVPNZyqI1HI8UhQVRVtqLb2tdwi5WphN3c= | ||||
| github.com/compose-spec/compose-go v0.0.0-20210706130854-69459d4976b5/go.mod h1:5V65rPnTvvQagtoMxTneJ2QicLq6ZRQQ7fOgPN226fo= | ||||
| github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= | ||||
| github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= | ||||
| github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= | ||||
| @@ -235,6 +239,8 @@ github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8l | ||||
| github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= | ||||
| github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= | ||||
| github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= | ||||
| github.com/distribution/distribution/v3 v3.0.0-20210316161203-a01c71e2477e h1:n81KvOMrLZa+VWHwST7dun9f0G98X3zREHS1ztYzZKU= | ||||
| github.com/distribution/distribution/v3 v3.0.0-20210316161203-a01c71e2477e/go.mod h1:xpWTC2KnJMiDLkoawhsPQcXjvwATEBcbq0xevG2YR9M= | ||||
| github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= | ||||
| github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= | ||||
| github.com/docker/cli v20.10.5+incompatible h1:bjflayQbWg+xOkF2WPEAOi4Y7zWhR7ptoPhV/VqLVDE= | ||||
| @@ -288,6 +294,7 @@ github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 h1:Yzb9+7DP | ||||
| github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= | ||||
| github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= | ||||
| github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= | ||||
| github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= | ||||
| github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= | ||||
| github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= | ||||
| github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= | ||||
| @@ -352,6 +359,7 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt | ||||
| github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= | ||||
| github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= | ||||
| github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= | ||||
| github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= | ||||
| github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= | ||||
| github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= | ||||
| github.com/google/certificate-transparency-go v1.0.21 h1:Yf1aXowfZ2nuboBsg7iYGLmwsOARdV86pfH3g95wXmE= | ||||
| @@ -363,8 +371,9 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ | ||||
| github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= | ||||
| github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= | ||||
| github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= | ||||
| github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= | ||||
| github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= | ||||
| github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= | ||||
| github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= | ||||
| github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= | ||||
| github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= | ||||
| github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= | ||||
| @@ -388,6 +397,7 @@ github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3i | ||||
| github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= | ||||
| github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= | ||||
| github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= | ||||
| github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= | ||||
| github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= | ||||
| github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= | ||||
| github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= | ||||
| @@ -442,8 +452,9 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: | ||||
| github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= | ||||
| github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= | ||||
| github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= | ||||
| github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= | ||||
| github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= | ||||
| github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= | ||||
| github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= | ||||
| github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= | ||||
| github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= | ||||
| github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg= | ||||
| @@ -456,6 +467,9 @@ github.com/jinzhu/now v1.0.0 h1:6WV8LvwPpDhKjo5U9O6b4+xdG/jTXNPwlDme/MTo8Ns= | ||||
| github.com/jinzhu/now v1.0.0/go.mod h1:oHTiXerJ20+SfYcrdlBO7rzZRJWGwSTQ0iUY2jI6Gfc= | ||||
| github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= | ||||
| github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= | ||||
| github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= | ||||
| github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= | ||||
| github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= | ||||
| github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= | ||||
| github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= | ||||
| github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= | ||||
| @@ -501,6 +515,8 @@ github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx | ||||
| github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= | ||||
| github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= | ||||
| github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= | ||||
| github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= | ||||
| github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= | ||||
| github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o= | ||||
| github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= | ||||
| github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= | ||||
| @@ -520,8 +536,9 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4 | ||||
| github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= | ||||
| github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= | ||||
| github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= | ||||
| github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= | ||||
| github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= | ||||
| github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= | ||||
| github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= | ||||
| github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= | ||||
| github.com/moby/buildkit v0.8.2-0.20210401015549-df49b648c8bf h1:dHwWBX8OhYb69qVcT27rFSwzKsn4CRbg0HInQyVh33c= | ||||
| github.com/moby/buildkit v0.8.2-0.20210401015549-df49b648c8bf/go.mod h1:GJcrUlTGFAPlEmPQtbrTsJYn+cy+Jwl7vTZS7jYAoow= | ||||
| @@ -670,8 +687,9 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx | ||||
| github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= | ||||
| github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= | ||||
| github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= | ||||
| github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= | ||||
| github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= | ||||
| github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= | ||||
| github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= | ||||
| github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= | ||||
| github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= | ||||
| github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= | ||||
| @@ -788,6 +806,7 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U | ||||
| golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= | ||||
| golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= | ||||
| golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= | ||||
| golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= | ||||
| golang.org/x/crypto v0.0.0-20200422194213-44a606286825/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= | ||||
| golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= | ||||
| golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= | ||||
| @@ -870,8 +889,9 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ | ||||
| golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||
| golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||
| golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||
| golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= | ||||
| golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||
| golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= | ||||
| golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||
| golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | ||||
| golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | ||||
| golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | ||||
|   | ||||
							
								
								
									
										191
									
								
								vendor/github.com/compose-spec/compose-go/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										191
									
								
								vendor/github.com/compose-spec/compose-go/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,191 @@ | ||||
|  | ||||
|                                  Apache License | ||||
|                            Version 2.0, January 2004 | ||||
|                         https://www.apache.org/licenses/ | ||||
|  | ||||
|    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | ||||
|  | ||||
|    1. Definitions. | ||||
|  | ||||
|       "License" shall mean the terms and conditions for use, reproduction, | ||||
|       and distribution as defined by Sections 1 through 9 of this document. | ||||
|  | ||||
|       "Licensor" shall mean the copyright owner or entity authorized by | ||||
|       the copyright owner that is granting the License. | ||||
|  | ||||
|       "Legal Entity" shall mean the union of the acting entity and all | ||||
|       other entities that control, are controlled by, or are under common | ||||
|       control with that entity. For the purposes of this definition, | ||||
|       "control" means (i) the power, direct or indirect, to cause the | ||||
|       direction or management of such entity, whether by contract or | ||||
|       otherwise, or (ii) ownership of fifty percent (50%) or more of the | ||||
|       outstanding shares, or (iii) beneficial ownership of such entity. | ||||
|  | ||||
|       "You" (or "Your") shall mean an individual or Legal Entity | ||||
|       exercising permissions granted by this License. | ||||
|  | ||||
|       "Source" form shall mean the preferred form for making modifications, | ||||
|       including but not limited to software source code, documentation | ||||
|       source, and configuration files. | ||||
|  | ||||
|       "Object" form shall mean any form resulting from mechanical | ||||
|       transformation or translation of a Source form, including but | ||||
|       not limited to compiled object code, generated documentation, | ||||
|       and conversions to other media types. | ||||
|  | ||||
|       "Work" shall mean the work of authorship, whether in Source or | ||||
|       Object form, made available under the License, as indicated by a | ||||
|       copyright notice that is included in or attached to the work | ||||
|       (an example is provided in the Appendix below). | ||||
|  | ||||
|       "Derivative Works" shall mean any work, whether in Source or Object | ||||
|       form, that is based on (or derived from) the Work and for which the | ||||
|       editorial revisions, annotations, elaborations, or other modifications | ||||
|       represent, as a whole, an original work of authorship. For the purposes | ||||
|       of this License, Derivative Works shall not include works that remain | ||||
|       separable from, or merely link (or bind by name) to the interfaces of, | ||||
|       the Work and Derivative Works thereof. | ||||
|  | ||||
|       "Contribution" shall mean any work of authorship, including | ||||
|       the original version of the Work and any modifications or additions | ||||
|       to that Work or Derivative Works thereof, that is intentionally | ||||
|       submitted to Licensor for inclusion in the Work by the copyright owner | ||||
|       or by an individual or Legal Entity authorized to submit on behalf of | ||||
|       the copyright owner. For the purposes of this definition, "submitted" | ||||
|       means any form of electronic, verbal, or written communication sent | ||||
|       to the Licensor or its representatives, including but not limited to | ||||
|       communication on electronic mailing lists, source code control systems, | ||||
|       and issue tracking systems that are managed by, or on behalf of, the | ||||
|       Licensor for the purpose of discussing and improving the Work, but | ||||
|       excluding communication that is conspicuously marked or otherwise | ||||
|       designated in writing by the copyright owner as "Not a Contribution." | ||||
|  | ||||
|       "Contributor" shall mean Licensor and any individual or Legal Entity | ||||
|       on behalf of whom a Contribution has been received by Licensor and | ||||
|       subsequently incorporated within the Work. | ||||
|  | ||||
|    2. Grant of Copyright License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       copyright license to reproduce, prepare Derivative Works of, | ||||
|       publicly display, publicly perform, sublicense, and distribute the | ||||
|       Work and such Derivative Works in Source or Object form. | ||||
|  | ||||
|    3. Grant of Patent License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       (except as stated in this section) patent license to make, have made, | ||||
|       use, offer to sell, sell, import, and otherwise transfer the Work, | ||||
|       where such license applies only to those patent claims licensable | ||||
|       by such Contributor that are necessarily infringed by their | ||||
|       Contribution(s) alone or by combination of their Contribution(s) | ||||
|       with the Work to which such Contribution(s) was submitted. If You | ||||
|       institute patent litigation against any entity (including a | ||||
|       cross-claim or counterclaim in a lawsuit) alleging that the Work | ||||
|       or a Contribution incorporated within the Work constitutes direct | ||||
|       or contributory patent infringement, then any patent licenses | ||||
|       granted to You under this License for that Work shall terminate | ||||
|       as of the date such litigation is filed. | ||||
|  | ||||
|    4. Redistribution. You may reproduce and distribute copies of the | ||||
|       Work or Derivative Works thereof in any medium, with or without | ||||
|       modifications, and in Source or Object form, provided that You | ||||
|       meet the following conditions: | ||||
|  | ||||
|       (a) You must give any other recipients of the Work or | ||||
|           Derivative Works a copy of this License; and | ||||
|  | ||||
|       (b) You must cause any modified files to carry prominent notices | ||||
|           stating that You changed the files; and | ||||
|  | ||||
|       (c) You must retain, in the Source form of any Derivative Works | ||||
|           that You distribute, all copyright, patent, trademark, and | ||||
|           attribution notices from the Source form of the Work, | ||||
|           excluding those notices that do not pertain to any part of | ||||
|           the Derivative Works; and | ||||
|  | ||||
|       (d) If the Work includes a "NOTICE" text file as part of its | ||||
|           distribution, then any Derivative Works that You distribute must | ||||
|           include a readable copy of the attribution notices contained | ||||
|           within such NOTICE file, excluding those notices that do not | ||||
|           pertain to any part of the Derivative Works, in at least one | ||||
|           of the following places: within a NOTICE text file distributed | ||||
|           as part of the Derivative Works; within the Source form or | ||||
|           documentation, if provided along with the Derivative Works; or, | ||||
|           within a display generated by the Derivative Works, if and | ||||
|           wherever such third-party notices normally appear. The contents | ||||
|           of the NOTICE file are for informational purposes only and | ||||
|           do not modify the License. You may add Your own attribution | ||||
|           notices within Derivative Works that You distribute, alongside | ||||
|           or as an addendum to the NOTICE text from the Work, provided | ||||
|           that such additional attribution notices cannot be construed | ||||
|           as modifying the License. | ||||
|  | ||||
|       You may add Your own copyright statement to Your modifications and | ||||
|       may provide additional or different license terms and conditions | ||||
|       for use, reproduction, or distribution of Your modifications, or | ||||
|       for any such Derivative Works as a whole, provided Your use, | ||||
|       reproduction, and distribution of the Work otherwise complies with | ||||
|       the conditions stated in this License. | ||||
|  | ||||
|    5. Submission of Contributions. Unless You explicitly state otherwise, | ||||
|       any Contribution intentionally submitted for inclusion in the Work | ||||
|       by You to the Licensor shall be under the terms and conditions of | ||||
|       this License, without any additional terms or conditions. | ||||
|       Notwithstanding the above, nothing herein shall supersede or modify | ||||
|       the terms of any separate license agreement you may have executed | ||||
|       with Licensor regarding such Contributions. | ||||
|  | ||||
|    6. Trademarks. This License does not grant permission to use the trade | ||||
|       names, trademarks, service marks, or product names of the Licensor, | ||||
|       except as required for reasonable and customary use in describing the | ||||
|       origin of the Work and reproducing the content of the NOTICE file. | ||||
|  | ||||
|    7. Disclaimer of Warranty. Unless required by applicable law or | ||||
|       agreed to in writing, Licensor provides the Work (and each | ||||
|       Contributor provides its Contributions) on an "AS IS" BASIS, | ||||
|       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | ||||
|       implied, including, without limitation, any warranties or conditions | ||||
|       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | ||||
|       PARTICULAR PURPOSE. You are solely responsible for determining the | ||||
|       appropriateness of using or redistributing the Work and assume any | ||||
|       risks associated with Your exercise of permissions under this License. | ||||
|  | ||||
|    8. Limitation of Liability. In no event and under no legal theory, | ||||
|       whether in tort (including negligence), contract, or otherwise, | ||||
|       unless required by applicable law (such as deliberate and grossly | ||||
|       negligent acts) or agreed to in writing, shall any Contributor be | ||||
|       liable to You for damages, including any direct, indirect, special, | ||||
|       incidental, or consequential damages of any character arising as a | ||||
|       result of this License or out of the use or inability to use the | ||||
|       Work (including but not limited to damages for loss of goodwill, | ||||
|       work stoppage, computer failure or malfunction, or any and all | ||||
|       other commercial damages or losses), even if such Contributor | ||||
|       has been advised of the possibility of such damages. | ||||
|  | ||||
|    9. Accepting Warranty or Additional Liability. While redistributing | ||||
|       the Work or Derivative Works thereof, You may choose to offer, | ||||
|       and charge a fee for, acceptance of support, warranty, indemnity, | ||||
|       or other liability obligations and/or rights consistent with this | ||||
|       License. However, in accepting such obligations, You may act only | ||||
|       on Your own behalf and on Your sole responsibility, not on behalf | ||||
|       of any other Contributor, and only if You agree to indemnify, | ||||
|       defend, and hold each Contributor harmless for any liability | ||||
|       incurred by, or claims asserted against, such Contributor by reason | ||||
|       of your accepting any such warranty or additional liability. | ||||
|  | ||||
|    END OF TERMS AND CONDITIONS | ||||
|  | ||||
|    Copyright 2013-2017 Docker, Inc. | ||||
|  | ||||
|    Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|    you may not use this file except in compliance with the License. | ||||
|    You may obtain a copy of the License at | ||||
|  | ||||
|        https://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
|    Unless required by applicable law or agreed to in writing, software | ||||
|    distributed under the License is distributed on an "AS IS" BASIS, | ||||
|    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|    See the License for the specific language governing permissions and | ||||
|    limitations under the License. | ||||
							
								
								
									
										2
									
								
								vendor/github.com/compose-spec/compose-go/NOTICE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										2
									
								
								vendor/github.com/compose-spec/compose-go/NOTICE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,2 @@ | ||||
| The Compose Specification | ||||
| Copyright 2020 The Compose Specification Authors | ||||
							
								
								
									
										53
									
								
								vendor/github.com/compose-spec/compose-go/errdefs/errors.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										53
									
								
								vendor/github.com/compose-spec/compose-go/errdefs/errors.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,53 @@ | ||||
| /* | ||||
|    Copyright 2020 The Compose Specification Authors. | ||||
|  | ||||
|    Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|    you may not use this file except in compliance with the License. | ||||
|    You may obtain a copy of the License at | ||||
|  | ||||
|        http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
|    Unless required by applicable law or agreed to in writing, software | ||||
|    distributed under the License is distributed on an "AS IS" BASIS, | ||||
|    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|    See the License for the specific language governing permissions and | ||||
|    limitations under the License. | ||||
| */ | ||||
|  | ||||
| package errdefs | ||||
|  | ||||
| import "errors" | ||||
|  | ||||
| var ( | ||||
| 	// ErrNotFound is returned when an object is not found | ||||
| 	ErrNotFound = errors.New("not found") | ||||
|  | ||||
| 	// ErrInvalid is returned when a compose project is invalid | ||||
| 	ErrInvalid = errors.New("invalid compose project") | ||||
|  | ||||
| 	// ErrUnsupported is returned when a compose project uses an unsupported attribute | ||||
| 	ErrUnsupported = errors.New("unsupported attribute") | ||||
|  | ||||
| 	// ErrIncompatible is returned when a compose project uses an incompatible attribute | ||||
| 	ErrIncompatible = errors.New("incompatible attribute") | ||||
| ) | ||||
|  | ||||
| // IsNotFoundError returns true if the unwrapped error is ErrNotFound | ||||
| func IsNotFoundError(err error) bool { | ||||
| 	return errors.Is(err, ErrNotFound) | ||||
| } | ||||
|  | ||||
| // IsInvalidError returns true if the unwrapped error is ErrInvalid | ||||
| func IsInvalidError(err error) bool { | ||||
| 	return errors.Is(err, ErrInvalid) | ||||
| } | ||||
|  | ||||
| // IsUnsupportedError returns true if the unwrapped error is ErrUnsupported | ||||
| func IsUnsupportedError(err error) bool { | ||||
| 	return errors.Is(err, ErrUnsupported) | ||||
| } | ||||
|  | ||||
| // IsUnsupportedError returns true if the unwrapped error is ErrIncompatible | ||||
| func IsIncompatibleError(err error) bool { | ||||
| 	return errors.Is(err, ErrIncompatible) | ||||
| } | ||||
| @@ -1,10 +1,26 @@ | ||||
| /* | ||||
|    Copyright 2020 The Compose Specification Authors. | ||||
| 
 | ||||
|    Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|    you may not use this file except in compliance with the License. | ||||
|    You may obtain a copy of the License at | ||||
| 
 | ||||
|        http://www.apache.org/licenses/LICENSE-2.0 | ||||
| 
 | ||||
|    Unless required by applicable law or agreed to in writing, software | ||||
|    distributed under the License is distributed on an "AS IS" BASIS, | ||||
|    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|    See the License for the specific language governing permissions and | ||||
|    limitations under the License. | ||||
| */ | ||||
| 
 | ||||
| package interpolation | ||||
| 
 | ||||
| import ( | ||||
| 	"os" | ||||
| 	"strings" | ||||
| 
 | ||||
| 	"github.com/docker/cli/cli/compose/template" | ||||
| 	"github.com/compose-spec/compose-go/template" | ||||
| 	"github.com/pkg/errors" | ||||
| ) | ||||
| 
 | ||||
| @@ -1,5 +1,3 @@ | ||||
| version: "3.9" | ||||
| 
 | ||||
| services: | ||||
|   foo: | ||||
| 
 | ||||
| @@ -13,9 +11,6 @@ services: | ||||
|       cache_from: | ||||
|         - foo | ||||
|         - bar | ||||
|       extra_hosts: | ||||
|         - "ipv4.example.com:127.0.0.1" | ||||
|         - "ipv6.example.com:::1" | ||||
|       labels: [FOO=BAR] | ||||
| 
 | ||||
| 
 | ||||
| @@ -68,7 +63,6 @@ services: | ||||
|         limits: | ||||
|           cpus: '0.001' | ||||
|           memory: 50M | ||||
|           pids: 100 | ||||
|         reservations: | ||||
|           cpus: '0.0001' | ||||
|           memory: 20M | ||||
| @@ -144,11 +138,9 @@ services: | ||||
|     # extra_hosts: | ||||
|     #   somehost: "162.242.195.82" | ||||
|     #   otherhost: "50.31.209.229" | ||||
|     #   host.docker.internal: "host-gateway" | ||||
|     extra_hosts: | ||||
|       - "somehost:162.242.195.82" | ||||
|       - "otherhost:50.31.209.229" | ||||
|       - "host.docker.internal:host-gateway" | ||||
| 
 | ||||
|     hostname: foo | ||||
| 
 | ||||
| @@ -313,10 +305,15 @@ networks: | ||||
|       #   com.docker.network.enable_ipv6: "true" | ||||
|       #   com.docker.network.numeric_value: 1 | ||||
|       config: | ||||
|       - subnet: 172.16.238.0/24 | ||||
|         # gateway: 172.16.238.1 | ||||
|       - subnet: 172.28.0.0/16 | ||||
|         ip_range: 172.28.5.0/24 | ||||
|         gateway: 172.28.5.254 | ||||
|         aux_addresses: | ||||
|           host1: 172.28.1.5 | ||||
|           host2: 172.28.1.6 | ||||
|           host3: 172.28.1.7 | ||||
|       - subnet: 2001:3984:3989::/64 | ||||
|         # gateway: 2001:3984:3989::1 | ||||
|         gateway: 2001:3984:3989::1 | ||||
| 
 | ||||
|     labels: | ||||
|       foo: bar | ||||
| @@ -1,10 +1,26 @@ | ||||
| /* | ||||
|    Copyright 2020 The Compose Specification Authors. | ||||
| 
 | ||||
|    Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|    you may not use this file except in compliance with the License. | ||||
|    You may obtain a copy of the License at | ||||
| 
 | ||||
|        http://www.apache.org/licenses/LICENSE-2.0 | ||||
| 
 | ||||
|    Unless required by applicable law or agreed to in writing, software | ||||
|    distributed under the License is distributed on an "AS IS" BASIS, | ||||
|    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|    See the License for the specific language governing permissions and | ||||
|    limitations under the License. | ||||
| */ | ||||
| 
 | ||||
| package loader | ||||
| 
 | ||||
| import ( | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| 
 | ||||
| 	interp "github.com/docker/cli/cli/compose/interpolation" | ||||
| 	interp "github.com/compose-spec/compose-go/interpolation" | ||||
| 	"github.com/pkg/errors" | ||||
| ) | ||||
| 
 | ||||
| @@ -66,7 +82,3 @@ func toBoolean(value string) (interface{}, error) { | ||||
| 		return nil, errors.Errorf("invalid boolean: %s", value) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func interpolateConfig(configDict map[string]interface{}, opts interp.Options) (map[string]interface{}, error) { | ||||
| 	return interp.Interpolate(configDict, opts) | ||||
| } | ||||
| @@ -1,7 +1,25 @@ | ||||
| /* | ||||
|    Copyright 2020 The Compose Specification Authors. | ||||
| 
 | ||||
|    Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|    you may not use this file except in compliance with the License. | ||||
|    You may obtain a copy of the License at | ||||
| 
 | ||||
|        http://www.apache.org/licenses/LICENSE-2.0 | ||||
| 
 | ||||
|    Unless required by applicable law or agreed to in writing, software | ||||
|    distributed under the License is distributed on an "AS IS" BASIS, | ||||
|    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|    See the License for the specific language governing permissions and | ||||
|    limitations under the License. | ||||
| */ | ||||
| 
 | ||||
| package loader | ||||
| 
 | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"io/ioutil" | ||||
| 	"os" | ||||
| 	"path" | ||||
| 	"path/filepath" | ||||
| 	"reflect" | ||||
| @@ -9,15 +27,14 @@ import ( | ||||
| 	"strings" | ||||
| 	"time" | ||||
| 
 | ||||
| 	interp "github.com/docker/cli/cli/compose/interpolation" | ||||
| 	"github.com/docker/cli/cli/compose/schema" | ||||
| 	"github.com/docker/cli/cli/compose/template" | ||||
| 	"github.com/docker/cli/cli/compose/types" | ||||
| 	"github.com/docker/cli/opts" | ||||
| 	"github.com/docker/docker/api/types/versions" | ||||
| 	"github.com/docker/go-connections/nat" | ||||
| 	interp "github.com/compose-spec/compose-go/interpolation" | ||||
| 	"github.com/compose-spec/compose-go/schema" | ||||
| 	"github.com/compose-spec/compose-go/template" | ||||
| 	"github.com/compose-spec/compose-go/types" | ||||
| 	units "github.com/docker/go-units" | ||||
| 	"github.com/google/shlex" | ||||
| 	"github.com/imdario/mergo" | ||||
| 	"github.com/joho/godotenv" | ||||
| 	shellwords "github.com/mattn/go-shellwords" | ||||
| 	"github.com/mitchellh/mapstructure" | ||||
| 	"github.com/pkg/errors" | ||||
| 	"github.com/sirupsen/logrus" | ||||
| @@ -30,10 +47,54 @@ type Options struct { | ||||
| 	SkipValidation bool | ||||
| 	// Skip interpolation | ||||
| 	SkipInterpolation bool | ||||
| 	// Skip normalization | ||||
| 	SkipNormalization bool | ||||
| 	// Skip consistency check | ||||
| 	SkipConsistencyCheck bool | ||||
| 	// Skip extends | ||||
| 	SkipExtends bool | ||||
| 	// Interpolation options | ||||
| 	Interpolate *interp.Options | ||||
| 	// Discard 'env_file' entries after resolving to 'environment' section | ||||
| 	discardEnvFiles bool | ||||
| 	// Set project name | ||||
| 	Name string | ||||
| } | ||||
| 
 | ||||
| // serviceRef identifies a reference to a service. It's used to detect cyclic | ||||
| // references in "extends". | ||||
| type serviceRef struct { | ||||
| 	filename string | ||||
| 	service  string | ||||
| } | ||||
| 
 | ||||
| type cycleTracker struct { | ||||
| 	loaded []serviceRef | ||||
| } | ||||
| 
 | ||||
| func (ct *cycleTracker) Add(filename, service string) error { | ||||
| 	toAdd := serviceRef{filename: filename, service: service} | ||||
| 	for _, loaded := range ct.loaded { | ||||
| 		if toAdd == loaded { | ||||
| 			// Create an error message of the form: | ||||
| 			// Circular reference: | ||||
| 			//   service-a in docker-compose.yml | ||||
| 			//   extends service-b in docker-compose.yml | ||||
| 			//   extends service-a in docker-compose.yml | ||||
| 			errLines := []string{ | ||||
| 				"Circular reference:", | ||||
| 				fmt.Sprintf("  %s in %s", ct.loaded[0].service, ct.loaded[0].filename), | ||||
| 			} | ||||
| 			for _, service := range append(ct.loaded[1:], toAdd) { | ||||
| 				errLines = append(errLines, fmt.Sprintf("  extends %s in %s", service.service, service.filename)) | ||||
| 			} | ||||
| 
 | ||||
| 			return errors.New(strings.Join(errLines, "\n")) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	ct.loaded = append(ct.loaded, toAdd) | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // WithDiscardEnvFiles sets the Options to discard the `env_file` section after resolving to | ||||
| @@ -61,7 +122,7 @@ func ParseYAML(source []byte) (map[string]interface{}, error) { | ||||
| } | ||||
| 
 | ||||
| // Load reads a ConfigDetails and returns a fully loaded configuration | ||||
| func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.Config, error) { | ||||
| func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.Project, error) { | ||||
| 	if len(configDetails.ConfigFiles) < 1 { | ||||
| 		return nil, errors.Errorf("No files specified") | ||||
| 	} | ||||
| @@ -79,40 +140,30 @@ func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types. | ||||
| 	} | ||||
| 
 | ||||
| 	configs := []*types.Config{} | ||||
| 	var err error | ||||
| 
 | ||||
| 	for _, file := range configDetails.ConfigFiles { | ||||
| 	for i, file := range configDetails.ConfigFiles { | ||||
| 		configDict := file.Config | ||||
| 		version := schema.Version(configDict) | ||||
| 		if configDetails.Version == "" { | ||||
| 			configDetails.Version = version | ||||
| 		} | ||||
| 		if configDetails.Version != version { | ||||
| 			return nil, errors.Errorf("version mismatched between two composefiles : %v and %v", configDetails.Version, version) | ||||
| 		} | ||||
| 
 | ||||
| 		if err := validateForbidden(configDict); err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 
 | ||||
| 		if !opts.SkipInterpolation { | ||||
| 			configDict, err = interpolateConfig(configDict, *opts.Interpolate) | ||||
| 		if configDict == nil { | ||||
| 			dict, err := parseConfig(file.Content, opts) | ||||
| 			if err != nil { | ||||
| 				return nil, err | ||||
| 			} | ||||
| 			configDict = dict | ||||
| 			file.Config = dict | ||||
| 			configDetails.ConfigFiles[i] = file | ||||
| 		} | ||||
| 
 | ||||
| 		if !opts.SkipValidation { | ||||
| 			if err := schema.Validate(configDict, configDetails.Version); err != nil { | ||||
| 			if err := schema.Validate(configDict); err != nil { | ||||
| 				return nil, err | ||||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		cfg, err := loadSections(configDict, configDetails) | ||||
| 		configDict = groupXFieldsIntoExtensions(configDict) | ||||
| 
 | ||||
| 		cfg, err := loadSections(file.Filename, configDict, configDetails, opts) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		cfg.Filename = file.Filename | ||||
| 		if opts.discardEnvFiles { | ||||
| 			for i := range cfg.Services { | ||||
| 				cfg.Services[i].EnvFile = nil | ||||
| @@ -122,73 +173,112 @@ func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types. | ||||
| 		configs = append(configs, cfg) | ||||
| 	} | ||||
| 
 | ||||
| 	return merge(configs) | ||||
| 	model, err := merge(configs) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| func validateForbidden(configDict map[string]interface{}) error { | ||||
| 	servicesDict, ok := configDict["services"].(map[string]interface{}) | ||||
| 	if !ok { | ||||
| 		return nil | ||||
| 	for _, s := range model.Services { | ||||
| 		var newEnvFiles types.StringList | ||||
| 		for _, ef := range s.EnvFile { | ||||
| 			newEnvFiles = append(newEnvFiles, absPath(configDetails.WorkingDir, ef)) | ||||
| 		} | ||||
| 	forbidden := getProperties(servicesDict, types.ForbiddenProperties) | ||||
| 	if len(forbidden) > 0 { | ||||
| 		return &ForbiddenPropertiesError{Properties: forbidden} | ||||
| 	} | ||||
| 	return nil | ||||
| 		s.EnvFile = newEnvFiles | ||||
| 	} | ||||
| 
 | ||||
| func loadSections(config map[string]interface{}, configDetails types.ConfigDetails) (*types.Config, error) { | ||||
| 	var err error | ||||
| 	cfg := types.Config{ | ||||
| 		Version: schema.Version(config), | ||||
| 	project := &types.Project{ | ||||
| 		Name:        opts.Name, | ||||
| 		WorkingDir:  configDetails.WorkingDir, | ||||
| 		Services:    model.Services, | ||||
| 		Networks:    model.Networks, | ||||
| 		Volumes:     model.Volumes, | ||||
| 		Secrets:     model.Secrets, | ||||
| 		Configs:     model.Configs, | ||||
| 		Environment: configDetails.Environment, | ||||
| 		Extensions:  model.Extensions, | ||||
| 	} | ||||
| 
 | ||||
| 	var loaders = []struct { | ||||
| 		key string | ||||
| 		fnc func(config map[string]interface{}) error | ||||
| 	}{ | ||||
| 		{ | ||||
| 			key: "services", | ||||
| 			fnc: func(config map[string]interface{}) error { | ||||
| 				cfg.Services, err = LoadServices(config, configDetails.WorkingDir, configDetails.LookupEnv) | ||||
| 				return err | ||||
| 			}, | ||||
| 		}, | ||||
| 		{ | ||||
| 			key: "networks", | ||||
| 			fnc: func(config map[string]interface{}) error { | ||||
| 				cfg.Networks, err = LoadNetworks(config, configDetails.Version) | ||||
| 				return err | ||||
| 			}, | ||||
| 		}, | ||||
| 		{ | ||||
| 			key: "volumes", | ||||
| 			fnc: func(config map[string]interface{}) error { | ||||
| 				cfg.Volumes, err = LoadVolumes(config, configDetails.Version) | ||||
| 				return err | ||||
| 			}, | ||||
| 		}, | ||||
| 		{ | ||||
| 			key: "secrets", | ||||
| 			fnc: func(config map[string]interface{}) error { | ||||
| 				cfg.Secrets, err = LoadSecrets(config, configDetails) | ||||
| 				return err | ||||
| 			}, | ||||
| 		}, | ||||
| 		{ | ||||
| 			key: "configs", | ||||
| 			fnc: func(config map[string]interface{}) error { | ||||
| 				cfg.Configs, err = LoadConfigObjs(config, configDetails) | ||||
| 				return err | ||||
| 			}, | ||||
| 		}, | ||||
| 	} | ||||
| 	for _, loader := range loaders { | ||||
| 		if err := loader.fnc(getSection(config, loader.key)); err != nil { | ||||
| 	if !opts.SkipNormalization { | ||||
| 		err = normalize(project) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 	} | ||||
| 	cfg.Extras = getExtras(config) | ||||
| 
 | ||||
| 	if !opts.SkipConsistencyCheck { | ||||
| 		err = checkConsistency(project) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return project, nil | ||||
| } | ||||
| 
 | ||||
| func parseConfig(b []byte, opts *Options) (map[string]interface{}, error) { | ||||
| 	if !opts.SkipInterpolation { | ||||
| 		substitute, err := opts.Interpolate.Substitute(string(b), template.Mapping(opts.Interpolate.LookupValue)) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		b = []byte(substitute) | ||||
| 	} | ||||
| 
 | ||||
| 	return ParseYAML(b) | ||||
| } | ||||
| 
 | ||||
| func groupXFieldsIntoExtensions(dict map[string]interface{}) map[string]interface{} { | ||||
| 	extras := map[string]interface{}{} | ||||
| 	for key, value := range dict { | ||||
| 		if strings.HasPrefix(key, "x-") { | ||||
| 			extras[key] = value | ||||
| 			delete(dict, key) | ||||
| 		} | ||||
| 		if d, ok := value.(map[string]interface{}); ok { | ||||
| 			dict[key] = groupXFieldsIntoExtensions(d) | ||||
| 		} | ||||
| 	} | ||||
| 	if len(extras) > 0 { | ||||
| 		dict["extensions"] = extras | ||||
| 	} | ||||
| 	return dict | ||||
| } | ||||
| 
 | ||||
| func loadSections(filename string, config map[string]interface{}, configDetails types.ConfigDetails, opts *Options) (*types.Config, error) { | ||||
| 	var err error | ||||
| 	cfg := types.Config{ | ||||
| 		Filename: filename, | ||||
| 	} | ||||
| 
 | ||||
| 	cfg.Services, err = LoadServices(filename, getSection(config, "services"), configDetails.WorkingDir, configDetails.LookupEnv, opts) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	cfg.Networks, err = LoadNetworks(getSection(config, "networks"), configDetails.Version) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	cfg.Volumes, err = LoadVolumes(getSection(config, "volumes")) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	cfg.Secrets, err = LoadSecrets(getSection(config, "secrets"), configDetails) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	cfg.Configs, err = LoadConfigObjs(getSection(config, "configs"), configDetails) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	extensions := getSection(config, "extensions") | ||||
| 	if len(extensions) > 0 { | ||||
| 		cfg.Extensions = extensions | ||||
| 	} | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return &cfg, nil | ||||
| } | ||||
| 
 | ||||
| @@ -200,65 +290,6 @@ func getSection(config map[string]interface{}, key string) map[string]interface{ | ||||
| 	return section.(map[string]interface{}) | ||||
| } | ||||
| 
 | ||||
| // GetUnsupportedProperties returns the list of any unsupported properties that are | ||||
| // used in the Compose files. | ||||
| func GetUnsupportedProperties(configDicts ...map[string]interface{}) []string { | ||||
| 	unsupported := map[string]bool{} | ||||
| 
 | ||||
| 	for _, configDict := range configDicts { | ||||
| 		for _, service := range getServices(configDict) { | ||||
| 			serviceDict := service.(map[string]interface{}) | ||||
| 			for _, property := range types.UnsupportedProperties { | ||||
| 				if _, isSet := serviceDict[property]; isSet { | ||||
| 					unsupported[property] = true | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return sortedKeys(unsupported) | ||||
| } | ||||
| 
 | ||||
| func sortedKeys(set map[string]bool) []string { | ||||
| 	var keys []string | ||||
| 	for key := range set { | ||||
| 		keys = append(keys, key) | ||||
| 	} | ||||
| 	sort.Strings(keys) | ||||
| 	return keys | ||||
| } | ||||
| 
 | ||||
| // GetDeprecatedProperties returns the list of any deprecated properties that | ||||
| // are used in the compose files. | ||||
| func GetDeprecatedProperties(configDicts ...map[string]interface{}) map[string]string { | ||||
| 	deprecated := map[string]string{} | ||||
| 
 | ||||
| 	for _, configDict := range configDicts { | ||||
| 		deprecatedProperties := getProperties(getServices(configDict), types.DeprecatedProperties) | ||||
| 		for key, value := range deprecatedProperties { | ||||
| 			deprecated[key] = value | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return deprecated | ||||
| } | ||||
| 
 | ||||
| func getProperties(services map[string]interface{}, propertyMap map[string]string) map[string]string { | ||||
| 	output := map[string]string{} | ||||
| 
 | ||||
| 	for _, service := range services { | ||||
| 		if serviceDict, ok := service.(map[string]interface{}); ok { | ||||
| 			for property, description := range propertyMap { | ||||
| 				if _, isSet := serviceDict[property]; isSet { | ||||
| 					output[property] = description | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return output | ||||
| } | ||||
| 
 | ||||
| // ForbiddenPropertiesError is returned when there are properties in the Compose | ||||
| // file that are forbidden. | ||||
| type ForbiddenPropertiesError struct { | ||||
| @@ -269,16 +300,6 @@ func (e *ForbiddenPropertiesError) Error() string { | ||||
| 	return "Configuration contains forbidden properties" | ||||
| } | ||||
| 
 | ||||
| func getServices(configDict map[string]interface{}) map[string]interface{} { | ||||
| 	if services, ok := configDict["services"]; ok { | ||||
| 		if servicesDict, ok := services.(map[string]interface{}); ok { | ||||
| 			return servicesDict | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return map[string]interface{}{} | ||||
| } | ||||
| 
 | ||||
| // Transform converts the source into the target struct with compose types transformer | ||||
| // and the specified transformers if any. | ||||
| func Transform(source interface{}, target interface{}, additionalTransformers ...Transformer) error { | ||||
| @@ -328,6 +349,9 @@ func createTransformHook(additionalTransformers ...Transformer) mapstructure.Dec | ||||
| 		reflect.TypeOf(types.ServiceVolumeConfig{}):              transformServiceVolumeConfig, | ||||
| 		reflect.TypeOf(types.BuildConfig{}):                      transformBuildConfig, | ||||
| 		reflect.TypeOf(types.Duration(0)):                        transformStringToDuration, | ||||
| 		reflect.TypeOf(types.DependsOnConfig{}):                  transformDependsOnConfig, | ||||
| 		reflect.TypeOf(types.ExtendsConfig{}):                    transformExtendsConfig, | ||||
| 		reflect.TypeOf(types.DeviceRequest{}):                    transformServiceDeviceRequest, | ||||
| 	} | ||||
| 
 | ||||
| 	for _, transformer := range additionalTransformers { | ||||
| @@ -367,7 +391,7 @@ func convertToStringKeysRecursive(value interface{}, keyPrefix string) (interfac | ||||
| 		return dict, nil | ||||
| 	} | ||||
| 	if list, ok := value.([]interface{}); ok { | ||||
| 		var convertedList []interface{} | ||||
| 		convertedList := []interface{}{} | ||||
| 		for index, entry := range list { | ||||
| 			newKeyPrefix := fmt.Sprintf("%s[%d]", keyPrefix, index) | ||||
| 			convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix) | ||||
| @@ -393,20 +417,95 @@ func formatInvalidKeyError(keyPrefix string, key interface{}) error { | ||||
| 
 | ||||
| // LoadServices produces a ServiceConfig map from a compose file Dict | ||||
| // the servicesDict is not validated if directly used. Use Load() to enable validation | ||||
| func LoadServices(servicesDict map[string]interface{}, workingDir string, lookupEnv template.Mapping) ([]types.ServiceConfig, error) { | ||||
| func LoadServices(filename string, servicesDict map[string]interface{}, workingDir string, lookupEnv template.Mapping, opts *Options) ([]types.ServiceConfig, error) { | ||||
| 	var services []types.ServiceConfig | ||||
| 
 | ||||
| 	for name, serviceDef := range servicesDict { | ||||
| 		serviceConfig, err := LoadService(name, serviceDef.(map[string]interface{}), workingDir, lookupEnv) | ||||
| 	for name := range servicesDict { | ||||
| 		serviceConfig, err := loadServiceWithExtends(filename, name, servicesDict, workingDir, lookupEnv, opts, &cycleTracker{}) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 
 | ||||
| 		services = append(services, *serviceConfig) | ||||
| 	} | ||||
| 
 | ||||
| 	return services, nil | ||||
| } | ||||
| 
 | ||||
// loadServiceWithExtends loads service `name` from servicesDict and, when the
// service declares an `extends` section and opts.SkipExtends is false,
// recursively resolves the base service it extends and merges the two, with
// the extending service's own values taking precedence. ct records
// (filename, service) pairs so that cyclic extends chains are rejected.
func loadServiceWithExtends(filename, name string, servicesDict map[string]interface{}, workingDir string, lookupEnv template.Mapping, opts *Options, ct *cycleTracker) (*types.ServiceConfig, error) {
	// Reject a cycle before doing any work on this (file, service) pair.
	if err := ct.Add(filename, name); err != nil {
		return nil, err
	}

	serviceConfig, err := LoadService(name, servicesDict[name].(map[string]interface{}), workingDir, lookupEnv)
	if err != nil {
		return nil, err
	}

	if serviceConfig.Extends != nil && !opts.SkipExtends {
		baseServiceName := *serviceConfig.Extends["service"]
		var baseService *types.ServiceConfig
		if file := serviceConfig.Extends["file"]; file == nil {
			// No `extends.file`: the base service lives in the same file,
			// so resolve it against the same services dictionary.
			baseService, err = loadServiceWithExtends(filename, baseServiceName, servicesDict, workingDir, lookupEnv, opts, ct)
			if err != nil {
				return nil, err
			}
		} else {
			// Resolve the path to the imported file, and load it.
			baseFilePath := absPath(workingDir, *file)

			bytes, err := ioutil.ReadFile(baseFilePath)
			if err != nil {
				return nil, err
			}

			// The imported file gets the same variable interpolation
			// treatment as the main file, unless interpolation is disabled.
			if !opts.SkipInterpolation {
				substitute, err := opts.Interpolate.Substitute(string(bytes), template.Mapping(opts.Interpolate.LookupValue))
				if err != nil {
					return nil, err
				}
				bytes = []byte(substitute)
			}

			baseFile, err := ParseYAML(bytes)
			if err != nil {
				return nil, err
			}

			// Recurse with the imported file's own path and directory so that
			// nested extends and relative paths resolve against that file.
			baseFileServices := getSection(baseFile, "services")
			baseService, err = loadServiceWithExtends(baseFilePath, baseServiceName, baseFileServices, filepath.Dir(baseFilePath), lookupEnv, opts, ct)
			if err != nil {
				return nil, err
			}

			// Make paths relative to the importing Compose file. Note that we
			// make the paths relative to `*file` rather than `baseFilePath` so
			// that the resulting paths won't be absolute if `*file` isn't an
			// absolute path.
			baseFileParent := filepath.Dir(*file)
			if baseService.Build != nil {
				// Note that the Dockerfile is always defined relative to the
				// build context, so there's no need to update the Dockerfile field.
				baseService.Build.Context = absPath(baseFileParent, baseService.Build.Context)
			}

			// Only bind-mount sources are filesystem paths; named volumes
			// must keep their volume name untouched.
			for i, vol := range baseService.Volumes {
				if vol.Type != types.VolumeTypeBind {
					continue
				}
				baseService.Volumes[i].Source = absPath(baseFileParent, vol.Source)
			}
		}

		// Merge the extending service on top of the base; slices append,
		// scalars override, and serviceSpecials handles the tricky fields.
		if err := mergo.Merge(baseService, serviceConfig, mergo.WithAppendSlice, mergo.WithOverride, mergo.WithTransformers(serviceSpecials)); err != nil {
			return nil, errors.Wrapf(err, "cannot merge service %s", name)
		}
		serviceConfig = baseService
	}

	return serviceConfig, nil
}
| 
 | ||||
| // LoadService produces a single ServiceConfig from a compose file Dict | ||||
| // the serviceDict is not validated if directly used. Use Load() to enable validation | ||||
| func LoadService(name string, serviceDict map[string]interface{}, workingDir string, lookupEnv template.Mapping) (*types.ServiceConfig, error) { | ||||
| @@ -424,62 +523,34 @@ func LoadService(name string, serviceDict map[string]interface{}, workingDir str | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	serviceConfig.Extras = getExtras(serviceDict) | ||||
| 
 | ||||
| 	return serviceConfig, nil | ||||
| } | ||||
| 
 | ||||
// loadExtras returns the `x-` prefixed extension fields of the entry `name`
// in source, or nil when the entry is absent, not a mapping, or has none.
func loadExtras(name string, source map[string]interface{}) map[string]interface{} {
	dict, ok := source[name].(map[string]interface{})
	if !ok {
		return nil
	}
	var extras map[string]interface{}
	for key, value := range dict {
		if !strings.HasPrefix(key, "x-") {
			continue
		}
		if extras == nil {
			extras = map[string]interface{}{}
		}
		extras[key] = value
	}
	return extras
}
| 
 | ||||
// getExtras collects the `x-` prefixed keys of dict into a new map.
// It returns nil (not an empty map) when no extension keys are present.
func getExtras(dict map[string]interface{}) map[string]interface{} {
	var extras map[string]interface{}
	for k, v := range dict {
		if !strings.HasPrefix(k, "x-") {
			continue
		}
		if extras == nil {
			extras = map[string]interface{}{}
		}
		extras[k] = v
	}
	return extras
}
| 
 | ||||
| func updateEnvironment(environment map[string]*string, vars map[string]*string, lookupEnv template.Mapping) { | ||||
| 	for k, v := range vars { | ||||
| 		interpolatedV, ok := lookupEnv(k) | ||||
| 		if (v == nil || *v == "") && ok { | ||||
| 			// lookupEnv is prioritized over vars | ||||
| 			environment[k] = &interpolatedV | ||||
| 		} else { | ||||
| 			environment[k] = v | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func resolveEnvironment(serviceConfig *types.ServiceConfig, workingDir string, lookupEnv template.Mapping) error { | ||||
| 	environment := make(map[string]*string) | ||||
| 	environment := types.MappingWithEquals{} | ||||
| 
 | ||||
| 	if len(serviceConfig.EnvFile) > 0 { | ||||
| 		var envVars []string | ||||
| 
 | ||||
| 		for _, file := range serviceConfig.EnvFile { | ||||
| 			filePath := absPath(workingDir, file) | ||||
| 			fileVars, err := opts.ParseEnvFile(filePath) | ||||
| 			file, err := os.Open(filePath) | ||||
| 			if err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 			envVars = append(envVars, fileVars...) | ||||
| 			defer file.Close() | ||||
| 			fileVars, err := godotenv.Parse(file) | ||||
| 			if err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 			env := types.MappingWithEquals{} | ||||
| 			for k, v := range fileVars { | ||||
| 				v := v | ||||
| 				env[k] = &v | ||||
| 			} | ||||
| 			environment.OverrideBy(env.Resolve(lookupEnv).RemoveEmpty()) | ||||
| 		} | ||||
| 		updateEnvironment(environment, | ||||
| 			opts.ConvertKVStringsToMapWithNil(envVars), lookupEnv) | ||||
| 	} | ||||
| 
 | ||||
| 	updateEnvironment(environment, serviceConfig.Environment, lookupEnv) | ||||
| 	environment.OverrideBy(serviceConfig.Environment.Resolve(lookupEnv)) | ||||
| 	serviceConfig.Environment = environment | ||||
| 	return nil | ||||
| } | ||||
| @@ -513,12 +584,12 @@ func resolveVolumePaths(volumes []types.ServiceVolumeConfig, workingDir string, | ||||
| // TODO: make this more robust | ||||
| func expandUser(path string, lookupEnv template.Mapping) string { | ||||
| 	if strings.HasPrefix(path, "~") { | ||||
| 		home, ok := lookupEnv("HOME") | ||||
| 		if !ok { | ||||
| 		home, err := os.UserHomeDir() | ||||
| 		if err != nil { | ||||
| 			logrus.Warn("cannot expand '~', because the environment lacks HOME") | ||||
| 			return path | ||||
| 		} | ||||
| 		return strings.Replace(path, "~", home, 1) | ||||
| 		return filepath.Join(home, path[1:]) | ||||
| 	} | ||||
| 	return path | ||||
| } | ||||
| @@ -529,8 +600,12 @@ func transformUlimits(data interface{}) (interface{}, error) { | ||||
| 		return types.UlimitsConfig{Single: value}, nil | ||||
| 	case map[string]interface{}: | ||||
| 		ulimit := types.UlimitsConfig{} | ||||
| 		ulimit.Soft = value["soft"].(int) | ||||
| 		ulimit.Hard = value["hard"].(int) | ||||
| 		if v, ok := value["soft"]; ok { | ||||
| 			ulimit.Soft = v.(int) | ||||
| 		} | ||||
| 		if v, ok := value["hard"]; ok { | ||||
| 			ulimit.Hard = v.(int) | ||||
| 		} | ||||
| 		return ulimit, nil | ||||
| 	default: | ||||
| 		return data, errors.Errorf("invalid type %T for ulimits", value) | ||||
| @@ -554,15 +629,12 @@ func LoadNetworks(source map[string]interface{}, version string) (map[string]typ | ||||
| 			if network.Name != "" { | ||||
| 				return nil, errors.Errorf("network %s: network.external.name and network.name conflict; only use network.name", name) | ||||
| 			} | ||||
| 			if versions.GreaterThanOrEqualTo(version, "3.5") { | ||||
| 			logrus.Warnf("network %s: network.external.name is deprecated in favor of network.name", name) | ||||
| 			} | ||||
| 			network.Name = network.External.Name | ||||
| 			network.External.Name = "" | ||||
| 		case network.Name == "": | ||||
| 			network.Name = name | ||||
| 		} | ||||
| 		network.Extras = loadExtras(name, source) | ||||
| 		networks[name] = network | ||||
| 	} | ||||
| 	return networks, nil | ||||
| @@ -576,7 +648,7 @@ func externalVolumeError(volume, key string) error { | ||||
| 
 | ||||
| // LoadVolumes produces a VolumeConfig map from a compose file Dict | ||||
| // the source Dict is not validated if directly used. Use Load() to enable validation | ||||
| func LoadVolumes(source map[string]interface{}, version string) (map[string]types.VolumeConfig, error) { | ||||
| func LoadVolumes(source map[string]interface{}) (map[string]types.VolumeConfig, error) { | ||||
| 	volumes := make(map[string]types.VolumeConfig) | ||||
| 	if err := Transform(source, &volumes); err != nil { | ||||
| 		return volumes, err | ||||
| @@ -597,15 +669,12 @@ func LoadVolumes(source map[string]interface{}, version string) (map[string]type | ||||
| 			if volume.Name != "" { | ||||
| 				return nil, errors.Errorf("volume %s: volume.external.name and volume.name conflict; only use volume.name", name) | ||||
| 			} | ||||
| 			if versions.GreaterThanOrEqualTo(version, "3.4") { | ||||
| 			logrus.Warnf("volume %s: volume.external.name is deprecated in favor of volume.name", name) | ||||
| 			} | ||||
| 			volume.Name = volume.External.Name | ||||
| 			volume.External.Name = "" | ||||
| 		case volume.Name == "": | ||||
| 			volume.Name = name | ||||
| 		} | ||||
| 		volume.Extras = loadExtras(name, source) | ||||
| 		volumes[name] = volume | ||||
| 	} | ||||
| 	return volumes, nil | ||||
| @@ -624,7 +693,6 @@ func LoadSecrets(source map[string]interface{}, details types.ConfigDetails) (ma | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		secretConfig := types.SecretConfig(obj) | ||||
| 		secretConfig.Extras = loadExtras(name, source) | ||||
| 		secrets[name] = secretConfig | ||||
| 	} | ||||
| 	return secrets, nil | ||||
| @@ -643,7 +711,6 @@ func LoadConfigObjs(source map[string]interface{}, details types.ConfigDetails) | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		configConfig := types.ConfigObjConfig(obj) | ||||
| 		configConfig.Extras = loadExtras(name, source) | ||||
| 		configs[name] = configConfig | ||||
| 	} | ||||
| 	return configs, nil | ||||
| @@ -658,9 +725,7 @@ func loadFileObjectConfig(name string, objType string, obj types.FileObjectConfi | ||||
| 			if obj.Name != "" { | ||||
| 				return obj, errors.Errorf("%[1]s %[2]s: %[1]s.external.name and %[1]s.name conflict; only use %[1]s.name", objType, name) | ||||
| 			} | ||||
| 			if versions.GreaterThanOrEqualTo(details.Version, "3.5") { | ||||
| 			logrus.Warnf("%[1]s %[2]s: %[1]s.external.name is deprecated in favor of %[1]s.name", objType, name) | ||||
| 			} | ||||
| 			obj.Name = obj.External.Name | ||||
| 			obj.External.Name = "" | ||||
| 		} else { | ||||
| @@ -681,6 +746,10 @@ func loadFileObjectConfig(name string, objType string, obj types.FileObjectConfi | ||||
| } | ||||
| 
 | ||||
| func absPath(workingDir string, filePath string) string { | ||||
| 	if strings.HasPrefix(filePath, "~") { | ||||
| 		home, _ := os.UserHomeDir() | ||||
| 		return filepath.Join(home, filePath[1:]) | ||||
| 	} | ||||
| 	if filepath.IsAbs(filePath) { | ||||
| 		return filePath | ||||
| 	} | ||||
| @@ -719,19 +788,23 @@ var transformServicePort TransformerFunc = func(data interface{}) (interface{}, | ||||
| 		for _, entry := range entries { | ||||
| 			switch value := entry.(type) { | ||||
| 			case int: | ||||
| 				v, err := toServicePortConfigs(fmt.Sprint(value)) | ||||
| 				parsed, err := types.ParsePortConfig(fmt.Sprint(value)) | ||||
| 				if err != nil { | ||||
| 					return data, err | ||||
| 				} | ||||
| 				ports = append(ports, v...) | ||||
| 				for _, v := range parsed { | ||||
| 					ports = append(ports, v) | ||||
| 				} | ||||
| 			case string: | ||||
| 				v, err := toServicePortConfigs(value) | ||||
| 				parsed, err := types.ParsePortConfig(value) | ||||
| 				if err != nil { | ||||
| 					return data, err | ||||
| 				} | ||||
| 				ports = append(ports, v...) | ||||
| 				for _, v := range parsed { | ||||
| 					ports = append(ports, v) | ||||
| 				} | ||||
| 			case map[string]interface{}: | ||||
| 				ports = append(ports, value) | ||||
| 				ports = append(ports, groupXFieldsIntoExtensions(value)) | ||||
| 			default: | ||||
| 				return data, errors.Errorf("invalid type %T for port", value) | ||||
| 			} | ||||
| @@ -742,12 +815,36 @@ var transformServicePort TransformerFunc = func(data interface{}) (interface{}, | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| var transformServiceDeviceRequest TransformerFunc = func(data interface{}) (interface{}, error) { | ||||
| 	switch value := data.(type) { | ||||
| 	case map[string]interface{}: | ||||
| 		count, ok := value["count"] | ||||
| 		if ok { | ||||
| 			switch val := count.(type) { | ||||
| 			case int: | ||||
| 				return value, nil | ||||
| 			case string: | ||||
| 				if strings.ToLower(val) == "all" { | ||||
| 					value["count"] = -1 | ||||
| 					return value, nil | ||||
| 				} | ||||
| 				return data, errors.Errorf("invalid string value for 'count' (the only value allowed is 'all')") | ||||
| 			default: | ||||
| 				return data, errors.Errorf("invalid type %T for device count", val) | ||||
| 			} | ||||
| 		} | ||||
| 		return data, nil | ||||
| 	default: | ||||
| 		return data, errors.Errorf("invalid type %T for resource reservation", value) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| var transformStringSourceMap TransformerFunc = func(data interface{}) (interface{}, error) { | ||||
| 	switch value := data.(type) { | ||||
| 	case string: | ||||
| 		return map[string]interface{}{"source": value}, nil | ||||
| 	case map[string]interface{}: | ||||
| 		return data, nil | ||||
| 		return groupXFieldsIntoExtensions(data.(map[string]interface{})), nil | ||||
| 	default: | ||||
| 		return data, errors.Errorf("invalid type %T for secret", value) | ||||
| 	} | ||||
| @@ -758,18 +855,47 @@ var transformBuildConfig TransformerFunc = func(data interface{}) (interface{}, | ||||
| 	case string: | ||||
| 		return map[string]interface{}{"context": value}, nil | ||||
| 	case map[string]interface{}: | ||||
| 		return data, nil | ||||
| 		return groupXFieldsIntoExtensions(data.(map[string]interface{})), nil | ||||
| 	default: | ||||
| 		return data, errors.Errorf("invalid type %T for service build", value) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| var transformDependsOnConfig TransformerFunc = func(data interface{}) (interface{}, error) { | ||||
| 	switch value := data.(type) { | ||||
| 	case []interface{}: | ||||
| 		transformed := map[string]interface{}{} | ||||
| 		for _, serviceIntf := range value { | ||||
| 			service, ok := serviceIntf.(string) | ||||
| 			if !ok { | ||||
| 				return data, errors.Errorf("invalid type %T for service depends_on element. Expected string.", value) | ||||
| 			} | ||||
| 			transformed[service] = map[string]interface{}{"condition": types.ServiceConditionStarted} | ||||
| 		} | ||||
| 		return transformed, nil | ||||
| 	case map[string]interface{}: | ||||
| 		return groupXFieldsIntoExtensions(data.(map[string]interface{})), nil | ||||
| 	default: | ||||
| 		return data, errors.Errorf("invalid type %T for service depends_on", value) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| var transformExtendsConfig TransformerFunc = func(data interface{}) (interface{}, error) { | ||||
| 	switch data.(type) { | ||||
| 	case string: | ||||
| 		data = map[string]interface{}{ | ||||
| 			"service": data, | ||||
| 		} | ||||
| 	} | ||||
| 	return transformMappingOrListFunc("=", true)(data) | ||||
| } | ||||
| 
 | ||||
| var transformServiceVolumeConfig TransformerFunc = func(data interface{}) (interface{}, error) { | ||||
| 	switch value := data.(type) { | ||||
| 	case string: | ||||
| 		return ParseVolume(value) | ||||
| 	case map[string]interface{}: | ||||
| 		return data, nil | ||||
| 		return groupXFieldsIntoExtensions(data.(map[string]interface{})), nil | ||||
| 	default: | ||||
| 		return data, errors.Errorf("invalid type %T for service volume", value) | ||||
| 	} | ||||
| @@ -853,7 +979,7 @@ func transformMappingOrList(mappingOrList interface{}, sep string, allowNil bool | ||||
| 
 | ||||
| var transformShellCommand TransformerFunc = func(value interface{}) (interface{}, error) { | ||||
| 	if str, ok := value.(string); ok { | ||||
| 		return shlex.Split(str) | ||||
| 		return shellwords.Parse(str) | ||||
| 	} | ||||
| 	return value, nil | ||||
| } | ||||
| @@ -892,39 +1018,6 @@ var transformStringToDuration TransformerFunc = func(value interface{}) (interfa | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
// toServicePortConfigs expands one compose port spec string (e.g. "80",
// "8080:80/tcp", or a range) into a slice of types.ServicePortConfig values,
// one per concrete port binding. The elements are returned as interface{}
// for the transformer pipeline.
func toServicePortConfigs(value string) ([]interface{}, error) {
	var portConfigs []interface{}

	// nat.ParsePortSpecs handles ranges, protocols and host bindings;
	// presumably it accepts the full docker-compose port syntax — the exact
	// grammar is defined by the nat package, not here.
	ports, portBindings, err := nat.ParsePortSpecs([]string{value})
	if err != nil {
		return nil, err
	}
	// We need to sort the key of the ports to make sure it is consistent
	// (map iteration order is random in Go).
	keys := []string{}
	for port := range ports {
		keys = append(keys, string(port))
	}
	sort.Strings(keys)

	for _, key := range keys {
		// Reuse ConvertPortToPortConfig so that it is consistent
		portConfig, err := opts.ConvertPortToPortConfig(nat.Port(key), portBindings)
		if err != nil {
			return nil, err
		}
		for _, p := range portConfig {
			portConfigs = append(portConfigs, types.ServicePortConfig{
				Protocol:  string(p.Protocol),
				Target:    p.TargetPort,
				Published: p.PublishedPort,
				Mode:      string(p.PublishMode),
			})
		}
	}

	return portConfigs, nil
}
| 
 | ||||
| func toMapStringString(value map[string]interface{}, allowNil bool) map[string]interface{} { | ||||
| 	output := make(map[string]interface{}) | ||||
| 	for key, value := range value { | ||||
| @@ -1,10 +1,26 @@ | ||||
| /* | ||||
|    Copyright 2020 The Compose Specification Authors. | ||||
| 
 | ||||
|    Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|    you may not use this file except in compliance with the License. | ||||
|    You may obtain a copy of the License at | ||||
| 
 | ||||
|        http://www.apache.org/licenses/LICENSE-2.0 | ||||
| 
 | ||||
|    Unless required by applicable law or agreed to in writing, software | ||||
|    distributed under the License is distributed on an "AS IS" BASIS, | ||||
|    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|    See the License for the specific language governing permissions and | ||||
|    limitations under the License. | ||||
| */ | ||||
| 
 | ||||
| package loader | ||||
| 
 | ||||
| import ( | ||||
| 	"reflect" | ||||
| 	"sort" | ||||
| 
 | ||||
| 	"github.com/docker/cli/cli/compose/types" | ||||
| 	"github.com/compose-spec/compose-go/types" | ||||
| 	"github.com/imdario/mergo" | ||||
| 	"github.com/pkg/errors" | ||||
| ) | ||||
| @@ -13,6 +29,18 @@ type specials struct { | ||||
| 	m map[reflect.Type]func(dst, src reflect.Value) error | ||||
| } | ||||
| 
 | ||||
| var serviceSpecials = &specials{ | ||||
| 	m: map[reflect.Type]func(dst, src reflect.Value) error{ | ||||
| 		reflect.TypeOf(&types.LoggingConfig{}):           safelyMerge(mergeLoggingConfig), | ||||
| 		reflect.TypeOf(&types.UlimitsConfig{}):           safelyMerge(mergeUlimitsConfig), | ||||
| 		reflect.TypeOf([]types.ServicePortConfig{}):      mergeSlice(toServicePortConfigsMap, toServicePortConfigsSlice), | ||||
| 		reflect.TypeOf([]types.ServiceSecretConfig{}):    mergeSlice(toServiceSecretConfigsMap, toServiceSecretConfigsSlice), | ||||
| 		reflect.TypeOf([]types.ServiceConfigObjConfig{}): mergeSlice(toServiceConfigObjConfigsMap, toSServiceConfigObjConfigsSlice), | ||||
| 		reflect.TypeOf(&types.UlimitsConfig{}):           mergeUlimitsConfig, | ||||
| 		reflect.TypeOf(&types.ServiceNetworkConfig{}):    mergeServiceNetworkConfig, | ||||
| 	}, | ||||
| } | ||||
| 
 | ||||
| func (s *specials) Transformer(t reflect.Type) func(dst, src reflect.Value) error { | ||||
| 	if fn, ok := s.m[t]; ok { | ||||
| 		return fn | ||||
| @@ -44,6 +72,10 @@ func merge(configs []*types.Config) (*types.Config, error) { | ||||
| 		if err != nil { | ||||
| 			return base, errors.Wrapf(err, "cannot merge configs from %s", override.Filename) | ||||
| 		} | ||||
| 		base.Extensions, err = mergeExtensions(base.Extensions, override.Extensions) | ||||
| 		if err != nil { | ||||
| 			return base, errors.Wrapf(err, "cannot merge extensions from %s", override.Filename) | ||||
| 		} | ||||
| 	} | ||||
| 	return base, nil | ||||
| } | ||||
| @@ -51,22 +83,15 @@ func merge(configs []*types.Config) (*types.Config, error) { | ||||
| func mergeServices(base, override []types.ServiceConfig) ([]types.ServiceConfig, error) { | ||||
| 	baseServices := mapByName(base) | ||||
| 	overrideServices := mapByName(override) | ||||
| 	specials := &specials{ | ||||
| 		m: map[reflect.Type]func(dst, src reflect.Value) error{ | ||||
| 			reflect.TypeOf(&types.LoggingConfig{}):           safelyMerge(mergeLoggingConfig), | ||||
| 			reflect.TypeOf([]types.ServicePortConfig{}):      mergeSlice(toServicePortConfigsMap, toServicePortConfigsSlice), | ||||
| 			reflect.TypeOf([]types.ServiceSecretConfig{}):    mergeSlice(toServiceSecretConfigsMap, toServiceSecretConfigsSlice), | ||||
| 			reflect.TypeOf([]types.ServiceConfigObjConfig{}): mergeSlice(toServiceConfigObjConfigsMap, toSServiceConfigObjConfigsSlice), | ||||
| 			reflect.TypeOf(&types.UlimitsConfig{}):           mergeUlimitsConfig, | ||||
| 			reflect.TypeOf(&types.ServiceNetworkConfig{}):    mergeServiceNetworkConfig, | ||||
| 		}, | ||||
| 	} | ||||
| 	for name, overrideService := range overrideServices { | ||||
| 		overrideService := overrideService | ||||
| 		if baseService, ok := baseServices[name]; ok { | ||||
| 			if err := mergo.Merge(&baseService, &overrideService, mergo.WithAppendSlice, mergo.WithOverride, mergo.WithTransformers(specials)); err != nil { | ||||
| 			if err := mergo.Merge(&baseService, &overrideService, mergo.WithAppendSlice, mergo.WithOverride, mergo.WithTransformers(serviceSpecials)); err != nil { | ||||
| 				return base, errors.Wrapf(err, "cannot merge service %s", name) | ||||
| 			} | ||||
| 			if len(overrideService.Command) > 0 { | ||||
| 				baseService.Command = overrideService.Command | ||||
| 			} | ||||
| 			baseServices[name] = baseService | ||||
| 			continue | ||||
| 		} | ||||
| @@ -256,3 +281,11 @@ func mergeConfigs(base, override map[string]types.ConfigObjConfig) (map[string]t | ||||
| 	err := mergo.Map(&base, &override, mergo.WithOverride) | ||||
| 	return base, err | ||||
| } | ||||
| 
 | ||||
| func mergeExtensions(base, override map[string]interface{}) (map[string]interface{}, error) { | ||||
| 	if base == nil { | ||||
| 		base = map[string]interface{}{} | ||||
| 	} | ||||
| 	err := mergo.Map(&base, &override, mergo.WithOverride) | ||||
| 	return base, err | ||||
| } | ||||
							
								
								
									
										239
									
								
								vendor/github.com/compose-spec/compose-go/loader/normalize.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										239
									
								
								vendor/github.com/compose-spec/compose-go/loader/normalize.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,239 @@ | ||||
| /* | ||||
|    Copyright 2020 The Compose Specification Authors. | ||||
|  | ||||
|    Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|    you may not use this file except in compliance with the License. | ||||
|    You may obtain a copy of the License at | ||||
|  | ||||
|        http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
|    Unless required by applicable law or agreed to in writing, software | ||||
|    distributed under the License is distributed on an "AS IS" BASIS, | ||||
|    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|    See the License for the specific language governing permissions and | ||||
|    limitations under the License. | ||||
| */ | ||||
|  | ||||
| package loader | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"os" | ||||
| 	"path/filepath" | ||||
|  | ||||
| 	"github.com/compose-spec/compose-go/errdefs" | ||||
| 	"github.com/compose-spec/compose-go/types" | ||||
| 	"github.com/pkg/errors" | ||||
| 	"github.com/sirupsen/logrus" | ||||
| ) | ||||
|  | ||||
// normalize compose project by moving deprecated attributes to their canonical position and injecting implicit defaults
func normalize(project *types.Project) error {
	// Anchor the working dir and compose file paths so later relative-path
	// resolution is stable regardless of the process cwd.
	absWorkingDir, err := filepath.Abs(project.WorkingDir)
	if err != nil {
		return err
	}
	project.WorkingDir = absWorkingDir

	absComposeFiles, err := absComposeFiles(project.ComposeFiles)
	if err != nil {
		return err
	}
	project.ComposeFiles = absComposeFiles

	// If not declared explicitly, Compose model involves an implicit "default" network
	if _, ok := project.Networks["default"]; !ok {
		project.Networks["default"] = types.NetworkConfig{}
	}

	err = relocateExternalName(project)
	if err != nil {
		return err
	}

	// Services are value copies; every mutation below must be written back
	// via project.Services[i] = s at the end of the loop body.
	for i, s := range project.Services {
		if len(s.Networks) == 0 && s.NetworkMode == "" {
			// Service without explicit network attachment are implicitly exposed on default network
			s.Networks = map[string]*types.ServiceNetworkConfig{"default": nil}
		}

		if s.PullPolicy == types.PullPolicyIfNotPresent {
			s.PullPolicy = types.PullPolicyMissing
		}

		// Variable lookup backed by the project's captured environment.
		fn := func(s string) (string, bool) {
			v, ok := project.Environment[s]
			return v, ok
		}

		if s.Build != nil {
			if s.Build.Dockerfile == "" {
				s.Build.Dockerfile = "Dockerfile"
			}
			localContext := absPath(project.WorkingDir, s.Build.Context)
			if _, err := os.Stat(localContext); err == nil {
				s.Build.Context = localContext
				s.Build.Dockerfile = absPath(localContext, s.Build.Dockerfile)
			} else {
				// might be a remote http/git context. Unfortunately supported "remote" syntax is highly ambiguous
				// in moby/moby and not defined by compose-spec, so let's assume runtime will check
			}
			s.Build.Args = s.Build.Args.Resolve(fn)
		}
		s.Environment = s.Environment.Resolve(fn)

		// NOTE(review): the relocate* helpers receive s by value; any
		// assignment they make to s.Logging / s.Build themselves (as opposed
		// to fields reached through an existing pointer) is lost on return —
		// TODO confirm this is intended.
		err := relocateLogDriver(s)
		if err != nil {
			return err
		}

		err = relocateLogOpt(s)
		if err != nil {
			return err
		}

		err = relocateDockerfile(s)
		if err != nil {
			return err
		}

		project.Services[i] = s
	}

	setNameFromKey(project)

	return nil
}
|  | ||||
// absComposeFiles resolves every compose file path to an absolute path
// (relative paths are resolved against the current working directory).
func absComposeFiles(composeFiles []string) ([]string, error) {
	abs := make([]string, len(composeFiles))
	for i, f := range composeFiles {
		a, err := filepath.Abs(f)
		if err != nil {
			return nil, err
		}
		abs[i] = a
	}
	return abs, nil
}
|  | ||||
| // Resources with no explicit name are actually named by their key in map | ||||
| func setNameFromKey(project *types.Project) { | ||||
| 	for i, n := range project.Networks { | ||||
| 		if n.Name == "" { | ||||
| 			n.Name = fmt.Sprintf("%s_%s", project.Name, i) | ||||
| 			project.Networks[i] = n | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	for i, v := range project.Volumes { | ||||
| 		if v.Name == "" { | ||||
| 			v.Name = fmt.Sprintf("%s_%s", project.Name, i) | ||||
| 			project.Volumes[i] = v | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	for i, c := range project.Configs { | ||||
| 		if c.Name == "" { | ||||
| 			c.Name = fmt.Sprintf("%s_%s", project.Name, i) | ||||
| 			project.Configs[i] = c | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	for i, s := range project.Secrets { | ||||
| 		if s.Name == "" { | ||||
| 			s.Name = fmt.Sprintf("%s_%s", project.Name, i) | ||||
| 			project.Secrets[i] = s | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func relocateExternalName(project *types.Project) error { | ||||
| 	for i, n := range project.Networks { | ||||
| 		if n.External.Name != "" { | ||||
| 			if n.Name != "" { | ||||
| 				return errors.Wrap(errdefs.ErrInvalid, "can't use both 'networks.external.name' (deprecated) and 'networks.name'") | ||||
| 			} | ||||
| 			n.Name = n.External.Name | ||||
| 		} | ||||
| 		project.Networks[i] = n | ||||
| 	} | ||||
|  | ||||
| 	for i, v := range project.Volumes { | ||||
| 		if v.External.Name != "" { | ||||
| 			if v.Name != "" { | ||||
| 				return errors.Wrap(errdefs.ErrInvalid, "can't use both 'volumes.external.name' (deprecated) and 'volumes.name'") | ||||
| 			} | ||||
| 			v.Name = v.External.Name | ||||
| 		} | ||||
| 		project.Volumes[i] = v | ||||
| 	} | ||||
|  | ||||
| 	for i, s := range project.Secrets { | ||||
| 		if s.External.Name != "" { | ||||
| 			if s.Name != "" { | ||||
| 				return errors.Wrap(errdefs.ErrInvalid, "can't use both 'secrets.external.name' (deprecated) and 'secrets.name'") | ||||
| 			} | ||||
| 			s.Name = s.External.Name | ||||
| 		} | ||||
| 		project.Secrets[i] = s | ||||
| 	} | ||||
|  | ||||
| 	for i, c := range project.Configs { | ||||
| 		if c.External.Name != "" { | ||||
| 			if c.Name != "" { | ||||
| 				return errors.Wrap(errdefs.ErrInvalid, "can't use both 'configs.external.name' (deprecated) and 'configs.name'") | ||||
| 			} | ||||
| 			c.Name = c.External.Name | ||||
| 		} | ||||
| 		project.Configs[i] = c | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func relocateLogOpt(s types.ServiceConfig) error { | ||||
| 	if len(s.LogOpt) != 0 { | ||||
| 		logrus.Warn("`log_opts` is deprecated. Use the `logging` element") | ||||
| 		if s.Logging == nil { | ||||
| 			s.Logging = &types.LoggingConfig{} | ||||
| 		} | ||||
| 		for k, v := range s.LogOpt { | ||||
| 			if _, ok := s.Logging.Options[k]; !ok { | ||||
| 				s.Logging.Options[k] = v | ||||
| 			} else { | ||||
| 				return errors.Wrap(errdefs.ErrInvalid, "can't use both 'log_opt' (deprecated) and 'logging.options'") | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func relocateLogDriver(s types.ServiceConfig) error { | ||||
| 	if s.LogDriver != "" { | ||||
| 		logrus.Warn("`log_driver` is deprecated. Use the `logging` element") | ||||
| 		if s.Logging == nil { | ||||
| 			s.Logging = &types.LoggingConfig{} | ||||
| 		} | ||||
| 		if s.Logging.Driver == "" { | ||||
| 			s.Logging.Driver = s.LogDriver | ||||
| 		} else { | ||||
| 			return errors.Wrap(errdefs.ErrInvalid, "can't use both 'log_driver' (deprecated) and 'logging.driver'") | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func relocateDockerfile(s types.ServiceConfig) error { | ||||
| 	if s.Dockerfile != "" { | ||||
| 		logrus.Warn("`dockerfile` is deprecated. Use the `build` element") | ||||
| 		if s.Build == nil { | ||||
| 			s.Build = &types.BuildConfig{} | ||||
| 		} | ||||
| 		if s.Dockerfile == "" { | ||||
| 			s.Build.Dockerfile = s.Dockerfile | ||||
| 		} else { | ||||
| 			return errors.Wrap(errdefs.ErrInvalid, "can't use both 'dockerfile' (deprecated) and 'build.dockerfile'") | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
							
								
								
									
										77
									
								
								vendor/github.com/compose-spec/compose-go/loader/validate.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										77
									
								
								vendor/github.com/compose-spec/compose-go/loader/validate.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,77 @@ | ||||
| /* | ||||
|    Copyright 2020 The Compose Specification Authors. | ||||
|  | ||||
|    Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|    you may not use this file except in compliance with the License. | ||||
|    You may obtain a copy of the License at | ||||
|  | ||||
|        http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
|    Unless required by applicable law or agreed to in writing, software | ||||
|    distributed under the License is distributed on an "AS IS" BASIS, | ||||
|    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|    See the License for the specific language governing permissions and | ||||
|    limitations under the License. | ||||
| */ | ||||
|  | ||||
| package loader | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"strings" | ||||
|  | ||||
| 	"github.com/compose-spec/compose-go/errdefs" | ||||
| 	"github.com/compose-spec/compose-go/types" | ||||
| 	"github.com/pkg/errors" | ||||
| ) | ||||
|  | ||||
| // checkConsistency validate a compose model is consistent | ||||
| func checkConsistency(project *types.Project) error { | ||||
| 	for _, s := range project.Services { | ||||
| 		if s.Build == nil && s.Image == "" { | ||||
| 			return errors.Wrapf(errdefs.ErrInvalid, "service %q has neither an image nor a build context specified", s.Name) | ||||
| 		} | ||||
|  | ||||
| 		for network := range s.Networks { | ||||
| 			if _, ok := project.Networks[network]; !ok { | ||||
| 				return errors.Wrap(errdefs.ErrInvalid, fmt.Sprintf("service %q refers to undefined network %s", s.Name, network)) | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		if strings.HasPrefix(s.NetworkMode, types.NetworkModeServicePrefix) { | ||||
| 			serviceName := s.NetworkMode[len(types.NetworkModeServicePrefix):] | ||||
| 			if _, err := project.GetServices(serviceName); err != nil { | ||||
| 				return fmt.Errorf("service %q not found for network_mode 'service:%s'", serviceName, serviceName) | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		if strings.HasPrefix(s.NetworkMode, types.NetworkModeContainerPrefix) { | ||||
| 			containerName := s.NetworkMode[len(types.NetworkModeContainerPrefix):] | ||||
| 			if _, err := project.GetByContainerName(containerName); err != nil { | ||||
| 				return fmt.Errorf("service with container_name %q not found for network_mode 'container:%s'", containerName, containerName) | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		for _, volume := range s.Volumes { | ||||
| 			switch volume.Type { | ||||
| 			case types.VolumeTypeVolume: | ||||
| 				if volume.Source != "" { // non anonymous volumes | ||||
| 					if _, ok := project.Volumes[volume.Source]; !ok { | ||||
| 						return errors.Wrap(errdefs.ErrInvalid, fmt.Sprintf("service %q refers to undefined volume %s", s.Name, volume.Source)) | ||||
| 					} | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 		for _, secret := range s.Secrets { | ||||
| 			if _, ok := project.Secrets[secret.Source]; !ok { | ||||
| 				return errors.Wrap(errdefs.ErrInvalid, fmt.Sprintf("service %q refers to undefined secret %s", s.Name, secret.Source)) | ||||
| 			} | ||||
| 		} | ||||
| 		for _, config := range s.Configs { | ||||
| 			if _, ok := project.Configs[config.Source]; !ok { | ||||
| 				return errors.Wrap(errdefs.ErrInvalid, fmt.Sprintf("service %q refers to undefined config %s", s.Name, config.Source)) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
| @@ -1,3 +1,19 @@ | ||||
| /* | ||||
|    Copyright 2020 The Compose Specification Authors. | ||||
| 
 | ||||
|    Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|    you may not use this file except in compliance with the License. | ||||
|    You may obtain a copy of the License at | ||||
| 
 | ||||
|        http://www.apache.org/licenses/LICENSE-2.0 | ||||
| 
 | ||||
|    Unless required by applicable law or agreed to in writing, software | ||||
|    distributed under the License is distributed on an "AS IS" BASIS, | ||||
|    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|    See the License for the specific language governing permissions and | ||||
|    limitations under the License. | ||||
| */ | ||||
| 
 | ||||
| package loader | ||||
| 
 | ||||
| import ( | ||||
| @@ -5,8 +21,7 @@ import ( | ||||
| 	"unicode" | ||||
| 	"unicode/utf8" | ||||
| 
 | ||||
| 	"github.com/docker/cli/cli/compose/types" | ||||
| 	"github.com/docker/docker/api/types/mount" | ||||
| 	"github.com/compose-spec/compose-go/types" | ||||
| 	"github.com/pkg/errors" | ||||
| ) | ||||
| 
 | ||||
| @@ -21,7 +36,7 @@ func ParseVolume(spec string) (types.ServiceVolumeConfig, error) { | ||||
| 		return volume, errors.New("invalid empty volume spec") | ||||
| 	case 1, 2: | ||||
| 		volume.Target = spec | ||||
| 		volume.Type = string(mount.TypeVolume) | ||||
| 		volume.Type = string(types.VolumeTypeVolume) | ||||
| 		return volume, nil | ||||
| 	} | ||||
| 
 | ||||
| @@ -85,9 +100,18 @@ func populateFieldFromBuffer(char rune, buffer []rune, volume *types.ServiceVolu | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
// Propagations lists the valid bind-mount propagation modes that may
// appear as an option in a volume short-syntax spec (used by the bind
// option check below). Exported for callers that need the same set.
var Propagations = []string{
	types.PropagationRPrivate,
	types.PropagationPrivate,
	types.PropagationRShared,
	types.PropagationShared,
	types.PropagationRSlave,
	types.PropagationSlave,
}
| 
 | ||||
| func isBindOption(option string) bool { | ||||
| 	for _, propagation := range mount.Propagations { | ||||
| 		if mount.Propagation(option) == propagation { | ||||
| 	for _, propagation := range Propagations { | ||||
| 		if option == propagation { | ||||
| 			return true | ||||
| 		} | ||||
| 	} | ||||
| @@ -95,25 +119,30 @@ func isBindOption(option string) bool { | ||||
| } | ||||
| 
 | ||||
| func populateType(volume *types.ServiceVolumeConfig) { | ||||
| 	switch { | ||||
| 	// Anonymous volume | ||||
| 	case volume.Source == "": | ||||
| 		volume.Type = string(mount.TypeVolume) | ||||
| 	case isFilePath(volume.Source): | ||||
| 		volume.Type = string(mount.TypeBind) | ||||
| 	default: | ||||
| 		volume.Type = string(mount.TypeVolume) | ||||
| 	if isFilePath(volume.Source) { | ||||
| 		volume.Type = types.VolumeTypeBind | ||||
| 		if volume.Bind == nil { | ||||
| 			volume.Bind = &types.ServiceVolumeBind{} | ||||
| 		} | ||||
| 		// For backward compatibility with docker-compose legacy, using short notation involves | ||||
| 		// bind will create missing host path | ||||
| 		volume.Bind.CreateHostPath = true | ||||
| 	} else { | ||||
| 		volume.Type = types.VolumeTypeVolume | ||||
| 		if volume.Volume == nil { | ||||
| 			volume.Volume = &types.ServiceVolumeVolume{} | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func isFilePath(source string) bool { | ||||
| 	if source == "" { | ||||
| 		return false | ||||
| 	} | ||||
| 	switch source[0] { | ||||
| 	case '.', '/', '~': | ||||
| 		return true | ||||
| 	} | ||||
| 	if len([]rune(source)) == 1 { | ||||
| 		return false | ||||
| 	} | ||||
| 
 | ||||
| 	// windows named pipes | ||||
| 	if strings.HasPrefix(source, `\\`) { | ||||
| @@ -1,3 +1,19 @@ | ||||
| /* | ||||
|    Copyright 2020 The Compose Specification Authors. | ||||
| 
 | ||||
|    Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|    you may not use this file except in compliance with the License. | ||||
|    You may obtain a copy of the License at | ||||
| 
 | ||||
|        http://www.apache.org/licenses/LICENSE-2.0 | ||||
| 
 | ||||
|    Unless required by applicable law or agreed to in writing, software | ||||
|    distributed under the License is distributed on an "AS IS" BASIS, | ||||
|    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|    See the License for the specific language governing permissions and | ||||
|    limitations under the License. | ||||
| */ | ||||
| 
 | ||||
| package loader | ||||
| 
 | ||||
| // Copyright 2010 The Go Authors. All rights reserved. | ||||
							
								
								
									
										813
									
								
								vendor/github.com/compose-spec/compose-go/schema/compose-spec.json
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										813
									
								
								vendor/github.com/compose-spec/compose-go/schema/compose-spec.json
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,813 @@ | ||||
| { | ||||
|   "$schema": "http://json-schema.org/draft/2019-09/schema#", | ||||
|   "id": "compose_spec.json", | ||||
|   "type": "object", | ||||
|   "title": "Compose Specification", | ||||
|   "description": "The Compose file is a YAML file defining a multi-container application.", | ||||
|  | ||||
|   "properties": { | ||||
|     "version": { | ||||
|       "type": "string", | ||||
|       "description": "Version of the Compose specification used. Tools not implementing required version MUST reject the configuration file." | ||||
|     }, | ||||
|  | ||||
|     "services": { | ||||
|       "id": "#/properties/services", | ||||
|       "type": "object", | ||||
|       "patternProperties": { | ||||
|         "^[a-zA-Z0-9._-]+$": { | ||||
|           "$ref": "#/definitions/service" | ||||
|         } | ||||
|       }, | ||||
|       "additionalProperties": false | ||||
|     }, | ||||
|  | ||||
|     "networks": { | ||||
|       "id": "#/properties/networks", | ||||
|       "type": "object", | ||||
|       "patternProperties": { | ||||
|         "^[a-zA-Z0-9._-]+$": { | ||||
|           "$ref": "#/definitions/network" | ||||
|         } | ||||
|       } | ||||
|     }, | ||||
|  | ||||
|     "volumes": { | ||||
|       "id": "#/properties/volumes", | ||||
|       "type": "object", | ||||
|       "patternProperties": { | ||||
|         "^[a-zA-Z0-9._-]+$": { | ||||
|           "$ref": "#/definitions/volume" | ||||
|         } | ||||
|       }, | ||||
|       "additionalProperties": false | ||||
|     }, | ||||
|  | ||||
|     "secrets": { | ||||
|       "id": "#/properties/secrets", | ||||
|       "type": "object", | ||||
|       "patternProperties": { | ||||
|         "^[a-zA-Z0-9._-]+$": { | ||||
|           "$ref": "#/definitions/secret" | ||||
|         } | ||||
|       }, | ||||
|       "additionalProperties": false | ||||
|     }, | ||||
|  | ||||
|     "configs": { | ||||
|       "id": "#/properties/configs", | ||||
|       "type": "object", | ||||
|       "patternProperties": { | ||||
|         "^[a-zA-Z0-9._-]+$": { | ||||
|           "$ref": "#/definitions/config" | ||||
|         } | ||||
|       }, | ||||
|       "additionalProperties": false | ||||
|     } | ||||
|   }, | ||||
|  | ||||
|   "patternProperties": {"^x-": {}}, | ||||
|   "additionalProperties": false, | ||||
|  | ||||
|   "definitions": { | ||||
|  | ||||
|     "service": { | ||||
|       "id": "#/definitions/service", | ||||
|       "type": "object", | ||||
|  | ||||
|       "properties": { | ||||
|         "deploy": {"$ref": "#/definitions/deployment"}, | ||||
|         "build": { | ||||
|           "oneOf": [ | ||||
|             {"type": "string"}, | ||||
|             { | ||||
|               "type": "object", | ||||
|               "properties": { | ||||
|                 "context": {"type": "string"}, | ||||
|                 "dockerfile": {"type": "string"}, | ||||
|                 "args": {"$ref": "#/definitions/list_or_dict"}, | ||||
|                 "labels": {"$ref": "#/definitions/list_or_dict"}, | ||||
|                 "cache_from": {"type": "array", "items": {"type": "string"}}, | ||||
|                 "network": {"type": "string"}, | ||||
|                 "target": {"type": "string"}, | ||||
|                 "shm_size": {"type": ["integer", "string"]}, | ||||
|                 "extra_hosts": {"$ref": "#/definitions/list_or_dict"}, | ||||
|                 "isolation": {"type": "string"} | ||||
|               }, | ||||
|               "additionalProperties": false, | ||||
|               "patternProperties": {"^x-": {}} | ||||
|             } | ||||
|           ] | ||||
|         }, | ||||
|         "blkio_config": { | ||||
|           "type": "object", | ||||
|           "properties": { | ||||
|             "device_read_bps": { | ||||
|               "type": "array", | ||||
|               "items": {"$ref": "#/definitions/blkio_limit"} | ||||
|             }, | ||||
|             "device_read_iops": { | ||||
|               "type": "array", | ||||
|               "items": {"$ref": "#/definitions/blkio_limit"} | ||||
|             }, | ||||
|             "device_write_bps": { | ||||
|               "type": "array", | ||||
|               "items": {"$ref": "#/definitions/blkio_limit"} | ||||
|             }, | ||||
|             "device_write_iops": { | ||||
|               "type": "array", | ||||
|               "items": {"$ref": "#/definitions/blkio_limit"} | ||||
|             }, | ||||
|             "weight": {"type": "integer"}, | ||||
|             "weight_device": { | ||||
|               "type": "array", | ||||
|               "items": {"$ref": "#/definitions/blkio_weight"} | ||||
|             } | ||||
|           }, | ||||
|           "additionalProperties": false | ||||
|         }, | ||||
|         "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, | ||||
|         "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, | ||||
|         "cgroup_parent": {"type": "string"}, | ||||
|         "command": { | ||||
|           "oneOf": [ | ||||
|             {"type": "string"}, | ||||
|             {"type": "array", "items": {"type": "string"}} | ||||
|           ] | ||||
|         }, | ||||
|         "configs": { | ||||
|           "type": "array", | ||||
|           "items": { | ||||
|             "oneOf": [ | ||||
|               {"type": "string"}, | ||||
|               { | ||||
|                 "type": "object", | ||||
|                 "properties": { | ||||
|                   "source": {"type": "string"}, | ||||
|                   "target": {"type": "string"}, | ||||
|                   "uid": {"type": "string"}, | ||||
|                   "gid": {"type": "string"}, | ||||
|                   "mode": {"type": "number"} | ||||
|                 }, | ||||
|                 "additionalProperties": false, | ||||
|                 "patternProperties": {"^x-": {}} | ||||
|               } | ||||
|             ] | ||||
|           } | ||||
|         }, | ||||
|         "container_name": {"type": "string"}, | ||||
|         "cpu_count": {"type": "integer", "minimum": 0}, | ||||
|         "cpu_percent": {"type": "integer", "minimum": 0, "maximum": 100}, | ||||
|         "cpu_shares": {"type": ["number", "string"]}, | ||||
|         "cpu_quota": {"type": ["number", "string"]}, | ||||
|         "cpu_period": {"type": ["number", "string"]}, | ||||
|         "cpu_rt_period": {"type": ["number", "string"]}, | ||||
|         "cpu_rt_runtime": {"type": ["number", "string"]}, | ||||
|         "cpus": {"type": ["number", "string"]}, | ||||
|         "cpuset": {"type": "string"}, | ||||
|         "credential_spec": { | ||||
|           "type": "object", | ||||
|           "properties": { | ||||
|             "config": {"type": "string"}, | ||||
|             "file": {"type": "string"}, | ||||
|             "registry": {"type": "string"} | ||||
|           }, | ||||
|           "additionalProperties": false, | ||||
|           "patternProperties": {"^x-": {}} | ||||
|         }, | ||||
|         "depends_on": { | ||||
|           "oneOf": [ | ||||
|             {"$ref": "#/definitions/list_of_strings"}, | ||||
|             { | ||||
|               "type": "object", | ||||
|               "additionalProperties": false, | ||||
|               "patternProperties": { | ||||
|                 "^[a-zA-Z0-9._-]+$": { | ||||
|                   "type": "object", | ||||
|                   "additionalProperties": false, | ||||
|                   "properties": { | ||||
|                     "condition": { | ||||
|                       "type": "string", | ||||
|                       "enum": ["service_started", "service_healthy", "service_completed_successfully"] | ||||
|                     } | ||||
|                   }, | ||||
|                   "required": ["condition"] | ||||
|                 } | ||||
|               } | ||||
|             } | ||||
|           ] | ||||
|         }, | ||||
|         "device_cgroup_rules": {"$ref": "#/definitions/list_of_strings"}, | ||||
|         "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, | ||||
|         "dns": {"$ref": "#/definitions/string_or_list"}, | ||||
|         "dns_opt": {"type": "array","items": {"type": "string"}, "uniqueItems": true}, | ||||
|         "dns_search": {"$ref": "#/definitions/string_or_list"}, | ||||
|         "domainname": {"type": "string"}, | ||||
|         "entrypoint": { | ||||
|           "oneOf": [ | ||||
|             {"type": "string"}, | ||||
|             {"type": "array", "items": {"type": "string"}} | ||||
|           ] | ||||
|         }, | ||||
|         "env_file": {"$ref": "#/definitions/string_or_list"}, | ||||
|         "environment": {"$ref": "#/definitions/list_or_dict"}, | ||||
|  | ||||
|         "expose": { | ||||
|           "type": "array", | ||||
|           "items": { | ||||
|             "type": ["string", "number"], | ||||
|             "format": "expose" | ||||
|           }, | ||||
|           "uniqueItems": true | ||||
|         }, | ||||
|         "extends": { | ||||
|           "oneOf": [ | ||||
|             {"type": "string"}, | ||||
|             { | ||||
|               "type": "object", | ||||
|  | ||||
|               "properties": { | ||||
|                 "service": {"type": "string"}, | ||||
|                 "file": {"type": "string"} | ||||
|               }, | ||||
|               "required": ["service"], | ||||
|               "additionalProperties": false | ||||
|             } | ||||
|           ] | ||||
|         }, | ||||
|         "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, | ||||
|         "extra_hosts": {"$ref": "#/definitions/list_or_dict"}, | ||||
|         "group_add": { | ||||
|           "type": "array", | ||||
|           "items": { | ||||
|             "type": ["string", "number"] | ||||
|           }, | ||||
|           "uniqueItems": true | ||||
|         }, | ||||
|         "healthcheck": {"$ref": "#/definitions/healthcheck"}, | ||||
|         "hostname": {"type": "string"}, | ||||
|         "image": {"type": "string"}, | ||||
|         "init": {"type": "boolean"}, | ||||
|         "ipc": {"type": "string"}, | ||||
|         "isolation": {"type": "string"}, | ||||
|         "labels": {"$ref": "#/definitions/list_or_dict"}, | ||||
|         "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, | ||||
|         "logging": { | ||||
|           "type": "object", | ||||
|  | ||||
|           "properties": { | ||||
|             "driver": {"type": "string"}, | ||||
|             "options": { | ||||
|               "type": "object", | ||||
|               "patternProperties": { | ||||
|                 "^.+$": {"type": ["string", "number", "null"]} | ||||
|               } | ||||
|             } | ||||
|           }, | ||||
|           "additionalProperties": false, | ||||
|           "patternProperties": {"^x-": {}} | ||||
|         }, | ||||
|         "mac_address": {"type": "string"}, | ||||
|         "mem_limit": {"type": ["number", "string"]}, | ||||
|         "mem_reservation": {"type": ["string", "integer"]}, | ||||
|         "mem_swappiness": {"type": "integer"}, | ||||
|         "memswap_limit": {"type": ["number", "string"]}, | ||||
|         "network_mode": {"type": "string"}, | ||||
|         "networks": { | ||||
|           "oneOf": [ | ||||
|             {"$ref": "#/definitions/list_of_strings"}, | ||||
|             { | ||||
|               "type": "object", | ||||
|               "patternProperties": { | ||||
|                 "^[a-zA-Z0-9._-]+$": { | ||||
|                   "oneOf": [ | ||||
|                     { | ||||
|                       "type": "object", | ||||
|                       "properties": { | ||||
|                         "aliases": {"$ref": "#/definitions/list_of_strings"}, | ||||
|                         "ipv4_address": {"type": "string"}, | ||||
|                         "ipv6_address": {"type": "string"}, | ||||
|                         "link_local_ips": {"$ref": "#/definitions/list_of_strings"}, | ||||
|                         "priority": {"type": "number"} | ||||
|                       }, | ||||
|                       "additionalProperties": false, | ||||
|                       "patternProperties": {"^x-": {}} | ||||
|                     }, | ||||
|                     {"type": "null"} | ||||
|                   ] | ||||
|                 } | ||||
|               }, | ||||
|               "additionalProperties": false | ||||
|             } | ||||
|           ] | ||||
|         }, | ||||
|         "oom_kill_disable": {"type": "boolean"}, | ||||
|         "oom_score_adj": {"type": "integer", "minimum": -1000, "maximum": 1000}, | ||||
|         "pid": {"type": ["string", "null"]}, | ||||
|         "pids_limit": {"type": ["number", "string"]}, | ||||
|         "platform": {"type": "string"}, | ||||
|         "ports": { | ||||
|           "type": "array", | ||||
|           "items": { | ||||
|             "oneOf": [ | ||||
|               {"type": "number", "format": "ports"}, | ||||
|               {"type": "string", "format": "ports"}, | ||||
|               { | ||||
|                 "type": "object", | ||||
|                 "properties": { | ||||
|                   "mode": {"type": "string"}, | ||||
|                   "host_ip": {"type": "string"}, | ||||
|                   "target": {"type": "integer"}, | ||||
|                   "published": {"type": "integer"}, | ||||
|                   "protocol": {"type": "string"} | ||||
|                 }, | ||||
|                 "additionalProperties": false, | ||||
|                 "patternProperties": {"^x-": {}} | ||||
|               } | ||||
|             ] | ||||
|           }, | ||||
|           "uniqueItems": true | ||||
|         }, | ||||
|         "privileged": {"type": "boolean"}, | ||||
|         "profiles": {"$ref": "#/definitions/list_of_strings"}, | ||||
|         "pull_policy": {"type": "string", "enum": [ | ||||
|           "always", "never", "if_not_present", "build" | ||||
|         ]}, | ||||
|         "read_only": {"type": "boolean"}, | ||||
|         "restart": {"type": "string"}, | ||||
|         "runtime": { | ||||
|           "type": "string" | ||||
|         }, | ||||
|         "scale": { | ||||
|           "type": "integer" | ||||
|         }, | ||||
|         "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, | ||||
|         "shm_size": {"type": ["number", "string"]}, | ||||
|         "secrets": { | ||||
|           "type": "array", | ||||
|           "items": { | ||||
|             "oneOf": [ | ||||
|               {"type": "string"}, | ||||
|               { | ||||
|                 "type": "object", | ||||
|                 "properties": { | ||||
|                   "source": {"type": "string"}, | ||||
|                   "target": {"type": "string"}, | ||||
|                   "uid": {"type": "string"}, | ||||
|                   "gid": {"type": "string"}, | ||||
|                   "mode": {"type": "number"} | ||||
|                 }, | ||||
|                 "additionalProperties": false, | ||||
|                 "patternProperties": {"^x-": {}} | ||||
|               } | ||||
|             ] | ||||
|           } | ||||
|         }, | ||||
|         "sysctls": {"$ref": "#/definitions/list_or_dict"}, | ||||
|         "stdin_open": {"type": "boolean"}, | ||||
|         "stop_grace_period": {"type": "string", "format": "duration"}, | ||||
|         "stop_signal": {"type": "string"}, | ||||
|         "tmpfs": {"$ref": "#/definitions/string_or_list"}, | ||||
|         "tty": {"type": "boolean"}, | ||||
|         "ulimits": { | ||||
|           "type": "object", | ||||
|           "patternProperties": { | ||||
|             "^[a-z]+$": { | ||||
|               "oneOf": [ | ||||
|                 {"type": "integer"}, | ||||
|                 { | ||||
|                   "type": "object", | ||||
|                   "properties": { | ||||
|                     "hard": {"type": "integer"}, | ||||
|                     "soft": {"type": "integer"} | ||||
|                   }, | ||||
|                   "required": ["soft", "hard"], | ||||
|                   "additionalProperties": false, | ||||
|                   "patternProperties": {"^x-": {}} | ||||
|                 } | ||||
|               ] | ||||
|             } | ||||
|           } | ||||
|         }, | ||||
|         "user": {"type": "string"}, | ||||
|         "userns_mode": {"type": "string"}, | ||||
|         "volumes": { | ||||
|           "type": "array", | ||||
|           "items": { | ||||
|             "oneOf": [ | ||||
|               {"type": "string"}, | ||||
|               { | ||||
|                 "type": "object", | ||||
|                 "required": ["type"], | ||||
|                 "properties": { | ||||
|                   "type": {"type": "string"}, | ||||
|                   "source": {"type": "string"}, | ||||
|                   "target": {"type": "string"}, | ||||
|                   "read_only": {"type": "boolean"}, | ||||
|                   "consistency": {"type": "string"}, | ||||
|                   "bind": { | ||||
|                     "type": "object", | ||||
|                     "properties": { | ||||
|                       "propagation": {"type": "string"}, | ||||
|                       "create_host_path": {"type": "boolean"} | ||||
|                     }, | ||||
|                     "additionalProperties": false, | ||||
|                     "patternProperties": {"^x-": {}} | ||||
|                   }, | ||||
|                   "volume": { | ||||
|                     "type": "object", | ||||
|                     "properties": { | ||||
|                       "nocopy": {"type": "boolean"} | ||||
|                     }, | ||||
|                     "additionalProperties": false, | ||||
|                     "patternProperties": {"^x-": {}} | ||||
|                   }, | ||||
|                   "tmpfs": { | ||||
|                     "type": "object", | ||||
|                     "properties": { | ||||
|                       "size": { | ||||
|                         "type": "integer", | ||||
|                         "minimum": 0 | ||||
|                       } | ||||
|                     }, | ||||
|                     "additionalProperties": false, | ||||
|                     "patternProperties": {"^x-": {}} | ||||
|                   } | ||||
|                 }, | ||||
|                 "additionalProperties": false, | ||||
|                 "patternProperties": {"^x-": {}} | ||||
|               } | ||||
|             ] | ||||
|           }, | ||||
|           "uniqueItems": true | ||||
|         }, | ||||
|         "volumes_from": { | ||||
|           "type": "array", | ||||
|           "items": {"type": "string"}, | ||||
|           "uniqueItems": true | ||||
|         }, | ||||
|         "working_dir": {"type": "string"} | ||||
|       }, | ||||
|       "patternProperties": {"^x-": {}}, | ||||
|       "additionalProperties": false | ||||
|     }, | ||||
|  | ||||
|     "healthcheck": { | ||||
|       "id": "#/definitions/healthcheck", | ||||
|       "type": "object", | ||||
|       "properties": { | ||||
|         "disable": {"type": "boolean"}, | ||||
|         "interval": {"type": "string", "format": "duration"}, | ||||
|         "retries": {"type": "number"}, | ||||
|         "test": { | ||||
|           "oneOf": [ | ||||
|             {"type": "string"}, | ||||
|             {"type": "array", "items": {"type": "string"}} | ||||
|           ] | ||||
|         }, | ||||
|         "timeout": {"type": "string", "format": "duration"}, | ||||
|         "start_period": {"type": "string", "format": "duration"} | ||||
|       }, | ||||
|       "additionalProperties": false, | ||||
|       "patternProperties": {"^x-": {}} | ||||
|     }, | ||||
|     "deployment": { | ||||
|       "id": "#/definitions/deployment", | ||||
|       "type": ["object", "null"], | ||||
|       "properties": { | ||||
|         "mode": {"type": "string"}, | ||||
|         "endpoint_mode": {"type": "string"}, | ||||
|         "replicas": {"type": "integer"}, | ||||
|         "labels": {"$ref": "#/definitions/list_or_dict"}, | ||||
|         "rollback_config": { | ||||
|           "type": "object", | ||||
|           "properties": { | ||||
|             "parallelism": {"type": "integer"}, | ||||
|             "delay": {"type": "string", "format": "duration"}, | ||||
|             "failure_action": {"type": "string"}, | ||||
|             "monitor": {"type": "string", "format": "duration"}, | ||||
|             "max_failure_ratio": {"type": "number"}, | ||||
|             "order": {"type": "string", "enum": [ | ||||
|               "start-first", "stop-first" | ||||
|             ]} | ||||
|           }, | ||||
|           "additionalProperties": false, | ||||
|           "patternProperties": {"^x-": {}} | ||||
|         }, | ||||
|         "update_config": { | ||||
|           "type": "object", | ||||
|           "properties": { | ||||
|             "parallelism": {"type": "integer"}, | ||||
|             "delay": {"type": "string", "format": "duration"}, | ||||
|             "failure_action": {"type": "string"}, | ||||
|             "monitor": {"type": "string", "format": "duration"}, | ||||
|             "max_failure_ratio": {"type": "number"}, | ||||
|             "order": {"type": "string", "enum": [ | ||||
|               "start-first", "stop-first" | ||||
|             ]} | ||||
|           }, | ||||
|           "additionalProperties": false, | ||||
|           "patternProperties": {"^x-": {}} | ||||
|         }, | ||||
|         "resources": { | ||||
|           "type": "object", | ||||
|           "properties": { | ||||
|             "limits": { | ||||
|               "type": "object", | ||||
|               "properties": { | ||||
|                 "cpus": {"type": ["number", "string"]}, | ||||
|                 "memory": {"type": "string"} | ||||
|               }, | ||||
|               "additionalProperties": false, | ||||
|               "patternProperties": {"^x-": {}} | ||||
|             }, | ||||
|             "reservations": { | ||||
|               "type": "object", | ||||
|               "properties": { | ||||
|                 "cpus": {"type": ["number", "string"]}, | ||||
|                 "memory": {"type": "string"}, | ||||
|                 "generic_resources": {"$ref": "#/definitions/generic_resources"}, | ||||
|                 "devices": {"$ref": "#/definitions/devices"} | ||||
|               }, | ||||
|               "additionalProperties": false, | ||||
|               "patternProperties": {"^x-": {}} | ||||
|             } | ||||
|           }, | ||||
|           "additionalProperties": false, | ||||
|           "patternProperties": {"^x-": {}} | ||||
|         }, | ||||
|         "restart_policy": { | ||||
|           "type": "object", | ||||
|           "properties": { | ||||
|             "condition": {"type": "string"}, | ||||
|             "delay": {"type": "string", "format": "duration"}, | ||||
|             "max_attempts": {"type": "integer"}, | ||||
|             "window": {"type": "string", "format": "duration"} | ||||
|           }, | ||||
|           "additionalProperties": false, | ||||
|           "patternProperties": {"^x-": {}} | ||||
|         }, | ||||
|         "placement": { | ||||
|           "type": "object", | ||||
|           "properties": { | ||||
|             "constraints": {"type": "array", "items": {"type": "string"}}, | ||||
|             "preferences": { | ||||
|               "type": "array", | ||||
|               "items": { | ||||
|                 "type": "object", | ||||
|                 "properties": { | ||||
|                   "spread": {"type": "string"} | ||||
|                 }, | ||||
|                 "additionalProperties": false, | ||||
|                 "patternProperties": {"^x-": {}} | ||||
|               } | ||||
|             }, | ||||
|             "max_replicas_per_node": {"type": "integer"} | ||||
|           }, | ||||
|           "additionalProperties": false, | ||||
|           "patternProperties": {"^x-": {}} | ||||
|         } | ||||
|       }, | ||||
|       "additionalProperties": false, | ||||
|       "patternProperties": {"^x-": {}} | ||||
|     }, | ||||
|  | ||||
|     "generic_resources": { | ||||
|       "id": "#/definitions/generic_resources", | ||||
|       "type": "array", | ||||
|       "items": { | ||||
|         "type": "object", | ||||
|         "properties": { | ||||
|           "discrete_resource_spec": { | ||||
|             "type": "object", | ||||
|             "properties": { | ||||
|               "kind": {"type": "string"}, | ||||
|               "value": {"type": "number"} | ||||
|             }, | ||||
|             "additionalProperties": false, | ||||
|             "patternProperties": {"^x-": {}} | ||||
|           } | ||||
|         }, | ||||
|         "additionalProperties": false, | ||||
|         "patternProperties": {"^x-": {}} | ||||
|       } | ||||
|     }, | ||||
|  | ||||
|     "devices": { | ||||
|       "id": "#/definitions/devices", | ||||
|       "type": "array", | ||||
|       "items": { | ||||
|         "type": "object", | ||||
|         "properties": { | ||||
|           "capabilities": {"$ref": "#/definitions/list_of_strings"}, | ||||
|           "count": {"type": ["string", "integer"]}, | ||||
|           "device_ids": {"$ref": "#/definitions/list_of_strings"}, | ||||
|           "driver":{"type": "string"}, | ||||
|           "options":{"$ref": "#/definitions/list_or_dict"} | ||||
|         }, | ||||
|         "additionalProperties": false, | ||||
|         "patternProperties": {"^x-": {}} | ||||
|       } | ||||
|     }, | ||||
|  | ||||
|     "network": { | ||||
|       "id": "#/definitions/network", | ||||
|       "type": ["object", "null"], | ||||
|       "properties": { | ||||
|         "name": {"type": "string"}, | ||||
|         "driver": {"type": "string"}, | ||||
|         "driver_opts": { | ||||
|           "type": "object", | ||||
|           "patternProperties": { | ||||
|             "^.+$": {"type": ["string", "number"]} | ||||
|           } | ||||
|         }, | ||||
|         "ipam": { | ||||
|           "type": "object", | ||||
|           "properties": { | ||||
|             "driver": {"type": "string"}, | ||||
|             "config": { | ||||
|               "type": "array", | ||||
|               "items": { | ||||
|                 "type": "object", | ||||
|                 "properties": { | ||||
|                   "subnet": {"type": "string", "format": "subnet_ip_address"}, | ||||
|                   "ip_range": {"type": "string"}, | ||||
|                   "gateway": {"type": "string"}, | ||||
|                   "aux_addresses": { | ||||
|                     "type": "object", | ||||
|                     "additionalProperties": false, | ||||
|                     "patternProperties": {"^.+$": {"type": "string"}} | ||||
|                   } | ||||
|                 }, | ||||
|                 "additionalProperties": false, | ||||
|                 "patternProperties": {"^x-": {}} | ||||
|               } | ||||
|             }, | ||||
|             "options": { | ||||
|               "type": "object", | ||||
|               "additionalProperties": false, | ||||
|               "patternProperties": {"^.+$": {"type": "string"}} | ||||
|             } | ||||
|           }, | ||||
|           "additionalProperties": false, | ||||
|           "patternProperties": {"^x-": {}} | ||||
|         }, | ||||
|         "external": { | ||||
|           "type": ["boolean", "object"], | ||||
|           "properties": { | ||||
|             "name": { | ||||
|               "deprecated": true, | ||||
|               "type": "string" | ||||
|             } | ||||
|           }, | ||||
|           "additionalProperties": false, | ||||
|           "patternProperties": {"^x-": {}} | ||||
|         }, | ||||
|         "internal": {"type": "boolean"}, | ||||
|         "enable_ipv6": {"type": "boolean"}, | ||||
|         "attachable": {"type": "boolean"}, | ||||
|         "labels": {"$ref": "#/definitions/list_or_dict"} | ||||
|       }, | ||||
|       "additionalProperties": false, | ||||
|       "patternProperties": {"^x-": {}} | ||||
|     }, | ||||
|  | ||||
|     "volume": { | ||||
|       "id": "#/definitions/volume", | ||||
|       "type": ["object", "null"], | ||||
|       "properties": { | ||||
|         "name": {"type": "string"}, | ||||
|         "driver": {"type": "string"}, | ||||
|         "driver_opts": { | ||||
|           "type": "object", | ||||
|           "patternProperties": { | ||||
|             "^.+$": {"type": ["string", "number"]} | ||||
|           } | ||||
|         }, | ||||
|         "external": { | ||||
|           "type": ["boolean", "object"], | ||||
|           "properties": { | ||||
|             "name": { | ||||
|               "deprecated": true, | ||||
|               "type": "string" | ||||
|             } | ||||
|           }, | ||||
|           "additionalProperties": false, | ||||
|           "patternProperties": {"^x-": {}} | ||||
|         }, | ||||
|         "labels": {"$ref": "#/definitions/list_or_dict"} | ||||
|       }, | ||||
|       "additionalProperties": false, | ||||
|       "patternProperties": {"^x-": {}} | ||||
|     }, | ||||
|  | ||||
|     "secret": { | ||||
|       "id": "#/definitions/secret", | ||||
|       "type": "object", | ||||
|       "properties": { | ||||
|         "name": {"type": "string"}, | ||||
|         "file": {"type": "string"}, | ||||
|         "external": { | ||||
|           "type": ["boolean", "object"], | ||||
|           "properties": { | ||||
|             "name": {"type": "string"} | ||||
|           } | ||||
|         }, | ||||
|         "labels": {"$ref": "#/definitions/list_or_dict"}, | ||||
|         "driver": {"type": "string"}, | ||||
|         "driver_opts": { | ||||
|           "type": "object", | ||||
|           "patternProperties": { | ||||
|             "^.+$": {"type": ["string", "number"]} | ||||
|           } | ||||
|         }, | ||||
|         "template_driver": {"type": "string"} | ||||
|       }, | ||||
|       "additionalProperties": false, | ||||
|       "patternProperties": {"^x-": {}} | ||||
|     }, | ||||
|  | ||||
|     "config": { | ||||
|       "id": "#/definitions/config", | ||||
|       "type": "object", | ||||
|       "properties": { | ||||
|         "name": {"type": "string"}, | ||||
|         "file": {"type": "string"}, | ||||
|         "external": { | ||||
|           "type": ["boolean", "object"], | ||||
|           "properties": { | ||||
|             "name": { | ||||
|               "deprecated": true, | ||||
|               "type": "string" | ||||
|             } | ||||
|           } | ||||
|         }, | ||||
|         "labels": {"$ref": "#/definitions/list_or_dict"}, | ||||
|         "template_driver": {"type": "string"} | ||||
|       }, | ||||
|       "additionalProperties": false, | ||||
|       "patternProperties": {"^x-": {}} | ||||
|     }, | ||||
|  | ||||
|     "string_or_list": { | ||||
|       "oneOf": [ | ||||
|         {"type": "string"}, | ||||
|         {"$ref": "#/definitions/list_of_strings"} | ||||
|       ] | ||||
|     }, | ||||
|  | ||||
|     "list_of_strings": { | ||||
|       "type": "array", | ||||
|       "items": {"type": "string"}, | ||||
|       "uniqueItems": true | ||||
|     }, | ||||
|  | ||||
|     "list_or_dict": { | ||||
|       "oneOf": [ | ||||
|         { | ||||
|           "type": "object", | ||||
|           "patternProperties": { | ||||
|             ".+": { | ||||
|               "type": ["string", "number", "null"] | ||||
|             } | ||||
|           }, | ||||
|           "additionalProperties": false | ||||
|         }, | ||||
|         {"type": "array", "items": {"type": "string"}, "uniqueItems": true} | ||||
|       ] | ||||
|     }, | ||||
|  | ||||
|     "blkio_limit": { | ||||
|       "type": "object", | ||||
|       "properties": { | ||||
|         "path": {"type": "string"}, | ||||
|         "rate": {"type": ["integer", "string"]} | ||||
|       }, | ||||
|       "additionalProperties": false | ||||
|     }, | ||||
|     "blkio_weight": { | ||||
|       "type": "object", | ||||
|       "properties": { | ||||
|         "path": {"type": "string"}, | ||||
|         "weight": {"type": "integer"} | ||||
|       }, | ||||
|       "additionalProperties": false | ||||
|     }, | ||||
|  | ||||
|     "constraints": { | ||||
|       "service": { | ||||
|         "id": "#/definitions/constraints/service", | ||||
|         "anyOf": [ | ||||
|           {"required": ["build"]}, | ||||
|           {"required": ["image"]} | ||||
|         ], | ||||
|         "properties": { | ||||
|           "build": { | ||||
|             "required": ["context"] | ||||
|           } | ||||
|         } | ||||
|       } | ||||
|     } | ||||
|   } | ||||
| } | ||||
| @@ -1,14 +1,30 @@ | ||||
| package schema | ||||
| /* | ||||
|    Copyright 2020 The Compose Specification Authors. | ||||
| 
 | ||||
| //go:generate esc -o bindata.go -pkg schema -ignore .*\.go -private -modtime=1518458244 data | ||||
|    Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|    you may not use this file except in compliance with the License. | ||||
|    You may obtain a copy of the License at | ||||
| 
 | ||||
|        http://www.apache.org/licenses/LICENSE-2.0 | ||||
| 
 | ||||
|    Unless required by applicable law or agreed to in writing, software | ||||
|    distributed under the License is distributed on an "AS IS" BASIS, | ||||
|    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|    See the License for the specific language governing permissions and | ||||
|    limitations under the License. | ||||
| */ | ||||
| 
 | ||||
| package schema | ||||
| 
 | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"strings" | ||||
| 	"time" | ||||
| 
 | ||||
| 	"github.com/pkg/errors" | ||||
| 	"github.com/xeipuuv/gojsonschema" | ||||
| 
 | ||||
| 	// Enable support for embedded static resources | ||||
| 	_ "embed" | ||||
| ) | ||||
| 
 | ||||
| const ( | ||||
| @@ -40,32 +56,13 @@ func init() { | ||||
| 	gojsonschema.FormatCheckers.Add("duration", durationFormatChecker{}) | ||||
| } | ||||
| 
 | ||||
| // Version returns the version of the config, defaulting to version 1.0 | ||||
| func Version(config map[string]interface{}) string { | ||||
| 	version, ok := config[versionField] | ||||
| 	if !ok { | ||||
| 		return defaultVersion | ||||
| 	} | ||||
| 	return normalizeVersion(fmt.Sprintf("%v", version)) | ||||
| } | ||||
| 
 | ||||
| func normalizeVersion(version string) string { | ||||
| 	switch version { | ||||
| 	case "3": | ||||
| 		return "3.0" | ||||
| 	default: | ||||
| 		return version | ||||
| 	} | ||||
| } | ||||
| // Schema is the compose-spec JSON schema | ||||
| //go:embed compose-spec.json | ||||
| var Schema string | ||||
| 
 | ||||
| // Validate uses the jsonschema to validate the configuration | ||||
| func Validate(config map[string]interface{}, version string) error { | ||||
| 	schemaData, err := _escFSByte(false, fmt.Sprintf("/data/config_schema_v%s.json", version)) | ||||
| 	if err != nil { | ||||
| 		return errors.Errorf("unsupported Compose file version: %s", version) | ||||
| 	} | ||||
| 
 | ||||
| 	schemaLoader := gojsonschema.NewStringLoader(string(schemaData)) | ||||
| func Validate(config map[string]interface{}) error { | ||||
| 	schemaLoader := gojsonschema.NewStringLoader(Schema) | ||||
| 	dataLoader := gojsonschema.NewGoLoader(config) | ||||
| 
 | ||||
| 	result, err := gojsonschema.Validate(schemaLoader, dataLoader) | ||||
| @@ -1,3 +1,19 @@ | ||||
| /* | ||||
|    Copyright 2020 The Compose Specification Authors. | ||||
| 
 | ||||
|    Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|    you may not use this file except in compliance with the License. | ||||
|    You may obtain a copy of the License at | ||||
| 
 | ||||
|        http://www.apache.org/licenses/LICENSE-2.0 | ||||
| 
 | ||||
|    Unless required by applicable law or agreed to in writing, software | ||||
|    distributed under the License is distributed on an "AS IS" BASIS, | ||||
|    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|    See the License for the specific language governing permissions and | ||||
|    limitations under the License. | ||||
| */ | ||||
| 
 | ||||
| package template | ||||
| 
 | ||||
| import ( | ||||
| @@ -95,21 +111,21 @@ func Substitute(template string, mapping Mapping) (string, error) { | ||||
| 
 | ||||
| // ExtractVariables returns a map of all the variables defined in the specified | ||||
| // composefile (dict representation) and their default value if any. | ||||
| func ExtractVariables(configDict map[string]interface{}, pattern *regexp.Regexp) map[string]string { | ||||
| func ExtractVariables(configDict map[string]interface{}, pattern *regexp.Regexp) map[string]Variable { | ||||
| 	if pattern == nil { | ||||
| 		pattern = defaultPattern | ||||
| 	} | ||||
| 	return recurseExtract(configDict, pattern) | ||||
| } | ||||
| 
 | ||||
| func recurseExtract(value interface{}, pattern *regexp.Regexp) map[string]string { | ||||
| 	m := map[string]string{} | ||||
| func recurseExtract(value interface{}, pattern *regexp.Regexp) map[string]Variable { | ||||
| 	m := map[string]Variable{} | ||||
| 
 | ||||
| 	switch value := value.(type) { | ||||
| 	case string: | ||||
| 		if values, is := extractVariable(value, pattern); is { | ||||
| 			for _, v := range values { | ||||
| 				m[v.name] = v.value | ||||
| 				m[v.Name] = v | ||||
| 			} | ||||
| 		} | ||||
| 	case map[string]interface{}: | ||||
| @@ -124,7 +140,7 @@ func recurseExtract(value interface{}, pattern *regexp.Regexp) map[string]string | ||||
| 		for _, elem := range value { | ||||
| 			if values, is := extractVariable(elem, pattern); is { | ||||
| 				for _, v := range values { | ||||
| 					m[v.name] = v.value | ||||
| 					m[v.Name] = v | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| @@ -133,21 +149,22 @@ func recurseExtract(value interface{}, pattern *regexp.Regexp) map[string]string | ||||
| 	return m | ||||
| } | ||||
| 
 | ||||
| type extractedValue struct { | ||||
| 	name  string | ||||
| 	value string | ||||
| type Variable struct { | ||||
| 	Name         string | ||||
| 	DefaultValue string | ||||
| 	Required     bool | ||||
| } | ||||
| 
 | ||||
| func extractVariable(value interface{}, pattern *regexp.Regexp) ([]extractedValue, bool) { | ||||
| func extractVariable(value interface{}, pattern *regexp.Regexp) ([]Variable, bool) { | ||||
| 	sValue, ok := value.(string) | ||||
| 	if !ok { | ||||
| 		return []extractedValue{}, false | ||||
| 		return []Variable{}, false | ||||
| 	} | ||||
| 	matches := pattern.FindAllStringSubmatch(sValue, -1) | ||||
| 	if len(matches) == 0 { | ||||
| 		return []extractedValue{}, false | ||||
| 		return []Variable{}, false | ||||
| 	} | ||||
| 	values := []extractedValue{} | ||||
| 	values := []Variable{} | ||||
| 	for _, match := range matches { | ||||
| 		groups := matchGroups(match, pattern) | ||||
| 		if escaped := groups["escaped"]; escaped != "" { | ||||
| @@ -159,17 +176,24 @@ func extractVariable(value interface{}, pattern *regexp.Regexp) ([]extractedValu | ||||
| 		} | ||||
| 		name := val | ||||
| 		var defaultValue string | ||||
| 		var required bool | ||||
| 		switch { | ||||
| 		case strings.Contains(val, ":?"): | ||||
| 			name, _ = partition(val, ":?") | ||||
| 			required = true | ||||
| 		case strings.Contains(val, "?"): | ||||
| 			name, _ = partition(val, "?") | ||||
| 			required = true | ||||
| 		case strings.Contains(val, ":-"): | ||||
| 			name, defaultValue = partition(val, ":-") | ||||
| 		case strings.Contains(val, "-"): | ||||
| 			name, defaultValue = partition(val, "-") | ||||
| 		} | ||||
| 		values = append(values, extractedValue{name: name, value: defaultValue}) | ||||
| 		values = append(values, Variable{ | ||||
| 			Name:         name, | ||||
| 			DefaultValue: defaultValue, | ||||
| 			Required:     required, | ||||
| 		}) | ||||
| 	} | ||||
| 	return values, len(values) > 0 | ||||
| } | ||||
							
								
								
									
										109
									
								
								vendor/github.com/compose-spec/compose-go/types/config.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										109
									
								
								vendor/github.com/compose-spec/compose-go/types/config.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,109 @@ | ||||
| /* | ||||
|    Copyright 2020 The Compose Specification Authors. | ||||
|  | ||||
|    Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|    you may not use this file except in compliance with the License. | ||||
|    You may obtain a copy of the License at | ||||
|  | ||||
|        http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
|    Unless required by applicable law or agreed to in writing, software | ||||
|    distributed under the License is distributed on an "AS IS" BASIS, | ||||
|    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|    See the License for the specific language governing permissions and | ||||
|    limitations under the License. | ||||
| */ | ||||
|  | ||||
| package types | ||||
|  | ||||
| import ( | ||||
| 	"encoding/json" | ||||
|  | ||||
| 	"github.com/mitchellh/mapstructure" | ||||
| 	"github.com/sirupsen/logrus" | ||||
| ) | ||||
|  | ||||
| // ConfigDetails are the details about a group of ConfigFiles: the raw | ||||
| // files to load plus the environment used for variable interpolation. | ||||
| type ConfigDetails struct { | ||||
| 	Version     string | ||||
| 	WorkingDir  string | ||||
| 	ConfigFiles []ConfigFile | ||||
| 	Environment map[string]string | ||||
| } | ||||
|  | ||||
| // LookupEnv provides a lookup function for environment variables. | ||||
| // A missing key logs a warning and returns the empty string with ok=false. | ||||
| func (cd ConfigDetails) LookupEnv(key string) (string, bool) { | ||||
| 	v, ok := cd.Environment[key] | ||||
| 	if !ok { | ||||
| 		logrus.Warnf("The %s variable is not set. Defaulting to a blank string.", key) | ||||
| 	} | ||||
| 	return v, ok | ||||
| } | ||||
|  | ||||
| // ConfigFile is a filename and the contents of the file as a Dict. | ||||
| // Only one of Content/Config needs to be set; later stages fill the rest. | ||||
| type ConfigFile struct { | ||||
| 	// Filename is the name of the yaml configuration file | ||||
| 	Filename string | ||||
| 	// Content is the raw yaml content. Will be loaded from Filename if not set | ||||
| 	Content []byte | ||||
| 	// Config if the yaml tree for this config file. Will be parsed from Content if not set | ||||
| 	Config map[string]interface{} | ||||
| } | ||||
|  | ||||
| // Config is a full compose file configuration and model. | ||||
| // Extensions uses yaml inline so top-level x-* keys land here. | ||||
| type Config struct { | ||||
| 	Filename   string     `yaml:"-" json:"-"` | ||||
| 	Services   Services   `json:"services"` | ||||
| 	Networks   Networks   `yaml:",omitempty" json:"networks,omitempty"` | ||||
| 	Volumes    Volumes    `yaml:",omitempty" json:"volumes,omitempty"` | ||||
| 	Secrets    Secrets    `yaml:",omitempty" json:"secrets,omitempty"` | ||||
| 	Configs    Configs    `yaml:",omitempty" json:"configs,omitempty"` | ||||
| 	Extensions Extensions `yaml:",inline" json:"-"` | ||||
| } | ||||
|  | ||||
| // Volumes is a map of VolumeConfig keyed by volume name | ||||
| type Volumes map[string]VolumeConfig | ||||
|  | ||||
| // Networks is a map of NetworkConfig keyed by network name | ||||
| type Networks map[string]NetworkConfig | ||||
|  | ||||
| // Secrets is a map of SecretConfig keyed by secret name | ||||
| type Secrets map[string]SecretConfig | ||||
|  | ||||
| // Configs is a map of ConfigObjConfig keyed by config name | ||||
| type Configs map[string]ConfigObjConfig | ||||
|  | ||||
| // Extensions is a map of custom extension values | ||||
| type Extensions map[string]interface{} | ||||
|  | ||||
| // MarshalJSON makes Config implement json.Marshaler. | ||||
| // Empty collections are omitted entirely; extension keys are flattened | ||||
| // into the top-level object alongside the standard sections. | ||||
| func (c Config) MarshalJSON() ([]byte, error) { | ||||
| 	m := map[string]interface{}{ | ||||
| 		"services": c.Services, | ||||
| 	} | ||||
|  | ||||
| 	if len(c.Networks) > 0 { | ||||
| 		m["networks"] = c.Networks | ||||
| 	} | ||||
| 	if len(c.Volumes) > 0 { | ||||
| 		m["volumes"] = c.Volumes | ||||
| 	} | ||||
| 	if len(c.Secrets) > 0 { | ||||
| 		m["secrets"] = c.Secrets | ||||
| 	} | ||||
| 	if len(c.Configs) > 0 { | ||||
| 		m["configs"] = c.Configs | ||||
| 	} | ||||
| 	for k, v := range c.Extensions { | ||||
| 		m[k] = v | ||||
| 	} | ||||
| 	return json.Marshal(m) | ||||
| } | ||||
|  | ||||
| // Get decodes the extension value stored under name into target using | ||||
| // mapstructure. It reports whether the key was present; the error comes | ||||
| // from the decode step only. | ||||
| func (e Extensions) Get(name string, target interface{}) (bool, error) { | ||||
| 	if v, ok := e[name]; ok { | ||||
| 		err := mapstructure.Decode(v, target) | ||||
| 		return true, err | ||||
| 	} | ||||
| 	return false, nil | ||||
| } | ||||
							
								
								
									
										356
									
								
								vendor/github.com/compose-spec/compose-go/types/project.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										356
									
								
								vendor/github.com/compose-spec/compose-go/types/project.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,356 @@ | ||||
| /* | ||||
|    Copyright 2020 The Compose Specification Authors. | ||||
|  | ||||
|    Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|    you may not use this file except in compliance with the License. | ||||
|    You may obtain a copy of the License at | ||||
|  | ||||
|        http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
|    Unless required by applicable law or agreed to in writing, software | ||||
|    distributed under the License is distributed on an "AS IS" BASIS, | ||||
|    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|    See the License for the specific language governing permissions and | ||||
|    limitations under the License. | ||||
| */ | ||||
|  | ||||
| package types | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"os" | ||||
| 	"path/filepath" | ||||
| 	"sort" | ||||
|  | ||||
| 	"github.com/distribution/distribution/v3/reference" | ||||
| 	"github.com/opencontainers/go-digest" | ||||
| 	"golang.org/x/sync/errgroup" | ||||
| ) | ||||
|  | ||||
| // Project is the result of loading a set of compose files | ||||
| type Project struct { | ||||
| 	Name         string            `yaml:"-" json:"-"` | ||||
| 	WorkingDir   string            `yaml:"-" json:"-"` | ||||
| 	Services     Services          `json:"services"` | ||||
| 	Networks     Networks          `yaml:",omitempty" json:"networks,omitempty"` | ||||
| 	Volumes      Volumes           `yaml:",omitempty" json:"volumes,omitempty"` | ||||
| 	Secrets      Secrets           `yaml:",omitempty" json:"secrets,omitempty"` | ||||
| 	Configs      Configs           `yaml:",omitempty" json:"configs,omitempty"` | ||||
| 	Extensions   Extensions        `yaml:",inline" json:"-"` // https://github.com/golang/go/issues/6213 | ||||
| 	ComposeFiles []string          `yaml:"-" json:"-"` | ||||
| 	Environment  map[string]string `yaml:"-" json:"-"` | ||||
|  | ||||
| 	// DisabledServices tracks services that have been disabled because their profile is not active | ||||
| 	DisabledServices Services `yaml:"-" json:"-"` | ||||
| } | ||||
|  | ||||
| // ServiceNames return names for all services in this Compose config | ||||
| func (p Project) ServiceNames() []string { | ||||
| 	names := []string{} | ||||
| 	for _, s := range p.Services { | ||||
| 		names = append(names, s.Name) | ||||
| 	} | ||||
| 	sort.Strings(names) | ||||
| 	return names | ||||
| } | ||||
|  | ||||
| // VolumeNames returns the sorted names of all volumes in this Compose config | ||||
| func (p Project) VolumeNames() []string { | ||||
| 	names := []string{} | ||||
| 	for k := range p.Volumes { | ||||
| 		names = append(names, k) | ||||
| 	} | ||||
| 	sort.Strings(names) | ||||
| 	return names | ||||
| } | ||||
|  | ||||
| // NetworkNames returns the sorted names of all networks in this Compose config | ||||
| func (p Project) NetworkNames() []string { | ||||
| 	names := []string{} | ||||
| 	for k := range p.Networks { | ||||
| 		names = append(names, k) | ||||
| 	} | ||||
| 	sort.Strings(names) | ||||
| 	return names | ||||
| } | ||||
|  | ||||
| // SecretNames returns the sorted names of all secrets in this Compose config | ||||
| func (p Project) SecretNames() []string { | ||||
| 	names := []string{} | ||||
| 	for k := range p.Secrets { | ||||
| 		names = append(names, k) | ||||
| 	} | ||||
| 	sort.Strings(names) | ||||
| 	return names | ||||
| } | ||||
|  | ||||
| // ConfigNames returns the sorted names of all configs in this Compose config | ||||
| func (p Project) ConfigNames() []string { | ||||
| 	names := []string{} | ||||
| 	for k := range p.Configs { | ||||
| 		names = append(names, k) | ||||
| 	} | ||||
| 	sort.Strings(names) | ||||
| 	return names | ||||
| } | ||||
|  | ||||
| // GetByContainerName retrieves services by container_name; with no names | ||||
| // it returns all services. Any name without a match fails the whole call. | ||||
| func (p Project) GetByContainerName(names ...string) (Services, error) { | ||||
| 	if len(names) == 0 { | ||||
| 		return p.Services, nil | ||||
| 	} | ||||
| 	services := Services{} | ||||
| outLoop: | ||||
| 	for _, name := range names { | ||||
| 		for _, s := range p.Services { | ||||
| 			if name == s.ContainerName { | ||||
| 				services = append(services, s) | ||||
| 				continue outLoop | ||||
| 			} | ||||
| 		} | ||||
| 		return nil, fmt.Errorf("service with container_name %q could not be found", name) | ||||
| 	} | ||||
| 	return services, nil | ||||
| } | ||||
|  | ||||
| // GetServices retrieves services by names, or returns all services if no name is specified | ||||
| func (p Project) GetServices(names ...string) (Services, error) { | ||||
| 	if len(names) == 0 { | ||||
| 		return p.Services, nil | ||||
| 	} | ||||
| 	services := Services{} | ||||
| 	for _, name := range names { | ||||
| 		var serviceConfig *ServiceConfig | ||||
| 		for _, s := range p.Services { | ||||
| 			if s.Name == name { | ||||
| 				// copy dereferenced before the outer loop reuses s, so the alias is safe | ||||
| 				serviceConfig = &s | ||||
| 				break | ||||
| 			} | ||||
| 		} | ||||
| 		if serviceConfig == nil { | ||||
| 			return services, fmt.Errorf("no such service: %s", name) | ||||
| 		} | ||||
| 		services = append(services, *serviceConfig) | ||||
| 	} | ||||
| 	return services, nil | ||||
| } | ||||
|  | ||||
| // GetService retrieves a specific service by name | ||||
| func (p Project) GetService(name string) (ServiceConfig, error) { | ||||
| 	services, err := p.GetServices(name) | ||||
| 	if err != nil { | ||||
| 		return ServiceConfig{}, err | ||||
| 	} | ||||
| 	if len(services) == 0 { | ||||
| 		return ServiceConfig{}, fmt.Errorf("no such service: %s", name) | ||||
| 	} | ||||
| 	return services[0], nil | ||||
| } | ||||
|  | ||||
| // AllServices returns enabled and disabled services combined, enabled first | ||||
| func (p Project) AllServices() Services { | ||||
| 	var all Services | ||||
| 	all = append(all, p.Services...) | ||||
| 	all = append(all, p.DisabledServices...) | ||||
| 	return all | ||||
| } | ||||
|  | ||||
| // ServiceFunc is the callback applied to each visited service | ||||
| type ServiceFunc func(service ServiceConfig) error | ||||
|  | ||||
| // WithServices runs ServiceFunc on each service and its dependencies in dependency order | ||||
| func (p Project) WithServices(names []string, fn ServiceFunc) error { | ||||
| 	return p.withServices(names, fn, map[string]bool{}) | ||||
| } | ||||
|  | ||||
| // withServices is the recursive worker for WithServices; done records | ||||
| // services whose callback already ran so shared dependencies visit once. | ||||
| // NOTE(review): done is set only after fn returns, so a dependency cycle | ||||
| // (A -> B -> A) would recurse without bound — confirm cycles are rejected upstream. | ||||
| func (p Project) withServices(names []string, fn ServiceFunc, done map[string]bool) error { | ||||
| 	services, err := p.GetServices(names...) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	for _, service := range services { | ||||
| 		if done[service.Name] { | ||||
| 			continue | ||||
| 		} | ||||
| 		dependencies := service.GetDependencies() | ||||
| 		if len(dependencies) > 0 { | ||||
| 			err := p.withServices(dependencies, fn, done) | ||||
| 			if err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 		} | ||||
| 		if err := fn(service); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		done[service.Name] = true | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // RelativePath resolves a relative path against the project's working directory, | ||||
| // expanding a leading '~' to the user's home directory first. | ||||
| // NOTE(review): path[0] panics on an empty string — confirm callers never pass "". | ||||
| func (p *Project) RelativePath(path string) string { | ||||
| 	if path[0] == '~' { | ||||
| 		home, _ := os.UserHomeDir() | ||||
| 		path = filepath.Join(home, path[1:]) | ||||
| 	} | ||||
| 	if filepath.IsAbs(path) { | ||||
| 		return path | ||||
| 	} | ||||
| 	return filepath.Join(p.WorkingDir, path) | ||||
| } | ||||
|  | ||||
| // HasProfile returns true if the service has no profile declared or has at least one matching profile | ||||
| func (service ServiceConfig) HasProfile(profiles []string) bool { | ||||
| 	if len(service.Profiles) == 0 { | ||||
| 		return true | ||||
| 	} | ||||
| 	for _, p := range profiles { | ||||
| 		for _, sp := range service.Profiles { | ||||
| 			if sp == p { | ||||
| 				return true | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| // GetProfiles retrieves the profiles implicitly enabled by explicitly targeting | ||||
| // selected services. Order is unspecified (built from a set); nil when no | ||||
| // service declares profiles. | ||||
| func (s Services) GetProfiles() []string { | ||||
| 	set := map[string]struct{}{} | ||||
| 	for _, service := range s { | ||||
| 		for _, p := range service.Profiles { | ||||
| 			set[p] = struct{}{} | ||||
| 		} | ||||
| 	} | ||||
| 	var profiles []string | ||||
| 	for k := range set { | ||||
| 		profiles = append(profiles, k) | ||||
| 	} | ||||
| 	return profiles | ||||
| } | ||||
|  | ||||
| // ApplyProfiles moves services that don't match the selected profiles from | ||||
| // Services into DisabledServices, keeping relative order in both lists. | ||||
| func (p *Project) ApplyProfiles(profiles []string) { | ||||
| 	var enabled, disabled Services | ||||
| 	for _, service := range p.Services { | ||||
| 		if service.HasProfile(profiles) { | ||||
| 			enabled = append(enabled, service) | ||||
| 		} else { | ||||
| 			disabled = append(disabled, service) | ||||
| 		} | ||||
| 	} | ||||
| 	p.Services = enabled | ||||
| 	p.DisabledServices = disabled | ||||
| } | ||||
|  | ||||
| // WithoutUnnecessaryResources drops networks/volumes/secrets/configs that are not referenced by active services. | ||||
| // NOTE(review): a service referencing an undeclared resource keeps the key with a | ||||
| // zero-value entry here (map lookup on a missing key) — confirm validation catches that earlier. | ||||
| func (p *Project) WithoutUnnecessaryResources() { | ||||
| 	requiredNetworks := map[string]struct{}{} | ||||
| 	requiredVolumes := map[string]struct{}{} | ||||
| 	requiredSecrets := map[string]struct{}{} | ||||
| 	requiredConfigs := map[string]struct{}{} | ||||
| 	for _, s := range p.Services { | ||||
| 		for k := range s.Networks { | ||||
| 			requiredNetworks[k] = struct{}{} | ||||
| 		} | ||||
| 		for _, v := range s.Volumes { | ||||
| 			// only named volumes reference a top-level volume entry | ||||
| 			if v.Type != VolumeTypeVolume || v.Source == "" { | ||||
| 				continue | ||||
| 			} | ||||
| 			requiredVolumes[v.Source] = struct{}{} | ||||
| 		} | ||||
| 		for _, v := range s.Secrets { | ||||
| 			requiredSecrets[v.Source] = struct{}{} | ||||
| 		} | ||||
| 		for _, v := range s.Configs { | ||||
| 			requiredConfigs[v.Source] = struct{}{} | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	networks := Networks{} | ||||
| 	for k := range requiredNetworks { | ||||
| 		networks[k] = p.Networks[k] | ||||
| 	} | ||||
| 	p.Networks = networks | ||||
|  | ||||
| 	volumes := Volumes{} | ||||
| 	for k := range requiredVolumes { | ||||
| 		volumes[k] = p.Volumes[k] | ||||
| 	} | ||||
| 	p.Volumes = volumes | ||||
|  | ||||
| 	secrets := Secrets{} | ||||
| 	for k := range requiredSecrets { | ||||
| 		secrets[k] = p.Secrets[k] | ||||
| 	} | ||||
| 	p.Secrets = secrets | ||||
|  | ||||
| 	configs := Configs{} | ||||
| 	for k := range requiredConfigs { | ||||
| 		configs[k] = p.Configs[k] | ||||
| 	} | ||||
| 	p.Configs = configs | ||||
| } | ||||
|  | ||||
| // ForServices restrict the project model to a subset of services | ||||
| func (p *Project) ForServices(names []string) error { | ||||
| 	if len(names) == 0 { | ||||
| 		// All services | ||||
| 		return nil | ||||
| 	} | ||||
|  | ||||
| 	set := map[string]struct{}{} | ||||
| 	err := p.WithServices(names, func(service ServiceConfig) error { | ||||
| 		set[service.Name] = struct{}{} | ||||
| 		return nil | ||||
| 	}) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	// Disable all services which are not explicit target or dependencies | ||||
| 	var enabled Services | ||||
| 	for _, s := range p.Services { | ||||
| 		if _, ok := set[s.Name]; ok { | ||||
| 			enabled = append(enabled, s) | ||||
| 		} else { | ||||
| 			p.DisabledServices = append(p.DisabledServices, s) | ||||
| 		} | ||||
| 	} | ||||
| 	p.Services = enabled | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // ResolveImages updates services images to include digest computed by a resolver function | ||||
| func (p *Project) ResolveImages(resolver func(named reference.Named) (digest.Digest, error)) error { | ||||
| 	eg := errgroup.Group{} | ||||
| 	for i, s := range p.Services { | ||||
| 		idx := i | ||||
| 		service := s | ||||
|  | ||||
| 		if service.Image == "" { | ||||
| 			continue | ||||
| 		} | ||||
| 		eg.Go(func() error { | ||||
| 			named, err := reference.ParseDockerRef(service.Image) | ||||
| 			if err != nil { | ||||
| 				return err | ||||
| 			} | ||||
|  | ||||
| 			if _, ok := named.(reference.Canonical); !ok { | ||||
| 				// image is named but not digested reference | ||||
| 				digest, err := resolver(named) | ||||
| 				if err != nil { | ||||
| 					return err | ||||
| 				} | ||||
|  | ||||
| 				named, err = reference.WithDigest(named, digest) | ||||
| 				if err != nil { | ||||
| 					return err | ||||
| 				} | ||||
| 			} | ||||
|  | ||||
| 			service.Image = named.String() | ||||
| 			p.Services[idx] = service | ||||
| 			return nil | ||||
| 		}) | ||||
| 	} | ||||
| 	return eg.Wait() | ||||
| } | ||||
| @@ -1,65 +1,31 @@ | ||||
| /* | ||||
|    Copyright 2020 The Compose Specification Authors. | ||||
| 
 | ||||
|    Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|    you may not use this file except in compliance with the License. | ||||
|    You may obtain a copy of the License at | ||||
| 
 | ||||
|        http://www.apache.org/licenses/LICENSE-2.0 | ||||
| 
 | ||||
|    Unless required by applicable law or agreed to in writing, software | ||||
|    distributed under the License is distributed on an "AS IS" BASIS, | ||||
|    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|    See the License for the specific language governing permissions and | ||||
|    limitations under the License. | ||||
| */ | ||||
| 
 | ||||
| package types | ||||
| 
 | ||||
| import ( | ||||
| 	"encoding/json" | ||||
| 	"fmt" | ||||
| 	"sort" | ||||
| 	"strings" | ||||
| 	"time" | ||||
| 
 | ||||
| 	"github.com/docker/go-connections/nat" | ||||
| ) | ||||
| 
 | ||||
| // UnsupportedProperties not yet supported by this implementation of the compose file | ||||
| var UnsupportedProperties = []string{ | ||||
| 	"build", | ||||
| 	"cgroupns_mode", | ||||
| 	"cgroup_parent", | ||||
| 	"devices", | ||||
| 	"domainname", | ||||
| 	"external_links", | ||||
| 	"ipc", | ||||
| 	"links", | ||||
| 	"mac_address", | ||||
| 	"network_mode", | ||||
| 	"pid", | ||||
| 	"privileged", | ||||
| 	"restart", | ||||
| 	"security_opt", | ||||
| 	"shm_size", | ||||
| 	"userns_mode", | ||||
| } | ||||
| 
 | ||||
| // DeprecatedProperties that were removed from the v3 format, but their | ||||
| // use should not impact the behaviour of the application. | ||||
| var DeprecatedProperties = map[string]string{ | ||||
| 	"container_name": "Setting the container name is not supported.", | ||||
| 	"expose":         "Exposing ports is unnecessary - services on the same network can access each other's containers on any port.", | ||||
| } | ||||
| 
 | ||||
| // ForbiddenProperties that are not supported in this implementation of the | ||||
| // compose file. | ||||
| var ForbiddenProperties = map[string]string{ | ||||
| 	"extends":       "Support for `extends` is not implemented yet.", | ||||
| 	"volume_driver": "Instead of setting the volume driver on the service, define a volume using the top-level `volumes` option and specify the driver there.", | ||||
| 	"volumes_from":  "To share a volume between services, define it using the top-level `volumes` option and reference it from each service that shares it using the service-level `volumes` option.", | ||||
| 	"cpu_quota":     "Set resource limits using deploy.resources", | ||||
| 	"cpu_shares":    "Set resource limits using deploy.resources", | ||||
| 	"cpuset":        "Set resource limits using deploy.resources", | ||||
| 	"mem_limit":     "Set resource limits using deploy.resources", | ||||
| 	"memswap_limit": "Set resource limits using deploy.resources", | ||||
| } | ||||
| 
 | ||||
| // ConfigFile is a filename and the contents of the file as a Dict | ||||
| type ConfigFile struct { | ||||
| 	Filename string | ||||
| 	Config   map[string]interface{} | ||||
| } | ||||
| 
 | ||||
| // ConfigDetails are the details about a group of ConfigFiles | ||||
| type ConfigDetails struct { | ||||
| 	Version     string | ||||
| 	WorkingDir  string | ||||
| 	ConfigFiles []ConfigFile | ||||
| 	Environment map[string]string | ||||
| } | ||||
| 
 | ||||
| // Duration is a thin wrapper around time.Duration with improved JSON marshalling | ||||
| type Duration time.Duration | ||||
| 
 | ||||
| @@ -86,47 +52,14 @@ func (d Duration) MarshalYAML() (interface{}, error) { | ||||
| 	return d.String(), nil | ||||
| } | ||||
| 
 | ||||
| // LookupEnv provides a lookup function for environment variables | ||||
| func (cd ConfigDetails) LookupEnv(key string) (string, bool) { | ||||
| 	v, ok := cd.Environment[key] | ||||
| 	return v, ok | ||||
| func (d *Duration) UnmarshalJSON(b []byte) error { | ||||
| 	s := strings.Trim(string(b), "\"") | ||||
| 	timeDuration, err := time.ParseDuration(s) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| // Config is a full compose file configuration | ||||
| type Config struct { | ||||
| 	Filename string                     `yaml:"-" json:"-"` | ||||
| 	Version  string                     `json:"version"` | ||||
| 	Services Services                   `json:"services"` | ||||
| 	Networks map[string]NetworkConfig   `yaml:",omitempty" json:"networks,omitempty"` | ||||
| 	Volumes  map[string]VolumeConfig    `yaml:",omitempty" json:"volumes,omitempty"` | ||||
| 	Secrets  map[string]SecretConfig    `yaml:",omitempty" json:"secrets,omitempty"` | ||||
| 	Configs  map[string]ConfigObjConfig `yaml:",omitempty" json:"configs,omitempty"` | ||||
| 	Extras   map[string]interface{}     `yaml:",inline" json:"-"` | ||||
| } | ||||
| 
 | ||||
| // MarshalJSON makes Config implement json.Marshaler | ||||
| func (c Config) MarshalJSON() ([]byte, error) { | ||||
| 	m := map[string]interface{}{ | ||||
| 		"version":  c.Version, | ||||
| 		"services": c.Services, | ||||
| 	} | ||||
| 
 | ||||
| 	if len(c.Networks) > 0 { | ||||
| 		m["networks"] = c.Networks | ||||
| 	} | ||||
| 	if len(c.Volumes) > 0 { | ||||
| 		m["volumes"] = c.Volumes | ||||
| 	} | ||||
| 	if len(c.Secrets) > 0 { | ||||
| 		m["secrets"] = c.Secrets | ||||
| 	} | ||||
| 	if len(c.Configs) > 0 { | ||||
| 		m["configs"] = c.Configs | ||||
| 	} | ||||
| 	for k, v := range c.Extras { | ||||
| 		m[k] = v | ||||
| 	} | ||||
| 	return json.Marshal(m) | ||||
| 	*d = Duration(timeDuration) | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // Services is a list of ServiceConfig | ||||
| @@ -153,28 +86,42 @@ func (s Services) MarshalJSON() ([]byte, error) { | ||||
| // ServiceConfig is the configuration of one service | ||||
| type ServiceConfig struct { | ||||
| 	Name     string   `yaml:"-" json:"-"` | ||||
| 	Profiles []string `mapstructure:"profiles" yaml:"profiles,omitempty" json:"profiles,omitempty"` | ||||
| 
 | ||||
| 	Build           BuildConfig                      `yaml:",omitempty" json:"build,omitempty"` | ||||
| 	Build           *BuildConfig                     `yaml:",omitempty" json:"build,omitempty"` | ||||
| 	BlkioConfig     *BlkioConfig                     `yaml:",omitempty" json:"blkio_config,omitempty"` | ||||
| 	CapAdd          []string                         `mapstructure:"cap_add" yaml:"cap_add,omitempty" json:"cap_add,omitempty"` | ||||
| 	CapDrop         []string                         `mapstructure:"cap_drop" yaml:"cap_drop,omitempty" json:"cap_drop,omitempty"` | ||||
| 	CgroupNSMode    string                           `mapstructure:"cgroupns_mode" yaml:"cgroupns_mode,omitempty" json:"cgroupns_mode,omitempty"` | ||||
| 	CgroupParent    string                           `mapstructure:"cgroup_parent" yaml:"cgroup_parent,omitempty" json:"cgroup_parent,omitempty"` | ||||
| 	CPUCount        int64                            `mapstructure:"cpu_count" yaml:"cpu_count,omitempty" json:"cpu_count,omitempty"` | ||||
| 	CPUPercent      float32                          `mapstructure:"cpu_percent" yaml:"cpu_percent,omitempty" json:"cpu_percent,omitempty"` | ||||
| 	CPUPeriod       int64                            `mapstructure:"cpu_period" yaml:"cpu_period,omitempty" json:"cpu_period,omitempty"` | ||||
| 	CPUQuota        int64                            `mapstructure:"cpu_quota" yaml:"cpu_quota,omitempty" json:"cpu_quota,omitempty"` | ||||
| 	CPURTPeriod     int64                            `mapstructure:"cpu_rt_period" yaml:"cpu_rt_period,omitempty" json:"cpu_rt_period,omitempty"` | ||||
| 	CPURTRuntime    int64                            `mapstructure:"cpu_rt_runtime" yaml:"cpu_rt_runtime,omitempty" json:"cpu_rt_runtime,omitempty"` | ||||
| 	CPUS            float32                          `mapstructure:"cpus" yaml:"cpus,omitempty" json:"cpus,omitempty"` | ||||
| 	CPUSet          string                           `mapstructure:"cpuset" yaml:"cpuset,omitempty" json:"cpuset,omitempty"` | ||||
| 	CPUShares       int64                            `mapstructure:"cpu_shares" yaml:"cpu_shares,omitempty" json:"cpu_shares,omitempty"` | ||||
| 	Command         ShellCommand                     `yaml:",omitempty" json:"command,omitempty"` | ||||
| 	Configs         []ServiceConfigObjConfig         `yaml:",omitempty" json:"configs,omitempty"` | ||||
| 	ContainerName   string                           `mapstructure:"container_name" yaml:"container_name,omitempty" json:"container_name,omitempty"` | ||||
| 	CredentialSpec  CredentialSpecConfig             `mapstructure:"credential_spec" yaml:"credential_spec,omitempty" json:"credential_spec,omitempty"` | ||||
| 	DependsOn       []string                         `mapstructure:"depends_on" yaml:"depends_on,omitempty" json:"depends_on,omitempty"` | ||||
| 	Deploy          DeployConfig                     `yaml:",omitempty" json:"deploy,omitempty"` | ||||
| 	CredentialSpec  *CredentialSpecConfig            `mapstructure:"credential_spec" yaml:"credential_spec,omitempty" json:"credential_spec,omitempty"` | ||||
| 	DependsOn       DependsOnConfig                  `mapstructure:"depends_on" yaml:"depends_on,omitempty" json:"depends_on,omitempty"` | ||||
| 	Deploy          *DeployConfig                    `yaml:",omitempty" json:"deploy,omitempty"` | ||||
| 	Devices         []string                         `yaml:",omitempty" json:"devices,omitempty"` | ||||
| 	DNS             StringList                       `yaml:",omitempty" json:"dns,omitempty"` | ||||
| 	DNSOpts         []string                         `mapstructure:"dns_opt" yaml:"dns_opt,omitempty" json:"dns_opt,omitempty"` | ||||
| 	DNSSearch       StringList                       `mapstructure:"dns_search" yaml:"dns_search,omitempty" json:"dns_search,omitempty"` | ||||
| 	Dockerfile      string                           `yaml:"dockerfile,omitempty" json:"dockerfile,omitempty"` | ||||
| 	DomainName      string                           `mapstructure:"domainname" yaml:"domainname,omitempty" json:"domainname,omitempty"` | ||||
| 	Entrypoint      ShellCommand                     `yaml:",omitempty" json:"entrypoint,omitempty"` | ||||
| 	Environment     MappingWithEquals                `yaml:",omitempty" json:"environment,omitempty"` | ||||
| 	EnvFile         StringList                       `mapstructure:"env_file" yaml:"env_file,omitempty" json:"env_file,omitempty"` | ||||
| 	Expose          StringOrNumberList               `yaml:",omitempty" json:"expose,omitempty"` | ||||
| 	Extends         ExtendsConfig                    `yaml:"extends,omitempty" json:"extends,omitempty"` | ||||
| 	ExternalLinks   []string                         `mapstructure:"external_links" yaml:"external_links,omitempty" json:"external_links,omitempty"` | ||||
| 	ExtraHosts      HostsList                        `mapstructure:"extra_hosts" yaml:"extra_hosts,omitempty" json:"extra_hosts,omitempty"` | ||||
| 	GroupAdd        []string                         `mapstructure:"group_app" yaml:"group_add,omitempty" json:"group_add,omitempty"` | ||||
| 	Hostname        string                           `yaml:",omitempty" json:"hostname,omitempty"` | ||||
| 	HealthCheck     *HealthCheckConfig               `yaml:",omitempty" json:"healthcheck,omitempty"` | ||||
| 	Image           string                           `yaml:",omitempty" json:"image,omitempty"` | ||||
| @@ -184,17 +131,31 @@ type ServiceConfig struct { | ||||
| 	Labels          Labels                           `yaml:",omitempty" json:"labels,omitempty"` | ||||
| 	Links           []string                         `yaml:",omitempty" json:"links,omitempty"` | ||||
| 	Logging         *LoggingConfig                   `yaml:",omitempty" json:"logging,omitempty"` | ||||
| 	LogDriver       string                           `mapstructure:"log_driver" yaml:"log_driver,omitempty" json:"log_driver,omitempty"` | ||||
| 	LogOpt          map[string]string                `mapstructure:"log_opt" yaml:"log_opt,omitempty" json:"log_opt,omitempty"` | ||||
| 	MemLimit        UnitBytes                        `mapstructure:"mem_limit" yaml:"mem_limit,omitempty" json:"mem_limit,omitempty"` | ||||
| 	MemReservation  UnitBytes                        `mapstructure:"mem_reservation" yaml:"mem_reservation,omitempty" json:"mem_reservation,omitempty"` | ||||
| 	MemSwapLimit    UnitBytes                        `mapstructure:"memswap_limit" yaml:"memswap_limit,omitempty" json:"memswap_limit,omitempty"` | ||||
| 	MemSwappiness   UnitBytes                        `mapstructure:"mem_swappiness" yaml:"mem_swappiness,omitempty" json:"mem_swappiness,omitempty"` | ||||
| 	MacAddress      string                           `mapstructure:"mac_address" yaml:"mac_address,omitempty" json:"mac_address,omitempty"` | ||||
| 	Net             string                           `yaml:"net,omitempty" json:"net,omitempty"` | ||||
| 	NetworkMode     string                           `mapstructure:"network_mode" yaml:"network_mode,omitempty" json:"network_mode,omitempty"` | ||||
| 	Networks        map[string]*ServiceNetworkConfig `yaml:",omitempty" json:"networks,omitempty"` | ||||
| 	OomKillDisable  bool                             `mapstructure:"oom_kill_disable" yaml:"oom_kill_disable,omitempty" json:"oom_kill_disable,omitempty"` | ||||
| 	OomScoreAdj     int64                            `mapstructure:"oom_score_adj" yaml:"oom_score_adj,omitempty" json:"oom_score_adj,omitempty"` | ||||
| 	Pid             string                           `yaml:",omitempty" json:"pid,omitempty"` | ||||
| 	PidsLimit       int64                            `mapstructure:"pids_limit" yaml:"pids_limit,omitempty" json:"pids_limit,omitempty"` | ||||
| 	Platform        string                           `yaml:",omitempty" json:"platform,omitempty"` | ||||
| 	Ports           []ServicePortConfig              `yaml:",omitempty" json:"ports,omitempty"` | ||||
| 	Privileged      bool                             `yaml:",omitempty" json:"privileged,omitempty"` | ||||
| 	PullPolicy      string                           `mapstructure:"pull_policy" yaml:"pull_policy,omitempty" json:"pull_policy,omitempty"` | ||||
| 	ReadOnly        bool                             `mapstructure:"read_only" yaml:"read_only,omitempty" json:"read_only,omitempty"` | ||||
| 	Restart         string                           `yaml:",omitempty" json:"restart,omitempty"` | ||||
| 	Runtime         string                           `yaml:",omitempty" json:"runtime,omitempty"` | ||||
| 	Scale           int                              `yaml:",omitempty" json:"scale,omitempty"` | ||||
| 	Secrets         []ServiceSecretConfig            `yaml:",omitempty" json:"secrets,omitempty"` | ||||
| 	SecurityOpt     []string                         `mapstructure:"security_opt" yaml:"security_opt,omitempty" json:"security_opt,omitempty"` | ||||
| 	ShmSize         string                           `mapstructure:"shm_size" yaml:"shm_size,omitempty" json:"shm_size,omitempty"` | ||||
| 	ShmSize         UnitBytes                        `mapstructure:"shm_size" yaml:"shm_size,omitempty" json:"shm_size,omitempty"` | ||||
| 	StdinOpen       bool                             `mapstructure:"stdin_open" yaml:"stdin_open,omitempty" json:"stdin_open,omitempty"` | ||||
| 	StopGracePeriod *Duration                        `mapstructure:"stop_grace_period" yaml:"stop_grace_period,omitempty" json:"stop_grace_period,omitempty"` | ||||
| 	StopSignal      string                           `mapstructure:"stop_signal" yaml:"stop_signal,omitempty" json:"stop_signal,omitempty"` | ||||
| @@ -204,14 +165,110 @@ type ServiceConfig struct { | ||||
| 	Ulimits         map[string]*UlimitsConfig        `yaml:",omitempty" json:"ulimits,omitempty"` | ||||
| 	User            string                           `yaml:",omitempty" json:"user,omitempty"` | ||||
| 	UserNSMode      string                           `mapstructure:"userns_mode" yaml:"userns_mode,omitempty" json:"userns_mode,omitempty"` | ||||
| 	Uts             string                           `yaml:"uts,omitempty" json:"uts,omitempty"` | ||||
| 	VolumeDriver    string                           `mapstructure:"volume_driver" yaml:"volume_driver,omitempty" json:"volume_driver,omitempty"` | ||||
| 	Volumes         []ServiceVolumeConfig            `yaml:",omitempty" json:"volumes,omitempty"` | ||||
| 	VolumesFrom     []string                         `mapstructure:"volumes_from" yaml:"volumes_from,omitempty" json:"volumes_from,omitempty"` | ||||
| 	WorkingDir      string                           `mapstructure:"working_dir" yaml:"working_dir,omitempty" json:"working_dir,omitempty"` | ||||
| 
 | ||||
| 	Extras map[string]interface{} `yaml:",inline" json:"-"` | ||||
| 	Extensions map[string]interface{} `yaml:",inline" json:"-"` | ||||
| } | ||||
| 
 | ||||
| // NetworksByPriority return the service networks IDs sorted according to Priority | ||||
| func (s *ServiceConfig) NetworksByPriority() []string { | ||||
| 	type key struct { | ||||
| 		name     string | ||||
| 		priority int | ||||
| 	} | ||||
| 	var keys []key | ||||
| 	for k, v := range s.Networks { | ||||
| 		priority := 0 | ||||
| 		if v != nil { | ||||
| 			priority = v.Priority | ||||
| 		} | ||||
| 		keys = append(keys, key{ | ||||
| 			name:     k, | ||||
| 			priority: priority, | ||||
| 		}) | ||||
| 	} | ||||
| 	sort.Slice(keys, func(i, j int) bool { | ||||
| 		return keys[i].priority > keys[j].priority | ||||
| 	}) | ||||
| 	var sorted []string | ||||
| 	for _, k := range keys { | ||||
| 		sorted = append(sorted, k.name) | ||||
| 	} | ||||
| 	return sorted | ||||
| } | ||||
| 
 | ||||
const (
	//PullPolicyAlways always pull images
	PullPolicyAlways = "always"
	//PullPolicyNever never pull images
	PullPolicyNever = "never"
	//PullPolicyIfNotPresent pull missing images
	PullPolicyIfNotPresent = "if_not_present"
	//PullPolicyMissing pull missing images
	PullPolicyMissing = "missing"
	//PullPolicyBuild force building images
	PullPolicyBuild = "build"
)

const (
	//RestartPolicyAlways always restart the container if it stops
	RestartPolicyAlways = "always"
	//RestartPolicyOnFailure restart the container if it exits due to an error
	RestartPolicyOnFailure = "on-failure"
	//RestartPolicyNo do not automatically restart the container
	RestartPolicyNo = "no"
	//RestartPolicyUnlessStopped always restart the container unless the container is stopped (manually or otherwise)
	RestartPolicyUnlessStopped = "unless-stopped"
)

const (
	// NetworkModeServicePrefix is the prefix for network_mode pointing to a service
	NetworkModeServicePrefix = "service:"
	// NetworkModeContainerPrefix is the prefix for network_mode pointing to a container
	NetworkModeContainerPrefix = "container:"
)
| 
 | ||||
| // GetDependencies retrieve all services this service depends on | ||||
| func (s ServiceConfig) GetDependencies() []string { | ||||
| 	dependencies := make(set) | ||||
| 	for dependency := range s.DependsOn { | ||||
| 		dependencies.append(dependency) | ||||
| 	} | ||||
| 	for _, link := range s.Links { | ||||
| 		parts := strings.Split(link, ":") | ||||
| 		if len(parts) == 2 { | ||||
| 			dependencies.append(parts[0]) | ||||
| 		} else { | ||||
| 			dependencies.append(link) | ||||
| 		} | ||||
| 	} | ||||
| 	if strings.HasPrefix(s.NetworkMode, NetworkModeServicePrefix) { | ||||
| 		dependencies.append(s.NetworkMode[len(NetworkModeServicePrefix):]) | ||||
| 	} | ||||
| 	return dependencies.toSlice() | ||||
| } | ||||
| 
 | ||||
// set is a lightweight string set used to deduplicate dependency names.
type set map[string]struct{}

// append inserts every given string into the set.
func (s set) append(values ...string) {
	for _, v := range values {
		s[v] = struct{}{}
	}
}

// toSlice returns the set members as a slice, in unspecified order.
func (s set) toSlice() []string {
	out := make([]string, 0, len(s))
	for member := range s {
		out = append(out, member)
	}
	return out
}
| 
 | ||||
| // BuildConfig is a type for build | ||||
| // using the same format at libcompose: https://github.com/docker/libcompose/blob/master/yaml/build.go#L12 | ||||
| type BuildConfig struct { | ||||
| 	Context    string            `yaml:",omitempty" json:"context,omitempty"` | ||||
| 	Dockerfile string            `yaml:",omitempty" json:"dockerfile,omitempty"` | ||||
| @@ -219,8 +276,39 @@ type BuildConfig struct { | ||||
| 	Labels     Labels            `yaml:",omitempty" json:"labels,omitempty"` | ||||
| 	CacheFrom  StringList        `mapstructure:"cache_from" yaml:"cache_from,omitempty" json:"cache_from,omitempty"` | ||||
| 	ExtraHosts HostsList         `mapstructure:"extra_hosts" yaml:"extra_hosts,omitempty" json:"extra_hosts,omitempty"` | ||||
| 	Isolation  string            `yaml:",omitempty" json:"isolation,omitempty"` | ||||
| 	Network    string            `yaml:",omitempty" json:"network,omitempty"` | ||||
| 	Target     string            `yaml:",omitempty" json:"target,omitempty"` | ||||
| 
 | ||||
| 	Extensions map[string]interface{} `yaml:",inline" json:"-"` | ||||
| } | ||||
| 
 | ||||
// BlkioConfig define blkio config: block-IO weight and per-device
// throttling limits for a service.
type BlkioConfig struct {
	Weight          uint16           `yaml:",omitempty" json:"weight,omitempty"`
	WeightDevice    []WeightDevice   `yaml:",omitempty" json:"weight_device,omitempty"`
	DeviceReadBps   []ThrottleDevice `yaml:",omitempty" json:"device_read_bps,omitempty"`
	DeviceReadIOps  []ThrottleDevice `yaml:",omitempty" json:"device_read_iops,omitempty"`
	DeviceWriteBps  []ThrottleDevice `yaml:",omitempty" json:"device_write_bps,omitempty"`
	DeviceWriteIOps []ThrottleDevice `yaml:",omitempty" json:"device_write_iops,omitempty"`

	// Extensions holds any x-* style inline fields not modeled above.
	Extensions map[string]interface{} `yaml:",inline" json:"-"`
}

// WeightDevice is a structure that holds device:weight pair
type WeightDevice struct {
	Path   string
	Weight uint16

	// Extensions holds any x-* style inline fields not modeled above.
	Extensions map[string]interface{} `yaml:",inline" json:"-"`
}

// ThrottleDevice is a structure that holds device:rate_per_second pair
// (rate unit — bytes vs IOs — depends on which BlkioConfig field uses it).
type ThrottleDevice struct {
	Path string
	Rate uint64

	// Extensions holds any x-* style inline fields not modeled above.
	Extensions map[string]interface{} `yaml:",inline" json:"-"`
}
| 
 | ||||
| // ShellCommand is a string or list of string args | ||||
| @@ -239,15 +327,83 @@ type StringOrNumberList []string | ||||
// MappingWithEquals is a mapping type built from a list of KEY[=VALUE]
// strings. For a key without value (`key`), the mapped value is nil;
// for a key with an empty value (`key=`), it points to the empty string.
type MappingWithEquals map[string]*string

// NewMappingWithEquals build a new Mapping from a set of KEY=VALUE strings
func NewMappingWithEquals(values []string) MappingWithEquals {
	result := make(MappingWithEquals, len(values))
	for _, kv := range values {
		parts := strings.SplitN(kv, "=", 2)
		if len(parts) == 2 {
			// Copy the value so the stored pointer is stable.
			v := parts[1]
			result[parts[0]] = &v
		} else {
			result[kv] = nil
		}
	}
	return result
}
| 
 | ||||
| // OverrideBy update MappingWithEquals with values from another MappingWithEquals | ||||
| func (e MappingWithEquals) OverrideBy(other MappingWithEquals) MappingWithEquals { | ||||
| 	for k, v := range other { | ||||
| 		e[k] = v | ||||
| 	} | ||||
| 	return e | ||||
| } | ||||
| 
 | ||||
| // Resolve update a MappingWithEquals for keys without value (`key`, but not `key=`) | ||||
| func (e MappingWithEquals) Resolve(lookupFn func(string) (string, bool)) MappingWithEquals { | ||||
| 	for k, v := range e { | ||||
| 		if v == nil || *v == "" { | ||||
| 			if value, ok := lookupFn(k); ok { | ||||
| 				e[k] = &value | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return e | ||||
| } | ||||
| 
 | ||||
| // RemoveEmpty excludes keys that are not associated with a value | ||||
| func (e MappingWithEquals) RemoveEmpty() MappingWithEquals { | ||||
| 	for k, v := range e { | ||||
| 		if v == nil { | ||||
| 			delete(e, k) | ||||
| 		} | ||||
| 	} | ||||
| 	return e | ||||
| } | ||||
| 
 | ||||
// Mapping is a mapping type that can be converted from a list of
// key[=value] strings.
// For the key with an empty value (`key=`), or key without value (`key`),
// the mapped value is set to an empty string `""`.
type Mapping map[string]string

// NewMapping build a new Mapping from a set of KEY=VALUE strings
func NewMapping(values []string) Mapping {
	result := make(Mapping, len(values))
	for _, kv := range values {
		parts := strings.SplitN(kv, "=", 2)
		if len(parts) == 2 {
			result[parts[0]] = parts[1]
		} else {
			result[parts[0]] = ""
		}
	}
	return result
}
| 
 | ||||
// Labels is a mapping type for labels
type Labels map[string]string

// Add stores value under key, allocating the map first when the receiver
// is nil, and returns the (possibly newly allocated) map.
func (l Labels) Add(key, value string) Labels {
	if l == nil {
		l = make(Labels)
	}
	l[key] = value
	return l
}
| 
 | ||||
| // MappingWithColon is a mapping type that can be converted from a list of | ||||
| // 'key: value' strings | ||||
| type MappingWithColon map[string]string | ||||
| @@ -259,6 +415,8 @@ type HostsList []string | ||||
| type LoggingConfig struct { | ||||
| 	Driver  string            `yaml:",omitempty" json:"driver,omitempty"` | ||||
| 	Options map[string]string `yaml:",omitempty" json:"options,omitempty"` | ||||
| 
 | ||||
| 	Extensions map[string]interface{} `yaml:",inline" json:"-"` | ||||
| } | ||||
| 
 | ||||
| // DeployConfig the deployment configuration for a service | ||||
| @@ -272,6 +430,8 @@ type DeployConfig struct { | ||||
| 	RestartPolicy  *RestartPolicy `mapstructure:"restart_policy" yaml:"restart_policy,omitempty" json:"restart_policy,omitempty"` | ||||
| 	Placement      Placement      `yaml:",omitempty" json:"placement,omitempty"` | ||||
| 	EndpointMode   string         `mapstructure:"endpoint_mode" yaml:"endpoint_mode,omitempty" json:"endpoint_mode,omitempty"` | ||||
| 
 | ||||
| 	Extensions map[string]interface{} `yaml:",inline" json:"-"` | ||||
| } | ||||
| 
 | ||||
| // HealthCheckConfig the healthcheck configuration for a service | ||||
| @@ -282,6 +442,8 @@ type HealthCheckConfig struct { | ||||
| 	Retries     *uint64         `yaml:",omitempty" json:"retries,omitempty"` | ||||
| 	StartPeriod *Duration       `mapstructure:"start_period" yaml:"start_period,omitempty" json:"start_period,omitempty"` | ||||
| 	Disable     bool            `yaml:",omitempty" json:"disable,omitempty"` | ||||
| 
 | ||||
| 	Extensions map[string]interface{} `yaml:",inline" json:"-"` | ||||
| } | ||||
| 
 | ||||
| // HealthCheckTest is the command run to test the health of a service | ||||
| @@ -295,34 +457,42 @@ type UpdateConfig struct { | ||||
| 	Monitor         Duration `yaml:",omitempty" json:"monitor,omitempty"` | ||||
| 	MaxFailureRatio float32  `mapstructure:"max_failure_ratio" yaml:"max_failure_ratio,omitempty" json:"max_failure_ratio,omitempty"` | ||||
| 	Order           string   `yaml:",omitempty" json:"order,omitempty"` | ||||
| 
 | ||||
| 	Extensions map[string]interface{} `yaml:",inline" json:"-"` | ||||
| } | ||||
| 
 | ||||
| // Resources the resource limits and reservations | ||||
| type Resources struct { | ||||
| 	Limits       *ResourceLimit `yaml:",omitempty" json:"limits,omitempty"` | ||||
| 	Limits       *Resource `yaml:",omitempty" json:"limits,omitempty"` | ||||
| 	Reservations *Resource `yaml:",omitempty" json:"reservations,omitempty"` | ||||
| 
 | ||||
| 	Extensions map[string]interface{} `yaml:",inline" json:"-"` | ||||
| } | ||||
| 
 | ||||
// ResourceLimit is a resource to be limited
type ResourceLimit struct {
	// TODO: types to convert from units and ratios
	NanoCPUs    string    `mapstructure:"cpus" yaml:"cpus,omitempty" json:"cpus,omitempty"`
	MemoryBytes UnitBytes `mapstructure:"memory" yaml:"memory,omitempty" json:"memory,omitempty"`
	Pids        int64     `mapstructure:"pids" yaml:"pids,omitempty" json:"pids,omitempty"`
}
| 
 | ||||
| // Resource is a resource to be reserved | ||||
| // Resource is a resource to be limited or reserved | ||||
| type Resource struct { | ||||
| 	// TODO: types to convert from units and ratios | ||||
| 	NanoCPUs         string            `mapstructure:"cpus" yaml:"cpus,omitempty" json:"cpus,omitempty"` | ||||
| 	MemoryBytes      UnitBytes         `mapstructure:"memory" yaml:"memory,omitempty" json:"memory,omitempty"` | ||||
| 	Devices          []DeviceRequest   `mapstructure:"devices" yaml:"devices,omitempty" json:"devices,omitempty"` | ||||
| 	GenericResources []GenericResource `mapstructure:"generic_resources" yaml:"generic_resources,omitempty" json:"generic_resources,omitempty"` | ||||
| 
 | ||||
| 	Extensions map[string]interface{} `yaml:",inline" json:"-"` | ||||
| } | ||||
| 
 | ||||
// DeviceRequest describes a device reservation for a service
// (driver, capabilities, and either a count or explicit device IDs).
type DeviceRequest struct {
	Capabilities []string `mapstructure:"capabilities" yaml:"capabilities,omitempty" json:"capabilities,omitempty"`
	Driver       string   `mapstructure:"driver" yaml:"driver,omitempty" json:"driver,omitempty"`
	Count        int64    `mapstructure:"count" yaml:"count,omitempty" json:"count,omitempty"`
	IDs          []string `mapstructure:"device_ids" yaml:"device_ids,omitempty" json:"device_ids,omitempty"`
}
| 
 | ||||
| // GenericResource represents a "user defined" resource which can | ||||
| // only be an integer (e.g: SSD=3) for a service | ||||
| type GenericResource struct { | ||||
| 	DiscreteResourceSpec *DiscreteGenericResource `mapstructure:"discrete_resource_spec" yaml:"discrete_resource_spec,omitempty" json:"discrete_resource_spec,omitempty"` | ||||
| 
 | ||||
| 	Extensions map[string]interface{} `yaml:",inline" json:"-"` | ||||
| } | ||||
| 
 | ||||
| // DiscreteGenericResource represents a "user defined" resource which is defined | ||||
| @@ -332,6 +502,8 @@ type GenericResource struct { | ||||
| type DiscreteGenericResource struct { | ||||
| 	Kind  string `json:"kind"` | ||||
| 	Value int64  `json:"value"` | ||||
| 
 | ||||
| 	Extensions map[string]interface{} `yaml:",inline" json:"-"` | ||||
| } | ||||
| 
 | ||||
| // UnitBytes is the bytes type | ||||
| @@ -353,6 +525,8 @@ type RestartPolicy struct { | ||||
| 	Delay       *Duration `yaml:",omitempty" json:"delay,omitempty"` | ||||
| 	MaxAttempts *uint64   `mapstructure:"max_attempts" yaml:"max_attempts,omitempty" json:"max_attempts,omitempty"` | ||||
| 	Window      *Duration `yaml:",omitempty" json:"window,omitempty"` | ||||
| 
 | ||||
| 	Extensions map[string]interface{} `yaml:",inline" json:"-"` | ||||
| } | ||||
| 
 | ||||
| // Placement constraints for the service | ||||
| @@ -360,26 +534,83 @@ type Placement struct { | ||||
| 	Constraints []string               `yaml:",omitempty" json:"constraints,omitempty"` | ||||
| 	Preferences []PlacementPreferences `yaml:",omitempty" json:"preferences,omitempty"` | ||||
| 	MaxReplicas uint64                 `mapstructure:"max_replicas_per_node" yaml:"max_replicas_per_node,omitempty" json:"max_replicas_per_node,omitempty"` | ||||
| 
 | ||||
| 	Extensions map[string]interface{} `yaml:",inline" json:"-"` | ||||
| } | ||||
| 
 | ||||
| // PlacementPreferences is the preferences for a service placement | ||||
| type PlacementPreferences struct { | ||||
| 	Spread string `yaml:",omitempty" json:"spread,omitempty"` | ||||
| 
 | ||||
| 	Extensions map[string]interface{} `yaml:",inline" json:"-"` | ||||
| } | ||||
| 
 | ||||
| // ServiceNetworkConfig is the network configuration for a service | ||||
| type ServiceNetworkConfig struct { | ||||
| 	Priority    int      `yaml:",omitempty" json:"priotirt,omitempty"` | ||||
| 	Aliases     []string `yaml:",omitempty" json:"aliases,omitempty"` | ||||
| 	Ipv4Address string   `mapstructure:"ipv4_address" yaml:"ipv4_address,omitempty" json:"ipv4_address,omitempty"` | ||||
| 	Ipv6Address string   `mapstructure:"ipv6_address" yaml:"ipv6_address,omitempty" json:"ipv6_address,omitempty"` | ||||
| 
 | ||||
| 	Extensions map[string]interface{} `yaml:",inline" json:"-"` | ||||
| } | ||||
| 
 | ||||
| // ServicePortConfig is the port configuration for a service | ||||
| type ServicePortConfig struct { | ||||
| 	Mode      string `yaml:",omitempty" json:"mode,omitempty"` | ||||
| 	HostIP    string `yaml:"host_ip,omitempty" json:"host_ip,omitempty"` | ||||
| 	Target    uint32 `yaml:",omitempty" json:"target,omitempty"` | ||||
| 	Published uint32 `yaml:",omitempty" json:"published,omitempty"` | ||||
| 	Protocol  string `yaml:",omitempty" json:"protocol,omitempty"` | ||||
| 
 | ||||
| 	Extensions map[string]interface{} `yaml:",inline" json:"-"` | ||||
| } | ||||
| 
 | ||||
| // ParsePortConfig parse short syntax for service port configuration | ||||
| func ParsePortConfig(value string) ([]ServicePortConfig, error) { | ||||
| 	var portConfigs []ServicePortConfig | ||||
| 	ports, portBindings, err := nat.ParsePortSpecs([]string{value}) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	// We need to sort the key of the ports to make sure it is consistent | ||||
| 	keys := []string{} | ||||
| 	for port := range ports { | ||||
| 		keys = append(keys, string(port)) | ||||
| 	} | ||||
| 	sort.Strings(keys) | ||||
| 
 | ||||
| 	for _, key := range keys { | ||||
| 		port := nat.Port(key) | ||||
| 		converted, err := convertPortToPortConfig(port, portBindings) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		portConfigs = append(portConfigs, converted...) | ||||
| 	} | ||||
| 	return portConfigs, nil | ||||
| } | ||||
| 
 | ||||
| func convertPortToPortConfig(port nat.Port, portBindings map[nat.Port][]nat.PortBinding) ([]ServicePortConfig, error) { | ||||
| 	portConfigs := []ServicePortConfig{} | ||||
| 	for _, binding := range portBindings[port] { | ||||
| 		startHostPort, endHostPort, err := nat.ParsePortRange(binding.HostPort) | ||||
| 
 | ||||
| 		if err != nil && binding.HostPort != "" { | ||||
| 			return nil, fmt.Errorf("invalid hostport binding (%s) for port (%s)", binding.HostPort, port.Port()) | ||||
| 		} | ||||
| 
 | ||||
| 		for i := startHostPort; i <= endHostPort; i++ { | ||||
| 			portConfigs = append(portConfigs, ServicePortConfig{ | ||||
| 				HostIP:    binding.HostIP, | ||||
| 				Protocol:  strings.ToLower(port.Proto()), | ||||
| 				Target:    uint32(port.Int()), | ||||
| 				Published: uint32(i), | ||||
| 				Mode:      "ingress", | ||||
| 			}) | ||||
| 		} | ||||
| 	} | ||||
| 	return portConfigs, nil | ||||
| } | ||||
| 
 | ||||
| // ServiceVolumeConfig are references to a volume used by a service | ||||
| @@ -392,21 +623,57 @@ type ServiceVolumeConfig struct { | ||||
| 	Bind        *ServiceVolumeBind   `yaml:",omitempty" json:"bind,omitempty"` | ||||
| 	Volume      *ServiceVolumeVolume `yaml:",omitempty" json:"volume,omitempty"` | ||||
| 	Tmpfs       *ServiceVolumeTmpfs  `yaml:",omitempty" json:"tmpfs,omitempty"` | ||||
| 
 | ||||
| 	Extensions map[string]interface{} `yaml:",inline" json:"-"` | ||||
| } | ||||
| 
 | ||||
| const ( | ||||
| 	// TypeBind is the type for mounting host dir | ||||
| 	VolumeTypeBind = "bind" | ||||
| 	// TypeVolume is the type for remote storage volumes | ||||
| 	VolumeTypeVolume = "volume" | ||||
| 	// TypeTmpfs is the type for mounting tmpfs | ||||
| 	VolumeTypeTmpfs = "tmpfs" | ||||
| 	// TypeNamedPipe is the type for mounting Windows named pipes | ||||
| 	VolumeTypeNamedPipe = "npipe" | ||||
| ) | ||||
| 
 | ||||
| // ServiceVolumeBind are options for a service volume of type bind | ||||
| type ServiceVolumeBind struct { | ||||
| 	Propagation    string `yaml:",omitempty" json:"propagation,omitempty"` | ||||
| 	CreateHostPath bool   `mapstructure:"create_host_path" yaml:"create_host_path,omitempty" json:"create_host_path,omitempty"` | ||||
| 
 | ||||
| 	Extensions map[string]interface{} `yaml:",inline" json:"-"` | ||||
| } | ||||
| 
 | ||||
| // Propagation represents the propagation of a mount. | ||||
| const ( | ||||
| 	// PropagationRPrivate RPRIVATE | ||||
| 	PropagationRPrivate string = "rprivate" | ||||
| 	// PropagationPrivate PRIVATE | ||||
| 	PropagationPrivate string = "private" | ||||
| 	// PropagationRShared RSHARED | ||||
| 	PropagationRShared string = "rshared" | ||||
| 	// PropagationShared SHARED | ||||
| 	PropagationShared string = "shared" | ||||
| 	// PropagationRSlave RSLAVE | ||||
| 	PropagationRSlave string = "rslave" | ||||
| 	// PropagationSlave SLAVE | ||||
| 	PropagationSlave string = "slave" | ||||
| ) | ||||
| 
 | ||||
| // ServiceVolumeVolume are options for a service volume of type volume | ||||
| type ServiceVolumeVolume struct { | ||||
| 	NoCopy bool `mapstructure:"nocopy" yaml:"nocopy,omitempty" json:"nocopy,omitempty"` | ||||
| 
 | ||||
| 	Extensions map[string]interface{} `yaml:",inline" json:"-"` | ||||
| } | ||||
| 
 | ||||
| // ServiceVolumeTmpfs are options for a service volume of type tmpfs | ||||
| type ServiceVolumeTmpfs struct { | ||||
| 	Size int64 `yaml:",omitempty" json:"size,omitempty"` | ||||
| 
 | ||||
| 	Extensions map[string]interface{} `yaml:",inline" json:"-"` | ||||
| } | ||||
| 
 | ||||
| // FileReferenceConfig for a reference to a swarm file object | ||||
| @@ -416,6 +683,8 @@ type FileReferenceConfig struct { | ||||
| 	UID    string  `yaml:",omitempty" json:"uid,omitempty"` | ||||
| 	GID    string  `yaml:",omitempty" json:"gid,omitempty"` | ||||
| 	Mode   *uint32 `yaml:",omitempty" json:"mode,omitempty"` | ||||
| 
 | ||||
| 	Extensions map[string]interface{} `yaml:",inline" json:"-"` | ||||
| } | ||||
| 
 | ||||
| // ServiceConfigObjConfig is the config obj configuration for a service | ||||
| @@ -429,6 +698,8 @@ type UlimitsConfig struct { | ||||
| 	Single int `yaml:",omitempty" json:"single,omitempty"` | ||||
| 	Soft   int `yaml:",omitempty" json:"soft,omitempty"` | ||||
| 	Hard   int `yaml:",omitempty" json:"hard,omitempty"` | ||||
| 
 | ||||
| 	Extensions map[string]interface{} `yaml:",inline" json:"-"` | ||||
| } | ||||
| 
 | ||||
| // MarshalYAML makes UlimitsConfig implement yaml.Marshaller | ||||
| @@ -458,18 +729,23 @@ type NetworkConfig struct { | ||||
| 	Internal   bool                   `yaml:",omitempty" json:"internal,omitempty"` | ||||
| 	Attachable bool                   `yaml:",omitempty" json:"attachable,omitempty"` | ||||
| 	Labels     Labels                 `yaml:",omitempty" json:"labels,omitempty"` | ||||
| 	Extras     map[string]interface{} `yaml:",inline" json:"-"` | ||||
| 	Extensions map[string]interface{} `yaml:",inline" json:"-"` | ||||
| } | ||||
| 
 | ||||
| // IPAMConfig for a network | ||||
| type IPAMConfig struct { | ||||
| 	Driver     string                 `yaml:",omitempty" json:"driver,omitempty"` | ||||
| 	Config     []*IPAMPool            `yaml:",omitempty" json:"config,omitempty"` | ||||
| 	Extensions map[string]interface{} `yaml:",inline" json:"-"` | ||||
| } | ||||
| 
 | ||||
| // IPAMPool for a network | ||||
| type IPAMPool struct { | ||||
| 	Subnet             string                 `yaml:",omitempty" json:"subnet,omitempty"` | ||||
| 	Gateway            string                 `yaml:",omitempty" json:"gateway,omitempty"` | ||||
| 	IPRange            string                 `mapstructure:"ip_range" yaml:"ip_range,omitempty" json:"ip_range,omitempty"` | ||||
| 	AuxiliaryAddresses map[string]string      `mapstructure:"aux_addresses" yaml:"aux_addresses,omitempty" json:"aux_addresses,omitempty"` | ||||
| 	Extensions         map[string]interface{} `yaml:",inline" json:"-"` | ||||
| } | ||||
| 
 | ||||
| // VolumeConfig for a volume | ||||
| @@ -479,7 +755,7 @@ type VolumeConfig struct { | ||||
| 	DriverOpts map[string]string      `mapstructure:"driver_opts" yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"` | ||||
| 	External   External               `yaml:",omitempty" json:"external,omitempty"` | ||||
| 	Labels     Labels                 `yaml:",omitempty" json:"labels,omitempty"` | ||||
| 	Extras     map[string]interface{} `yaml:",inline" json:"-"` | ||||
| 	Extensions map[string]interface{} `yaml:",inline" json:"-"` | ||||
| } | ||||
| 
 | ||||
| // External identifies a Volume or Network as a reference to a resource that is | ||||
| @@ -488,6 +764,7 @@ type VolumeConfig struct { | ||||
| type External struct { | ||||
| 	Name       string                 `yaml:",omitempty" json:"name,omitempty"` | ||||
| 	External   bool                   `yaml:",omitempty" json:"external,omitempty"` | ||||
| 	Extensions map[string]interface{} `yaml:",inline" json:"-"` | ||||
| } | ||||
| 
 | ||||
| // MarshalYAML makes External implement yaml.Marshaller | ||||
| @@ -511,6 +788,7 @@ type CredentialSpecConfig struct { | ||||
| 	Config     string                 `yaml:",omitempty" json:"config,omitempty"` // Config was added in API v1.40 | ||||
| 	File       string                 `yaml:",omitempty" json:"file,omitempty"` | ||||
| 	Registry   string                 `yaml:",omitempty" json:"registry,omitempty"` | ||||
| 	Extensions map[string]interface{} `yaml:",inline" json:"-"` | ||||
| } | ||||
| 
 | ||||
| // FileObjectConfig is a config type for a file used by a service | ||||
| @@ -519,12 +797,32 @@ type FileObjectConfig struct { | ||||
| 	File           string                 `yaml:",omitempty" json:"file,omitempty"` | ||||
| 	External       External               `yaml:",omitempty" json:"external,omitempty"` | ||||
| 	Labels         Labels                 `yaml:",omitempty" json:"labels,omitempty"` | ||||
| 	Extras         map[string]interface{} `yaml:",inline" json:"-"` | ||||
| 	Driver         string                 `yaml:",omitempty" json:"driver,omitempty"` | ||||
| 	DriverOpts     map[string]string      `mapstructure:"driver_opts" yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"` | ||||
| 	TemplateDriver string                 `mapstructure:"template_driver" yaml:"template_driver,omitempty" json:"template_driver,omitempty"` | ||||
| 	Extensions     map[string]interface{} `yaml:",inline" json:"-"` | ||||
| } | ||||
| 
 | ||||
| const ( | ||||
| 	// ServiceConditionCompletedSuccessfully is the type for waiting until a service has completed successfully (exit code 0). | ||||
| 	ServiceConditionCompletedSuccessfully = "service_completed_successfully" | ||||
| 
 | ||||
| 	// ServiceConditionHealthy is the type for waiting until a service is healthy. | ||||
| 	ServiceConditionHealthy = "service_healthy" | ||||
| 
 | ||||
| 	// ServiceConditionStarted is the type for waiting until a service has started (default). | ||||
| 	ServiceConditionStarted = "service_started" | ||||
| ) | ||||
| 
 | ||||
| type DependsOnConfig map[string]ServiceDependency | ||||
| 
 | ||||
| type ServiceDependency struct { | ||||
| 	Condition  string                 `yaml:",omitempty" json:"condition,omitempty"` | ||||
| 	Extensions map[string]interface{} `yaml:",inline" json:"-"` | ||||
| } | ||||
| 
 | ||||
| type ExtendsConfig MappingWithEquals | ||||
| 
 | ||||
| // SecretConfig for a secret | ||||
| type SecretConfig FileObjectConfig | ||||
| 
 | ||||
							
								
								
									
										202
									
								
								vendor/github.com/distribution/distribution/v3/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										202
									
								
								vendor/github.com/distribution/distribution/v3/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,202 @@ | ||||
| Apache License | ||||
|                            Version 2.0, January 2004 | ||||
|                         http://www.apache.org/licenses/ | ||||
|  | ||||
|    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | ||||
|  | ||||
|    1. Definitions. | ||||
|  | ||||
|       "License" shall mean the terms and conditions for use, reproduction, | ||||
|       and distribution as defined by Sections 1 through 9 of this document. | ||||
|  | ||||
|       "Licensor" shall mean the copyright owner or entity authorized by | ||||
|       the copyright owner that is granting the License. | ||||
|  | ||||
|       "Legal Entity" shall mean the union of the acting entity and all | ||||
|       other entities that control, are controlled by, or are under common | ||||
|       control with that entity. For the purposes of this definition, | ||||
|       "control" means (i) the power, direct or indirect, to cause the | ||||
|       direction or management of such entity, whether by contract or | ||||
|       otherwise, or (ii) ownership of fifty percent (50%) or more of the | ||||
|       outstanding shares, or (iii) beneficial ownership of such entity. | ||||
|  | ||||
|       "You" (or "Your") shall mean an individual or Legal Entity | ||||
|       exercising permissions granted by this License. | ||||
|  | ||||
|       "Source" form shall mean the preferred form for making modifications, | ||||
|       including but not limited to software source code, documentation | ||||
|       source, and configuration files. | ||||
|  | ||||
|       "Object" form shall mean any form resulting from mechanical | ||||
|       transformation or translation of a Source form, including but | ||||
|       not limited to compiled object code, generated documentation, | ||||
|       and conversions to other media types. | ||||
|  | ||||
|       "Work" shall mean the work of authorship, whether in Source or | ||||
|       Object form, made available under the License, as indicated by a | ||||
|       copyright notice that is included in or attached to the work | ||||
|       (an example is provided in the Appendix below). | ||||
|  | ||||
|       "Derivative Works" shall mean any work, whether in Source or Object | ||||
|       form, that is based on (or derived from) the Work and for which the | ||||
|       editorial revisions, annotations, elaborations, or other modifications | ||||
|       represent, as a whole, an original work of authorship. For the purposes | ||||
|       of this License, Derivative Works shall not include works that remain | ||||
|       separable from, or merely link (or bind by name) to the interfaces of, | ||||
|       the Work and Derivative Works thereof. | ||||
|  | ||||
|       "Contribution" shall mean any work of authorship, including | ||||
|       the original version of the Work and any modifications or additions | ||||
|       to that Work or Derivative Works thereof, that is intentionally | ||||
|       submitted to Licensor for inclusion in the Work by the copyright owner | ||||
|       or by an individual or Legal Entity authorized to submit on behalf of | ||||
|       the copyright owner. For the purposes of this definition, "submitted" | ||||
|       means any form of electronic, verbal, or written communication sent | ||||
|       to the Licensor or its representatives, including but not limited to | ||||
|       communication on electronic mailing lists, source code control systems, | ||||
|       and issue tracking systems that are managed by, or on behalf of, the | ||||
|       Licensor for the purpose of discussing and improving the Work, but | ||||
|       excluding communication that is conspicuously marked or otherwise | ||||
|       designated in writing by the copyright owner as "Not a Contribution." | ||||
|  | ||||
|       "Contributor" shall mean Licensor and any individual or Legal Entity | ||||
|       on behalf of whom a Contribution has been received by Licensor and | ||||
|       subsequently incorporated within the Work. | ||||
|  | ||||
|    2. Grant of Copyright License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       copyright license to reproduce, prepare Derivative Works of, | ||||
|       publicly display, publicly perform, sublicense, and distribute the | ||||
|       Work and such Derivative Works in Source or Object form. | ||||
|  | ||||
|    3. Grant of Patent License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       (except as stated in this section) patent license to make, have made, | ||||
|       use, offer to sell, sell, import, and otherwise transfer the Work, | ||||
|       where such license applies only to those patent claims licensable | ||||
|       by such Contributor that are necessarily infringed by their | ||||
|       Contribution(s) alone or by combination of their Contribution(s) | ||||
|       with the Work to which such Contribution(s) was submitted. If You | ||||
|       institute patent litigation against any entity (including a | ||||
|       cross-claim or counterclaim in a lawsuit) alleging that the Work | ||||
|       or a Contribution incorporated within the Work constitutes direct | ||||
|       or contributory patent infringement, then any patent licenses | ||||
|       granted to You under this License for that Work shall terminate | ||||
|       as of the date such litigation is filed. | ||||
|  | ||||
|    4. Redistribution. You may reproduce and distribute copies of the | ||||
|       Work or Derivative Works thereof in any medium, with or without | ||||
|       modifications, and in Source or Object form, provided that You | ||||
|       meet the following conditions: | ||||
|  | ||||
|       (a) You must give any other recipients of the Work or | ||||
|           Derivative Works a copy of this License; and | ||||
|  | ||||
|       (b) You must cause any modified files to carry prominent notices | ||||
|           stating that You changed the files; and | ||||
|  | ||||
|       (c) You must retain, in the Source form of any Derivative Works | ||||
|           that You distribute, all copyright, patent, trademark, and | ||||
|           attribution notices from the Source form of the Work, | ||||
|           excluding those notices that do not pertain to any part of | ||||
|           the Derivative Works; and | ||||
|  | ||||
|       (d) If the Work includes a "NOTICE" text file as part of its | ||||
|           distribution, then any Derivative Works that You distribute must | ||||
|           include a readable copy of the attribution notices contained | ||||
|           within such NOTICE file, excluding those notices that do not | ||||
|           pertain to any part of the Derivative Works, in at least one | ||||
|           of the following places: within a NOTICE text file distributed | ||||
|           as part of the Derivative Works; within the Source form or | ||||
|           documentation, if provided along with the Derivative Works; or, | ||||
|           within a display generated by the Derivative Works, if and | ||||
|           wherever such third-party notices normally appear. The contents | ||||
|           of the NOTICE file are for informational purposes only and | ||||
|           do not modify the License. You may add Your own attribution | ||||
|           notices within Derivative Works that You distribute, alongside | ||||
|           or as an addendum to the NOTICE text from the Work, provided | ||||
|           that such additional attribution notices cannot be construed | ||||
|           as modifying the License. | ||||
|  | ||||
|       You may add Your own copyright statement to Your modifications and | ||||
|       may provide additional or different license terms and conditions | ||||
|       for use, reproduction, or distribution of Your modifications, or | ||||
|       for any such Derivative Works as a whole, provided Your use, | ||||
|       reproduction, and distribution of the Work otherwise complies with | ||||
|       the conditions stated in this License. | ||||
|  | ||||
|    5. Submission of Contributions. Unless You explicitly state otherwise, | ||||
|       any Contribution intentionally submitted for inclusion in the Work | ||||
|       by You to the Licensor shall be under the terms and conditions of | ||||
|       this License, without any additional terms or conditions. | ||||
|       Notwithstanding the above, nothing herein shall supersede or modify | ||||
|       the terms of any separate license agreement you may have executed | ||||
|       with Licensor regarding such Contributions. | ||||
|  | ||||
|    6. Trademarks. This License does not grant permission to use the trade | ||||
|       names, trademarks, service marks, or product names of the Licensor, | ||||
|       except as required for reasonable and customary use in describing the | ||||
|       origin of the Work and reproducing the content of the NOTICE file. | ||||
|  | ||||
|    7. Disclaimer of Warranty. Unless required by applicable law or | ||||
|       agreed to in writing, Licensor provides the Work (and each | ||||
|       Contributor provides its Contributions) on an "AS IS" BASIS, | ||||
|       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | ||||
|       implied, including, without limitation, any warranties or conditions | ||||
|       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | ||||
|       PARTICULAR PURPOSE. You are solely responsible for determining the | ||||
|       appropriateness of using or redistributing the Work and assume any | ||||
|       risks associated with Your exercise of permissions under this License. | ||||
|  | ||||
|    8. Limitation of Liability. In no event and under no legal theory, | ||||
|       whether in tort (including negligence), contract, or otherwise, | ||||
|       unless required by applicable law (such as deliberate and grossly | ||||
|       negligent acts) or agreed to in writing, shall any Contributor be | ||||
|       liable to You for damages, including any direct, indirect, special, | ||||
|       incidental, or consequential damages of any character arising as a | ||||
|       result of this License or out of the use or inability to use the | ||||
|       Work (including but not limited to damages for loss of goodwill, | ||||
|       work stoppage, computer failure or malfunction, or any and all | ||||
|       other commercial damages or losses), even if such Contributor | ||||
|       has been advised of the possibility of such damages. | ||||
|  | ||||
|    9. Accepting Warranty or Additional Liability. While redistributing | ||||
|       the Work or Derivative Works thereof, You may choose to offer, | ||||
|       and charge a fee for, acceptance of support, warranty, indemnity, | ||||
|       or other liability obligations and/or rights consistent with this | ||||
|       License. However, in accepting such obligations, You may act only | ||||
|       on Your own behalf and on Your sole responsibility, not on behalf | ||||
|       of any other Contributor, and only if You agree to indemnify, | ||||
|       defend, and hold each Contributor harmless for any liability | ||||
|       incurred by, or claims asserted against, such Contributor by reason | ||||
|       of your accepting any such warranty or additional liability. | ||||
|  | ||||
|    END OF TERMS AND CONDITIONS | ||||
|  | ||||
|    APPENDIX: How to apply the Apache License to your work. | ||||
|  | ||||
|       To apply the Apache License to your work, attach the following | ||||
|       boilerplate notice, with the fields enclosed by brackets "{}" | ||||
|       replaced with your own identifying information. (Don't include | ||||
|       the brackets!)  The text should be enclosed in the appropriate | ||||
|       comment syntax for the file format. We also recommend that a | ||||
|       file or class name and description of purpose be included on the | ||||
|       same "printed page" as the copyright notice for easier | ||||
|       identification within third-party archives. | ||||
|  | ||||
|    Copyright {yyyy} {name of copyright owner} | ||||
|  | ||||
|    Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|    you may not use this file except in compliance with the License. | ||||
|    You may obtain a copy of the License at | ||||
|  | ||||
|        http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
|    Unless required by applicable law or agreed to in writing, software | ||||
|    distributed under the License is distributed on an "AS IS" BASIS, | ||||
|    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|    See the License for the specific language governing permissions and | ||||
|    limitations under the License. | ||||
|  | ||||
							
								
								
									
										247
									
								
								vendor/github.com/distribution/distribution/v3/digestset/set.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										247
									
								
								vendor/github.com/distribution/distribution/v3/digestset/set.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,247 @@ | ||||
| package digestset | ||||
|  | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"sort" | ||||
| 	"strings" | ||||
| 	"sync" | ||||
|  | ||||
| 	digest "github.com/opencontainers/go-digest" | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| 	// ErrDigestNotFound is used when a matching digest | ||||
| 	// could not be found in a set. | ||||
| 	ErrDigestNotFound = errors.New("digest not found") | ||||
|  | ||||
| 	// ErrDigestAmbiguous is used when multiple digests | ||||
| 	// are found in a set. None of the matching digests | ||||
| 	// should be considered valid matches. | ||||
| 	ErrDigestAmbiguous = errors.New("ambiguous digest string") | ||||
| ) | ||||
|  | ||||
| // Set is used to hold a unique set of digests which | ||||
| // may be easily referenced by easily  referenced by a string | ||||
| // representation of the digest as well as short representation. | ||||
| // The uniqueness of the short representation is based on other | ||||
| // digests in the set. If digests are omitted from this set, | ||||
| // collisions in a larger set may not be detected, therefore it | ||||
| // is important to always do short representation lookups on | ||||
| // the complete set of digests. To mitigate collisions, an | ||||
| // appropriately long short code should be used. | ||||
| type Set struct { | ||||
| 	mutex   sync.RWMutex | ||||
| 	entries digestEntries | ||||
| } | ||||
|  | ||||
| // NewSet creates an empty set of digests | ||||
| // which may have digests added. | ||||
| func NewSet() *Set { | ||||
| 	return &Set{ | ||||
| 		entries: digestEntries{}, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // checkShortMatch checks whether two digests match as either whole | ||||
| // values or short values. This function does not test equality, | ||||
| // rather whether the second value could match against the first | ||||
| // value. | ||||
| func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool { | ||||
| 	if len(hex) == len(shortHex) { | ||||
| 		if hex != shortHex { | ||||
| 			return false | ||||
| 		} | ||||
| 		if len(shortAlg) > 0 && string(alg) != shortAlg { | ||||
| 			return false | ||||
| 		} | ||||
| 	} else if !strings.HasPrefix(hex, shortHex) { | ||||
| 		return false | ||||
| 	} else if len(shortAlg) > 0 && string(alg) != shortAlg { | ||||
| 		return false | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
|  | ||||
// Lookup looks for a digest matching the given string representation.
// If no digests could be found ErrDigestNotFound will be returned
// with an empty digest value. If multiple matches are found
// ErrDigestAmbiguous will be returned with an empty digest value.
func (dst *Set) Lookup(d string) (digest.Digest, error) {
	dst.mutex.RLock()
	defer dst.mutex.RUnlock()
	if len(dst.entries) == 0 {
		return "", ErrDigestNotFound
	}
	var (
		searchFunc func(int) bool
		alg        digest.Algorithm
		hex        string
	)
	dgst, err := digest.Parse(d)
	if err == digest.ErrDigestInvalidFormat {
		// Not a full "alg:hex" digest: treat d as a bare (possibly short)
		// hex prefix and search by value alone.
		hex = d
		searchFunc = func(i int) bool {
			return dst.entries[i].val >= d
		}
	} else {
		// Full digest: search by (hex value, algorithm), matching the sort
		// order maintained by Add (val first, alg second).
		hex = dgst.Hex()
		alg = dgst.Algorithm()
		searchFunc = func(i int) bool {
			if dst.entries[i].val == hex {
				return dst.entries[i].alg >= alg
			}
			return dst.entries[i].val >= hex
		}
	}
	// Binary-search for the first candidate entry, then verify it really
	// matches the (short) input.
	idx := sort.Search(len(dst.entries), searchFunc)
	if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
		return "", ErrDigestNotFound
	}
	// An exact (alg, hex) match is unambiguous even if neighboring entries
	// share the same prefix.
	if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
		return dst.entries[idx].digest, nil
	}
	// Entries are sorted, so any other match is adjacent: if the next entry
	// also matches the short form, the lookup is ambiguous.
	if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
		return "", ErrDigestAmbiguous
	}

	return dst.entries[idx].digest, nil
}
|  | ||||
// Add adds the given digest to the set. An error will be returned
// if the given digest is invalid. If the digest already exists in the
// set, this operation will be a no-op.
func (dst *Set) Add(d digest.Digest) error {
	if err := d.Validate(); err != nil {
		return err
	}
	dst.mutex.Lock()
	defer dst.mutex.Unlock()
	entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
	// Find the insertion point that keeps entries sorted by hex value,
	// then algorithm — the order Lookup's binary search depends on.
	searchFunc := func(i int) bool {
		if dst.entries[i].val == entry.val {
			return dst.entries[i].alg >= entry.alg
		}
		return dst.entries[i].val >= entry.val
	}
	idx := sort.Search(len(dst.entries), searchFunc)
	if idx == len(dst.entries) {
		// Sorts after everything present: append at the end.
		dst.entries = append(dst.entries, entry)
		return nil
	} else if dst.entries[idx].digest == d {
		// Already present: no-op.
		return nil
	}

	// Insert at idx: grow by one, shift the tail right, place the entry.
	entries := append(dst.entries, nil)
	copy(entries[idx+1:], entries[idx:len(entries)-1])
	entries[idx] = entry
	dst.entries = entries
	return nil
}
|  | ||||
| // Remove removes the given digest from the set. An err will be | ||||
| // returned if the given digest is invalid. If the digest does | ||||
| // not exist in the set, this operation will be a no-op. | ||||
| func (dst *Set) Remove(d digest.Digest) error { | ||||
| 	if err := d.Validate(); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	dst.mutex.Lock() | ||||
| 	defer dst.mutex.Unlock() | ||||
| 	entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} | ||||
| 	searchFunc := func(i int) bool { | ||||
| 		if dst.entries[i].val == entry.val { | ||||
| 			return dst.entries[i].alg >= entry.alg | ||||
| 		} | ||||
| 		return dst.entries[i].val >= entry.val | ||||
| 	} | ||||
| 	idx := sort.Search(len(dst.entries), searchFunc) | ||||
| 	// Not found if idx is after or value at idx is not digest | ||||
| 	if idx == len(dst.entries) || dst.entries[idx].digest != d { | ||||
| 		return nil | ||||
| 	} | ||||
|  | ||||
| 	entries := dst.entries | ||||
| 	copy(entries[idx:], entries[idx+1:]) | ||||
| 	entries = entries[:len(entries)-1] | ||||
| 	dst.entries = entries | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // All returns all the digests in the set | ||||
| func (dst *Set) All() []digest.Digest { | ||||
| 	dst.mutex.RLock() | ||||
| 	defer dst.mutex.RUnlock() | ||||
| 	retValues := make([]digest.Digest, len(dst.entries)) | ||||
| 	for i := range dst.entries { | ||||
| 		retValues[i] = dst.entries[i].digest | ||||
| 	} | ||||
|  | ||||
| 	return retValues | ||||
| } | ||||
|  | ||||
// ShortCodeTable returns a map of Digest to unique short codes. The
// length represents the minimum value, the maximum length may be the
// entire value of digest if uniqueness cannot be achieved without the
// full value. This function will attempt to make short codes as short
// as possible to be unique.
func ShortCodeTable(dst *Set, length int) map[digest.Digest]string {
	dst.mutex.RLock()
	defer dst.mutex.RUnlock()
	m := make(map[digest.Digest]string, len(dst.entries))
	l := length     // current prefix length being tried
	resetIdx := 0   // index of the last entry known to collide at length l
	for i := 0; i < len(dst.entries); i++ {
		var short string
		extended := true
		for extended {
			extended = false
			if len(dst.entries[i].val) <= l {
				// The whole value fits within l characters; use the full
				// digest string since no shorter unique code exists.
				short = dst.entries[i].digest.String()
			} else {
				short = dst.entries[i].val[:l]
				// Entries are sorted, so any entry sharing this prefix is
				// adjacent; scan forward until the prefix stops matching.
				for j := i + 1; j < len(dst.entries); j++ {
					if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) {
						if j > resetIdx {
							resetIdx = j
						}
						extended = true
					} else {
						break
					}
				}
				// Collision found: lengthen the prefix and retry.
				if extended {
					l++
				}
			}
		}
		m[dst.entries[i].digest] = short
		// Once past the last colliding entry, shrink back to the minimum.
		if i >= resetIdx {
			l = length
		}
	}
	return m
}
|  | ||||
// digestEntry is a single element of the sorted set: the digest split
// into its algorithm and hex components (used for ordering and prefix
// matching) alongside the original digest value.
type digestEntry struct {
	alg    digest.Algorithm // e.g. "sha256"
	val    string           // hex-encoded digest value
	digest digest.Digest    // the full original digest
}
|  | ||||
// digestEntries implements sort.Interface, ordering entries by hex value
// first and algorithm second — the same order the binary searches in
// Lookup, Add and Remove rely on.
type digestEntries []*digestEntry

func (d digestEntries) Len() int {
	return len(d)
}

// Less orders by hex value, breaking ties with the algorithm name.
func (d digestEntries) Less(i, j int) bool {
	if d[i].val != d[j].val {
		return d[i].val < d[j].val
	}
	return d[i].alg < d[j].alg
}

func (d digestEntries) Swap(i, j int) {
	d[i], d[j] = d[j], d[i]
}
							
								
								
									
										42
									
								
								vendor/github.com/distribution/distribution/v3/reference/helpers.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										42
									
								
								vendor/github.com/distribution/distribution/v3/reference/helpers.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,42 @@ | ||||
| package reference | ||||
|  | ||||
| import "path" | ||||
|  | ||||
| // IsNameOnly returns true if reference only contains a repo name. | ||||
| func IsNameOnly(ref Named) bool { | ||||
| 	if _, ok := ref.(NamedTagged); ok { | ||||
| 		return false | ||||
| 	} | ||||
| 	if _, ok := ref.(Canonical); ok { | ||||
| 		return false | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| // FamiliarName returns the familiar name string | ||||
| // for the given named, familiarizing if needed. | ||||
| func FamiliarName(ref Named) string { | ||||
| 	if nn, ok := ref.(normalizedNamed); ok { | ||||
| 		return nn.Familiar().Name() | ||||
| 	} | ||||
| 	return ref.Name() | ||||
| } | ||||
|  | ||||
| // FamiliarString returns the familiar string representation | ||||
| // for the given reference, familiarizing if needed. | ||||
| func FamiliarString(ref Reference) string { | ||||
| 	if nn, ok := ref.(normalizedNamed); ok { | ||||
| 		return nn.Familiar().String() | ||||
| 	} | ||||
| 	return ref.String() | ||||
| } | ||||
|  | ||||
// FamiliarMatch reports whether ref matches the specified pattern.
// See https://godoc.org/path#Match for supported patterns.
func FamiliarMatch(pattern string, ref Reference) (bool, error) {
	matched, err := path.Match(pattern, FamiliarString(ref))
	// Fall back to matching on the name alone (without tag/digest) for
	// named references. The error from this second attempt is deliberately
	// dropped so that the error from the first Match is what gets reported.
	if namedRef, isNamed := ref.(Named); isNamed && !matched {
		matched, _ = path.Match(pattern, FamiliarName(namedRef))
	}
	return matched, err
}
							
								
								
									
										198
									
								
								vendor/github.com/distribution/distribution/v3/reference/normalize.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										198
									
								
								vendor/github.com/distribution/distribution/v3/reference/normalize.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,198 @@ | ||||
| package reference | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"strings" | ||||
|  | ||||
| 	"github.com/distribution/distribution/v3/digestset" | ||||
| 	"github.com/opencontainers/go-digest" | ||||
| ) | ||||
|  | ||||
// Defaults used when normalizing familiar Docker references into fully
// qualified ones.
var (
	legacyDefaultDomain = "index.docker.io" // historic registry host, rewritten to defaultDomain
	defaultDomain       = "docker.io"
	officialRepoName    = "library" // namespace for official images on Docker Hub
	defaultTag          = "latest"
)
|  | ||||
// normalizedNamed represents a name which has been
// normalized and has a familiar form. A familiar name
// is what is used in Docker UI. An example normalized
// name is "docker.io/library/ubuntu" and corresponding
// familiar name of "ubuntu". Implemented by the concrete
// reference types in this package via their Familiar methods.
type normalizedNamed interface {
	Named
	Familiar() Named
}
|  | ||||
| // ParseNormalizedNamed parses a string into a named reference | ||||
| // transforming a familiar name from Docker UI to a fully | ||||
| // qualified reference. If the value may be an identifier | ||||
| // use ParseAnyReference. | ||||
| func ParseNormalizedNamed(s string) (Named, error) { | ||||
| 	if ok := anchoredIdentifierRegexp.MatchString(s); ok { | ||||
| 		return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) | ||||
| 	} | ||||
| 	domain, remainder := splitDockerDomain(s) | ||||
| 	var remoteName string | ||||
| 	if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { | ||||
| 		remoteName = remainder[:tagSep] | ||||
| 	} else { | ||||
| 		remoteName = remainder | ||||
| 	} | ||||
| 	if strings.ToLower(remoteName) != remoteName { | ||||
| 		return nil, fmt.Errorf("invalid reference format: repository name (%s) must be lowercase", remoteName) | ||||
| 	} | ||||
|  | ||||
| 	ref, err := Parse(domain + "/" + remainder) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	named, isNamed := ref.(Named) | ||||
| 	if !isNamed { | ||||
| 		return nil, fmt.Errorf("reference %s has no name", ref.String()) | ||||
| 	} | ||||
| 	return named, nil | ||||
| } | ||||
|  | ||||
// ParseDockerRef normalizes the image reference following the docker convention. This is added
// mainly for backward compatibility.
// The reference returned can only be either tagged or digested. For reference contains both tag
// and digest, the function returns digested reference, e.g. docker.io/library/busybox:latest@
// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as
// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa.
func ParseDockerRef(ref string) (Named, error) {
	named, err := ParseNormalizedNamed(ref)
	if err != nil {
		return nil, err
	}
	if _, ok := named.(NamedTagged); ok {
		if canonical, ok := named.(Canonical); ok {
			// The reference is both tagged and digested, only
			// return digested. Rebuild from the bare name so the
			// tag is dropped.
			newNamed, err := WithName(canonical.Name())
			if err != nil {
				return nil, err
			}
			newCanonical, err := WithDigest(newNamed, canonical.Digest())
			if err != nil {
				return nil, err
			}
			return newCanonical, nil
		}
	}
	// Tagged-only, digested-only, or name-only: ensure a name-only
	// reference picks up the default "latest" tag.
	return TagNameOnly(named), nil
}
|  | ||||
| // splitDockerDomain splits a repository name to domain and remotename string. | ||||
| // If no valid domain is found, the default domain is used. Repository name | ||||
| // needs to be already validated before. | ||||
| func splitDockerDomain(name string) (domain, remainder string) { | ||||
| 	i := strings.IndexRune(name, '/') | ||||
| 	if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost" && strings.ToLower(name[:i]) == name[:i]) { | ||||
| 		domain, remainder = defaultDomain, name | ||||
| 	} else { | ||||
| 		domain, remainder = name[:i], name[i+1:] | ||||
| 	} | ||||
| 	if domain == legacyDefaultDomain { | ||||
| 		domain = defaultDomain | ||||
| 	} | ||||
| 	if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { | ||||
| 		remainder = officialRepoName + "/" + remainder | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
|  | ||||
// familiarizeName returns a shortened version of the name familiar
// to the Docker UI. Familiar names have the default domain
// "docker.io" and "library/" repository prefix removed.
// For example, "docker.io/library/redis" will have the familiar
// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
// Returns a familiarized named only reference.
func familiarizeName(named namedRepository) repository {
	repo := repository{
		domain: named.Domain(),
		path:   named.Path(),
	}

	if repo.domain == defaultDomain {
		repo.domain = ""
		// Handle official repositories which have the pattern "library/<official repo name>"
		if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
			repo.path = split[1]
		}
	}
	return repo
}
|  | ||||
// Familiar returns the reference with a familiarized name, keeping the
// tag and digest intact.
func (r reference) Familiar() Named {
	return reference{
		namedRepository: familiarizeName(r.namedRepository),
		tag:             r.tag,
		digest:          r.digest,
	}
}

// Familiar returns the name-only reference in its familiar form.
func (r repository) Familiar() Named {
	return familiarizeName(r)
}

// Familiar returns the tagged reference with a familiarized name.
func (t taggedReference) Familiar() Named {
	return taggedReference{
		namedRepository: familiarizeName(t.namedRepository),
		tag:             t.tag,
	}
}

// Familiar returns the digested reference with a familiarized name.
func (c canonicalReference) Familiar() Named {
	return canonicalReference{
		namedRepository: familiarizeName(c.namedRepository),
		digest:          c.digest,
	}
}
|  | ||||
| // TagNameOnly adds the default tag "latest" to a reference if it only has | ||||
| // a repo name. | ||||
| func TagNameOnly(ref Named) Named { | ||||
| 	if IsNameOnly(ref) { | ||||
| 		namedTagged, err := WithTag(ref, defaultTag) | ||||
| 		if err != nil { | ||||
| 			// Default tag must be valid, to create a NamedTagged | ||||
| 			// type with non-validated input the WithTag function | ||||
| 			// should be used instead | ||||
| 			panic(err) | ||||
| 		} | ||||
| 		return namedTagged | ||||
| 	} | ||||
| 	return ref | ||||
| } | ||||
|  | ||||
// ParseAnyReference parses a reference string as a possible identifier,
// full digest, or familiar name.
func ParseAnyReference(ref string) (Reference, error) {
	// A bare 64-char hex identifier is interpreted as a sha256 digest.
	if ok := anchoredIdentifierRegexp.MatchString(ref); ok {
		return digestReference("sha256:" + ref), nil
	}
	if dgst, err := digest.Parse(ref); err == nil {
		return digestReference(dgst), nil
	}

	// Otherwise fall back to parsing as a (possibly familiar) name.
	return ParseNormalizedNamed(ref)
}
|  | ||||
| // ParseAnyReferenceWithSet parses a reference string as a possible short | ||||
| // identifier to be matched in a digest set, a full digest, or familiar name. | ||||
| func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) { | ||||
| 	if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok { | ||||
| 		dgst, err := ds.Lookup(ref) | ||||
| 		if err == nil { | ||||
| 			return digestReference(dgst), nil | ||||
| 		} | ||||
| 	} else { | ||||
| 		if dgst, err := digest.Parse(ref); err == nil { | ||||
| 			return digestReference(dgst), nil | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return ParseNormalizedNamed(ref) | ||||
| } | ||||
							
								
								
									
										433
									
								
								vendor/github.com/distribution/distribution/v3/reference/reference.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										433
									
								
								vendor/github.com/distribution/distribution/v3/reference/reference.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,433 @@ | ||||
| // Package reference provides a general type to represent any way of referencing images within the registry. | ||||
| // Its main purpose is to abstract tags and digests (content-addressable hash). | ||||
| // | ||||
| // Grammar | ||||
| // | ||||
| // 	reference                       := name [ ":" tag ] [ "@" digest ] | ||||
| //	name                            := [domain '/'] path-component ['/' path-component]* | ||||
| //	domain                          := domain-component ['.' domain-component]* [':' port-number] | ||||
| //	domain-component                := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ | ||||
| //	port-number                     := /[0-9]+/ | ||||
| //	path-component                  := alpha-numeric [separator alpha-numeric]* | ||||
| // 	alpha-numeric                   := /[a-z0-9]+/ | ||||
| //	separator                       := /[_.]|__|[-]*/ | ||||
| // | ||||
| //	tag                             := /[\w][\w.-]{0,127}/ | ||||
| // | ||||
| //	digest                          := digest-algorithm ":" digest-hex | ||||
| //	digest-algorithm                := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* | ||||
| //	digest-algorithm-separator      := /[+.-_]/ | ||||
| //	digest-algorithm-component      := /[A-Za-z][A-Za-z0-9]*/ | ||||
| //	digest-hex                      := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value | ||||
| // | ||||
| //	identifier                      := /[a-f0-9]{64}/ | ||||
| //	short-identifier                := /[a-f0-9]{6,64}/ | ||||
| package reference | ||||
|  | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"strings" | ||||
|  | ||||
| 	"github.com/opencontainers/go-digest" | ||||
| ) | ||||
|  | ||||
const (
	// NameTotalLengthMax is the maximum total number of characters in a
	// repository name (the domain and path portion, excluding tag and digest).
	NameTotalLengthMax = 255
)
|  | ||||
var (
	// ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
	ErrReferenceInvalidFormat = errors.New("invalid reference format")

	// ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
	ErrTagInvalidFormat = errors.New("invalid tag format")

	// ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
	ErrDigestInvalidFormat = errors.New("invalid digest format")

	// ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
	ErrNameContainsUppercase = errors.New("repository name must be lowercase")

	// ErrNameEmpty is returned for empty, invalid repository names.
	ErrNameEmpty = errors.New("repository name must have at least one component")

	// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
	ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)

	// ErrNameNotCanonical is returned when a name is not canonical.
	ErrNameNotCanonical = errors.New("repository name must be canonical")
)
|  | ||||
// Reference is an opaque object reference identifier that may include
// modifiers such as a hostname, name, tag, and digest.
type Reference interface {
	// String returns the full reference
	String() string
}

// Field provides a wrapper type for resolving correct reference types when
// working with encoding.
type Field struct {
	reference Reference
}

// AsField wraps a reference in a Field for encoding.
func AsField(reference Reference) Field {
	return Field{reference}
}

// Reference unwraps the reference type from the field to
// return the Reference object. This object should be
// of the appropriate type to further check for different
// reference types.
func (f Field) Reference() Reference {
	return f.reference
}

// MarshalText serializes the field to byte text which
// is the string of the reference.
func (f Field) MarshalText() (p []byte, err error) {
	return []byte(f.reference.String()), nil
}

// UnmarshalText parses text bytes by invoking the
// reference parser to ensure the appropriately
// typed reference object is wrapped by field.
func (f *Field) UnmarshalText(p []byte) error {
	r, err := Parse(string(p))
	if err != nil {
		return err
	}

	f.reference = r
	return nil
}
|  | ||||
// Named is an object with a full name
type Named interface {
	Reference
	// Name returns the fully qualified repository name.
	Name() string
}

// Tagged is an object which has a tag
type Tagged interface {
	Reference
	// Tag returns the tag component.
	Tag() string
}

// NamedTagged is an object including a name and tag.
type NamedTagged interface {
	Named
	Tag() string
}

// Digested is an object which has a digest
// in which it can be referenced by
type Digested interface {
	Reference
	Digest() digest.Digest
}

// Canonical reference is an object with a fully unique
// name including a name with domain and digest
type Canonical interface {
	Named
	Digest() digest.Digest
}

// namedRepository is a reference to a repository with a name.
// A namedRepository has both domain and path components.
type namedRepository interface {
	Named
	Domain() string
	Path() string
}
|  | ||||
| // Domain returns the domain part of the Named reference | ||||
| func Domain(named Named) string { | ||||
| 	if r, ok := named.(namedRepository); ok { | ||||
| 		return r.Domain() | ||||
| 	} | ||||
| 	domain, _ := splitDomain(named.Name()) | ||||
| 	return domain | ||||
| } | ||||
|  | ||||
| // Path returns the name without the domain part of the Named reference | ||||
| func Path(named Named) (name string) { | ||||
| 	if r, ok := named.(namedRepository); ok { | ||||
| 		return r.Path() | ||||
| 	} | ||||
| 	_, path := splitDomain(named.Name()) | ||||
| 	return path | ||||
| } | ||||
|  | ||||
// splitDomain splits a name into (domain, path) using anchoredNameRegexp.
// When the name has no domain component, the domain is empty and the whole
// input is returned as the path.
func splitDomain(name string) (string, string) {
	match := anchoredNameRegexp.FindStringSubmatch(name)
	if len(match) != 3 {
		return "", name
	}
	return match[1], match[2]
}
|  | ||||
// SplitHostname splits a named reference into a
// hostname and name string. If no valid hostname is
// found, the hostname is empty and the full value
// is returned as name
//
// Deprecated: Use Domain or Path.
func SplitHostname(named Named) (string, string) {
	if r, ok := named.(namedRepository); ok {
		return r.Domain(), r.Path()
	}
	return splitDomain(named.Name())
}
|  | ||||
// Parse parses s and returns a syntactically valid Reference.
// If an error was encountered it is returned, along with a nil Reference.
// NOTE: Parse will not handle short digests.
func Parse(s string) (Reference, error) {
	matches := ReferenceRegexp.FindStringSubmatch(s)
	if matches == nil {
		if s == "" {
			return nil, ErrNameEmpty
		}
		// Distinguish the common "uppercase in name" mistake from other
		// format errors by retrying the match on the lowercased input.
		if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
			return nil, ErrNameContainsUppercase
		}
		return nil, ErrReferenceInvalidFormat
	}

	if len(matches[1]) > NameTotalLengthMax {
		return nil, ErrNameTooLong
	}

	var repo repository

	// Split the name capture (matches[1]) into domain and path components.
	nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
	if len(nameMatch) == 3 {
		repo.domain = nameMatch[1]
		repo.path = nameMatch[2]
	} else {
		repo.domain = ""
		repo.path = matches[1]
	}

	ref := reference{
		namedRepository: repo,
		tag:             matches[2], // may be empty
	}
	if matches[3] != "" {
		var err error
		ref.digest, err = digest.Parse(matches[3])
		if err != nil {
			return nil, err
		}
	}

	// Narrow to the most specific reference type (digest-only, name-only,
	// canonical, tagged, or the full combination).
	r := getBestReferenceType(ref)
	if r == nil {
		return nil, ErrNameEmpty
	}

	return r, nil
}
|  | ||||
| // ParseNamed parses s and returns a syntactically valid reference implementing | ||||
| // the Named interface. The reference must have a name and be in the canonical | ||||
| // form, otherwise an error is returned. | ||||
| // If an error was encountered it is returned, along with a nil Reference. | ||||
| // NOTE: ParseNamed will not handle short digests. | ||||
| func ParseNamed(s string) (Named, error) { | ||||
| 	named, err := ParseNormalizedNamed(s) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	if named.String() != s { | ||||
| 		return nil, ErrNameNotCanonical | ||||
| 	} | ||||
| 	return named, nil | ||||
| } | ||||
|  | ||||
| // WithName returns a named object representing the given string. If the input | ||||
| // is invalid ErrReferenceInvalidFormat will be returned. | ||||
| func WithName(name string) (Named, error) { | ||||
| 	if len(name) > NameTotalLengthMax { | ||||
| 		return nil, ErrNameTooLong | ||||
| 	} | ||||
|  | ||||
| 	match := anchoredNameRegexp.FindStringSubmatch(name) | ||||
| 	if match == nil || len(match) != 3 { | ||||
| 		return nil, ErrReferenceInvalidFormat | ||||
| 	} | ||||
| 	return repository{ | ||||
| 		domain: match[1], | ||||
| 		path:   match[2], | ||||
| 	}, nil | ||||
| } | ||||
|  | ||||
// WithTag combines the name from "name" and the tag from "tag" to form a
// reference incorporating both the name and the tag.
func WithTag(name Named, tag string) (NamedTagged, error) {
	if !anchoredTagRegexp.MatchString(tag) {
		return nil, ErrTagInvalidFormat
	}
	// Copy the domain/path split when available; otherwise treat the whole
	// name as the path.
	var repo repository
	if r, ok := name.(namedRepository); ok {
		repo.domain = r.Domain()
		repo.path = r.Path()
	} else {
		repo.path = name.Name()
	}
	// Preserve an existing digest: tagging a canonical reference yields a
	// combined tagged+digested reference.
	if canonical, ok := name.(Canonical); ok {
		return reference{
			namedRepository: repo,
			tag:             tag,
			digest:          canonical.Digest(),
		}, nil
	}
	return taggedReference{
		namedRepository: repo,
		tag:             tag,
	}, nil
}
|  | ||||
// WithDigest combines the name from "name" and the digest from "digest" to form
// a reference incorporating both the name and the digest.
func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
	// NOTE: the "digest" parameter shadows the imported digest package
	// within this function; only the digest.Digest value is usable here.
	if !anchoredDigestRegexp.MatchString(digest.String()) {
		return nil, ErrDigestInvalidFormat
	}
	// Copy the domain/path split when available; otherwise treat the whole
	// name as the path.
	var repo repository
	if r, ok := name.(namedRepository); ok {
		repo.domain = r.Domain()
		repo.path = r.Path()
	} else {
		repo.path = name.Name()
	}
	// Preserve an existing tag: digesting a tagged reference yields a
	// combined tagged+digested reference.
	if tagged, ok := name.(Tagged); ok {
		return reference{
			namedRepository: repo,
			tag:             tagged.Tag(),
			digest:          digest,
		}, nil
	}
	return canonicalReference{
		namedRepository: repo,
		digest:          digest,
	}, nil
}
|  | ||||
| // TrimNamed removes any tag or digest from the named reference. | ||||
| func TrimNamed(ref Named) Named { | ||||
| 	domain, path := SplitHostname(ref) | ||||
| 	return repository{ | ||||
| 		domain: domain, | ||||
| 		path:   path, | ||||
| 	} | ||||
| } | ||||
|  | ||||
// getBestReferenceType narrows a fully populated reference down to the most
// specific type: digest-only, canonical (name@digest), name-only, tagged
// (name:tag), or the full reference when both tag and digest are set.
// Returns nil when neither a name nor a digest is present.
func getBestReferenceType(ref reference) Reference {
	if ref.Name() == "" {
		// Allow digest only references
		if ref.digest != "" {
			return digestReference(ref.digest)
		}
		return nil
	}
	if ref.tag == "" {
		if ref.digest != "" {
			return canonicalReference{
				namedRepository: ref.namedRepository,
				digest:          ref.digest,
			}
		}
		// Name only.
		return ref.namedRepository
	}
	if ref.digest == "" {
		return taggedReference{
			namedRepository: ref.namedRepository,
			tag:             ref.tag,
		}
	}

	// Both tag and digest present: keep the full reference.
	return ref
}
|  | ||||
// reference is the fully general implementation: a named repository plus
// both a tag and a digest.
type reference struct {
	namedRepository
	tag    string
	digest digest.Digest
}

// String renders the reference as "name:tag@digest".
func (r reference) String() string {
	return r.Name() + ":" + r.tag + "@" + r.digest.String()
}

// Tag returns the tag component.
func (r reference) Tag() string {
	return r.tag
}

// Digest returns the digest component.
func (r reference) Digest() digest.Digest {
	return r.digest
}
|  | ||||
// repository is a name-only reference, split into its domain and path
// components.
type repository struct {
	domain string
	path   string
}

// String renders the repository as its full name.
func (r repository) String() string {
	return r.Name()
}

// Name joins domain and path with "/", omitting the separator when the
// domain is empty.
func (r repository) Name() string {
	if r.domain == "" {
		return r.path
	}
	return r.domain + "/" + r.path
}

// Domain returns the registry domain (may be empty).
func (r repository) Domain() string {
	return r.domain
}

// Path returns the repository path.
func (r repository) Path() string {
	return r.path
}
|  | ||||
// digestReference is a digest-only reference with no name component.
type digestReference digest.Digest

// String returns the digest in "algorithm:hex" form.
func (d digestReference) String() string {
	return digest.Digest(d).String()
}

// Digest returns the underlying digest value.
func (d digestReference) Digest() digest.Digest {
	return digest.Digest(d)
}
|  | ||||
// taggedReference is a named repository plus a tag (no digest).
type taggedReference struct {
	namedRepository
	tag string
}

// String renders the reference as "name:tag".
func (t taggedReference) String() string {
	return t.Name() + ":" + t.tag
}

// Tag returns the tag component.
func (t taggedReference) Tag() string {
	return t.tag
}
|  | ||||
// canonicalReference is a named repository plus a digest (no tag).
type canonicalReference struct {
	namedRepository
	digest digest.Digest
}

// String renders the reference as "name@digest".
func (c canonicalReference) String() string {
	return c.Name() + "@" + c.digest.String()
}

// Digest returns the digest component.
func (c canonicalReference) Digest() digest.Digest {
	return c.digest
}
							
								
								
									
										147
									
								
								vendor/github.com/distribution/distribution/v3/reference/regexp.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										147
									
								
								vendor/github.com/distribution/distribution/v3/reference/regexp.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,147 @@ | ||||
| package reference | ||||
|  | ||||
| import "regexp" | ||||
|  | ||||
// The expressions below are composed from small building blocks using the
// helper functions defined later in this file (match, literal, expression,
// optional, repeated, group, capture, anchored). Initialization order
// matters: each variable may only reference ones declared above it.
var (
	// alphaNumericRegexp defines the alpha numeric atom, typically a
	// component of names. This only allows lower case characters and digits.
	alphaNumericRegexp = match(`[a-z0-9]+`)

	// separatorRegexp defines the separators allowed to be embedded in name
	// components. This allow one period, one or two underscore and multiple
	// dashes. Repeated dashes and underscores are intentionally treated
	// differently. In order to support valid hostnames as name components,
	// supporting repeated dash was added. Additionally double underscore is
	// now allowed as a separator to loosen the restriction for previously
	// supported names.
	separatorRegexp = match(`(?:[._]|__|[-]*)`)

	// nameComponentRegexp restricts registry path component names to start
	// with at least one letter or number, with following parts able to be
	// separated by one period, one or two underscore and multiple dashes.
	nameComponentRegexp = expression(
		alphaNumericRegexp,
		optional(repeated(separatorRegexp, alphaNumericRegexp)))

	// domainComponentRegexp restricts the registry domain component of a
	// repository name to start with a component as defined by DomainRegexp
	// and followed by an optional port.
	domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)

	// DomainRegexp defines the structure of potential domain components
	// that may be part of image names. This is purposely a subset of what is
	// allowed by DNS to ensure backwards compatibility with Docker image
	// names.
	DomainRegexp = expression(
		domainComponentRegexp,
		optional(repeated(literal(`.`), domainComponentRegexp)),
		optional(literal(`:`), match(`[0-9]+`)))

	// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
	TagRegexp = match(`[\w][\w.-]{0,127}`)

	// anchoredTagRegexp matches valid tag names, anchored at the start and
	// end of the matched string.
	anchoredTagRegexp = anchored(TagRegexp)

	// DigestRegexp matches valid digests.
	DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)

	// anchoredDigestRegexp matches valid digests, anchored at the start and
	// end of the matched string.
	anchoredDigestRegexp = anchored(DigestRegexp)

	// NameRegexp is the format for the name component of references. The
	// regexp has capturing groups for the domain and name part omitting
	// the separating forward slash from either.
	NameRegexp = expression(
		optional(DomainRegexp, literal(`/`)),
		nameComponentRegexp,
		optional(repeated(literal(`/`), nameComponentRegexp)))

	// anchoredNameRegexp is used to parse a name value, capturing the
	// domain and trailing components.
	anchoredNameRegexp = anchored(
		optional(capture(DomainRegexp), literal(`/`)),
		capture(nameComponentRegexp,
			optional(repeated(literal(`/`), nameComponentRegexp))))

	// ReferenceRegexp is the full supported format of a reference. The regexp
	// is anchored and has capturing groups for name, tag, and digest
	// components.
	// Submatch indices: 1 = name, 2 = tag, 3 = digest (tag and digest may
	// be empty, since both parts are optional).
	ReferenceRegexp = anchored(capture(NameRegexp),
		optional(literal(":"), capture(TagRegexp)),
		optional(literal("@"), capture(DigestRegexp)))

	// IdentifierRegexp is the format for string identifier used as a
	// content addressable identifier using sha256. These identifiers
	// are like digests without the algorithm, since sha256 is used.
	IdentifierRegexp = match(`([a-f0-9]{64})`)

	// ShortIdentifierRegexp is the format used to represent a prefix
	// of an identifier. A prefix may be used to match a sha256 identifier
	// within a list of trusted identifiers.
	ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)

	// anchoredIdentifierRegexp is used to check or match an
	// identifier value, anchored at start and end of string.
	anchoredIdentifierRegexp = anchored(IdentifierRegexp)

	// anchoredShortIdentifierRegexp is used to check if a value
	// is a possible identifier prefix, anchored at start and end
	// of string.
	anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp)
)
|  | ||||
// match compiles the pattern s to a regular expression, panicking on an
// invalid pattern; every pattern in this file is a fixed constant, so a
// panic here indicates a programming error.
func match(s string) *regexp.Regexp {
	return regexp.MustCompile(s)
}
|  | ||||
| // literal compiles s into a literal regular expression, escaping any regexp | ||||
| // reserved characters. | ||||
| func literal(s string) *regexp.Regexp { | ||||
| 	re := match(regexp.QuoteMeta(s)) | ||||
|  | ||||
| 	if _, complete := re.LiteralPrefix(); !complete { | ||||
| 		panic("must be a literal") | ||||
| 	} | ||||
|  | ||||
| 	return re | ||||
| } | ||||
|  | ||||
| // expression defines a full expression, where each regular expression must | ||||
| // follow the previous. | ||||
| func expression(res ...*regexp.Regexp) *regexp.Regexp { | ||||
| 	var s string | ||||
| 	for _, re := range res { | ||||
| 		s += re.String() | ||||
| 	} | ||||
|  | ||||
| 	return match(s) | ||||
| } | ||||
|  | ||||
| // optional wraps the expression in a non-capturing group and makes the | ||||
| // production optional. | ||||
| func optional(res ...*regexp.Regexp) *regexp.Regexp { | ||||
| 	return match(group(expression(res...)).String() + `?`) | ||||
| } | ||||
|  | ||||
| // repeated wraps the regexp in a non-capturing group to get one or more | ||||
| // matches. | ||||
| func repeated(res ...*regexp.Regexp) *regexp.Regexp { | ||||
| 	return match(group(expression(res...)).String() + `+`) | ||||
| } | ||||
|  | ||||
| // group wraps the regexp in a non-capturing group. | ||||
| func group(res ...*regexp.Regexp) *regexp.Regexp { | ||||
| 	return match(`(?:` + expression(res...).String() + `)`) | ||||
| } | ||||
|  | ||||
| // capture wraps the expression in a capturing group. | ||||
| func capture(res ...*regexp.Regexp) *regexp.Regexp { | ||||
| 	return match(`(` + expression(res...).String() + `)`) | ||||
| } | ||||
|  | ||||
| // anchored anchors the regular expression by adding start and end delimiters. | ||||
| func anchored(res ...*regexp.Regexp) *regexp.Regexp { | ||||
| 	return match(`^` + expression(res...).String() + `$`) | ||||
| } | ||||
							
								
								
									
										649
									
								
								vendor/github.com/docker/cli/cli/compose/schema/bindata.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										649
									
								
								vendor/github.com/docker/cli/cli/compose/schema/bindata.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -1,649 +0,0 @@ | ||||
| // Code generated by "esc -o bindata.go -pkg schema -ignore .*\.go -private -modtime=1518458244 data"; DO NOT EDIT. | ||||
|  | ||||
| package schema | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"compress/gzip" | ||||
| 	"encoding/base64" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"io/ioutil" | ||||
| 	"net/http" | ||||
| 	"os" | ||||
| 	"path" | ||||
| 	"sync" | ||||
| 	"time" | ||||
| ) | ||||
|  | ||||
// _escLocalFS serves assets straight from the local filesystem
// (development mode).
type _escLocalFS struct{}

var _escLocal _escLocalFS

// _escStaticFS serves the compressed assets embedded in _escData.
type _escStaticFS struct{}

var _escStatic _escStaticFS

// _escDirectory restricts a filesystem to a fixed name prefix.
type _escDirectory struct {
	fs   http.FileSystem
	name string
}

// _escFile is a single embedded asset: its compressed payload plus the
// file metadata recorded by the generator.
type _escFile struct {
	compressed string // base64-encoded gzip payload
	size       int64  // uncompressed size in bytes
	modtime    int64  // Unix seconds, fixed at generation time
	local      string // path of the source file on disk (local mode)
	isDir      bool

	once sync.Once // guards the one-time decode performed in prepare
	data []byte    // decompressed contents, populated by prepare
	name string    // base name, populated by prepare
}
|  | ||||
| func (_escLocalFS) Open(name string) (http.File, error) { | ||||
| 	f, present := _escData[path.Clean(name)] | ||||
| 	if !present { | ||||
| 		return nil, os.ErrNotExist | ||||
| 	} | ||||
| 	return os.Open(f.local) | ||||
| } | ||||
|  | ||||
// prepare looks up the embedded file for name and lazily decodes its
// payload (base64 -> gzip -> bytes) exactly once, caching the result on
// the *_escFile. Subsequent calls return the cached entry.
//
// NOTE(review): err is only assigned inside the once.Do closure, so a
// decode failure is reported to the first caller only; later callers see
// a nil error with empty data. Acceptable for generated asset code.
func (_escStaticFS) prepare(name string) (*_escFile, error) {
	f, present := _escData[path.Clean(name)]
	if !present {
		return nil, os.ErrNotExist
	}
	var err error
	f.once.Do(func() {
		f.name = path.Base(name)
		if f.size == 0 {
			// Empty file: no payload to decode.
			return
		}
		var gr *gzip.Reader
		b64 := base64.NewDecoder(base64.StdEncoding, bytes.NewBufferString(f.compressed))
		gr, err = gzip.NewReader(b64)
		if err != nil {
			return
		}
		f.data, err = ioutil.ReadAll(gr)
	})
	if err != nil {
		return nil, err
	}
	return f, nil
}
|  | ||||
| func (fs _escStaticFS) Open(name string) (http.File, error) { | ||||
| 	f, err := fs.prepare(name) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return f.File() | ||||
| } | ||||
|  | ||||
// Open opens name beneath the directory's prefix by delegating to the
// wrapped filesystem with the prefix prepended.
func (dir _escDirectory) Open(name string) (http.File, error) {
	return dir.fs.Open(dir.name + name)
}
|  | ||||
| func (f *_escFile) File() (http.File, error) { | ||||
| 	type httpFile struct { | ||||
| 		*bytes.Reader | ||||
| 		*_escFile | ||||
| 	} | ||||
| 	return &httpFile{ | ||||
| 		Reader:   bytes.NewReader(f.data), | ||||
| 		_escFile: f, | ||||
| 	}, nil | ||||
| } | ||||
|  | ||||
// Close is a no-op: embedded files hold no OS resources.
func (f *_escFile) Close() error {
	return nil
}
|  | ||||
// Readdir lists the directory's entries from the generator-produced
// _escDirs index. A non-positive count returns all entries; otherwise at
// most count entries are returned, and an empty directory with count > 0
// yields io.EOF.
func (f *_escFile) Readdir(count int) ([]os.FileInfo, error) {
	if !f.isDir {
		return nil, fmt.Errorf(" escFile.Readdir: '%s' is not directory", f.name)
	}

	fis, ok := _escDirs[f.local]
	if !ok {
		return nil, fmt.Errorf(" escFile.Readdir: '%s' is directory, but we have no info about content of this dir, local=%s", f.name, f.local)
	}
	// Clamp the requested count to the available entries; count <= 0
	// means "return everything".
	limit := count
	if count <= 0 || limit > len(fis) {
		limit = len(fis)
	}

	if len(fis) == 0 && count > 0 {
		return nil, io.EOF
	}

	return fis[0:limit], nil
}
|  | ||||
// Stat returns the file itself: *_escFile doubles as its own os.FileInfo.
func (f *_escFile) Stat() (os.FileInfo, error) {
	return f, nil
}

// Name returns the base name set when the file was prepared.
func (f *_escFile) Name() string {
	return f.name
}

// Size returns the uncompressed size recorded by the generator.
func (f *_escFile) Size() int64 {
	return f.size
}

// Mode always reports 0; the generator records no permission bits.
func (f *_escFile) Mode() os.FileMode {
	return 0
}

// ModTime returns the fixed modification time baked in at generation.
func (f *_escFile) ModTime() time.Time {
	return time.Unix(f.modtime, 0)
}

// IsDir reports whether this entry represents a directory.
func (f *_escFile) IsDir() bool {
	return f.isDir
}

// Sys returns the underlying *_escFile itself.
func (f *_escFile) Sys() interface{} {
	return f
}
|  | ||||
| // _escFS returns a http.Filesystem for the embedded assets. If useLocal is true, | ||||
| // the filesystem's contents are instead used. | ||||
| func _escFS(useLocal bool) http.FileSystem { | ||||
| 	if useLocal { | ||||
| 		return _escLocal | ||||
| 	} | ||||
| 	return _escStatic | ||||
| } | ||||
|  | ||||
| // _escDir returns a http.Filesystem for the embedded assets on a given prefix dir. | ||||
| // If useLocal is true, the filesystem's contents are instead used. | ||||
| func _escDir(useLocal bool, name string) http.FileSystem { | ||||
| 	if useLocal { | ||||
| 		return _escDirectory{fs: _escLocal, name: name} | ||||
| 	} | ||||
| 	return _escDirectory{fs: _escStatic, name: name} | ||||
| } | ||||
|  | ||||
// _escFSByte returns the named file from the embedded assets. If useLocal is
// true, the filesystem's contents are instead used.
func _escFSByte(useLocal bool, name string) ([]byte, error) {
	if useLocal {
		f, err := _escLocal.Open(name)
		if err != nil {
			return nil, err
		}
		b, err := ioutil.ReadAll(f)
		// Close error is deliberately dropped; the read result is what
		// matters here.
		_ = f.Close()
		return b, err
	}
	f, err := _escStatic.prepare(name)
	if err != nil {
		return nil, err
	}
	// prepare has already decoded the payload into f.data.
	return f.data, nil
}
|  | ||||
| // _escFSMustByte is the same as _escFSByte, but panics if name is not present. | ||||
| func _escFSMustByte(useLocal bool, name string) []byte { | ||||
| 	b, err := _escFSByte(useLocal, name) | ||||
| 	if err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| 	return b | ||||
| } | ||||
|  | ||||
| // _escFSString is the string version of _escFSByte. | ||||
| func _escFSString(useLocal bool, name string) (string, error) { | ||||
| 	b, err := _escFSByte(useLocal, name) | ||||
| 	return string(b), err | ||||
| } | ||||
|  | ||||
// _escFSMustString is the string version of _escFSMustByte; it panics if
// name is not present.
func _escFSMustString(useLocal bool, name string) string {
	return string(_escFSMustByte(useLocal, name))
}
|  | ||||
| var _escData = map[string]*_escFile{ | ||||
|  | ||||
| 	"/data/config_schema_v3.0.json": { | ||||
| 		name:    "config_schema_v3.0.json", | ||||
| 		local:   "data/config_schema_v3.0.json", | ||||
| 		size:    11063, | ||||
| 		modtime: 1518458244, | ||||
| 		compressed: ` | ||||
| H4sIAAAAAAAC/+xaT4/buA6/+1MYam/NzBR4xQNeb++4p93zDlxDsZlEHVlSKTmdtMh3X8iOHduRJSVx | ||||
| t8ViByiQyiTFf/qRov09SVPyVhc7qCj5mJKdMerj09NnLcVDu/oocftUIt2Yh/cfntq1N2Rl+VhpWQop | ||||
| Nmybt0/y/X8e3z9a9pbEHBRYIrn+DIVp1xC+1AzBMj+TPaBmUpBsldhnCqUCNAw0+Zha5dK0J+kWBmK1 | ||||
| QSa2pFk+NhLSlGjAPSsGEnpV3zyd5T/1ZKup1IGyzbqixgCKPy51ax5/eqYP3/7/8Of7h/895g/Zu7ej | ||||
| x9a/CJt2+xI2TDDDpOj3Jz3l8fTr2G9My7Ihpny094ZyDWObBZivEl9CNvdkP8nm0/4Om8fm7CWvq2AE | ||||
| O6qfZEy7/X3xSzqjvbQtxWDvRsFRtrtc5cq2eV/1zprxUgmKy4Ndm/FHS1CBMKR3QZqSdc14OfWoFPC7 | ||||
| FfE8WEzT79ODPZDTPB/9bz7g/fMZW/rnhRQGXk1jlH/r1gWyeAHcMA6xHBS32uMyzrTJJeYlKww5Ttgv | ||||
| 5IXzaZqK9i9LHAJJQVVOy3JkB0WkB7JKCTNQabeJKakF+1LDbycSgzVM5ZYo1fKCtyhrlSuKNsH87ieF | ||||
| rCoqlsq6a+yI8LwUhjIBmAtahRLJnjoQpc7b+udNo03e8uuJgL4YLhqPUvgSuxVjU9vqRiaMuQaKxe5G | ||||
| fllRJmJ8B8LgQUnW5ssvlwgg9nmPJVe7AcSeoRRVdxpiAKYHecv/qqSGqWMmBg4f9aYmLgh+7gxfpUTU | ||||
| 1RrQtnQjyo3Eilplu72TGaxzZN7QgUMbbFmnPOdMvCyf4vBqkOY7qY2+wsU9+w4oN7tiB8WLh31INeKW | ||||
| 2sQkOavoNkykihAJp2vgN9m5qPMHYuV2a0nnMu6ic4ms+SWyPWBsAZfq3HClF3+hBiSi+xyRfnpsm0/P | ||||
| qWp+cU6yo0PE5dp4ZWJhXEMxikpFC9s3IGgdyqhTs59XspxL0AtiHYvUVxfC2/rHqNAFLxABa+bUuybL | ||||
| YlL/HHbOqAZ9W0dxIY2p/YfInHDx/tfLO8M6KzO+Rw6IOqvSHDeXIlkSOn8/tIVXrJzHigYhhgdMSTT6 | ||||
| 55T7duu7q71CtmcctjC+tayl5EDFCHoQaJlLwQ8RlNpQDF4oNBQ1MnPIpTKL9xl6V+WafYNxNM94fxKU | ||||
| jXgOujC31WttSiZyqUAEvaONVPkWaQG5AmSydBm4Gsa6rJHa/S/FaLYVlIccbSq1ufFiYUw43DVnFZs/ | ||||
| Bw6AjagBLf67Yd8D+WdNmTCwBXQhpafr8DcdEd3GjuI4oB492jjKjXEzJJG4Oh7+NvJWJ0UyJ/1VcD5V | ||||
| I5tF1KMTUWsdbAwbGqF9TU1POphiLooXtlGyh6Bk6KuZt8yRJ3cW30RxSBqcwPqnm6HJI9N0PZm5uQ63 | ||||
| zUbchzEGwSCbxOWEtiM8Af1rDg4Mq0DWxhv7ZMBEBpPZQFAHlNOYPvdB7fqLYOBiDgmC4qygOgREd1xQ | ||||
| a1VSA3n7ouoq6PdgvqJIOQfOdBWDoaQETg83lc+2m6KM1wg5LczpXVgg50glBTMSb9+yoq95t21D4jww | ||||
| s21d7N1y2IrJGgvQS4XoXOtnMqbb8cJ0BG2RpL/6B/kX9YJtSHMlOSsOS7mikKLVIyZz7kxVmze2Z6qU | ||||
| 0VFH4ysTpfx6xYbLeVtxWsAEGO91tDZImTBX1/17zbqj7PeJHCgPPV34letMSShUHRwcVVBJPCzd2nTv | ||||
| ngMmdmQLlL+oSeOJyl4sF7+WhKeJWbgpZopWS52O6NkrcRbrwMzCM7eIG6OFb01E12sBJm5U5XwhHH+f | ||||
| Oc7fXu4Dve61yUxUn/vmetX7KosO8ew7i+X0b/r86SzBdSG4smW8A1xO34IEsOVE9S+0/EMS8e/Lr8nY | ||||
| a5BnlzdSX0pEz/uT4QW0V2NK5vgkbwzLvjFH4p//TjY9OdFv+YIZ/vjOU3x87+V+EGovMEJyx3TSsSb9 | ||||
| xHv6XdkMqA34L74ys3aKw8XE5Pt4DNh+IZaN/DMhad9yDyAlGzbxc2F0fns2HUJ234BlbrgaD1QS+++Y | ||||
| /BUAAP//72YpJjcrAAA= | ||||
| `, | ||||
| 	}, | ||||
|  | ||||
| 	"/data/config_schema_v3.1.json": { | ||||
| 		name:    "config_schema_v3.1.json", | ||||
| 		local:   "data/config_schema_v3.1.json", | ||||
| 		size:    12209, | ||||
| 		modtime: 1518458244, | ||||
| 		compressed: ` | ||||
| H4sIAAAAAAAC/+waS4/bNvOuXyEwucW7mw9fUKC59dhTe+7CEbjSWGaWIpkh5awT+L8X1Mt6krStdIOi | ||||
| CwRwqJnhvGc45Pcojslbne6hoORjTPbGqI8PD5+1FHf16r3E/CFDujN37z881GtvyMbiscyipFLsWJ7U | ||||
| X5LD/+//d2/RaxBzVGCB5NNnSE29hvClZAgW+ZEcADWTgmw3kf2mUCpAw0CTj7FlLo47kHahR1YbZCIn | ||||
| 1fKpohDHRAMeWNqj0LH65uFM/6ED24yp9pit1hU1BlD8OeWt+vzpkd59++3ur/d3v94nd9t3bwefrX4R | ||||
| dvX2GeyYYIZJ0e1POshT8+vUbUyzrAKmfLD3jnINQ5kFmK8Sn30yd2CvJHOz/4zMQ3EOkpeF14It1CsJ | ||||
| U2+/jv00pAjG77I11Kt5rN3+NoGjVmgnbA3R27ticBDec6qaC69lXXXKWtBSBorLo11b0EcNUIAwpFNB | ||||
| HJOnkvFsrFEp4A9L4rG3GMffx5msR6f6PvjfssG77wuydN9TKQy8mEoo99a1CmT6DLhjHEIxKObaoTLO | ||||
| tEkkJhlLDTmN0Cf0/P40dkX7t41mCJKUqoRm2UAOikiPZBMTZqDQ8yLGpBTsSwm/NyAGSxjTzVCq9Qnn | ||||
| KEuVKIrWwdzqJ6ksCirW8rpL5AjQvBSGMgGYCFr4HMlGHYhMJ3XBd7rRLqnx9YhAV/1XtUcmXI5dk7Gu | ||||
| bXkjI8REA8V0fyW+LCgTIboDYfCoJKv95adzBBCHpMslF6sBxIGhFEUbDSEJpkvyFv9FSQ1jxYwE7H/q | ||||
| RI3mUvBjK/gmJqIsngBtDzuA3EksqGW23TtayHUzntdXYF8GW9YpTzgTz+u7OLwYpMleaqMvUHGHvgfK | ||||
| zT7dQ/rsQO9DDbClNiFOzgqa+4FU6gPh9An4VXKuqvweWZnnFnTJ4yadS2DNz5AdAEMLuFTnhiue/Pka | ||||
| kIDucwD66b5uPh1RVf3inGxPMySma8OVkYRhDcXAKgVNbd+AoLXPo5rTTVLIbMlBJ8A6NFNfXAiv6x+D | ||||
| TOc9QHikWWLvEi8Lcf2z2TmjGvR1HcWEGlOHD4E+MYf7ixN3AXWRZniP7CF1ZqUKtzlGtpEv/n5oC69Y | ||||
| tpwrqgzRDzAl0ejXKff11jdXe4XswDjkMDy1PEnJgYpB6kGgWSIFPwZAakPRe6DQkJbIzDGRyqzeZ+h9 | ||||
| kWj2DYbWPOf7htB2xNBoQnKlQZdSkj+MZxKhN1H5UxTRssQUghMJMRRzMOHw5TBs3MD5JcCTQteY8OTP | ||||
| E9FSXjnNhr4+6tRc161pkzGRSAXCGxvaSJXkSFNIFCCTs6rY9CM9K5Ha/adkNMsF5b4wM4XaXXmsNMYf | ||||
| 7CVnBVsOmhmvDegA6uo/X/QdBf/MKRMGcusmU6dy9JzuljOg19xTHBrUwUcTmDszjxAFVtXhXUdFb9Mw | ||||
| sp2Fv6iYj9nYLtbT+aAqtfdYUMEI7WppO9De0H7VamHbZBsEGUNXx3TN2H10YnXNk/ug3vm7e7btmzsz | ||||
| TZ9GE9e54LbeiAd/jkEwyEZ2aRN1P5+A/jnHRoYVIEvjtH3UQyK9ubzHqD3IsU0fO6O23aXXcCFBgqA4 | ||||
| S6n2JaIbxhOlyqiBpL6XvSj1O3K+okg5B850EZJDSQacHq8qn3UvTRkvERKamubq1+NzpJCCGYnXb1nQ | ||||
| l6TdtgLxdTbDpj50stBvxKvGT69lonOtX/CYdseJ6AjaZpJu8OPFX1UL9jiSKMlZelxLFakUNR8hnnOj | ||||
| q1q/sT1ToYwOCo2vTGTy6wUbrqdtxWkKo8R4q6K1QcqEubju3yrWDWW/c2RPeejg/BfuCyUhVaV3bFhA | ||||
| IfG4dmvTPrXwiNiCrVD+gubMDVQi1frHEv8seetvipmixVrRETx5J7PF2jPgcAw51ptNlE8CTNigcvY5 | ||||
| QPh55rR8erkt6bWXZgtWfeya602nq22wiRdvrNbjv+rzx7OEuQPBhS3jDcmlefrkyS0N1H+p5V/iiP+c | ||||
| fzUvzbxPvCqoq4tzwLumn8Bmr22K4QSyZ5LpcMClyeCLt6g/C+jYGIPNPAYeVkjXxClyX8SMNm2U6JZ8 | ||||
| xWRz/87RB7guyH9QAV1hmjdv09HhIepuesYPPBfiv4c/ee5p5RTHyfDq+3AiWz/V3A70MwKpn5v0svu2 | ||||
| f55aMuPsI9DxPLh9jLlw/TGcbUX23yn6OwAA//8cyfJJsS8AAA== | ||||
| `, | ||||
| 	}, | ||||
|  | ||||
| 	"/data/config_schema_v3.2.json": { | ||||
| 		name:    "config_schema_v3.2.json", | ||||
| 		local:   "data/config_schema_v3.2.json", | ||||
| 		size:    13755, | ||||
| 		modtime: 1518458244, | ||||
| 		compressed: ` | ||||
| H4sIAAAAAAAC/+xbzW7cOBK+91MISm7xT7AbLLC57XFPM+cxFIFNVasZUyRTpNruBH73gaSWWpREkeqW | ||||
| 48xgDARwqGKR9cuvWPSPTRTF7zXdQ0Hiz1G8N0Z9vr//qqW4bUbvJOb3GZKduf346b4ZexffVPNYVk2h | ||||
| UuxYnjZf0sO/7/51V01vSMxRQUUkt1+BmmYM4VvJEKrJD/EBUDMp4uRmU31TKBWgYaDjz1G1uSjqSNqB | ||||
| HlttkIk8rodfag5RFGvAA6M9Dt1W392f+d93ZDdDrr3N1uOKGAMofh/vrf785YHcfv/f7R8fb/97l94m | ||||
| H95bnyv9Iuya5TPYMcEMk6JbP+4oX06/vXQLkyyriQm31t4RrsGWWYB5kvjok7kjeyOZT+tPyGyLc5C8 | ||||
| LLwWbKneSJhm+XXsp4EiGL/LNlRv5rHV8tcJvGmFnqVtKHpr1xu0wntKVVPh5dZVpyyHljJQXB6rMYc+ | ||||
| GoIChIk7FURRvC0Zz4YalQJ+q1g89Aaj6Mcwk/X41N+t/7kN3n13yNJ9p1IYeDa1UPNLNyqQ9BFwxziE | ||||
| ziCY6xmVcaZNKjHNGDWT8ymhe0h3KAsvl13a7EPHLwM+I8Z+xxz6dPWTbCYYxpSolGSZpRCCSI7xTRQz | ||||
| A4We1lUUl4J9K+H/JxKDJQz5ZijV+oxzlKVKFcHKU+ftGFNZFESs5b5L5AjQvBSGMAGYClL4PLIKXxCZ | ||||
| ThvkEOpJFoMORqxqj0zMRUjDpoqRam/xYGKqgSDdXzhfFoSJEN2BMHhUkjX+8ss5AohD2iWlxWoAcWAo | ||||
| RdFGQ1im6s1/VlLDUDEDAfufOlE3U7n8oRX8JopFWWwBKzBsUe4kFqTabLv2xpHrJjyvr8C+DBU+IDzl | ||||
| TDyu7+LwbJCke6nNJYdBvAfCzZ7ugT7OTO9TWbOlNiFOzgqS+4kU9ZFwsgV+kZyrKr/HVuZ5ReryuBEE | ||||
| CgQPGbIDYCgSkOqM3KLRjw/JBMBYi/TLXYNiZ6Kq/o3zOHmZYDEes0cGEoYBCssqBaEVbkDQ2udRpzIp | ||||
| LWTmctARsQ7N1IsPwsuAaJDpvJWIRxrX9pZ4WYjrn83OGdGgL0MUI25MHT4F+sTU3P/MznVMdfIMx8ge | ||||
| Vuet1OE2tZFk44u/V4XwimXuXFFniH6AKYlGX3/cuzy4r642T50P/GbxkTZG5g6atFkeH/7IiGey1FRE | ||||
| EszBLkOYMJADOiaocsuZ3kO2ZA5KI6nkYYExWceGB4PNMLkamylkB8YhH0i8lZIDEdZBgUCyVAp+DKDU | ||||
| hqC3/NNAS2TmmEplVkeFel+kmn0HO/bOXn9ilAw2NLgYe7Xwc7ntK4WNliXS6wJnlr60k9w8cb6EeBTw | ||||
| JxO++LO6O1QmE7U+amouw9baZEykUoHwxoY2UqU5EgqpAmRyUhVWgs1KJNX6Yzaa5YJwX5iZQu0uvAQw | ||||
| xh/sJWcFcwfNhNcG4LUGq01DtBl4FpSyZyqE+QIhoDLYE1xwdNSBuXOcT5tADGS3uGp+N6eNJJP0i6DX | ||||
| cBuJE/1MB1WpvUVcTSN0GnC0T/Rq/hoZ2rJRTZ5clMdPKwXmztfO+sGIwG4KaKYNCHoMX2jLRrfES+uu | ||||
| sKqrpiJ5k2+DC53wWD218X6KKEJSqRymCRfjlQHs4KZjBra6MsyTxMfq/MoYzlnskkbp4GpwrgPYJ/V2 | ||||
| TOe7kb5OIdNkO+iRTZ3L1UGCBz88QDDIBp2HFmP1oQDoX/N+3rACZGlmbb/pTYp7nVSPUXuUQ5s+dEZt | ||||
| y3iv4ULONxBZ3QkJOgwRFGeUaB/guOLSuFQZMZA2z24WQbwZbKcIEs6BM12EYKU4A06OF8HkpqFBGC8R | ||||
| UkKdWX0wo5CCGYmXL1mQ57RdtibxVTB28R5639svuOujXq9lojOmd3hMu+JIdARdpZ3uOt47f1UtGIIm | ||||
| VZKzBl2soQoqRbOPEM+50lUrv6lqo0IZHRQaT0xk8mnBgutpW3FCYZBFr1W0NkiYMIvbVNeKdQVG6BzZ | ||||
| c5Z0dP73VI7zg6rS28wpoJB4XBsHtS/pPCK2ZCuclUHdvxNVKtX61w/+Dl/iL36ZIsVa0RHcD40nD2tP | ||||
| mTxTKq93B1luBZg3uCVfMem1TxkcVn3okPhNp6sk2MTOdwTr7b8uCoZ3hlPVAzGG0H1QobEQXV6Rh0bV | ||||
| 82QaOlH9k4X+Jj778/zr9ObY+9i3prr4HA944foL2OytTWE3JXomGV86zGly6avexN7GkGziz0Lsw3Su | ||||
| ZbmZv+QaLHpS4rzkKyabuw8zkGHuhdMrnbUrtIOnbTqoMzZd83f41N8R/735o4f/lZziOLoU+2E3AJpH | ||||
| +4mlnwFJ816wl92TfunlMuPknwMM2w/ts3xHR9S+M9tU/142fwYAAP//CLvrnLs1AAA= | ||||
| `, | ||||
| 	}, | ||||
|  | ||||
| 	"/data/config_schema_v3.3.json": { | ||||
| 		name:    "config_schema_v3.3.json", | ||||
| 		local:   "data/config_schema_v3.3.json", | ||||
| 		size:    15491, | ||||
| 		modtime: 1518458244, | ||||
| 		compressed: ` | ||||
| H4sIAAAAAAAC/+wbzW7bPPLupzDU3uokBVossL3tcU+75w1UgabGNhuKZIeUE7fIuy8oybJEUSJtK036 | ||||
| fS1QIJaGQ87/H/VzsVwm7zXdQUGSL8tkZ4z6cnf3TUtxUz+9lbi9y5FszM3Hz3f1s3fJyq5juV1Cpdiw | ||||
| bVa/yfafbj/d2uU1iDkosEBy/Q2oqZ8hfC8Zgl18n+wBNZMiSVcL+06hVICGgU6+LO3hlssW5Pigg1Yb | ||||
| ZGKbVI+fKwzLZaIB94x2MLRHfXd3wn/Xgq1crJ3DVs8VMQZQ/Hd4tur113ty8+NfN//7ePPP2+wm/fC+ | ||||
| 99ryF2FTb5/DhglmmBTt/kkL+dz89dxuTPK8Aia8t/eGcA19mgWYR4kPIZpbsFeiudnfQ3OfnL3kZRGU | ||||
| 4BHqlYipt59HfhooggmrbA31ahprt5+H4NprhAg+Qr0SwfX21xG8OBI9CVtDdPauDtjzZz5W+fzJOK9a | ||||
| Zo1wKQfF5cE+G+FHDVCAMEnLguUyWZeM5y5HpYD/WBT3nYfL5U/XdXfwVO97v8YF3r4foaV9T6Uw8GQq | ||||
| oqa3rlkg6QPghnGIXUGw1uIRlnGmTSYxyxk13vWcrIFfhYESuoNsg7IIYtlkNSU6eXbwDBCHVdu1Cvsv | ||||
| XXgQJpSojOR5j6UEkRyS1TJhBgrt5/YyKQX7XsK/GxCDJbh4c5RqfsRblKXKFEGr69OakFBZFETMZQDn | ||||
| 0BHB+YGb7VlVs0f3Vbtb71gj1CwjbMRjlAGjDpu19YqyRBprpXZPglsw8fAly+OBt+cAFzLvn1uUxRpw | ||||
| YJJ9yxr+The+N470DWECMBOkgKAeI+QgDCM80wromM54hDYlriTSmSYIW6YNHrywixFPFeelulTmoEDk | ||||
| OqsLilhv2UPQVhez+pxcTEWBGo2NA/ZsibMw00CQ7i5cLwvCRIyGgDB4UJLVPvHNOTsQ+6zVtrPZAGLP | ||||
| UIri6PHjonFn/ZOSGq73tM2K+yPhq9ZBpI7FbCQWxB72uPeolQw1r8vALg02iyY840w8zK/i8GSQZDup | ||||
| zSUJT7IDws2O7oA+TCzvQvVWS21ilJwVZBsGUjQEcnFil8zK/A5aud1a0DGNGxQKkSl2jmwPGJsvS3Wq | ||||
| b3xhOpQaBIu9HujX27rWm7Cq6i/Ok/TZgyIUk90gFhuOTlIpCLW5MYLWIY1quifZIIE4wQ6AdaynPjsQ | ||||
| XlauRYkuWK8H09Kx1DNey+LS0KPYOSMa9GUZxQAbU/vPkTrhW/uPybUjS0dxxteBAVTdfJdz70HScAb8 | ||||
| kmWq6mfxfV9ReYiugSmJ5pcUVic/dQr49ebDWssVd9SilynQJrxUXHnGhIGtrYv8QaBcc6Z3kJ+zBqWR | ||||
| VPI4w/B2e+KNYaJYuyg3U8j2jMPWoXgtJQcieoECgeSZFPwQAakNwWCLQwMtkZlDJpWZPSvUuyLT7Af0 | ||||
| be+k9Q2i1DmQ0y//09f4+/Q19EFTc1lurU3ORCYViKBtaCNVtkVCIVOATHpZ0XOweYnE7j9Eo9lWEB4y | ||||
| M1OozYVNAGPCxl5yVrBxo/E2doL5Wp2r+VO0ifQsymVPVAjTBUJEZbAjeEboqAxzMxKfFpE5UH/yXeFb | ||||
| NQdJvfBnpV7uMdLR7MdvVKUOFnEVjNBZRGj3jHB/Dw/dk1EFnl7kx5udIn3nS3v96IygPzrTTBsQ9BC/ | ||||
| 0ZoNJiHn1l1xVVcFRba1v40udOJttZnu/xJShKRSjYgmnowXTmCdTsdE2jrmYR4lPtj4lTOcktgl1wmc | ||||
| 1uDUnLwLGrxXMD2zD83TmSZrZ/jhi8s2kOA+nB4gGGTO5OGYY3VTAdBvsz9vWAGyNJOyX3QWJZ37BgGh | ||||
| diBdmd63Qj2W8UHBxcQ3EHk1CYkKhgiKM0p0KOG4omlcqpwYyJorKzPN7hRBwjlwpouYXCnJgZPDRWly | ||||
| PdAgjJcIGaGjXt1ZUUjBjMTLtyzIU3bctgIJVTBXjh8R6lCv5xLRKacf0Zjjjp6Bq7Zup23HB9fPygVD | ||||
| 0GRKclZnF3OwgkpRnyNGc65UVas3tjYqlNFRpvHIRC4fz9hwPm4rTig4XvRaRmuDhAlz9pjKZYtC2ACC | ||||
| oN4MaaJcmCgZ5uvFKJs3v0K38FrhX5FJteYeiLgtXPhu5kiUpaoMjrwKKOT0lZArbkuHSDyCzZBRRM1I | ||||
| G6hMqvmbNOE5aBpuETBFirl8SPTUOPGmNG/BO5RrAeY39A6r4YWPEanet/XKquVVGi3i0dsW852/Kp3c | ||||
| zqqvxiLGELqLKsfOzMGv8EODHoPXDTVQf7zQX0Rnf51+NR9sBD8cqKAujuMRFzzfgMxeWRSDIOYVRQP1 | ||||
| RxQvahX9KVpHJMMu2RQnz/3UIu0fwwXzfN7Yz2umZuyL6a6ss2nDxGnKZ/T7tx8msrepK3kvlPbMcH/B | ||||
| L1OnMF60txXcL7jG7f+4fvA9l6VTHAZd3J/9iVX9LVba448DUl9w7QTatNsrGBOj9ysvd152/NpqZITf | ||||
| b/Iu7P/nxf8DAAD//7pHo+CDPAAA | ||||
| `, | ||||
| 	}, | ||||
|  | ||||
| 	"/data/config_schema_v3.4.json": { | ||||
| 		name:    "config_schema_v3.4.json", | ||||
| 		local:   "data/config_schema_v3.4.json", | ||||
| 		size:    15874, | ||||
| 		modtime: 1518458244, | ||||
| 		compressed: ` | ||||
| H4sIAAAAAAAC/+xbT2/bOhK/+1MYeu9WOymwxQLb2x73tHvewBVoamyzoUh2SDlxi3z3hURJlihKpG2l | ||||
| Sff1AQ+NpeGQM5w/vxlSPxbLZfKnpgfISfJ5mRyMUZ/v779qKdb26Z3E/X2GZGfWHz/d22d/JKtyHMvK | ||||
| IVSKHdun9k16/Nvdp7tyuCUxJwUlkdx+BWrsM4RvBUMoBz8kR0DNpEg2q0X5TqFUgIaBTj4vy8Utly1J | ||||
| 86DDVhtkYp9Uj18qDstlogGPjHY4tEv94/7M/74lW7lcO4utnitiDKD4z3Bt1esvD2T9/Z/r/35c/+Mu | ||||
| XW8+/Nl7XeoXYWenz2DHBDNMinb+pKV8qf96aScmWVYRE96be0e4hr7MAsyTxMeQzC3ZG8lcz++RuS/O | ||||
| UfIiD+5gQ/VGwtjp59k/DRTBhE3WUr2ZxZbTzyOwjRohgRuqNxLYTn+bwItGaP8aky/P6/Lfl4rnJD/L | ||||
| pbO+SohezPOp0xdzxvXZKnREkxkoLk/Vyv06swQ5CJO0alouk23BeOZqXQr4d8niofNwufzhhvcOn+p9 | ||||
| 79e4UbTvR2Rp31MpDDybSqjpqa0KJH0E3DEOsSMIWksfURln2qQS04xR4x3PyRb4TRwooQdIdyjzIJdd | ||||
| aiXRXkZNBI+U3BDcg1+zDvFgdNi3XLcs/9ssPAwTSlRKsqy3DoJITslqmTADufYLtEwKwb4V8K+axGAB | ||||
| Lt8MpZqf8R5loVJFsHSkaWUnVOY5EXN51yVyRGh+EOd7LlvP0X3VztZb1og0ywgz9Hh8IGKEY0YZcmWB | ||||
| NDYETLuCl75gWTzx/hLiXGb9dYsi3wIOXLLvWcPfm4XvjbP7hjABmAqSQ9COETIQhhGeagV0zGY8mza1 | ||||
| XUlkpE4Q9kwbPIWiVW9cXJTqSpmBApHp1FY0l4fiJIO2vJk15mRiKsVYNmWSKdeWOANTDQTp4crxMidM | ||||
| xFgICIMnJZmNie8u2IE4pq21XawGEEeGUuRNxI9L9Z3xz0pquD3S1iMeGsFXbYDYOB6zk5iTcrHN3KNe | ||||
| MrS8rgK7MpQQmfCUM/E4v4nDs0GSHqQ216Cp5ACEmwM9AH2cGN6l6o2W2sQYOcvJPkykaIjkatSYzKr8 | ||||
| Dlu535ekYxY3qEIi8XuG7AgYC0mlOhdPvjQdggbBarNH+uXOFpsTXlX9xXmyefGwCOVkN4nFpqPzruSE | ||||
| ltgYQeuQRdXgPx0AiDPtgFjHRuqrapLLa8GorQs2DIKwdAx6xltZHAxttp0zokHfVtx1gsvxU6RN+Mb+ | ||||
| fXLsyNBRnvF1YIBVF+9y7l3IJoyAX7NMVX0U348VVYToOpiSaH5KYXWOU+eEbycf1lrudkcNep0CbSJK | ||||
| xZVnTBjYl3WRPwkUW870AbJLxqA0kkoe5xjeVlK8M0wUa1dhM4XsyDjsHYm3UnIgopcoEEiWSsFPEZTa | ||||
| EAy2ODTQApk5pVKZ2VGhPuSpZt+h73tnq68ZbZwFOQ37332Nv05fQ580Nddha20yJlKpQAR9Qxup0j0S | ||||
| CqkCZNKril6AzQok5fxDNprtBeEhNzO52l3ZBDAm7OwFZzkbdxpvYyeI1yxW80O0CXgWFbInKoTpAiGi | ||||
| MjgQvCB1VI65G8lPi0gM1D96r/it6oVsvPQXQS93GZtR9ON3qkIHi7iKRug0IrV7zpB/jQjd26OKfHNV | ||||
| HK9nioydrx31oxFB/1xOM21A0FP8RFs2OAm5tO6Kq7oqKrK38Ta60In31fp6wU8RRUgq1cjWxIvxygDW | ||||
| 6XRMwNaxCPMk8bHMXxnDqR275j6D0xqcOoTvkgYvNkxfCAgd1jNNts7hhy8vl4kEj354EMYXCAaZcx7R | ||||
| IK8uQAD9Prv2huUgC3MtuCJoLodn7rWnzt2Kpv8/ZUIdSteCHloTapoGQTOJyaYgsurcJSr1IijOKNEh | ||||
| eHNDi7pQGTGQ1jd0ZjopVAQJ58CZzmOQWZIBJ6er7MYenxDGC4SU0NEc4ozIpWBG4vVT5uQ5baatSAJe | ||||
| a70UMxibE0SRe7CR9Yv1jqE2toSWqv7VD+ozHqwiWBCj5zKHc7UyYp3NjJ6jZF0G1PagITh+Vi3YkCQ5 | ||||
| s7hpDlVQKew6Yqz0RrcobbSs+nJldJQbPjGRyafLo+8M2lacUHAi9q2K1gYJE+biAzhXLQphBwiCerHf | ||||
| RCE0UQzN12VSZUXwBn3QWzf/BozYunsgu7d04WuvIxmdqiJ4mJdDLqcvu9xwET0kYkM2A3qJOv2tqVKp | ||||
| 5m8/hU94N+HmB1MknyuGRJ+HJ1749B6iQ7EVcddG31l0WA2vsozs6kNbia1aXW2it3j0Hsl866+KQrdn | ||||
| 7KseiTGEHqIKzQvx/g1xaNA98YahmmqGKBRzsef/I1L96nb982yw/l4m+E1GRXV1ro+43voO9uyNt2KQ | ||||
| 6LxbUVP93opX9Yr+GWJnS4bdwClNRl90WnSbf+0yXDLP16V97DN1w2Ax3ZN2Jq2VOC35jHH/7sMEwpu6 | ||||
| kPhK0GiG2xv+PXWK50V7V8P9OG7c/5vxg0/lSjnFadCt/tE/r7OfuW16+nFI7PXeTqLddPsJY9vo/YDO | ||||
| PS1sPmQbucDQbzovyv9fFv8LAAD//+uCPa4CPgAA | ||||
| `, | ||||
| 	}, | ||||
|  | ||||
| 	"/data/config_schema_v3.5.json": { | ||||
| 		name:    "config_schema_v3.5.json", | ||||
| 		local:   "data/config_schema_v3.5.json", | ||||
| 		size:    16802, | ||||
| 		modtime: 1518458244, | ||||
| 		compressed: ` | ||||
| H4sIAAAAAAAC/+xbSY/jNha++1cISm6pJcBkBpi+zXFOM+cpuAWaepaZokjmkXKX0/B/H0iUVFpIkbZV | ||||
| XdVIBwi6LD0ub/veQurrJknSnzU9QEnST0l6MEZ9enz8XUtxb58+SCwecyR7c//rb4/22U/pXT2O5fUQ | ||||
| KsWeFZl9kx3/9vD3h3q4JTEnBTWR3P0O1NhnCH9UDKEe/JQeATWTIt3ebep3CqUCNAx0+impN5ckPUn3 | ||||
| YDCtNshEkTaPz80MSZJqwCOjgxn6rf70+Dr/Y092N511sNnmuSLGAIr/zvfWvP78RO7//Nf9/369/+dD | ||||
| dr/95efR61q+CHu7fA57JphhUvTrpz3luf3r3C9M8rwhJny09p5wDWOeBZgvEp9DPPdk78Rzu76D5zE7 | ||||
| R8mrMqjBjuqdmLHLr6M/DRTBhE3WUr2bxdbLr8OwRY0Qwx3VOzFsl7+N4U3HtHuP6eeX+/rfczPn4nx2 | ||||
| lsH+GiZGmOcSpwtz/PLsBeqRZA6Ky1Ozc7fMLEEJwqS9mJIk3VWM51OpSwH/qad4GjxMkq9TeB/M07wf | ||||
| /fIbRf/ew0v/nkph4MU0TC0vbUUg6TPgnnGIHUHQWrpHZJxpk0nMckaNczwnO+A3zUAJPUC2R1kGZ9ln | ||||
| lhPtnKhD8EjODcECoiWrD2Wm2Z8juT6lTBgoANO7fuz2PBk7myzsmFOfrv/bbhwTppSojOT5iAmCSE71 | ||||
| jpiBUrv5S9JKsD8q+HdLYrCC6bw5SrX+xAXKSmWKYO2Fy7JPqSxLItZyzUv4iJD8LEiM/L1dY/iqX220 | ||||
| LQ83SYRVOuAiADdhwKktXVZIY/HjUj9KkrRieTxxcQlxKfPxvkVV7gDT84x45qSj39uN681E+4YwAZgJ | ||||
| UkLQjhFyEIYRnmkF1GczDqUtqSuNhPkUoWDa4MlJu/EgVRxKDbnMQYHIdWbLoctxPM2hr41WxZxcLMUn | ||||
| O00doeq9pZOBmQaC9HDleFkSJmIsBITBk5LMYuKHAzsQx6y3tovFAOLIUIqyQ/y4PGEw/kVJDbcjbR+1 | ||||
| W8bveoDYTjxmL7Ek9Wa7tb1eMre8oQCHPNT5NeEZZ+J5fROHF4MkO0htrknF0gMQbg70APR5YfiQajRa | ||||
| ahNj5KwkRZhI0SCJlpyYtu2yRHh1bpquqqXBtLIoalKfac5qncgqIUd2BIxNZaV6LdFc8TyUQwRr2hHp | ||||
| 5wdb0i64X/MX5/Pc2RWqp0+m0S42br1qpSS0TqIRtA5ZVFtiZLNM45V2RqxjIf2qyufyijNKdcG2RDB/ | ||||
| 9eWo8VYWl692aueMaNC3lZADFDr+FmkTrrH/WBzrGeqdM75gDEw1TIw5d25kG06V37KeVeN0f4wVDUIM | ||||
| HUxJNN+kAnvFqdfMwC4+L8qm6o4a9DaV3AJKxdVxXXvDPUBVO870AfJLxqA0kkoe5xjOhlW8MyxUdVcl | ||||
| cQrZkXEoJhzvpORAxChQIJA8k4KfIii1IRjshWigFTJzyqQyq6eP7ubWq9X3va3xhibHAj8aIH+dBog+ | ||||
| aWquy621yZnIpAIR9A1tpMoKJBQyBcikUxQjgM0rtKXBbBrNCkF4yM1MqfZXdguMCTt7xVnJ/E7j7AAF | ||||
| 8zWbq7lTtIX0LAqyFyqE5QIhojI4ELwgdDSOuffEp01kDjQ+4G/mu2s3snXSX5R6Tbex9WY/bqeqdLCI | ||||
| a2iEziJCu+Ok+vtA6JGOGvLtVTjerhSJnW+N+tEZwfj0TzNtQNBT/EI7NjsyubTuiqu6GipS+Fsx7tok | ||||
| 2lfbSwzfhBUhqVQe1cSz8cYJ7KTTsZC2+hDmi8TnOn7lDJc0ds2tiUkPcemof0gavD6xfO0gdCWAabKb | ||||
| nJK44nIdSPDoTg/C+QWCQTY5uOgyr2GCAPpjtvcNK0FW5trkiqC5PD2bXq4a3ODoDgqWTGhAObWgp96E | ||||
| uqZB0ExioimIvDmgiQq9CIozSnQovbmhRV2pnBjI2ntAKx0pKoKEc+BMlzGZWZoDJ6er7MaesxDGK4SM | ||||
| 0Ih2fqspwYzE65csyUvWLduQBLzWeinm4FsTRFU6ciPrF/d7htrYElqq9tcY1Fc8gUWwSYxeyxyc1co6 | ||||
| 95pUFdtYTUsoZfj0+tbe5OzQXNcRwXdS8lEE4KAuQAAymo2swYMuc9o3avfebtk2zEjObC68hnlTKew+ | ||||
| YpDnRqircaeu5EtldBS0fmEil18uj6grSFtxQmEShW8VtDZImDAXH6pOxaIQ9oAgKCy65by4XShw1+sc | ||||
| qrrKe4fe9q3KvyHvd8LNUuo2HzCrAcbac2jNry2/lupigCIY6Fd23cYKWcKyFaTPbfEdBOr0SHgV0ay9 | ||||
| 6njbV/5FDD47P94I6bQjWyEXj7lJEnXfoaXKpFq/4Rq+07ANt/uYIuVaCBt9AyR1FgwfATurnfD00z42 | ||||
| dt7Nb3l5tPrU9x7uellto1XsdYz19t+0QaanJK5+CTGG0ENUa+XCCveGSDTrFzqhqqX6gVQXINX3btff | ||||
| zgbb79CC3zo1VOFPx26wvIjb4R9Ar++srlkwdKqrpfqhrvdW1+T0faC2eR99SZLRVwQ3w7Z5v40pmePr | ||||
| b18F492U7zRnsmgrxGXOV4wfD78sZIpLV3nfKMVa4d6TW6eTFsWmv+U0/XjVjxHd+NmnrDWf4jQ75/k6 | ||||
| Pum2n6FuR/KZkNgb9IOAvY0qfF0fuE7P2bsPTT1Xf8bV4ab+/7z5fwAAAP//yoGbgKJBAAA= | ||||
| `, | ||||
| 	}, | ||||
|  | ||||
| 	"/data/config_schema_v3.6.json": { | ||||
| 		name:    "config_schema_v3.6.json", | ||||
| 		local:   "data/config_schema_v3.6.json", | ||||
| 		size:    17084, | ||||
| 		modtime: 1518458244, | ||||
| 		compressed: ` | ||||
| H4sIAAAAAAAC/+xbS4/jNhK++1cISm7pxwAbBNi57XFPu+dteASaKstMUyRTpDztDPzfF3q2RJEibaun | ||||
| O8gECKYtFR/15FfF0rdNkqQ/a3qAkqSfk/RgjPr8+Pi7luK+ffogsXjMkezN/adfH9tnP6V39TiW10Oo | ||||
| FHtWZO2b7PiPh98e6uEtiTkpqInk7negpn2G8EfFEOrBT+kRUDMp0u3dpn6nUCpAw0Cnn5N6c0kykPQP | ||||
| RtNqg0wUafP43MyQJKkGPDI6mmHY6k+Pr/M/DmR39qyjzTbPFTEGUPx3vrfm9Zcncv/nv+7/9+n+nw/Z | ||||
| /faXnyeva/ki7Nvlc9gzwQyTYlg/HSjP3V/nYWGS5w0x4ZO194RrmPIswHyV+BzieSB7J5679R08T9k5 | ||||
| Sl6VQQ32VO/ETLv8OvrTQBFM2GRbqnez2Hr5dRhuo0aI4Z7qnRhul7+N4U3PtHuP6ZeX+/rfczPn4nzt | ||||
| LKP9NUxMYp5LnK6Y45fnIFCPJHNQXJ6anbtl1hKUIEw6iClJ0l3FeG5LXQr4Tz3F0+hhknyzw/tonub9 | ||||
| 5JffKIb3Hl6G91QKAy+mYWp56VYEkj4D7hmH2BEEW0v3iIwzbTKJWc6ocY7nZAf8phkooQfI9ijL4Cz7 | ||||
| rOVEOyfqI3gk54ZgAdGS1Ycy0+zPiVyfUiYMFIDp3TB2e7bGziYLO6bt0/V/241jwpQSlZE8nzBBEMmp | ||||
| 3hEzUGo3f0laCfZHBf/uSAxWYM+bo1TrT1ygrFSmCNZeuCz7lMqyJGIt17yEjwjJzw6Jib93a4xfDatN | ||||
| tuXhJomwSke4CISbcMCpLV1WSGPjx6V+lCRpxfJ44uIS4lLm032LqtwBpucZ8cxJJ7+3G9cbS/uGMAGY | ||||
| CVJC0I4RchCGEZ5pBdRnMw6lLakrjQzzKULBtMGTk3bjiVRxUWrMZQ4KRK6zNh26PI6nOQy50aoxJxdL | ||||
| 51M7TX1C1XtLrYGZBoL0cOV4WRImYiwEhMGTkqyNiR8u2IE4ZoO1XSwGEEeGUpR9xI/DCaPxL0pquD3S | ||||
| Dqd2x/jdECC2lsfsJZak3my/ttdL5pY3FuCYhxpfE55xJp7XN3F4MUiyg9TmGiiWHoBwc6AHoM8Lw8dU | ||||
| k9FSmxgjZyUpwkSKBkm05MR0ZZclwquxabqqlkbTyqKoSX2mOct1IrOEHNkRMBbKSvWaornO8xCGCOa0 | ||||
| E9IvD21Ku+B+zV+cz7Gz66i2n9inXey59aqVktAaRCNoHbKoLsXIZkjjlXZGrGND+lWZz+UZZ5TqgmWJ | ||||
| IH71YdR4K4vDq73aOSMa9G0p5CgKHX+NtAnX2N8Wx3qGeueMTxgDU42BMefOjWzDUPkt81k1hfvTWNFE | ||||
| iLGDKYnmu2Rgr3HqFRm0i8+TMlvdUYPeJpNbiFJxeVxf3nAPUNWOM32A/JIxKI2kksc5hrNgFe8MC1nd | ||||
| VSBOITsyDoXF8U5KDkRMDgoEkmdS8FMEpTYEg7UQDbRCZk6ZVGZ1+Ogubr1a/VDbmm7Iuhb4UQD5+xRA | ||||
| 9ElTcx221iZnIpMKRNA3tJEqK5BQyBQgk05RTAJsXmGbGsym0awQhIfczJRqf2W1wJiws1eclczvNM4K | ||||
| UBCvtVjNDdEW4FlUyF7IEJYThIjM4EDwgqOjccy953zaRGKg6QV/M99dt5Gtk/4i6GVvY+tFP26nqnQw | ||||
| iWtohM4ijnbHTfVfI0JPdNSQb6+K491KkbHzraN+NCKY3v5ppg0IeopfaMdmVyaX5l1xWVdDRQp/Kcad | ||||
| m0T7atfE8F1YEZJK5VHNjWwMR8rbc9FjOH9yakfOhTy2ZIKVVZl+Tj75MtZ4ybwxtLdqQAuA3hd7v0p8 | ||||
| rk/2nOGSLV/TT2JVV5eaIMakwcaS5YaMULME02Rn3R+5EEttKHh0A6cw8kIwyKwrnR6TjqET6I958WFY | ||||
| CbIy18JOguZy4Gq3nY16W/orlCUTGlHaFvQ0mFBfTgmaSQzOAJE3V1dRoARBcUaJDgG/G4r3lcqJgazr | ||||
| kFrpslURJJwDZ7qMwaxpDpycrrKb9gaKMF4hZIRGXHR0mhLMSLx+yZK8ZP2yDUnAa1svxRx8a4JoTg8b | ||||
| NbZ+cb9nqE1bXJCq+zUN6iveTSO08E6vZQ7OPG6dji9VxZac0xJKGb7Xv7VqO2sn0PWJ4LtD+igCcFAX | ||||
| IAAZzSbW4Ikuc9o3KoTfbtntMSM5a7OENcybStHuIyby3Bjq6rhDjIFSGR0VWr8ykcuvl5+oK0hbcULB | ||||
| OoVvFbQ2SJgwF18322JRCHtAEBQW3XKe9i+k/uvVVFWd/75D1f9W5d+A+53hZgm6zQfMcoCp9hxa82vL | ||||
| r6U6GaAIBoaVXX1qIUtYtoL0uStLBAN1eiS8iihjX3Xx70v/IgafnZ+1hHTak62AxWN6bKI6QTqqTKr1 | ||||
| S9Hhbo9tuBDKFCnXirDRvTGpM2H4CLGz2glPpfFjx867ef+bR6tPQ+3hbpDVNlrFXsdYb/9NGcS+P3LV | ||||
| S4gxhB6iSisXZrg3nESzSqozVHVUPyLVBZHqr27X388Guy/0gl+BNVThj+pusLyIvvkPoNd3VtfsMHSq | ||||
| q6P6oa73VpfVlzBS27yOviTJ6ObJzbhsPmzDJnN8F+/LYLyb8t3mWIt2QlzmfMXz4+GXBaS41OT8RhBr | ||||
| hY4wt06tEsVm6P+yP+v1x4h+/Owj35pPcZrd83yb9gC0H+huJ/KxSNpvC0YH9jYq8XV9+mt3IPSf4Hqa | ||||
| oqbZ4ab+/7z5fwAAAP//nm8U9rxCAAA= | ||||
| `, | ||||
| 	}, | ||||
|  | ||||
| 	"/data/config_schema_v3.7.json": { | ||||
| 		name:    "config_schema_v3.7.json", | ||||
| 		local:   "data/config_schema_v3.7.json", | ||||
| 		size:    17854, | ||||
| 		modtime: 1518458244, | ||||
| 		compressed: ` | ||||
| H4sIAAAAAAAC/+xc3W/bOBJ/918haPdt46TALe5wfbvHe7p7vsAVaGpsc0OR3CHlxi38vx/0GYkiRdpW | ||||
| mhSbAkUTafgxn/zNcNTvqyRJf9X0AAVJPyfpwRj1+eHhDy3Funl6L3H/kCPZmfWn3x+aZ7+kd9U4lldD | ||||
| qBQ7ts+aN9nxb/f/uK+GNyTmpKAikts/gJrmGcKfJUOoBj+mR0DNpEg3d6vqnUKpAA0DnX5Oqs0lSU/S | ||||
| PRhMqw0ysU/rx+d6hiRJNeCR0cEM/VZ/eXiZ/6Enu7NnHWy2fq6IMYDiv9O91a+/PJL1t3+t//dp/c/7 | ||||
| bL357dfR60q+CLtm+Rx2TDDDpOjXT3vKc/vTuV+Y5HlNTPho7R3hGsY8CzBfJT6FeO7J3ojndn0Hz2N2 | ||||
| jpKXRVCDHdUbMdMsv4z+NFAEEzbZhurNLLZafhmGm6gRYrijeiOGm+VvY3jVMe3eY/rleV39e67nnJ2v | ||||
| mWWwv5qJUcxzidMVc/zy7AXqkWQOistTvXO3zBqCAoRJezElSbotGc9tqUsB/6mmeBw8TJLvdngfzFO/ | ||||
| H/3mN4r+vYeX/j2VwsCzqZmaX7oRgaRPgDvGIXYEwcbSPSLjTJtMYpYzapzjOdkCv2kGSugBsh3KIjjL | ||||
| Lms40c6JuggeybkhuIdoyepDkWn2bSTXx5QJA3vA9K4fuzlbYyeThR3T9unqz2blmDClRGUkz0dMEERy | ||||
| qnbEDBTazV+SloL9WcK/WxKDJdjz5ijV8hPvUZYqUwQrL5yXfUplURCxlGtewkeE5CeHxMjf2zWGr/rV | ||||
| RtvycJNEWKUjXATCTTjgVJYuS6Sx8eNSP0qStGR5PPH+EuJC5uN9i7LYAqbnCfHESUe/b1auN5b2DWEC | ||||
| MBOkgKAdI+QgDCM80wqoz2YcSptTVxoZ5lOEPdMGT07alSdSxUWpIZc5KBC5zpp06PI4nubQ50aLxpxc | ||||
| zJ1PzTTVCVXtLbUGZhoI0sOV42VBmIixEBAGT0qyJia+u2AH4pj11naxGEAcGUpRdBE/DicMxj8rqeH2 | ||||
| SNuf2i3jd32A2Fges5NYkGqz3dpeL5la3lCAQx4qfE14xpl4Wt7E4dkgyQ5Sm2ugWHoAws2BHoA+zQwf | ||||
| Uo1GS21ijJwVZB8mEmx8lmyl5EDEmEjR4DxacmLa2swc4dUANl1UlYNp5X5fkfrsd5IQRaYSObIjYCze | ||||
| leolj3Md+iGgEUx8R6Rf7pu8d8ZH6584nwJs13luP7GPxNjD7UUrBaEV0kbQOmRRbR6STeDIC+2EWMfG | ||||
| /avSo8vT0ijVBWsXQZDrA7LxVhYHaju1c0Y06NvyzEEUOv4eaROusX+fHesZ6p0zPqsMTDVEz5w7N7IJ | ||||
| 4+nXTHrVOCcYx4o6QgwdTEk0PyRNe4lTL/ChWXyaudnqjhr0OuneTJSKS/a6Goh7gCq3nOkD5JeMQWkk | ||||
| lTzOMZxVrXhnmEn9rkJ6CtmRcdhbHLtgDALJMyn4KYJSG4LBgokGWiIzp0wqszjGdFfAXqy+L4CNN2Td | ||||
| HXxUSf46VRJ90tRch621yZnIpAIR9A1tpMr2SChkCpBJpyhGATYvsUkNJtNotheEh9zMFGp3ZUnBmLCz | ||||
| l5wVzO80zjJREK81WM0N0WbgWVTInskQ5hOEiMzgQPCCo6N2zJ3nfFpFYqBxF0A93127kY2T/iLoZW9j | ||||
| 40U/bqcqdTCJq2mEziKOdsd19s8RoUc6qsk3V8XxdqXI2PnaUT8aEYyvCDXTBgQ9xS+0ZZN7lUvzrris | ||||
| q6Yie38pxp2bRPtq2+nwQ1gRkkrlUc2NbPRHyutz0WE4f3JqR86ZPLZgghVlkX5OPvky1njJvDK0t2pA | ||||
| M4DeF3u/SnyqTvac4Zwtn+d7P8Z9FRc2p1il2rmOiiFpsEtlvrsj1HnBNNlal1HOuq0wgEc3wAojNASD | ||||
| zLof6rDrEGKBfp+3KIYVIEtzLTwlaC4HuHYP26BRpruPmTOhAaVtQY+9CXVll6CZxOAREHl9DxYFXhAU | ||||
| Z5ToEEC8ociPkvMtoU9Z23C10N2tIkg4B850EYNu0xw4OV1lOc2FFmG8RMgIjbgSaXUlmJF4/ZIFec66 | ||||
| ZWuSgN82foo5+NYEUZ8zNr5sPGO9Y6hNU4aQqv1tHP4XvOouVU4MfJjEh0kMK3R1bqCXMgdnEWCZnkJV | ||||
| xt5XpAUUMtw5cmvJf9KwoiuY4LuAfC8CcFDvQQAymo2swXPkTGlf6RbldstusIfkrEkxlzBvKkWzj5jI | ||||
| c2Ooq+JOBcQLZXRUaP3KRC6/Xg6zFpC24oSCBc1uFbQ2SJgwF/cq2GJRCDtAEBRm3XJaM5qpGy1XkFcI | ||||
| JH+DK6NblX/DlwrOcDOH56cDJonhWHsOrfm15ddSlSFSBAP9yq5OyJAlzFtB+tTWtIKBOj0SXkbcgVzV | ||||
| NeKrHUQMPjs/nArptCNbIEGL6eKKaiNqqTKplr/HCLcKbcJVdKZIsVSEjW6sSp0Jw3uIneVWeMrU7zt2 | ||||
| 3k07LD1afewLUne9rDbRKvY6xnL7r2tj9uWjq4hGjCH0EFVvu7Ds8QPKl5NyvTOktVQfEe2CiPaz2//7 | ||||
| s9X2m9Lgd4s1Vfgz0BssNOJLj3eg/59ErZND2KnWlupDrT+LWq2mm4F6p5c/cxKP7gxeDe96+m3YZI7/ | ||||
| GcKXYXk35buqtBZthT3P+YLn1v1vM0h2roP/lSDgAu2Obp1aJZRV39xof9jujyXd+Mln7hWf4jS5nPw+ | ||||
| bnBpPlHfjORjkTRf1wyAwiYqMXd9/G6313QfoXs6/sbZ66r6e179PwAA//8ZL3SpvkUAAA== | ||||
| `, | ||||
| 	}, | ||||
|  | ||||
| 	"/data/config_schema_v3.8.json": { | ||||
| 		name:    "config_schema_v3.8.json", | ||||
| 		local:   "data/config_schema_v3.8.json", | ||||
| 		size:    18246, | ||||
| 		modtime: 1518458244, | ||||
| 		compressed: ` | ||||
| H4sIAAAAAAAC/+xcS4/juBG++1cI2r1tPwbIIkjmlmNOyTkNj0BTZZvbFMktUp72DvzfAz1bokiRtuXu | ||||
| 3qQDBDstFR/15FfFkn+skiT9WdM9FCT9mqR7Y9TXx8fftBT3zdMHibvHHMnW3H/59bF59lN6V41jeTWE | ||||
| SrFlu6x5kx3+8vC3h2p4Q2KOCioiufkNqGmeIfxeMoRq8FN6ANRMinR9t6reKZQK0DDQ6dek2lyS9CTd | ||||
| g8G02iATu7R+fKpnSJJUAx4YHczQb/Wnx9f5H3uyO3vWwWbr54oYAyj+Pd1b/frbE7n/4x/3//ly//eH | ||||
| 7H79y8+j15V8EbbN8jlsmWCGSdGvn/aUp/Zfp35hkuc1MeGjtbeEaxjzLMB8l/gc4rkneyee2/UdPI/Z | ||||
| OUheFkENdlTvxEyz/DL600ARTNhkG6p3s9hq+WUYbqJGiOGO6p0Ybpa/juFVx7R7j+m3l/vqv6d6ztn5 | ||||
| mlkG+6uZGMU8lzhdMccvz16gHknmoLg81jt3y6whKECYtBdTkqSbkvHclroU8K9qiqfBwyT5YYf3wTz1 | ||||
| +9FffqPo33t46d9TKQy8mJqp+aUbEUj6DLhlHGJHEGws3SMyzrTJJGY5o8Y5npMN8KtmoITuIduiLIKz | ||||
| bLOGE+2cqIvgkZwbgjuIlqzeF5lmf4zk+pQyYWAHmN71Y9cna+xksrBj2j5d/W+9ckyYUqIykucjJggi | ||||
| OVY7YgYK7eYvSUvBfi/hny2JwRLseXOUavmJdyhLlSmClRfOyz6lsiiIWMo1z+EjQvKTQ2Lk7+0aw1f9 | ||||
| aqNtebhJIqzSES4C4SYccCpLlyXS2Phxrh8lSVqyPJ54dw5xIfPxvkVZbADT04R44qSjv9cr1xtL+4Yw | ||||
| AZgJUkDQjhFyEIYRnmkF1GczDqXNqas1wQjxpJEHQoqwY9rg0Um78sS0uHg2lEcOCkSusyZxOj/ipzn0 | ||||
| WdSi0SkXcydZM011llV7S62BmQaCdH/heFkQJmJsCYTBo5KsiZ4fLiyCOGS9tZ0tBhAHhlIU3dkQhygG | ||||
| 41+U1HB9TO7P95bxuz6UrG3PkliQarPd2l4vmVreUIBDHiokTnjGmXhe3sThxSDJ9lKbS0BbugfCzZ7u | ||||
| gT7PDB9SjUZLbWKMnBVkFyYSbHzqbKTkQMSYSNHgPFpyYtoqzhzhxVA3XVSVg2nlbleR+ux3kjpFJh05 | ||||
| sgNgLDKW6jXjc8GDECQJpsgj0m8PTYY846P1vzifQnHXyW8/sY/E2MPtVSsFoRUmR9A6ZFFtxpJNgMsr | ||||
| 7YRYx8b9ixKp8xPYKNUFqxxBOOyDvPFWFgd/O7VzRjTo6zLSQRQ6/BppE66xf50d6xnqnTM+/wxMNcTZ | ||||
| nDs3sg4j71umx2qcPYxjRR0hhg6mJJo3Sehe49QrfGgWn+Z4trqjBt0mMZyJUnFpYVctcQ9Q5YYzvYf8 | ||||
| nDEojaSSxzmGs/4V7wwzSeJFSE8hOzAOO4tjF4xBIHkmBT9GUGpDMFha0UBLZOaYSWUWx5juWtmr1fel | ||||
| svGGrFuGz3rK/089RR81NZdha21yJjKpQAR9Qxupsh0SCpkCZNIpilGAzUtsUoPJNJrtBOEhNzOF2l5Y | ||||
| UjAm7OwlZwXzO42zoBTEaw1Wc0O0GXgWFbJnMoT5BCEiM9gTPOPoqB1z6zmfVpEYaNwvUM93125k7aQ/ | ||||
| C3rZ21h70Y/bqUodTOJqGqGziKPdcfH954jQIx3V5OuL4ni7UmTsvHXUj0YE44KxZtqAoMf4hTZscgNz | ||||
| bt4Vl3XVVGTnL8W4c5NoX217It6EFSGpVB7VXMlGf6TcnosOw/mTUztyzuSxBROsKIv0a/LFl7HGS+bG | ||||
| 0N6qAc0Ael/s/S7xuTrZc4Zztnya7xIZd2Cc2cZilWrnei+GpMF+lvk+kFCPBtNkY11GOeu2wgAe3AAr | ||||
| jNAQDDLrfqjDrkOIBfpj3qIYVoAszaXwlKA5H+Da3W6DlpruPmbOhAaUtgU99SbUlV2CZhKDR0Dk9T1Y | ||||
| FHhBUJxRokMA8YoiP0rON4Q+Z6/3skvc8iqChHPgTBcx6DbNgZPjRZbTXGgRxkuEjNCIK5FWV4IZiZcv | ||||
| WZCXrFu2Jgn4beOnmINvTRD1OWPjy8Yz7rcMtWnKEFK1f43D/4JX3aXKiYFPk/g0iWGFrs4N9FLm4CwC | ||||
| LNN9qMrY+4q0gEKGO0euLflPGlZ0BRN8F5AfRQAO6h0IQEazkTV4jpwp7Y1uUa637AZ7SM6aFHOhNqdm | ||||
| HzGR58pQV8WdCogXyuio0PqdiVx+Px9mLSBtxQkFC5pdK2htkDBhzu5VsMWiELaAICjMuuW0ZjRTN1qu | ||||
| IK8QSP4OV0Yua+uAaQXYM2EjWVdF8hKzueJrCGegmssEpgMmKeVY7w59+/Xs12+VW1IEA/3Krm7LkA3N | ||||
| 20/63FbDgiE+PRBeRtyeXNRv4qs6RAw+OT/OCum0I1sgtYvp/4pqQGqpMqmWvwEJNxmtw/V3pkixVGyO | ||||
| bslKnanGR4i65UZ4Ctw3jrrLHbldb6ZHq099Keuul9U6WsVex1hu/3VVzb62dJXfiDGE7qMqdWcWTN6g | ||||
| 8Dkp9DtDWkv1GdHOiGh/dvv/eLbafrca/Daypgp/anqFhUZ8I/IB9L+EWv/n3LLKVzkxkM2w8wa2PEEe | ||||
| TltuqT5teWlb/iBWYLU0DaxherU2p6DovuvV8Cat34ZN5viFDl8W6t2U7yLYWrTVzTznCwaRh19m0P7c | ||||
| 9xE3gskLNJO6dWoVqFZ966j9AwP+0NONn/zcQMWnOE6ufn+M24eanwpYj+RjkTTfLg2i9jqqeOH6EQK7 | ||||
| ean7MQBPP+U4w19V/z+t/hsAAP//Fd/bF0ZHAAA= | ||||
| `, | ||||
| 	}, | ||||
|  | ||||
| 	"/data/config_schema_v3.9.json": { | ||||
| 		name:    "config_schema_v3.9.json", | ||||
| 		local:   "data/config_schema_v3.9.json", | ||||
| 		size:    18407, | ||||
| 		modtime: 1518458244, | ||||
| 		compressed: ` | ||||
| H4sIAAAAAAAC/+xcSY/jNha++1cISm6ppYEJBkjf5jinmfMU3AJNPdtMUSTzSLnLadR/H2gtiSJFylYt | ||||
| makAQZetx+XxLfzeIv/YJEn6s6ZHKEj6NUmPxqiv9/e/aylum2/vJB7ucyR7c/vl1/vmu5/Sm2ocy6sh | ||||
| VIo9O2TNk+z0t7vf7qrhDYk5K6iI5O53oKb5DuGPkiFUgx/SE6BmUqTbm031TKFUgIaBTr8m1eaSpCfp | ||||
| vhhMqw0ycUjrr5/rGZIk1YAnRgcz9Fv96f5l/vue7MaedbDZ+ntFjAEU/57urX787YHc/vmP2/98uf3t | ||||
| Lrvd/vLz6HF1vgj7Zvkc9kwww6To1097yuf2r+d+YZLnNTHho7X3hGsY8yzAfJf4GOK5J3snntv1HTyP | ||||
| 2TlJXhZBCXZU78RMs/w68tNAEUxYZRuqd9PYavl1GG68RojhjuqdGG6Wv47hTce0e4/pt6fb6t/nes7Z | ||||
| +ZpZBvurmRj5PNdxunyO/zz7A/WcZA6Ky3O9c/eZNQQFCJP2x5Qk6a5kPLdPXQr4VzXFw+DLJPlhu/fB | ||||
| PPXz0Se/UvTPPbz0z6kUBp5MzdT80s0RSPoIuGccYkcQbDTdc2ScaZNJzHJGjXM8JzvgV81ACT1CtkdZ | ||||
| BGfZZw0n2jlR58EjOTcEDxB9svpYZJr9OTrXh5QJAwfA9KYfu3UNhieDJDtKbeJPypplMm3Yvm3XUP23 | ||||
| 3TgmTClRGcnz0VkQRHKuGGMGCu0+piQtBfujhH+2JAZLsOfNUar1Jz6gLJXQWSHzkKq3xJkiWFl+iFgW | ||||
| BRFruYMlTEeIaXIxjXxMu8bwUb/aaFsebpIIS3C4qICLCzu5yrpkiTTWZy213SRJS5bHEx+WEE8UUJTF | ||||
| DnBiv2MznH7eblxPLOkbwgRgJkgRVnqEHIRhhGdaAfXpjENoc+JqVTDieNLISyhFODBt8Oyk3XgcYJzz | ||||
| G55HDgpErrMmWFt+y6Q59JHbqq4sF3N3QjNNdStUe0utgZkGgvR44XhZECZidAmEwbOSrPGeH84tgjhl | ||||
| vbYtPgYQJ4ZSFN3dEIdiBuOflNRwvU/uMUXL+E3vSra2ZUksSLXZbm2vlUw1b3iAQx4q9E94xpl4XF/F | ||||
| L4E/g+FHINwc6RHo48zwIdVotNQmRslZQQ5hIsHGt85OSg5EjIkUDc6jJSemzRzNEV4Mr9NVRTmYVh4O | ||||
| FalPfyfhWmSgkyM7AcaicaleokwXPAhBkmBYPiL9dtdE5TM2Wv/Febp9dkwRwgL2lRh7ub1IpSC0AvAI | ||||
| Woc0qo2S5pDzhFjH+v2LgrflQXOU6IKZlSAc9kHeeC2Lg7+d2DkjGvR1UfDAC51+jdQJ19i/z471DPXO | ||||
| GR+sBqYa4mzOnRvZhpH3a8bSahw9jH1F7SGGBqYkmjcJ6F781At8aBafxni2uKMGvU5gGBHfz4eFXYbG | ||||
| PUCVO870EfIlY1AaSSWPMwxnzi3eGGaCxIuQnkJ2YhwOFscuGINA8kwKfo6g1IZgMLWigZbIzDmTyqyO | ||||
| Md35uRetd6TnHJWNz3zK/08+RZ81NZdha21yJjKpQARtQxupsgMSCpkCZNJ5FCMHm5fYhAaTaTQ7CMJD | ||||
| ZmYKtb8wpWBM2NhLzgrmNxpnQimI1xqs5oZoM/AsymXPRAjzAUJEZHAkuODqqA1z77mfNpEYaNyjUM93 | ||||
| 025k66RfBL3sbWy96MdtVKUOBnE1TWTqflps/2t46JGMavLtRX68XSnSd762149GBOOEsWbagKDn+IV2 | ||||
| bFKBWRp3xUVdNRU5+FMx7tgk2lbbPow3YUVIKpVHNFey0V8pr89Fh+H8wantOWfi2IIJVpRF+jX54otY | ||||
| 40/mlaG9lQOaAfQ+3/td4mN1s+cM53T5eb4zZdz1sbB1xkrVzvV7DEmDPTTzvSehvhCmyc4qRjnztsIA | ||||
| ntwAK4zQEAwyqz7UYdchxAL9MasohhUgS3MpPCVolgNcu8Nu0MbT1WPmVGhAaWvQQ69CXdolqCYxeARE | ||||
| XtfBosALguKMEh0CiFck+VFyviP0MXupy65R5VUECefAmS5i0G2aAyfnizSnKWgRxkuEjNCIkkgrK8GM | ||||
| xMuXLMhT1i1bkwTstrFTzMG3Joj6nrHxZWMZt3uG2jRpCKnaT2P3v2Kpu1Q5MfCpEp8qMczQ1bGBXksd | ||||
| nEmAdToeVRlbr0gLKCTGxhWpYrmOir2vqQ9Mult0hSl81coPfFoHEICMZiPV8dxPU9pXKrlcbwYNUJGc | ||||
| NfHoSj1RzT5i3NSVfrFyUhVqL5TRUX74OxO5/L4ck61w2ooTChaOu/agtUHChFnc2GAfi0LYA4KgMGuW | ||||
| 0wTTTJJpvey9QiD5O9SXXNrWodgK3WfChr0uF3qJ2lzxuobTUc2FDdMBk/hzLHeHvP1y9su3CkQpgoF+ | ||||
| ZVdrZkiH5vUnfWxTZ0EXn54ILyNKLRc1p/hSFBGDn51vj4Vk2pGtEAfGNItFdSu1VJlU65dLwh1J23Cy | ||||
| nilSrOWbo/u3Umdc8hG8brkTnmz4K3vd9a7crpHTI9WHPu9105/VNlrEXsNYb/91Cs6ucbpydcQYQo9R | ||||
| ab2F2ZU3yJJOqgJOl9ZSfXq0BR7tr67/H09X2xdrgy9v1lThd2Gv0NCIF0o+gPzXEOv/nFlW8SonBrIZ | ||||
| dt5AlyfIw6nLLdWnLq+tyx9EC6z+p4E2TOtwcwKKbtLeDMtu/TZsMsdPiPiiUO+mfFVja9FWNvOcr+hE | ||||
| 7n6ZQftzL1O8EkxeofPULVMrQbXp+0ztX0Dwu55u/OT3ECo+xXlSJ/4x7jVqfstg/BK6RdK86DTw2tuo | ||||
| 5IXrVxLsTqfu1wo8zZfjCH9T/f+8+W8AAAD//6unbt7nRwAA | ||||
| `, | ||||
| 	}, | ||||
|  | ||||
| 	"/data": { | ||||
| 		name:  "data", | ||||
| 		local: `data`, | ||||
| 		isDir: true, | ||||
| 	}, | ||||
| } | ||||
|  | ||||
| var _escDirs = map[string][]os.FileInfo{ | ||||
|  | ||||
| 	"data": { | ||||
| 		_escData["/data/config_schema_v3.0.json"], | ||||
| 		_escData["/data/config_schema_v3.1.json"], | ||||
| 		_escData["/data/config_schema_v3.2.json"], | ||||
| 		_escData["/data/config_schema_v3.3.json"], | ||||
| 		_escData["/data/config_schema_v3.4.json"], | ||||
| 		_escData["/data/config_schema_v3.5.json"], | ||||
| 		_escData["/data/config_schema_v3.6.json"], | ||||
| 		_escData["/data/config_schema_v3.7.json"], | ||||
| 		_escData["/data/config_schema_v3.8.json"], | ||||
| 		_escData["/data/config_schema_v3.9.json"], | ||||
| 	}, | ||||
| } | ||||
							
								
								
									
										4
									
								
								vendor/github.com/google/go-cmp/cmp/report_compare.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										4
									
								
								vendor/github.com/google/go-cmp/cmp/report_compare.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -79,7 +79,7 @@ func (opts formatOptions) verbosity() uint { | ||||
| 	} | ||||
| } | ||||
|  | ||||
| const maxVerbosityPreset = 3 | ||||
| const maxVerbosityPreset = 6 | ||||
|  | ||||
| // verbosityPreset modifies the verbosity settings given an index | ||||
| // between 0 and maxVerbosityPreset, inclusive. | ||||
| @@ -100,7 +100,7 @@ func verbosityPreset(opts formatOptions, i int) formatOptions { | ||||
| func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) { | ||||
| 	if opts.DiffMode == diffIdentical { | ||||
| 		opts = opts.WithVerbosity(1) | ||||
| 	} else { | ||||
| 	} else if opts.verbosity() < 3 { | ||||
| 		opts = opts.WithVerbosity(3) | ||||
| 	} | ||||
|  | ||||
|   | ||||
							
								
								
									
										25
									
								
								vendor/github.com/google/go-cmp/cmp/report_slices.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										25
									
								
								vendor/github.com/google/go-cmp/cmp/report_slices.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -26,8 +26,6 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { | ||||
| 		return false // No differences detected | ||||
| 	case !v.ValueX.IsValid() || !v.ValueY.IsValid(): | ||||
| 		return false // Both values must be valid | ||||
| 	case v.Type.Kind() == reflect.Slice && (v.ValueX.Len() == 0 || v.ValueY.Len() == 0): | ||||
| 		return false // Both slice values have to be non-empty | ||||
| 	case v.NumIgnored > 0: | ||||
| 		return false // Some ignore option was used | ||||
| 	case v.NumTransformed > 0: | ||||
| @@ -45,7 +43,16 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { | ||||
| 		return false | ||||
| 	} | ||||
|  | ||||
| 	switch t := v.Type; t.Kind() { | ||||
| 	// Check whether this is an interface with the same concrete types. | ||||
| 	t := v.Type | ||||
| 	vx, vy := v.ValueX, v.ValueY | ||||
| 	if t.Kind() == reflect.Interface && !vx.IsNil() && !vy.IsNil() && vx.Elem().Type() == vy.Elem().Type() { | ||||
| 		vx, vy = vx.Elem(), vy.Elem() | ||||
| 		t = vx.Type() | ||||
| 	} | ||||
|  | ||||
| 	// Check whether we provide specialized diffing for this type. | ||||
| 	switch t.Kind() { | ||||
| 	case reflect.String: | ||||
| 	case reflect.Array, reflect.Slice: | ||||
| 		// Only slices of primitive types have specialized handling. | ||||
| @@ -57,6 +64,11 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { | ||||
| 			return false | ||||
| 		} | ||||
|  | ||||
| 		// Both slice values have to be non-empty. | ||||
| 		if t.Kind() == reflect.Slice && (vx.Len() == 0 || vy.Len() == 0) { | ||||
| 			return false | ||||
| 		} | ||||
|  | ||||
| 		// If a sufficient number of elements already differ, | ||||
| 		// use specialized formatting even if length requirement is not met. | ||||
| 		if v.NumDiff > v.NumSame { | ||||
| @@ -68,7 +80,7 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { | ||||
|  | ||||
| 	// Use specialized string diffing for longer slices or strings. | ||||
| 	const minLength = 64 | ||||
| 	return v.ValueX.Len() >= minLength && v.ValueY.Len() >= minLength | ||||
| 	return vx.Len() >= minLength && vy.Len() >= minLength | ||||
| } | ||||
|  | ||||
| // FormatDiffSlice prints a diff for the slices (or strings) represented by v. | ||||
| @@ -77,6 +89,11 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { | ||||
| func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { | ||||
| 	assert(opts.DiffMode == diffUnknown) | ||||
| 	t, vx, vy := v.Type, v.ValueX, v.ValueY | ||||
| 	if t.Kind() == reflect.Interface { | ||||
| 		vx, vy = vx.Elem(), vy.Elem() | ||||
| 		t = vx.Type() | ||||
| 		opts = opts.WithTypeMode(emitType) | ||||
| 	} | ||||
|  | ||||
| 	// Auto-detect the type of the data. | ||||
| 	var isLinedText, isText, isBinary bool | ||||
|   | ||||
							
								
								
									
										3
									
								
								vendor/github.com/imdario/mergo/.travis.yml
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										3
									
								
								vendor/github.com/imdario/mergo/.travis.yml
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -1,4 +1,7 @@ | ||||
| language: go | ||||
| arch: | ||||
|     - amd64 | ||||
|     - ppc64le | ||||
| install: | ||||
|   - go get -t | ||||
|   - go get golang.org/x/tools/cmd/cover | ||||
|   | ||||
							
								
								
									
										2
									
								
								vendor/github.com/imdario/mergo/README.md
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										2
									
								
								vendor/github.com/imdario/mergo/README.md
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -97,7 +97,7 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont | ||||
| - [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) | ||||
| - [jnuthong/item_search](https://github.com/jnuthong/item_search) | ||||
| - [bukalapak/snowboard](https://github.com/bukalapak/snowboard) | ||||
| - [janoszen/containerssh](https://github.com/janoszen/containerssh) | ||||
| - [containerssh/containerssh](https://github.com/containerssh/containerssh) | ||||
|  | ||||
| ## Install | ||||
|  | ||||
|   | ||||
							
								
								
									
										7
									
								
								vendor/github.com/imdario/mergo/merge.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										7
									
								
								vendor/github.com/imdario/mergo/merge.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -95,13 +95,18 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co | ||||
| 				} | ||||
| 			} | ||||
| 		} else { | ||||
| 			if (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { | ||||
| 			if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { | ||||
| 				dst.Set(src) | ||||
| 			} | ||||
| 		} | ||||
| 	case reflect.Map: | ||||
| 		if dst.IsNil() && !src.IsNil() { | ||||
| 			if dst.CanSet() { | ||||
| 				dst.Set(reflect.MakeMap(dst.Type())) | ||||
| 			} else { | ||||
| 				dst = src | ||||
| 				return | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		if src.Kind() != reflect.Map { | ||||
|   | ||||
							
								
								
									
										1
									
								
								vendor/github.com/joho/godotenv/.gitignore
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										1
									
								
								vendor/github.com/joho/godotenv/.gitignore
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1 @@ | ||||
| .DS_Store | ||||
							
								
								
									
										8
									
								
								vendor/github.com/joho/godotenv/.travis.yml
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										8
									
								
								vendor/github.com/joho/godotenv/.travis.yml
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,8 @@ | ||||
| language: go | ||||
|  | ||||
| go: | ||||
|   - 1.x | ||||
|  | ||||
| os: | ||||
|   - linux | ||||
|   - osx | ||||
							
								
								
									
										23
									
								
								vendor/github.com/joho/godotenv/LICENCE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										23
									
								
								vendor/github.com/joho/godotenv/LICENCE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,23 @@ | ||||
| Copyright (c) 2013 John Barton | ||||
|  | ||||
| MIT License | ||||
|  | ||||
| Permission is hereby granted, free of charge, to any person obtaining | ||||
| a copy of this software and associated documentation files (the | ||||
| "Software"), to deal in the Software without restriction, including | ||||
| without limitation the rights to use, copy, modify, merge, publish, | ||||
| distribute, sublicense, and/or sell copies of the Software, and to | ||||
| permit persons to whom the Software is furnished to do so, subject to | ||||
| the following conditions: | ||||
|  | ||||
| The above copyright notice and this permission notice shall be | ||||
| included in all copies or substantial portions of the Software. | ||||
|  | ||||
| THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
| EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
| MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
| NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE | ||||
| LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||||
| OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||||
| WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||||
|  | ||||
							
								
								
									
										163
									
								
								vendor/github.com/joho/godotenv/README.md
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										163
									
								
								vendor/github.com/joho/godotenv/README.md
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,163 @@ | ||||
| # GoDotEnv [](https://travis-ci.org/joho/godotenv) [](https://ci.appveyor.com/project/joho/godotenv) [](https://goreportcard.com/report/github.com/joho/godotenv) | ||||
|  | ||||
| A Go (golang) port of the Ruby dotenv project (which loads env vars from a .env file) | ||||
|  | ||||
| From the original Library: | ||||
|  | ||||
| > Storing configuration in the environment is one of the tenets of a twelve-factor app. Anything that is likely to change between deployment environments–such as resource handles for databases or credentials for external services–should be extracted from the code into environment variables. | ||||
| > | ||||
| > But it is not always practical to set environment variables on development machines or continuous integration servers where multiple projects are run. Dotenv load variables from a .env file into ENV when the environment is bootstrapped. | ||||
|  | ||||
| It can be used as a library (for loading in env for your own daemons etc) or as a bin command. | ||||
|  | ||||
| There is test coverage and CI for both linuxish and windows environments, but I make no guarantees about the bin version working on windows. | ||||
|  | ||||
| ## Installation | ||||
|  | ||||
| As a library | ||||
|  | ||||
| ```shell | ||||
| go get github.com/joho/godotenv | ||||
| ``` | ||||
|  | ||||
| or if you want to use it as a bin command | ||||
| ```shell | ||||
| go get github.com/joho/godotenv/cmd/godotenv | ||||
| ``` | ||||
|  | ||||
| ## Usage | ||||
|  | ||||
| Add your application configuration to your `.env` file in the root of your project: | ||||
|  | ||||
| ```shell | ||||
| S3_BUCKET=YOURS3BUCKET | ||||
| SECRET_KEY=YOURSECRETKEYGOESHERE | ||||
| ``` | ||||
|  | ||||
| Then in your Go app you can do something like | ||||
|  | ||||
| ```go | ||||
| package main | ||||
|  | ||||
| import ( | ||||
|     "github.com/joho/godotenv" | ||||
|     "log" | ||||
|     "os" | ||||
| ) | ||||
|  | ||||
| func main() { | ||||
|   err := godotenv.Load() | ||||
|   if err != nil { | ||||
|     log.Fatal("Error loading .env file") | ||||
|   } | ||||
|  | ||||
|   s3Bucket := os.Getenv("S3_BUCKET") | ||||
|   secretKey := os.Getenv("SECRET_KEY") | ||||
|  | ||||
|   // now do something with s3 or whatever | ||||
| } | ||||
| ``` | ||||
|  | ||||
| If you're even lazier than that, you can just take advantage of the autoload package which will read in `.env` on import | ||||
|  | ||||
| ```go | ||||
| import _ "github.com/joho/godotenv/autoload" | ||||
| ``` | ||||
|  | ||||
| While `.env` in the project root is the default, you don't have to be constrained, both examples below are 100% legit | ||||
|  | ||||
| ```go | ||||
| _ = godotenv.Load("somerandomfile") | ||||
| _ = godotenv.Load("filenumberone.env", "filenumbertwo.env") | ||||
| ``` | ||||
|  | ||||
| If you want to be really fancy with your env file you can do comments and exports (below is a valid env file) | ||||
|  | ||||
| ```shell | ||||
| # I am a comment and that is OK | ||||
| SOME_VAR=someval | ||||
| FOO=BAR # comments at line end are OK too | ||||
| export BAR=BAZ | ||||
| ``` | ||||
|  | ||||
| Or finally you can do YAML(ish) style | ||||
|  | ||||
| ```yaml | ||||
| FOO: bar | ||||
| BAR: baz | ||||
| ``` | ||||
|  | ||||
| as a final aside, if you don't want godotenv munging your env you can just get a map back instead | ||||
|  | ||||
| ```go | ||||
| var myEnv map[string]string | ||||
| myEnv, err := godotenv.Read() | ||||
|  | ||||
| s3Bucket := myEnv["S3_BUCKET"] | ||||
| ``` | ||||
|  | ||||
| ... or from an `io.Reader` instead of a local file | ||||
|  | ||||
| ```go | ||||
| reader := getRemoteFile() | ||||
| myEnv, err := godotenv.Parse(reader) | ||||
| ``` | ||||
|  | ||||
| ... or from a `string` if you so desire | ||||
|  | ||||
| ```go | ||||
| content := getRemoteFileContent() | ||||
| myEnv, err := godotenv.Unmarshal(content) | ||||
| ``` | ||||
|  | ||||
| ### Command Mode | ||||
|  | ||||
| Assuming you've installed the command as above and you've got `$GOPATH/bin` in your `$PATH` | ||||
|  | ||||
| ``` | ||||
| godotenv -f /some/path/to/.env some_command with some args | ||||
| ``` | ||||
|  | ||||
| If you don't specify `-f` it will fall back on the default of loading `.env` in `PWD` | ||||
|  | ||||
| ### Writing Env Files | ||||
|  | ||||
| Godotenv can also write a map representing the environment to a correctly-formatted and escaped file | ||||
|  | ||||
| ```go | ||||
| env, err := godotenv.Unmarshal("KEY=value") | ||||
| err := godotenv.Write(env, "./.env") | ||||
| ``` | ||||
|  | ||||
| ... or to a string | ||||
|  | ||||
| ```go | ||||
| env, err := godotenv.Unmarshal("KEY=value") | ||||
| content, err := godotenv.Marshal(env) | ||||
| ``` | ||||
|  | ||||
| ## Contributing | ||||
|  | ||||
| Contributions are most welcome! The parser itself is pretty stupidly naive and I wouldn't be surprised if it breaks with edge cases. | ||||
|  | ||||
| *code changes without tests will not be accepted* | ||||
|  | ||||
| 1. Fork it | ||||
| 2. Create your feature branch (`git checkout -b my-new-feature`) | ||||
| 3. Commit your changes (`git commit -am 'Added some feature'`) | ||||
| 4. Push to the branch (`git push origin my-new-feature`) | ||||
| 5. Create new Pull Request | ||||
|  | ||||
| ## Releases | ||||
|  | ||||
| Releases should follow [Semver](http://semver.org/) though the first couple of releases are `v1` and `v1.1`. | ||||
|  | ||||
| Use [annotated tags for all releases](https://github.com/joho/godotenv/issues/30). Example `git tag -a v1.2.1` | ||||
|  | ||||
| ## CI | ||||
|  | ||||
| Linux: [](https://travis-ci.org/joho/godotenv) Windows: [](https://ci.appveyor.com/project/joho/godotenv) | ||||
|  | ||||
| ## Who? | ||||
|  | ||||
| The original library [dotenv](https://github.com/bkeepers/dotenv) was written by [Brandon Keepers](http://opensoul.org/), and this port was done by [John Barton](https://johnbarton.co/) based off the tests/fixtures in the original library. | ||||
							
								
								
									
										346
									
								
								vendor/github.com/joho/godotenv/godotenv.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										346
									
								
								vendor/github.com/joho/godotenv/godotenv.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,346 @@ | ||||
| // Package godotenv is a go port of the ruby dotenv library (https://github.com/bkeepers/dotenv) | ||||
| // | ||||
| // Examples/readme can be found on the github page at https://github.com/joho/godotenv | ||||
| // | ||||
| // The TL;DR is that you make a .env file that looks something like | ||||
| // | ||||
| // 		SOME_ENV_VAR=somevalue | ||||
| // | ||||
| // and then in your go code you can call | ||||
| // | ||||
| // 		godotenv.Load() | ||||
| // | ||||
| // and all the env vars declared in .env will be available through os.Getenv("SOME_ENV_VAR") | ||||
| package godotenv | ||||
|  | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"os" | ||||
| 	"os/exec" | ||||
| 	"regexp" | ||||
| 	"sort" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
// doubleQuoteSpecialChars lists the characters that must be backslash-escaped
// when a value is serialized inside double quotes (see doubleQuoteEscape/Marshal).
const doubleQuoteSpecialChars = "\\\n\r\"!$`"
|  | ||||
| // Load will read your env file(s) and load them into ENV for this process. | ||||
| // | ||||
| // Call this function as close as possible to the start of your program (ideally in main) | ||||
| // | ||||
| // If you call Load without any args it will default to loading .env in the current path | ||||
| // | ||||
| // You can otherwise tell it which files to load (there can be more than one) like | ||||
| // | ||||
| //		godotenv.Load("fileone", "filetwo") | ||||
| // | ||||
| // It's important to note that it WILL NOT OVERRIDE an env variable that already exists - consider the .env file to set dev vars or sensible defaults | ||||
| func Load(filenames ...string) (err error) { | ||||
| 	filenames = filenamesOrDefault(filenames) | ||||
|  | ||||
| 	for _, filename := range filenames { | ||||
| 		err = loadFile(filename, false) | ||||
| 		if err != nil { | ||||
| 			return // return early on a spazout | ||||
| 		} | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // Overload will read your env file(s) and load them into ENV for this process. | ||||
| // | ||||
| // Call this function as close as possible to the start of your program (ideally in main) | ||||
| // | ||||
| // If you call Overload without any args it will default to loading .env in the current path | ||||
| // | ||||
| // You can otherwise tell it which files to load (there can be more than one) like | ||||
| // | ||||
| //		godotenv.Overload("fileone", "filetwo") | ||||
| // | ||||
| // It's important to note this WILL OVERRIDE an env variable that already exists - consider the .env file to forcefilly set all vars. | ||||
| func Overload(filenames ...string) (err error) { | ||||
| 	filenames = filenamesOrDefault(filenames) | ||||
|  | ||||
| 	for _, filename := range filenames { | ||||
| 		err = loadFile(filename, true) | ||||
| 		if err != nil { | ||||
| 			return // return early on a spazout | ||||
| 		} | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // Read all env (with same file loading semantics as Load) but return values as | ||||
| // a map rather than automatically writing values into env | ||||
| func Read(filenames ...string) (envMap map[string]string, err error) { | ||||
| 	filenames = filenamesOrDefault(filenames) | ||||
| 	envMap = make(map[string]string) | ||||
|  | ||||
| 	for _, filename := range filenames { | ||||
| 		individualEnvMap, individualErr := readFile(filename) | ||||
|  | ||||
| 		if individualErr != nil { | ||||
| 			err = individualErr | ||||
| 			return // return early on a spazout | ||||
| 		} | ||||
|  | ||||
| 		for key, value := range individualEnvMap { | ||||
| 			envMap[key] = value | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // Parse reads an env file from io.Reader, returning a map of keys and values. | ||||
| func Parse(r io.Reader) (envMap map[string]string, err error) { | ||||
| 	envMap = make(map[string]string) | ||||
|  | ||||
| 	var lines []string | ||||
| 	scanner := bufio.NewScanner(r) | ||||
| 	for scanner.Scan() { | ||||
| 		lines = append(lines, scanner.Text()) | ||||
| 	} | ||||
|  | ||||
| 	if err = scanner.Err(); err != nil { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	for _, fullLine := range lines { | ||||
| 		if !isIgnoredLine(fullLine) { | ||||
| 			var key, value string | ||||
| 			key, value, err = parseLine(fullLine, envMap) | ||||
|  | ||||
| 			if err != nil { | ||||
| 				return | ||||
| 			} | ||||
| 			envMap[key] = value | ||||
| 		} | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
|  | ||||
| //Unmarshal reads an env file from a string, returning a map of keys and values. | ||||
| func Unmarshal(str string) (envMap map[string]string, err error) { | ||||
| 	return Parse(strings.NewReader(str)) | ||||
| } | ||||
|  | ||||
| // Exec loads env vars from the specified filenames (empty map falls back to default) | ||||
| // then executes the cmd specified. | ||||
| // | ||||
| // Simply hooks up os.Stdin/err/out to the command and calls Run() | ||||
| // | ||||
| // If you want more fine grained control over your command it's recommended | ||||
| // that you use `Load()` or `Read()` and the `os/exec` package yourself. | ||||
| func Exec(filenames []string, cmd string, cmdArgs []string) error { | ||||
| 	Load(filenames...) | ||||
|  | ||||
| 	command := exec.Command(cmd, cmdArgs...) | ||||
| 	command.Stdin = os.Stdin | ||||
| 	command.Stdout = os.Stdout | ||||
| 	command.Stderr = os.Stderr | ||||
| 	return command.Run() | ||||
| } | ||||
|  | ||||
| // Write serializes the given environment and writes it to a file | ||||
| func Write(envMap map[string]string, filename string) error { | ||||
| 	content, error := Marshal(envMap) | ||||
| 	if error != nil { | ||||
| 		return error | ||||
| 	} | ||||
| 	file, error := os.Create(filename) | ||||
| 	if error != nil { | ||||
| 		return error | ||||
| 	} | ||||
| 	_, err := file.WriteString(content) | ||||
| 	return err | ||||
| } | ||||
|  | ||||
| // Marshal outputs the given environment as a dotenv-formatted environment file. | ||||
| // Each line is in the format: KEY="VALUE" where VALUE is backslash-escaped. | ||||
| func Marshal(envMap map[string]string) (string, error) { | ||||
| 	lines := make([]string, 0, len(envMap)) | ||||
| 	for k, v := range envMap { | ||||
| 		lines = append(lines, fmt.Sprintf(`%s="%s"`, k, doubleQuoteEscape(v))) | ||||
| 	} | ||||
| 	sort.Strings(lines) | ||||
| 	return strings.Join(lines, "\n"), nil | ||||
| } | ||||
|  | ||||
// filenamesOrDefault substitutes the default ".env" when no filenames were given.
func filenamesOrDefault(filenames []string) []string {
	if len(filenames) > 0 {
		return filenames
	}
	return []string{".env"}
}
|  | ||||
| func loadFile(filename string, overload bool) error { | ||||
| 	envMap, err := readFile(filename) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	currentEnv := map[string]bool{} | ||||
| 	rawEnv := os.Environ() | ||||
| 	for _, rawEnvLine := range rawEnv { | ||||
| 		key := strings.Split(rawEnvLine, "=")[0] | ||||
| 		currentEnv[key] = true | ||||
| 	} | ||||
|  | ||||
| 	for key, value := range envMap { | ||||
| 		if !currentEnv[key] || overload { | ||||
| 			os.Setenv(key, value) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func readFile(filename string) (envMap map[string]string, err error) { | ||||
| 	file, err := os.Open(filename) | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
| 	defer file.Close() | ||||
|  | ||||
| 	return Parse(file) | ||||
| } | ||||
|  | ||||
// parseLine splits one non-ignored line into its key and value.
// envMap holds previously parsed entries and is consulted for $VAR
// expansion inside the value (see parseValue). Both KEY=VALUE and
// yaml-style KEY: VALUE lines are accepted, with an optional leading
// "export" on the key.
func parseLine(line string, envMap map[string]string) (key string, value string, err error) {
	if len(line) == 0 {
		err = errors.New("zero length string")
		return
	}

	// ditch the comments (but keep quoted hashes)
	if strings.Contains(line, "#") {
		segmentsBetweenHashes := strings.Split(line, "#")
		quotesAreOpen := false
		var segmentsToKeep []string
		for _, segment := range segmentsBetweenHashes {
			// A segment containing exactly one single or double quote
			// toggles the "inside quotes" state; hashes that fall inside
			// an open quote are kept as part of the value.
			if strings.Count(segment, "\"") == 1 || strings.Count(segment, "'") == 1 {
				if quotesAreOpen {
					quotesAreOpen = false
					segmentsToKeep = append(segmentsToKeep, segment)
				} else {
					quotesAreOpen = true
				}
			}

			if len(segmentsToKeep) == 0 || quotesAreOpen {
				segmentsToKeep = append(segmentsToKeep, segment)
			}
		}

		line = strings.Join(segmentsToKeep, "#")
	}

	// Prefer "=" as the separator unless a ":" appears before it.
	firstEquals := strings.Index(line, "=")
	firstColon := strings.Index(line, ":")
	splitString := strings.SplitN(line, "=", 2)
	if firstColon != -1 && (firstColon < firstEquals || firstEquals == -1) {
		//this is a yaml-style line
		splitString = strings.SplitN(line, ":", 2)
	}

	if len(splitString) != 2 {
		err = errors.New("Can't separate key from value")
		return
	}

	// Parse the key
	key = splitString[0]
	if strings.HasPrefix(key, "export") {
		key = strings.TrimPrefix(key, "export")
	}
	key = strings.Trim(key, " ")

	// Parse the value
	value = parseValue(splitString[1], envMap)
	return
}
|  | ||||
// parseValue normalizes the raw right-hand side of an assignment: it trims
// spaces, strips one pair of surrounding quotes, applies \n and \r escape
// sequences inside double quotes, and expands $VAR references via
// expandVariables — except inside single quotes, which are literal.
func parseValue(value string, envMap map[string]string) string {

	// trim
	value = strings.Trim(value, " ")

	// check if we've got quoted values or possible escapes
	if len(value) > 1 {
		rs := regexp.MustCompile(`\A'(.*)'\z`)
		singleQuotes := rs.FindStringSubmatch(value)

		rd := regexp.MustCompile(`\A"(.*)"\z`)
		doubleQuotes := rd.FindStringSubmatch(value)

		if singleQuotes != nil || doubleQuotes != nil {
			// pull the quotes off the edges
			value = value[1 : len(value)-1]
		}

		if doubleQuotes != nil {
			// expand newlines: \n and \r become real control characters,
			// any other \X escape is left for the pass below
			escapeRegex := regexp.MustCompile(`\\.`)
			value = escapeRegex.ReplaceAllStringFunc(value, func(match string) string {
				c := strings.TrimPrefix(match, `\`)
				switch c {
				case "n":
					return "\n"
				case "r":
					return "\r"
				default:
					return match
				}
			})
			// unescape characters (\X -> X, except \$ which stays for
			// expandVariables to interpret)
			e := regexp.MustCompile(`\\([^$])`)
			value = e.ReplaceAllString(value, "$1")
		}

		// Single-quoted values are literal; everything else gets $VAR expansion.
		if singleQuotes == nil {
			value = expandVariables(value, envMap)
		}
	}

	return value
}
|  | ||||
// expandVariables substitutes $VAR and ${VAR} references in v with values
// from m. A backslash-escaped reference (\$VAR) is emitted literally minus
// the backslash, and "$(" spans are left alone apart from dropping the
// leading character; unknown or empty names are kept as-is.
func expandVariables(v string, m map[string]string) string {
	pattern := regexp.MustCompile(`(\\)?(\$)(\()?\{?([A-Z0-9_]+)?\}?`)

	return pattern.ReplaceAllStringFunc(v, func(match string) string {
		groups := pattern.FindStringSubmatch(match)
		switch {
		case groups == nil:
			return match
		case groups[1] == `\` || groups[2] == "(":
			// Escaped reference or command substitution: strip one char.
			return groups[0][1:]
		case groups[4] != "":
			return m[groups[4]]
		default:
			return match
		}
	})
}
|  | ||||
// isIgnoredLine reports whether the line is blank or a "#" comment.
func isIgnoredLine(line string) bool {
	trimmed := strings.Trim(line, " \n\t")
	if len(trimmed) == 0 {
		return true
	}
	return strings.HasPrefix(trimmed, "#")
}
|  | ||||
| func doubleQuoteEscape(line string) string { | ||||
| 	for _, c := range doubleQuoteSpecialChars { | ||||
| 		toReplace := "\\" + string(c) | ||||
| 		if c == '\n' { | ||||
| 			toReplace = `\n` | ||||
| 		} | ||||
| 		if c == '\r' { | ||||
| 			toReplace = `\r` | ||||
| 		} | ||||
| 		line = strings.Replace(line, string(c), toReplace, -1) | ||||
| 	} | ||||
| 	return line | ||||
| } | ||||
							
								
								
									
										16
									
								
								vendor/github.com/mattn/go-shellwords/.travis.yml
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										16
									
								
								vendor/github.com/mattn/go-shellwords/.travis.yml
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,16 @@ | ||||
| arch: | ||||
|   - amd64 | ||||
|   - ppc64le | ||||
| language: go | ||||
| sudo: false | ||||
| go: | ||||
|   - tip | ||||
|  | ||||
| before_install: | ||||
|   - go get -t -v ./... | ||||
|  | ||||
| script: | ||||
|   - ./go.test.sh | ||||
|  | ||||
| after_success: | ||||
|   - bash <(curl -s https://codecov.io/bash) | ||||
							
								
								
									
										21
									
								
								vendor/github.com/mattn/go-shellwords/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										21
									
								
								vendor/github.com/mattn/go-shellwords/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,21 @@ | ||||
| The MIT License (MIT) | ||||
|  | ||||
| Copyright (c) 2017 Yasuhiro Matsumoto | ||||
|  | ||||
| Permission is hereby granted, free of charge, to any person obtaining a copy | ||||
| of this software and associated documentation files (the "Software"), to deal | ||||
| in the Software without restriction, including without limitation the rights | ||||
| to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||||
| copies of the Software, and to permit persons to whom the Software is | ||||
| furnished to do so, subject to the following conditions: | ||||
|  | ||||
| The above copyright notice and this permission notice shall be included in all | ||||
| copies or substantial portions of the Software. | ||||
|  | ||||
| THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||
| IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||
| FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||||
| AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||||
| LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||||
| OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
| SOFTWARE. | ||||
							
								
								
									
										55
									
								
								vendor/github.com/mattn/go-shellwords/README.md
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										55
									
								
								vendor/github.com/mattn/go-shellwords/README.md
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,55 @@ | ||||
| # go-shellwords | ||||
|  | ||||
| [](https://codecov.io/gh/mattn/go-shellwords) | ||||
| [](https://travis-ci.org/mattn/go-shellwords) | ||||
| [](https://pkg.go.dev/github.com/mattn/go-shellwords) | ||||
| [](https://github.com/mattn/go-shellwords/actions) | ||||
|  | ||||
| Parse line as shell words. | ||||
|  | ||||
| ## Usage | ||||
|  | ||||
| ```go | ||||
| args, err := shellwords.Parse("./foo --bar=baz") | ||||
| // args should be ["./foo", "--bar=baz"] | ||||
| ``` | ||||
|  | ||||
| ```go | ||||
| envs, args, err := shellwords.ParseWithEnvs("FOO=foo BAR=baz ./foo --bar=baz") | ||||
| // envs should be ["FOO=foo", "BAR=baz"] | ||||
| // args should be ["./foo", "--bar=baz"] | ||||
| ``` | ||||
|  | ||||
| ```go | ||||
| os.Setenv("FOO", "bar") | ||||
| p := shellwords.NewParser() | ||||
| p.ParseEnv = true | ||||
| args, err := p.Parse("./foo $FOO") | ||||
| // args should be ["./foo", "bar"] | ||||
| ``` | ||||
|  | ||||
| ```go | ||||
| p := shellwords.NewParser() | ||||
| p.ParseBacktick = true | ||||
| args, err := p.Parse("./foo `echo $SHELL`") | ||||
| // args should be ["./foo", "/bin/bash"] | ||||
| ``` | ||||
|  | ||||
| ```go | ||||
| shellwords.ParseBacktick = true | ||||
| p := shellwords.NewParser() | ||||
| args, err := p.Parse("./foo `echo $SHELL`") | ||||
| // args should be ["./foo", "/bin/bash"] | ||||
| ``` | ||||
|  | ||||
| # Thanks | ||||
|  | ||||
| This is based on cpan module [Parse::CommandLine](https://metacpan.org/pod/Parse::CommandLine). | ||||
|  | ||||
| # License | ||||
|  | ||||
| under the MIT License: http://mattn.mit-license.org/2017 | ||||
|  | ||||
| # Author | ||||
|  | ||||
| Yasuhiro Matsumoto (a.k.a mattn) | ||||
							
								
								
									
										3
									
								
								vendor/github.com/mattn/go-shellwords/go.mod
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										3
									
								
								vendor/github.com/mattn/go-shellwords/go.mod
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,3 @@ | ||||
| module github.com/mattn/go-shellwords | ||||
|  | ||||
| go 1.13 | ||||
							
								
								
									
										12
									
								
								vendor/github.com/mattn/go-shellwords/go.test.sh
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										12
									
								
								vendor/github.com/mattn/go-shellwords/go.test.sh
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,12 @@ | ||||
#!/usr/bin/env bash

# Run the test suite for every non-vendored package, collecting each
# package's atomic-mode coverage profile into a single coverage.txt
# (consumed by the codecov upload step in CI).
set -e
echo "" > coverage.txt

for d in $(go list ./... | grep -v vendor); do
    go test -coverprofile=profile.out -covermode=atomic "$d"
    if [ -f profile.out ]; then
        # Append this package's profile, then remove the scratch file so a
        # package without tests cannot re-append stale data.
        cat profile.out >> coverage.txt
        rm profile.out
    fi
done
							
								
								
									
										317
									
								
								vendor/github.com/mattn/go-shellwords/shellwords.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										317
									
								
								vendor/github.com/mattn/go-shellwords/shellwords.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,317 @@ | ||||
| package shellwords | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"errors" | ||||
| 	"os" | ||||
| 	"strings" | ||||
| 	"unicode" | ||||
| ) | ||||
|  | ||||
var (
	// ParseEnv is the package-level default for Parser.ParseEnv,
	// copied into each parser created by NewParser.
	ParseEnv bool = false
	// ParseBacktick is the package-level default for Parser.ParseBacktick,
	// copied into each parser created by NewParser.
	ParseBacktick bool = false
)
|  | ||||
// isSpace reports whether r is one of the four whitespace runes that
// separate shell words: space, tab, carriage return, or newline.
func isSpace(r rune) bool {
	return r == ' ' || r == '\t' || r == '\r' || r == '\n'
}
|  | ||||
// replaceEnv expands $NAME and ${NAME} references in s using getenv
// (defaulting to os.Getenv when nil). A backslash makes the following rune
// literal. On a malformed ${...} (missing closing brace, or input ending
// right after a backslash inside the name) the input is returned unchanged.
//
// NOTE(review): p and i below are rune indices but are used to slice the
// byte string s (s[p:i]); presumably names are ASCII-only so the two
// coincide — verify before relying on non-ASCII input.
func replaceEnv(getenv func(string) string, s string) string {
	if getenv == nil {
		getenv = os.Getenv
	}

	var buf bytes.Buffer
	rs := []rune(s)
	for i := 0; i < len(rs); i++ {
		r := rs[i]
		if r == '\\' {
			// Backslash escape: emit the next rune verbatim.
			i++
			if i == len(rs) {
				break
			}
			buf.WriteRune(rs[i])
			continue
		} else if r == '$' {
			i++
			if i == len(rs) {
				// Trailing "$" with nothing after it is kept literally.
				buf.WriteRune(r)
				break
			}
			if rs[i] == 0x7b { // '{' — braced form ${NAME}
				i++
				p := i
				for ; i < len(rs); i++ {
					r = rs[i]
					if r == '\\' {
						i++
						if i == len(rs) {
							return s
						}
						continue
					}
					// Name runs until '}' or any non-identifier rune.
					if r == 0x7d || (!unicode.IsLetter(r) && r != '_' && !unicode.IsDigit(r)) {
						break
					}
				}
				if r != 0x7d { // no closing '}': give up on the whole string
					return s
				}
				if i > p {
					buf.WriteString(getenv(s[p:i]))
				}
			} else {
				// Bare form $NAME: name is letters, digits and underscores.
				p := i
				for ; i < len(rs); i++ {
					r := rs[i]
					if r == '\\' {
						i++
						if i == len(rs) {
							return s
						}
						continue
					}
					if !unicode.IsLetter(r) && r != '_' && !unicode.IsDigit(r) {
						break
					}
				}
				if i > p {
					buf.WriteString(getenv(s[p:i]))
					// Step back so the terminating rune is processed normally.
					i--
				} else {
					buf.WriteString(s[p:])
				}
			}
		} else {
			buf.WriteRune(r)
		}
	}
	return buf.String()
}
|  | ||||
// Parser splits a command line into shell words according to its settings.
type Parser struct {
	ParseEnv      bool   // expand $VAR / ${VAR} in parsed words via replaceEnv
	ParseBacktick bool   // execute `cmd` / $(cmd) spans via the shell (shellRun)
	// Position is set by Parse: the rune index of the first unconsumed
	// terminator (';', '&', '|', '<', '>'), or -1 if the whole line was read.
	Position int
	Dir      string // working directory for command substitution

	// If ParseEnv is true, use this for getenv.
	// If nil, use os.Getenv.
	Getenv func(string) string
}
|  | ||||
| func NewParser() *Parser { | ||||
| 	return &Parser{ | ||||
| 		ParseEnv:      ParseEnv, | ||||
| 		ParseBacktick: ParseBacktick, | ||||
| 		Position:      0, | ||||
| 		Dir:           "", | ||||
| 	} | ||||
| } | ||||
|  | ||||
// argType records what kind of token is currently buffered while parsing.
type argType int

const (
	argNo     argType = iota // no token started yet
	argSingle                // an unquoted (bare) word
	argQuoted                // a quoted word, which may legitimately be empty
)
|  | ||||
// Parse splits line into shell words, honouring backslash escapes and
// single-, double-, backtick- and $()-quoting. With p.ParseBacktick set,
// `cmd` and $(cmd) spans are executed via shellRun and replaced by their
// output; with p.ParseEnv set, finished words pass through replaceEnv (and
// unquoted words are re-parsed after expansion, so one variable can yield
// several words). Parsing stops at the first unquoted ';', '&', '|', '<' or
// '>'; its rune index is recorded in p.Position (-1 if the line was fully
// consumed). Dangling escapes or unterminated quotes are an error.
func (p *Parser) Parse(line string) ([]string, error) {
	args := []string{}
	buf := ""
	var escaped, doubleQuoted, singleQuoted, backQuote, dollarQuote bool
	backtick := ""

	pos := -1
	got := argNo

	i := -1
loop:
	for _, r := range line {
		i++
		// A preceding backslash makes this rune literal.
		if escaped {
			buf += string(r)
			escaped = false
			got = argSingle
			continue
		}

		if r == '\\' {
			if singleQuoted {
				// Inside single quotes a backslash is an ordinary character.
				buf += string(r)
			} else {
				escaped = true
			}
			continue
		}

		if isSpace(r) {
			if singleQuoted || doubleQuoted || backQuote || dollarQuote {
				// Quoted whitespace stays part of the word (and of any
				// pending command-substitution text).
				buf += string(r)
				backtick += string(r)
			} else if got != argNo {
				// Unquoted whitespace terminates the current word.
				if p.ParseEnv {
					if got == argSingle {
						// Expand, then re-split the expanded text with a
						// plain sub-parser (no env, no backticks).
						parser := &Parser{ParseEnv: false, ParseBacktick: false, Position: 0, Dir: p.Dir}
						strs, err := parser.Parse(replaceEnv(p.Getenv, buf))
						if err != nil {
							return nil, err
						}
						args = append(args, strs...)
					} else {
						args = append(args, replaceEnv(p.Getenv, buf))
					}
				} else {
					args = append(args, buf)
				}
				buf = ""
				got = argNo
			}
			continue
		}

		switch r {
		case '`':
			if !singleQuoted && !doubleQuoted && !dollarQuote {
				if p.ParseBacktick {
					if backQuote {
						// Closing backtick: run the collected command and
						// splice its output over the raw span kept in buf.
						out, err := shellRun(backtick, p.Dir)
						if err != nil {
							return nil, err
						}
						buf = buf[:len(buf)-len(backtick)] + out
					}
					backtick = ""
					backQuote = !backQuote
					continue
				}
				backtick = ""
				backQuote = !backQuote
			}
		case ')':
			if !singleQuoted && !doubleQuoted && !backQuote {
				if p.ParseBacktick {
					if dollarQuote {
						// Closing "$(...)": the extra -2 also removes the
						// "$(" prefix that was appended to buf.
						out, err := shellRun(backtick, p.Dir)
						if err != nil {
							return nil, err
						}
						buf = buf[:len(buf)-len(backtick)-2] + out
					}
					backtick = ""
					dollarQuote = !dollarQuote
					continue
				}
				backtick = ""
				dollarQuote = !dollarQuote
			}
		case '(':
			if !singleQuoted && !doubleQuoted && !backQuote {
				if !dollarQuote && strings.HasSuffix(buf, "$") {
					// Start of a "$(" command substitution.
					dollarQuote = true
					buf += "("
					continue
				} else {
					// A bare "(" outside any quoting is rejected.
					return nil, errors.New("invalid command line string")
				}
			}
		case '"':
			if !singleQuoted && !dollarQuote {
				if doubleQuoted {
					// Closing quote: even an empty "" counts as a word.
					got = argQuoted
				}
				doubleQuoted = !doubleQuoted
				continue
			}
		case '\'':
			if !doubleQuoted && !dollarQuote {
				if singleQuoted {
					got = argQuoted
				}
				singleQuoted = !singleQuoted
				continue
			}
		case ';', '&', '|', '<', '>':
			if !(escaped || singleQuoted || doubleQuoted || backQuote || dollarQuote) {
				if r == '>' && len(buf) > 0 {
					// "2>out" style fd redirect: drop the digit word and
					// rewind the reported position by one.
					if c := buf[0]; '0' <= c && c <= '9' {
						i -= 1
						got = argNo
					}
				}
				// Unquoted terminator: stop here and report its position.
				pos = i
				break loop
			}
		}

		got = argSingle
		buf += string(r)
		if backQuote || dollarQuote {
			backtick += string(r)
		}
	}

	// Flush the final word, with the same env handling as in the loop.
	if got != argNo {
		if p.ParseEnv {
			if got == argSingle {
				parser := &Parser{ParseEnv: false, ParseBacktick: false, Position: 0, Dir: p.Dir}
				strs, err := parser.Parse(replaceEnv(p.Getenv, buf))
				if err != nil {
					return nil, err
				}
				args = append(args, strs...)
			} else {
				args = append(args, replaceEnv(p.Getenv, buf))
			}
		} else {
			args = append(args, buf)
		}
	}

	if escaped || singleQuoted || doubleQuoted || backQuote || dollarQuote {
		// Dangling escape or unterminated quote.
		return nil, errors.New("invalid command line string")
	}

	p.Position = pos

	return args, nil
}
|  | ||||
| func (p *Parser) ParseWithEnvs(line string) (envs []string, args []string, err error) { | ||||
| 	_args, err := p.Parse(line) | ||||
| 	if err != nil { | ||||
| 		return nil, nil, err | ||||
| 	} | ||||
| 	envs = []string{} | ||||
| 	args = []string{} | ||||
| 	parsingEnv := true | ||||
| 	for _, arg := range _args { | ||||
| 		if parsingEnv && isEnv(arg) { | ||||
| 			envs = append(envs, arg) | ||||
| 		} else { | ||||
| 			if parsingEnv { | ||||
| 				parsingEnv = false | ||||
| 			} | ||||
| 			args = append(args, arg) | ||||
| 		} | ||||
| 	} | ||||
| 	return envs, args, nil | ||||
| } | ||||
|  | ||||
// isEnv reports whether arg looks like a single KEY=VALUE assignment —
// exactly one "=", so words whose value itself contains "=" do not qualify.
func isEnv(arg string) bool {
	return strings.Count(arg, "=") == 1
}
|  | ||||
| func Parse(line string) ([]string, error) { | ||||
| 	return NewParser().Parse(line) | ||||
| } | ||||
|  | ||||
| func ParseWithEnvs(line string) (envs []string, args []string, err error) { | ||||
| 	return NewParser().ParseWithEnvs(line) | ||||
| } | ||||
							
								
								
									
										29
									
								
								vendor/github.com/mattn/go-shellwords/util_posix.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										29
									
								
								vendor/github.com/mattn/go-shellwords/util_posix.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,29 @@ | ||||
| // +build !windows | ||||
|  | ||||
| package shellwords | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"os" | ||||
| 	"os/exec" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
// shellRun executes line via the user's shell ($SHELL, falling back to
// /bin/sh), optionally in dir, and returns its trimmed stdout. On failure
// the returned error message carries the command's stderr when available.
func shellRun(line, dir string) (string, error) {
	shell := os.Getenv("SHELL")
	if shell == "" {
		shell = "/bin/sh"
	}

	cmd := exec.Command(shell, "-c", line)
	if dir != "" {
		cmd.Dir = dir
	}

	out, err := cmd.Output()
	if err != nil {
		if eerr, ok := err.(*exec.ExitError); ok {
			// Surface the command's stderr in the error message.
			out = eerr.Stderr
		}
		return "", fmt.Errorf("%s: %w", string(out), err)
	}
	return strings.TrimSpace(string(out)), nil
}
							
								
								
									
										29
									
								
								vendor/github.com/mattn/go-shellwords/util_windows.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										29
									
								
								vendor/github.com/mattn/go-shellwords/util_windows.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,29 @@ | ||||
| // +build windows | ||||
|  | ||||
| package shellwords | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"os" | ||||
| 	"os/exec" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
// shellRun executes line via the Windows command interpreter (%COMSPEC%,
// falling back to "cmd"), optionally in dir, and returns its trimmed
// stdout. On failure the returned error message carries the command's
// stderr when available.
func shellRun(line, dir string) (string, error) {
	shell := os.Getenv("COMSPEC")
	if shell == "" {
		shell = "cmd"
	}

	cmd := exec.Command(shell, "/c", line)
	if dir != "" {
		cmd.Dir = dir
	}

	out, err := cmd.Output()
	if err != nil {
		if eerr, ok := err.(*exec.ExitError); ok {
			// Surface the command's stderr in the error message.
			out = eerr.Stderr
		}
		return "", fmt.Errorf("%s: %w", string(out), err)
	}
	return strings.TrimSpace(string(out)), nil
}
							
								
								
									
										8
									
								
								vendor/github.com/mitchellh/mapstructure/.travis.yml
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										8
									
								
								vendor/github.com/mitchellh/mapstructure/.travis.yml
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -1,8 +0,0 @@ | ||||
| language: go | ||||
|  | ||||
| go: | ||||
|   - "1.11.x" | ||||
|   - tip | ||||
|  | ||||
| script: | ||||
|   - go test | ||||
							
								
								
									
										52
									
								
								vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										52
									
								
								vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -1,3 +1,55 @@ | ||||
| ## unreleased | ||||
|  | ||||
| * Fix regression where `*time.Time` value would be set to empty and not be sent  | ||||
|   to decode hooks properly [GH-232] | ||||
|  | ||||
| ## 1.4.0 | ||||
|  | ||||
| * A new decode hook type `DecodeHookFuncValue` has been added that has | ||||
|   access to the full values. [GH-183] | ||||
| * Squash is now supported with embedded fields that are struct pointers [GH-205] | ||||
| * Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206] | ||||
|  | ||||
| ## 1.3.3 | ||||
|  | ||||
| * Decoding maps from maps creates a settable value for decode hooks [GH-203] | ||||
|  | ||||
| ## 1.3.2 | ||||
|  | ||||
| * Decode into interface type with a struct value is supported [GH-187] | ||||
|  | ||||
| ## 1.3.1 | ||||
|  | ||||
| * Squash should only squash embedded structs. [GH-194] | ||||
|  | ||||
| ## 1.3.0 | ||||
|  | ||||
| * Added `",omitempty"` support. This will ignore zero values in the source | ||||
|   structure when encoding. [GH-145] | ||||
|  | ||||
| ## 1.2.3 | ||||
|  | ||||
| * Fix duplicate entries in Keys list with pointer values. [GH-185] | ||||
|  | ||||
| ## 1.2.2 | ||||
|  | ||||
| * Do not add unsettable (unexported) values to the unused metadata key | ||||
|   or "remain" value. [GH-150] | ||||
|  | ||||
| ## 1.2.1 | ||||
|  | ||||
| * Go modules checksum mismatch fix | ||||
|  | ||||
| ## 1.2.0 | ||||
|  | ||||
| * Added support to capture unused values in a field using the `",remain"` value | ||||
|   in the mapstructure tag. There is an example to showcase usage. | ||||
| * Added `DecoderConfig` option to always squash embedded structs | ||||
| * `json.Number` can decode into `uint` types | ||||
| * Empty slices are preserved and not replaced with nil slices | ||||
| * Fix panic that can occur in when decoding a map into a nil slice of structs | ||||
| * Improved package documentation for godoc | ||||
|  | ||||
| ## 1.1.2 | ||||
|  | ||||
| * Fix error when decode hook decodes interface implementation into interface | ||||
|   | ||||
							
								
								
									
										71
									
								
								vendor/github.com/mitchellh/mapstructure/decode_hooks.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										71
									
								
								vendor/github.com/mitchellh/mapstructure/decode_hooks.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -1,6 +1,7 @@ | ||||
| package mapstructure | ||||
|  | ||||
| import ( | ||||
| 	"encoding" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"net" | ||||
| @@ -16,10 +17,11 @@ func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { | ||||
| 	// Create variables here so we can reference them with the reflect pkg | ||||
| 	var f1 DecodeHookFuncType | ||||
| 	var f2 DecodeHookFuncKind | ||||
| 	var f3 DecodeHookFuncValue | ||||
|  | ||||
| 	// Fill in the variables into this interface and the rest is done | ||||
| 	// automatically using the reflect package. | ||||
| 	potential := []interface{}{f1, f2} | ||||
| 	potential := []interface{}{f1, f2, f3} | ||||
|  | ||||
| 	v := reflect.ValueOf(h) | ||||
| 	vt := v.Type() | ||||
| @@ -38,13 +40,15 @@ func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { | ||||
| // that took reflect.Kind instead of reflect.Type. | ||||
| func DecodeHookExec( | ||||
| 	raw DecodeHookFunc, | ||||
| 	from reflect.Type, to reflect.Type, | ||||
| 	data interface{}) (interface{}, error) { | ||||
| 	from reflect.Value, to reflect.Value) (interface{}, error) { | ||||
|  | ||||
| 	switch f := typedDecodeHook(raw).(type) { | ||||
| 	case DecodeHookFuncType: | ||||
| 		return f(from, to, data) | ||||
| 		return f(from.Type(), to.Type(), from.Interface()) | ||||
| 	case DecodeHookFuncKind: | ||||
| 		return f(from.Kind(), to.Kind(), data) | ||||
| 		return f(from.Kind(), to.Kind(), from.Interface()) | ||||
| 	case DecodeHookFuncValue: | ||||
| 		return f(from, to) | ||||
| 	default: | ||||
| 		return nil, errors.New("invalid decode hook signature") | ||||
| 	} | ||||
| @@ -56,22 +60,16 @@ func DecodeHookExec( | ||||
| // The composed funcs are called in order, with the result of the | ||||
| // previous transformation. | ||||
| func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { | ||||
| 	return func( | ||||
| 		f reflect.Type, | ||||
| 		t reflect.Type, | ||||
| 		data interface{}) (interface{}, error) { | ||||
| 	return func(f reflect.Value, t reflect.Value) (interface{}, error) { | ||||
| 		var err error | ||||
| 		var data interface{} | ||||
| 		newFrom := f | ||||
| 		for _, f1 := range fs { | ||||
| 			data, err = DecodeHookExec(f1, f, t, data) | ||||
| 			data, err = DecodeHookExec(f1, newFrom, t) | ||||
| 			if err != nil { | ||||
| 				return nil, err | ||||
| 			} | ||||
|  | ||||
| 			// Modify the from kind to be correct with the new data | ||||
| 			f = nil | ||||
| 			if val := reflect.ValueOf(data); val.IsValid() { | ||||
| 				f = val.Type() | ||||
| 			} | ||||
| 			newFrom = reflect.ValueOf(data) | ||||
| 		} | ||||
|  | ||||
| 		return data, nil | ||||
| @@ -215,3 +213,44 @@ func WeaklyTypedHook( | ||||
|  | ||||
| 	return data, nil | ||||
| } | ||||
|  | ||||
| func RecursiveStructToMapHookFunc() DecodeHookFunc { | ||||
| 	return func(f reflect.Value, t reflect.Value) (interface{}, error) { | ||||
| 		if f.Kind() != reflect.Struct { | ||||
| 			return f.Interface(), nil | ||||
| 		} | ||||
|  | ||||
| 		var i interface{} = struct{}{} | ||||
| 		if t.Type() != reflect.TypeOf(&i).Elem() { | ||||
| 			return f.Interface(), nil | ||||
| 		} | ||||
|  | ||||
| 		m := make(map[string]interface{}) | ||||
| 		t.Set(reflect.ValueOf(m)) | ||||
|  | ||||
| 		return f.Interface(), nil | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // TextUnmarshallerHookFunc returns a DecodeHookFunc that applies | ||||
| // strings to the UnmarshalText function, when the target type | ||||
| // implements the encoding.TextUnmarshaler interface | ||||
| func TextUnmarshallerHookFunc() DecodeHookFuncType { | ||||
| 	return func( | ||||
| 		f reflect.Type, | ||||
| 		t reflect.Type, | ||||
| 		data interface{}) (interface{}, error) { | ||||
| 		if f.Kind() != reflect.String { | ||||
| 			return data, nil | ||||
| 		} | ||||
| 		result := reflect.New(t).Interface() | ||||
| 		unmarshaller, ok := result.(encoding.TextUnmarshaler) | ||||
| 		if !ok { | ||||
| 			return data, nil | ||||
| 		} | ||||
| 		if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		return result, nil | ||||
| 	} | ||||
| } | ||||
|   | ||||
							
								
								
									
										2
									
								
								vendor/github.com/mitchellh/mapstructure/go.mod
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										2
									
								
								vendor/github.com/mitchellh/mapstructure/go.mod
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -1 +1,3 @@ | ||||
| module github.com/mitchellh/mapstructure | ||||
|  | ||||
| go 1.14 | ||||
|   | ||||
							
								
								
									
										461
									
								
								vendor/github.com/mitchellh/mapstructure/mapstructure.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										461
									
								
								vendor/github.com/mitchellh/mapstructure/mapstructure.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -1,10 +1,161 @@ | ||||
| // Package mapstructure exposes functionality to convert an arbitrary | ||||
| // map[string]interface{} into a native Go structure. | ||||
| // Package mapstructure exposes functionality to convert one arbitrary | ||||
| // Go type into another, typically to convert a map[string]interface{} | ||||
| // into a native Go structure. | ||||
| // | ||||
| // The Go structure can be arbitrarily complex, containing slices, | ||||
| // other structs, etc. and the decoder will properly decode nested | ||||
| // maps and so on into the proper structures in the native Go struct. | ||||
| // See the examples to see what the decoder is capable of. | ||||
| // | ||||
| // The simplest function to start with is Decode. | ||||
| // | ||||
| // Field Tags | ||||
| // | ||||
| // When decoding to a struct, mapstructure will use the field name by | ||||
| // default to perform the mapping. For example, if a struct has a field | ||||
| // "Username" then mapstructure will look for a key in the source value | ||||
| // of "username" (case insensitive). | ||||
| // | ||||
| //     type User struct { | ||||
| //         Username string | ||||
| //     } | ||||
| // | ||||
| // You can change the behavior of mapstructure by using struct tags. | ||||
| // The default struct tag that mapstructure looks for is "mapstructure" | ||||
| // but you can customize it using DecoderConfig. | ||||
| // | ||||
| // Renaming Fields | ||||
| // | ||||
| // To rename the key that mapstructure looks for, use the "mapstructure" | ||||
| // tag and set a value directly. For example, to change the "username" example | ||||
| // above to "user": | ||||
| // | ||||
| //     type User struct { | ||||
| //         Username string `mapstructure:"user"` | ||||
| //     } | ||||
| // | ||||
| // Embedded Structs and Squashing | ||||
| // | ||||
| // Embedded structs are treated as if they're another field with that name. | ||||
| // By default, the two structs below are equivalent when decoding with | ||||
| // mapstructure: | ||||
| // | ||||
| //     type Person struct { | ||||
| //         Name string | ||||
| //     } | ||||
| // | ||||
| //     type Friend struct { | ||||
| //         Person | ||||
| //     } | ||||
| // | ||||
| //     type Friend struct { | ||||
| //         Person Person | ||||
| //     } | ||||
| // | ||||
| // This would require an input that looks like below: | ||||
| // | ||||
| //     map[string]interface{}{ | ||||
| //         "person": map[string]interface{}{"name": "alice"}, | ||||
| //     } | ||||
| // | ||||
| // If your "person" value is NOT nested, then you can append ",squash" to | ||||
| // your tag value and mapstructure will treat it as if the embedded struct | ||||
| // were part of the struct directly. Example: | ||||
| // | ||||
| //     type Friend struct { | ||||
| //         Person `mapstructure:",squash"` | ||||
| //     } | ||||
| // | ||||
| // Now the following input would be accepted: | ||||
| // | ||||
| //     map[string]interface{}{ | ||||
| //         "name": "alice", | ||||
| //     } | ||||
| // | ||||
| // When decoding from a struct to a map, the squash tag squashes the struct | ||||
| // fields into a single map. Using the example structs from above: | ||||
| // | ||||
| //     Friend{Person: Person{Name: "alice"}} | ||||
| // | ||||
| // Will be decoded into a map: | ||||
| // | ||||
| //     map[string]interface{}{ | ||||
| //         "name": "alice", | ||||
| //     } | ||||
| // | ||||
| // DecoderConfig has a field that changes the behavior of mapstructure | ||||
| // to always squash embedded structs. | ||||
| // | ||||
| // Remainder Values | ||||
| // | ||||
| // If there are any unmapped keys in the source value, mapstructure by | ||||
| // default will silently ignore them. You can error by setting ErrorUnused | ||||
| // in DecoderConfig. If you're using Metadata you can also maintain a slice | ||||
| // of the unused keys. | ||||
| // | ||||
| // You can also use the ",remain" suffix on your tag to collect all unused | ||||
| // values in a map. The field with this tag MUST be a map type and should | ||||
| // probably be a "map[string]interface{}" or "map[interface{}]interface{}". | ||||
| // See example below: | ||||
| // | ||||
| //     type Friend struct { | ||||
| //         Name  string | ||||
| //         Other map[string]interface{} `mapstructure:",remain"` | ||||
| //     } | ||||
| // | ||||
| // Given the input below, Other would be populated with the other | ||||
| // values that weren't used (everything but "name"): | ||||
| // | ||||
| //     map[string]interface{}{ | ||||
| //         "name":    "bob", | ||||
| //         "address": "123 Maple St.", | ||||
| //     } | ||||
| // | ||||
| // Omit Empty Values | ||||
| // | ||||
| // When decoding from a struct to any other value, you may use the | ||||
| // ",omitempty" suffix on your tag to omit that value if it equates to | ||||
| // the zero value. The zero value of all types is specified in the Go | ||||
| // specification. | ||||
| // | ||||
| // For example, the zero type of a numeric type is zero ("0"). If the struct | ||||
| // field value is zero and a numeric type, the field is empty, and it won't | ||||
| // be encoded into the destination type. | ||||
| // | ||||
| //     type Source { | ||||
| //         Age int `mapstructure:",omitempty"` | ||||
| //     } | ||||
| // | ||||
| // Unexported fields | ||||
| // | ||||
| // Since unexported (private) struct fields cannot be set outside the package | ||||
| // where they are defined, the decoder will simply skip them. | ||||
| // | ||||
| // For this output type definition: | ||||
| // | ||||
| //     type Exported struct { | ||||
| //         private string // this unexported field will be skipped | ||||
| //         Public string | ||||
| //     } | ||||
| // | ||||
| // Using this map as input: | ||||
| // | ||||
| //     map[string]interface{}{ | ||||
| //         "private": "I will be ignored", | ||||
| //         "Public":  "I made it through!", | ||||
| //     } | ||||
| // | ||||
| // The following struct will be decoded: | ||||
| // | ||||
| //     type Exported struct { | ||||
| //         private: "" // field is left with an empty string (zero value) | ||||
| //         Public: "I made it through!" | ||||
| //     } | ||||
| // | ||||
| // Other Configuration | ||||
| // | ||||
| // mapstructure is highly configurable. See the DecoderConfig struct | ||||
| // for other features and options that are supported. | ||||
| package mapstructure | ||||
|  | ||||
| import ( | ||||
| @@ -21,10 +172,11 @@ import ( | ||||
| // data transformations. See "DecodeHook" in the DecoderConfig | ||||
| // struct. | ||||
| // | ||||
| // The type should be DecodeHookFuncType or DecodeHookFuncKind. | ||||
| // Either is accepted. Types are a superset of Kinds (Types can return | ||||
| // Kinds) and are generally a richer thing to use, but Kinds are simpler | ||||
| // if you only need those. | ||||
| // The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or | ||||
| // DecodeHookFuncValue. | ||||
| // Values are a superset of Types (Values can return types), and Types are a | ||||
| // superset of Kinds (Types can return Kinds) and are generally a richer thing | ||||
| // to use, but Kinds are simpler if you only need those. | ||||
| // | ||||
| // The reason DecodeHookFunc is multi-typed is for backwards compatibility: | ||||
| // we started with Kinds and then realized Types were the better solution, | ||||
| @@ -40,15 +192,22 @@ type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface | ||||
| // source and target types. | ||||
| type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) | ||||
|  | ||||
| // DecodeHookFuncRaw is a DecodeHookFunc which has complete access to both the source and target | ||||
| // values. | ||||
| type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error) | ||||
|  | ||||
| // DecoderConfig is the configuration that is used to create a new decoder | ||||
| // and allows customization of various aspects of decoding. | ||||
| type DecoderConfig struct { | ||||
| 	// DecodeHook, if set, will be called before any decoding and any | ||||
| 	// type conversion (if WeaklyTypedInput is on). This lets you modify | ||||
| 	// the values before they're set down onto the resulting struct. | ||||
| 	// the values before they're set down onto the resulting struct. The | ||||
| 	// DecodeHook is called for every map and value in the input. This means | ||||
| 	// that if a struct has embedded fields with squash tags the decode hook | ||||
| 	// is called only once with all of the input data, not once for each | ||||
| 	// embedded struct. | ||||
| 	// | ||||
| 	// If an error is returned, the entire decode will fail with that | ||||
| 	// error. | ||||
| 	// If an error is returned, the entire decode will fail with that error. | ||||
| 	DecodeHook DecodeHookFunc | ||||
|  | ||||
| 	// If ErrorUnused is true, then it is an error for there to exist | ||||
| @@ -80,6 +239,14 @@ type DecoderConfig struct { | ||||
| 	// | ||||
| 	WeaklyTypedInput bool | ||||
|  | ||||
| 	// Squash will squash embedded structs.  A squash tag may also be | ||||
| 	// added to an individual struct field using a tag.  For example: | ||||
| 	// | ||||
| 	//  type Parent struct { | ||||
| 	//      Child `mapstructure:",squash"` | ||||
| 	//  } | ||||
| 	Squash bool | ||||
|  | ||||
| 	// Metadata is the struct that will contain extra metadata about | ||||
| 	// the decoding. If this is nil, then no metadata will be tracked. | ||||
| 	Metadata *Metadata | ||||
| @@ -261,9 +428,7 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e | ||||
| 	if d.config.DecodeHook != nil { | ||||
| 		// We have a DecodeHook, so let's pre-process the input. | ||||
| 		var err error | ||||
| 		input, err = DecodeHookExec( | ||||
| 			d.config.DecodeHook, | ||||
| 			inputVal.Type(), outVal.Type(), input) | ||||
| 		input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal) | ||||
| 		if err != nil { | ||||
| 			return fmt.Errorf("error decoding '%s': %s", name, err) | ||||
| 		} | ||||
| @@ -271,6 +436,7 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e | ||||
|  | ||||
| 	var err error | ||||
| 	outputKind := getKind(outVal) | ||||
| 	addMetaKey := true | ||||
| 	switch outputKind { | ||||
| 	case reflect.Bool: | ||||
| 		err = d.decodeBool(name, input, outVal) | ||||
| @@ -289,7 +455,7 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e | ||||
| 	case reflect.Map: | ||||
| 		err = d.decodeMap(name, input, outVal) | ||||
| 	case reflect.Ptr: | ||||
| 		err = d.decodePtr(name, input, outVal) | ||||
| 		addMetaKey, err = d.decodePtr(name, input, outVal) | ||||
| 	case reflect.Slice: | ||||
| 		err = d.decodeSlice(name, input, outVal) | ||||
| 	case reflect.Array: | ||||
| @@ -303,7 +469,7 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e | ||||
|  | ||||
| 	// If we reached here, then we successfully decoded SOMETHING, so | ||||
| 	// mark the key as used if we're tracking metainput. | ||||
| 	if d.config.Metadata != nil && name != "" { | ||||
| 	if addMetaKey && d.config.Metadata != nil && name != "" { | ||||
| 		d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) | ||||
| 	} | ||||
|  | ||||
| @@ -314,7 +480,34 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e | ||||
| // value to "data" of that type. | ||||
| func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { | ||||
| 	if val.IsValid() && val.Elem().IsValid() { | ||||
| 		return d.decode(name, data, val.Elem()) | ||||
| 		elem := val.Elem() | ||||
|  | ||||
| 		// If we can't address this element, then its not writable. Instead, | ||||
| 		// we make a copy of the value (which is a pointer and therefore | ||||
| 		// writable), decode into that, and replace the whole value. | ||||
| 		copied := false | ||||
| 		if !elem.CanAddr() { | ||||
| 			copied = true | ||||
|  | ||||
| 			// Make *T | ||||
| 			copy := reflect.New(elem.Type()) | ||||
|  | ||||
| 			// *T = elem | ||||
| 			copy.Elem().Set(elem) | ||||
|  | ||||
| 			// Set elem so we decode into it | ||||
| 			elem = copy | ||||
| 		} | ||||
|  | ||||
| 		// Decode. If we have an error then return. We also return right | ||||
| 		// away if we're not a copy because that means we decoded directly. | ||||
| 		if err := d.decode(name, data, elem); err != nil || !copied { | ||||
| 			return err | ||||
| 		} | ||||
|  | ||||
| 		// If we're a copy, we need to set te final result | ||||
| 		val.Set(elem.Elem()) | ||||
| 		return nil | ||||
| 	} | ||||
|  | ||||
| 	dataVal := reflect.ValueOf(data) | ||||
| @@ -386,8 +579,8 @@ func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) | ||||
|  | ||||
| 	if !converted { | ||||
| 		return fmt.Errorf( | ||||
| 			"'%s' expected type '%s', got unconvertible type '%s'", | ||||
| 			name, val.Type(), dataVal.Type()) | ||||
| 			"'%s' expected type '%s', got unconvertible type '%s', value: '%v'", | ||||
| 			name, val.Type(), dataVal.Type(), data) | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| @@ -412,7 +605,12 @@ func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) er | ||||
| 			val.SetInt(0) | ||||
| 		} | ||||
| 	case dataKind == reflect.String && d.config.WeaklyTypedInput: | ||||
| 		i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits()) | ||||
| 		str := dataVal.String() | ||||
| 		if str == "" { | ||||
| 			str = "0" | ||||
| 		} | ||||
|  | ||||
| 		i, err := strconv.ParseInt(str, 0, val.Type().Bits()) | ||||
| 		if err == nil { | ||||
| 			val.SetInt(i) | ||||
| 		} else { | ||||
| @@ -428,8 +626,8 @@ func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) er | ||||
| 		val.SetInt(i) | ||||
| 	default: | ||||
| 		return fmt.Errorf( | ||||
| 			"'%s' expected type '%s', got unconvertible type '%s'", | ||||
| 			name, val.Type(), dataVal.Type()) | ||||
| 			"'%s' expected type '%s', got unconvertible type '%s', value: '%v'", | ||||
| 			name, val.Type(), dataVal.Type(), data) | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| @@ -438,6 +636,7 @@ func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) er | ||||
| func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { | ||||
| 	dataVal := reflect.Indirect(reflect.ValueOf(data)) | ||||
| 	dataKind := getKind(dataVal) | ||||
| 	dataType := dataVal.Type() | ||||
|  | ||||
| 	switch { | ||||
| 	case dataKind == reflect.Int: | ||||
| @@ -463,16 +662,33 @@ func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) e | ||||
| 			val.SetUint(0) | ||||
| 		} | ||||
| 	case dataKind == reflect.String && d.config.WeaklyTypedInput: | ||||
| 		i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits()) | ||||
| 		str := dataVal.String() | ||||
| 		if str == "" { | ||||
| 			str = "0" | ||||
| 		} | ||||
|  | ||||
| 		i, err := strconv.ParseUint(str, 0, val.Type().Bits()) | ||||
| 		if err == nil { | ||||
| 			val.SetUint(i) | ||||
| 		} else { | ||||
| 			return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) | ||||
| 		} | ||||
| 	case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": | ||||
| 		jn := data.(json.Number) | ||||
| 		i, err := jn.Int64() | ||||
| 		if err != nil { | ||||
| 			return fmt.Errorf( | ||||
| 				"error decoding json.Number into %s: %s", name, err) | ||||
| 		} | ||||
| 		if i < 0 && !d.config.WeaklyTypedInput { | ||||
| 			return fmt.Errorf("cannot parse '%s', %d overflows uint", | ||||
| 				name, i) | ||||
| 		} | ||||
| 		val.SetUint(uint64(i)) | ||||
| 	default: | ||||
| 		return fmt.Errorf( | ||||
| 			"'%s' expected type '%s', got unconvertible type '%s'", | ||||
| 			name, val.Type(), dataVal.Type()) | ||||
| 			"'%s' expected type '%s', got unconvertible type '%s', value: '%v'", | ||||
| 			name, val.Type(), dataVal.Type(), data) | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| @@ -502,8 +718,8 @@ func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) e | ||||
| 		} | ||||
| 	default: | ||||
| 		return fmt.Errorf( | ||||
| 			"'%s' expected type '%s', got unconvertible type '%s'", | ||||
| 			name, val.Type(), dataVal.Type()) | ||||
| 			"'%s' expected type '%s', got unconvertible type '%s', value: '%v'", | ||||
| 			name, val.Type(), dataVal.Type(), data) | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| @@ -528,7 +744,12 @@ func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) | ||||
| 			val.SetFloat(0) | ||||
| 		} | ||||
| 	case dataKind == reflect.String && d.config.WeaklyTypedInput: | ||||
| 		f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits()) | ||||
| 		str := dataVal.String() | ||||
| 		if str == "" { | ||||
| 			str = "0" | ||||
| 		} | ||||
|  | ||||
| 		f, err := strconv.ParseFloat(str, val.Type().Bits()) | ||||
| 		if err == nil { | ||||
| 			val.SetFloat(f) | ||||
| 		} else { | ||||
| @@ -544,8 +765,8 @@ func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) | ||||
| 		val.SetFloat(i) | ||||
| 	default: | ||||
| 		return fmt.Errorf( | ||||
| 			"'%s' expected type '%s', got unconvertible type '%s'", | ||||
| 			name, val.Type(), dataVal.Type()) | ||||
| 			"'%s' expected type '%s', got unconvertible type '%s', value: '%v'", | ||||
| 			name, val.Type(), dataVal.Type(), data) | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| @@ -596,7 +817,7 @@ func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val ref | ||||
|  | ||||
| 	for i := 0; i < dataVal.Len(); i++ { | ||||
| 		err := d.decode( | ||||
| 			fmt.Sprintf("%s[%d]", name, i), | ||||
| 			name+"["+strconv.Itoa(i)+"]", | ||||
| 			dataVal.Index(i).Interface(), val) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| @@ -629,7 +850,7 @@ func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val refle | ||||
| 	} | ||||
|  | ||||
| 	for _, k := range dataVal.MapKeys() { | ||||
| 		fieldName := fmt.Sprintf("%s[%s]", name, k) | ||||
| 		fieldName := name + "[" + k.String() + "]" | ||||
|  | ||||
| 		// First decode the key into the proper type | ||||
| 		currentKey := reflect.Indirect(reflect.New(valKeyType)) | ||||
| @@ -678,28 +899,41 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re | ||||
| 		} | ||||
|  | ||||
| 		tagValue := f.Tag.Get(d.config.TagName) | ||||
| 		tagParts := strings.Split(tagValue, ",") | ||||
| 		keyName := f.Name | ||||
|  | ||||
| 		// If Squash is set in the config, we squash the field down. | ||||
| 		squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous | ||||
|  | ||||
| 		// Determine the name of the key in the map | ||||
| 		keyName := f.Name | ||||
| 		if tagParts[0] != "" { | ||||
| 			if tagParts[0] == "-" { | ||||
| 		if index := strings.Index(tagValue, ","); index != -1 { | ||||
| 			if tagValue[:index] == "-" { | ||||
| 				continue | ||||
| 			} | ||||
| 			keyName = tagParts[0] | ||||
| 			// If "omitempty" is specified in the tag, it ignores empty values. | ||||
| 			if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) { | ||||
| 				continue | ||||
| 			} | ||||
|  | ||||
| 			// If "squash" is specified in the tag, we squash the field down. | ||||
| 		squash := false | ||||
| 		for _, tag := range tagParts[1:] { | ||||
| 			if tag == "squash" { | ||||
| 				squash = true | ||||
| 				break | ||||
| 			squash = !squash && strings.Index(tagValue[index+1:], "squash") != -1 | ||||
| 			if squash { | ||||
| 				// When squashing, the embedded type can be a pointer to a struct. | ||||
| 				if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct { | ||||
| 					v = v.Elem() | ||||
| 				} | ||||
| 		} | ||||
| 		if squash && v.Kind() != reflect.Struct { | ||||
|  | ||||
| 				// The final type must be a struct | ||||
| 				if v.Kind() != reflect.Struct { | ||||
| 					return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) | ||||
| 				} | ||||
| 			} | ||||
| 			keyName = tagValue[:index] | ||||
| 		} else if len(tagValue) > 0 { | ||||
| 			if tagValue == "-" { | ||||
| 				continue | ||||
| 			} | ||||
| 			keyName = tagValue | ||||
| 		} | ||||
|  | ||||
| 		switch v.Kind() { | ||||
| 		// this is an embedded struct, so handle it differently | ||||
| @@ -713,11 +947,22 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re | ||||
| 			mType := reflect.MapOf(vKeyType, vElemType) | ||||
| 			vMap := reflect.MakeMap(mType) | ||||
|  | ||||
| 			err := d.decode(keyName, x.Interface(), vMap) | ||||
| 			// Creating a pointer to a map so that other methods can completely | ||||
| 			// overwrite the map if need be (looking at you decodeMapFromMap). The | ||||
| 			// indirection allows the underlying map to be settable (CanSet() == true) | ||||
| 			// where as reflect.MakeMap returns an unsettable map. | ||||
| 			addrVal := reflect.New(vMap.Type()) | ||||
| 			reflect.Indirect(addrVal).Set(vMap) | ||||
|  | ||||
| 			err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal)) | ||||
| 			if err != nil { | ||||
| 				return err | ||||
| 			} | ||||
|  | ||||
| 			// the underlying map may have been completely overwritten so pull | ||||
| 			// it indirectly out of the enclosing value. | ||||
| 			vMap = reflect.Indirect(addrVal) | ||||
|  | ||||
| 			if squash { | ||||
| 				for _, k := range vMap.MapKeys() { | ||||
| 					valMap.SetMapIndex(k, vMap.MapIndex(k)) | ||||
| @@ -738,7 +983,7 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error { | ||||
| func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) (bool, error) { | ||||
| 	// If the input data is nil, then we want to just set the output | ||||
| 	// pointer to be nil as well. | ||||
| 	isNil := data == nil | ||||
| @@ -759,7 +1004,7 @@ func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) er | ||||
| 			val.Set(nilValue) | ||||
| 		} | ||||
|  | ||||
| 		return nil | ||||
| 		return true, nil | ||||
| 	} | ||||
|  | ||||
| 	// Create an element of the concrete (non pointer) type and decode | ||||
| @@ -773,16 +1018,16 @@ func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) er | ||||
| 		} | ||||
|  | ||||
| 		if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { | ||||
| 			return err | ||||
| 			return false, err | ||||
| 		} | ||||
|  | ||||
| 		val.Set(realVal) | ||||
| 	} else { | ||||
| 		if err := d.decode(name, data, reflect.Indirect(val)); err != nil { | ||||
| 			return err | ||||
| 			return false, err | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| 	return false, nil | ||||
| } | ||||
|  | ||||
| func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { | ||||
| @@ -791,8 +1036,8 @@ func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) e | ||||
| 	dataVal := reflect.Indirect(reflect.ValueOf(data)) | ||||
| 	if val.Type() != dataVal.Type() { | ||||
| 		return fmt.Errorf( | ||||
| 			"'%s' expected type '%s', got unconvertible type '%s'", | ||||
| 			name, val.Type(), dataVal.Type()) | ||||
| 			"'%s' expected type '%s', got unconvertible type '%s', value: '%v'", | ||||
| 			name, val.Type(), dataVal.Type(), data) | ||||
| 	} | ||||
| 	val.Set(dataVal) | ||||
| 	return nil | ||||
| @@ -805,8 +1050,8 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) | ||||
| 	valElemType := valType.Elem() | ||||
| 	sliceType := reflect.SliceOf(valElemType) | ||||
|  | ||||
| 	valSlice := val | ||||
| 	if valSlice.IsNil() || d.config.ZeroFields { | ||||
| 	// If we have a non array/slice type then we first attempt to convert. | ||||
| 	if dataValKind != reflect.Array && dataValKind != reflect.Slice { | ||||
| 		if d.config.WeaklyTypedInput { | ||||
| 			switch { | ||||
| 			// Slice and array we use the normal logic | ||||
| @@ -833,18 +1078,17 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		// Check input type | ||||
| 		if dataValKind != reflect.Array && dataValKind != reflect.Slice { | ||||
| 		return fmt.Errorf( | ||||
| 			"'%s': source data must be an array or slice, got %s", name, dataValKind) | ||||
|  | ||||
| 	} | ||||
|  | ||||
| 		// If the input value is empty, then don't allocate since non-nil != nil | ||||
| 		if dataVal.Len() == 0 { | ||||
| 	// If the input value is nil, then don't allocate since empty != nil | ||||
| 	if dataVal.IsNil() { | ||||
| 		return nil | ||||
| 	} | ||||
|  | ||||
| 	valSlice := val | ||||
| 	if valSlice.IsNil() || d.config.ZeroFields { | ||||
| 		// Make a new slice to hold our result, same size as the original data. | ||||
| 		valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) | ||||
| 	} | ||||
| @@ -859,7 +1103,7 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) | ||||
| 		} | ||||
| 		currentField := valSlice.Index(i) | ||||
|  | ||||
| 		fieldName := fmt.Sprintf("%s[%d]", name, i) | ||||
| 		fieldName := name + "[" + strconv.Itoa(i) + "]" | ||||
| 		if err := d.decode(fieldName, currentData, currentField); err != nil { | ||||
| 			errors = appendErrors(errors, err) | ||||
| 		} | ||||
| @@ -926,7 +1170,7 @@ func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) | ||||
| 		currentData := dataVal.Index(i).Interface() | ||||
| 		currentField := valArray.Index(i) | ||||
|  | ||||
| 		fieldName := fmt.Sprintf("%s[%d]", name, i) | ||||
| 		fieldName := name + "[" + strconv.Itoa(i) + "]" | ||||
| 		if err := d.decode(fieldName, currentData, currentField); err != nil { | ||||
| 			errors = appendErrors(errors, err) | ||||
| 		} | ||||
| @@ -962,13 +1206,23 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) | ||||
| 		// Not the most efficient way to do this but we can optimize later if | ||||
| 		// we want to. To convert from struct to struct we go to map first | ||||
| 		// as an intermediary. | ||||
| 		m := make(map[string]interface{}) | ||||
| 		mval := reflect.Indirect(reflect.ValueOf(&m)) | ||||
| 		if err := d.decodeMapFromStruct(name, dataVal, mval, mval); err != nil { | ||||
|  | ||||
| 		// Make a new map to hold our result | ||||
| 		mapType := reflect.TypeOf((map[string]interface{})(nil)) | ||||
| 		mval := reflect.MakeMap(mapType) | ||||
|  | ||||
| 		// Creating a pointer to a map so that other methods can completely | ||||
| 		// overwrite the map if need be (looking at you decodeMapFromMap). The | ||||
| 		// indirection allows the underlying map to be settable (CanSet() == true) | ||||
| 		// where as reflect.MakeMap returns an unsettable map. | ||||
| 		addrVal := reflect.New(mval.Type()) | ||||
|  | ||||
| 		reflect.Indirect(addrVal).Set(mval) | ||||
| 		if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
|  | ||||
| 		result := d.decodeStructFromMap(name, mval, val) | ||||
| 		result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val) | ||||
| 		return result | ||||
|  | ||||
| 	default: | ||||
| @@ -1005,6 +1259,11 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e | ||||
| 		field reflect.StructField | ||||
| 		val   reflect.Value | ||||
| 	} | ||||
|  | ||||
| 	// remainField is set to a valid field set with the "remain" tag if | ||||
| 	// we are keeping track of remaining values. | ||||
| 	var remainField *field | ||||
|  | ||||
| 	fields := []field{} | ||||
| 	for len(structs) > 0 { | ||||
| 		structVal := structs[0] | ||||
| @@ -1014,30 +1273,47 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e | ||||
|  | ||||
| 		for i := 0; i < structType.NumField(); i++ { | ||||
| 			fieldType := structType.Field(i) | ||||
| 			fieldKind := fieldType.Type.Kind() | ||||
| 			fieldVal := structVal.Field(i) | ||||
| 			if fieldVal.Kind() == reflect.Ptr && fieldVal.Elem().Kind() == reflect.Struct { | ||||
| 				// Handle embedded struct pointers as embedded structs. | ||||
| 				fieldVal = fieldVal.Elem() | ||||
| 			} | ||||
|  | ||||
| 			// If "squash" is specified in the tag, we squash the field down. | ||||
| 			squash := false | ||||
| 			squash := d.config.Squash && fieldVal.Kind() == reflect.Struct && fieldType.Anonymous | ||||
| 			remain := false | ||||
|  | ||||
| 			// We always parse the tags cause we're looking for other tags too | ||||
| 			tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") | ||||
| 			for _, tag := range tagParts[1:] { | ||||
| 				if tag == "squash" { | ||||
| 					squash = true | ||||
| 					break | ||||
| 				} | ||||
|  | ||||
| 				if tag == "remain" { | ||||
| 					remain = true | ||||
| 					break | ||||
| 				} | ||||
| 			} | ||||
|  | ||||
| 			if squash { | ||||
| 				if fieldKind != reflect.Struct { | ||||
| 				if fieldVal.Kind() != reflect.Struct { | ||||
| 					errors = appendErrors(errors, | ||||
| 						fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind)) | ||||
| 						fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind())) | ||||
| 				} else { | ||||
| 					structs = append(structs, structVal.FieldByName(fieldType.Name)) | ||||
| 					structs = append(structs, fieldVal) | ||||
| 				} | ||||
| 				continue | ||||
| 			} | ||||
|  | ||||
| 			// Build our field | ||||
| 			if remain { | ||||
| 				remainField = &field{fieldType, fieldVal} | ||||
| 			} else { | ||||
| 				// Normal struct field, store it away | ||||
| 			fields = append(fields, field{fieldType, structVal.Field(i)}) | ||||
| 				fields = append(fields, field{fieldType, fieldVal}) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| @@ -1078,9 +1354,6 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		// Delete the key we're using from the unused map so we stop tracking | ||||
| 		delete(dataValKeysUnused, rawMapKey.Interface()) | ||||
|  | ||||
| 		if !fieldValue.IsValid() { | ||||
| 			// This should never happen | ||||
| 			panic("field is not valid") | ||||
| @@ -1092,10 +1365,13 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		// Delete the key we're using from the unused map so we stop tracking | ||||
| 		delete(dataValKeysUnused, rawMapKey.Interface()) | ||||
|  | ||||
| 		// If the name is empty string, then we're at the root, and we | ||||
| 		// don't dot-join the fields. | ||||
| 		if name != "" { | ||||
| 			fieldName = fmt.Sprintf("%s.%s", name, fieldName) | ||||
| 			fieldName = name + "." + fieldName | ||||
| 		} | ||||
|  | ||||
| 		if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { | ||||
| @@ -1103,6 +1379,25 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	// If we have a "remain"-tagged field and we have unused keys then | ||||
| 	// we put the unused keys directly into the remain field. | ||||
| 	if remainField != nil && len(dataValKeysUnused) > 0 { | ||||
| 		// Build a map of only the unused values | ||||
| 		remain := map[interface{}]interface{}{} | ||||
| 		for key := range dataValKeysUnused { | ||||
| 			remain[key] = dataVal.MapIndex(reflect.ValueOf(key)).Interface() | ||||
| 		} | ||||
|  | ||||
| 		// Decode it as-if we were just decoding this map onto our map. | ||||
| 		if err := d.decodeMap(name, remain, remainField.val); err != nil { | ||||
| 			errors = appendErrors(errors, err) | ||||
| 		} | ||||
|  | ||||
| 		// Set the map to nil so we have none so that the next check will | ||||
| 		// not error (ErrorUnused) | ||||
| 		dataValKeysUnused = nil | ||||
| 	} | ||||
|  | ||||
| 	if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { | ||||
| 		keys := make([]string, 0, len(dataValKeysUnused)) | ||||
| 		for rawKey := range dataValKeysUnused { | ||||
| @@ -1123,7 +1418,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e | ||||
| 		for rawKey := range dataValKeysUnused { | ||||
| 			key := rawKey.(string) | ||||
| 			if name != "" { | ||||
| 				key = fmt.Sprintf("%s.%s", name, key) | ||||
| 				key = name + "." + key | ||||
| 			} | ||||
|  | ||||
| 			d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) | ||||
| @@ -1133,6 +1428,24 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func isEmptyValue(v reflect.Value) bool { | ||||
| 	switch getKind(v) { | ||||
| 	case reflect.Array, reflect.Map, reflect.Slice, reflect.String: | ||||
| 		return v.Len() == 0 | ||||
| 	case reflect.Bool: | ||||
| 		return !v.Bool() | ||||
| 	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: | ||||
| 		return v.Int() == 0 | ||||
| 	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: | ||||
| 		return v.Uint() == 0 | ||||
| 	case reflect.Float32, reflect.Float64: | ||||
| 		return v.Float() == 0 | ||||
| 	case reflect.Interface, reflect.Ptr: | ||||
| 		return v.IsNil() | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| func getKind(val reflect.Value) reflect.Kind { | ||||
| 	kind := val.Kind() | ||||
|  | ||||
|   | ||||
							
								
								
									
										14
									
								
								vendor/github.com/sirupsen/logrus/.travis.yml
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										14
									
								
								vendor/github.com/sirupsen/logrus/.travis.yml
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -4,14 +4,12 @@ git: | ||||
|   depth: 1 | ||||
| env: | ||||
|   - GO111MODULE=on | ||||
| go: [1.13.x, 1.14.x] | ||||
| os: [linux, osx] | ||||
| go: 1.15.x | ||||
| os: linux | ||||
| install: | ||||
|   - ./travis/install.sh | ||||
| script: | ||||
|   - ./travis/cross_build.sh | ||||
|   - ./travis/lint.sh | ||||
|   - export GOMAXPROCS=4 | ||||
|   - export GORACE=halt_on_error=1 | ||||
|   - go test -race -v ./... | ||||
|   - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then go test -race -v -tags appengine ./... ; fi | ||||
|   - cd ci | ||||
|   - go run mage.go -v -w ../ crossBuild | ||||
|   - go run mage.go -v -w ../ lint | ||||
|   - go run mage.go -v -w ../ test | ||||
|   | ||||
							
								
								
									
										36
									
								
								vendor/github.com/sirupsen/logrus/CHANGELOG.md
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										36
									
								
								vendor/github.com/sirupsen/logrus/CHANGELOG.md
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -1,3 +1,39 @@ | ||||
| # 1.8.1 | ||||
| Code quality: | ||||
|   * move magefile in its own subdir/submodule to remove magefile dependency on logrus consumer | ||||
|   * improve timestamp format documentation | ||||
|  | ||||
| Fixes: | ||||
|   * fix race condition on logger hooks | ||||
|  | ||||
|  | ||||
| # 1.8.0 | ||||
|  | ||||
| Correct versioning number replacing v1.7.1. | ||||
|  | ||||
| # 1.7.1 | ||||
|  | ||||
| Beware this release has introduced a new public API and its semver is therefore incorrect. | ||||
|  | ||||
| Code quality: | ||||
|   * use go 1.15 in travis | ||||
|   * use magefile as task runner | ||||
|  | ||||
| Fixes: | ||||
|   * small fixes about new go 1.13 error formatting system | ||||
|   * Fix for long time race condiction with mutating data hooks | ||||
|  | ||||
| Features: | ||||
|   * build support for zos | ||||
|  | ||||
| # 1.7.0 | ||||
| Fixes: | ||||
|   * the dependency toward a windows terminal library has been removed | ||||
|  | ||||
| Features: | ||||
|   * a new buffer pool management API has been added | ||||
|   * a set of `<LogLevel>Fn()` functions have been added | ||||
|  | ||||
| # 1.6.0 | ||||
| Fixes: | ||||
|   * end of line cleanup | ||||
|   | ||||
							
								
								
									
										2
									
								
								vendor/github.com/sirupsen/logrus/README.md
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										2
									
								
								vendor/github.com/sirupsen/logrus/README.md
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -402,7 +402,7 @@ func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { | ||||
|   // source of the official loggers. | ||||
|   serialized, err := json.Marshal(entry.Data) | ||||
|     if err != nil { | ||||
|       return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) | ||||
|       return nil, fmt.Errorf("Failed to marshal fields to JSON, %w", err) | ||||
|     } | ||||
|   return append(serialized, '\n'), nil | ||||
| } | ||||
|   | ||||
							
								
								
									
										75
									
								
								vendor/github.com/sirupsen/logrus/entry.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										75
									
								
								vendor/github.com/sirupsen/logrus/entry.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -78,6 +78,14 @@ func NewEntry(logger *Logger) *Entry { | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (entry *Entry) Dup() *Entry { | ||||
| 	data := make(Fields, len(entry.Data)) | ||||
| 	for k, v := range entry.Data { | ||||
| 		data[k] = v | ||||
| 	} | ||||
| 	return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, Context: entry.Context, err: entry.err} | ||||
| } | ||||
|  | ||||
| // Returns the bytes representation of this entry from the formatter. | ||||
| func (entry *Entry) Bytes() ([]byte, error) { | ||||
| 	return entry.Logger.Formatter.Format(entry) | ||||
| @@ -123,11 +131,9 @@ func (entry *Entry) WithFields(fields Fields) *Entry { | ||||
| 	for k, v := range fields { | ||||
| 		isErrField := false | ||||
| 		if t := reflect.TypeOf(v); t != nil { | ||||
| 			switch t.Kind() { | ||||
| 			case reflect.Func: | ||||
| 			switch { | ||||
| 			case t.Kind() == reflect.Func, t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Func: | ||||
| 				isErrField = true | ||||
| 			case reflect.Ptr: | ||||
| 				isErrField = t.Elem().Kind() == reflect.Func | ||||
| 			} | ||||
| 		} | ||||
| 		if isErrField { | ||||
| @@ -212,68 +218,72 @@ func (entry Entry) HasCaller() (has bool) { | ||||
| 		entry.Caller != nil | ||||
| } | ||||
|  | ||||
| // This function is not declared with a pointer value because otherwise | ||||
| // race conditions will occur when using multiple goroutines | ||||
| func (entry Entry) log(level Level, msg string) { | ||||
| func (entry *Entry) log(level Level, msg string) { | ||||
| 	var buffer *bytes.Buffer | ||||
|  | ||||
| 	// Default to now, but allow users to override if they want. | ||||
| 	// | ||||
| 	// We don't have to worry about polluting future calls to Entry#log() | ||||
| 	// with this assignment because this function is declared with a | ||||
| 	// non-pointer receiver. | ||||
| 	if entry.Time.IsZero() { | ||||
| 		entry.Time = time.Now() | ||||
| 	newEntry := entry.Dup() | ||||
|  | ||||
| 	if newEntry.Time.IsZero() { | ||||
| 		newEntry.Time = time.Now() | ||||
| 	} | ||||
|  | ||||
| 	entry.Level = level | ||||
| 	entry.Message = msg | ||||
| 	entry.Logger.mu.Lock() | ||||
| 	if entry.Logger.ReportCaller { | ||||
| 		entry.Caller = getCaller() | ||||
| 	} | ||||
| 	entry.Logger.mu.Unlock() | ||||
| 	newEntry.Level = level | ||||
| 	newEntry.Message = msg | ||||
|  | ||||
| 	entry.fireHooks() | ||||
| 	newEntry.Logger.mu.Lock() | ||||
| 	reportCaller := newEntry.Logger.ReportCaller | ||||
| 	newEntry.Logger.mu.Unlock() | ||||
|  | ||||
| 	if reportCaller { | ||||
| 		newEntry.Caller = getCaller() | ||||
| 	} | ||||
|  | ||||
| 	newEntry.fireHooks() | ||||
|  | ||||
| 	buffer = getBuffer() | ||||
| 	defer func() { | ||||
| 		entry.Buffer = nil | ||||
| 		newEntry.Buffer = nil | ||||
| 		putBuffer(buffer) | ||||
| 	}() | ||||
| 	buffer.Reset() | ||||
| 	entry.Buffer = buffer | ||||
| 	newEntry.Buffer = buffer | ||||
|  | ||||
| 	entry.write() | ||||
| 	newEntry.write() | ||||
|  | ||||
| 	entry.Buffer = nil | ||||
| 	newEntry.Buffer = nil | ||||
|  | ||||
| 	// To avoid Entry#log() returning a value that only would make sense for | ||||
| 	// panic() to use in Entry#Panic(), we avoid the allocation by checking | ||||
| 	// directly here. | ||||
| 	if level <= PanicLevel { | ||||
| 		panic(&entry) | ||||
| 		panic(newEntry) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (entry *Entry) fireHooks() { | ||||
| 	var tmpHooks LevelHooks | ||||
| 	entry.Logger.mu.Lock() | ||||
| 	defer entry.Logger.mu.Unlock() | ||||
| 	err := entry.Logger.Hooks.Fire(entry.Level, entry) | ||||
| 	tmpHooks = make(LevelHooks, len(entry.Logger.Hooks)) | ||||
| 	for k, v := range entry.Logger.Hooks { | ||||
| 		tmpHooks[k] = v | ||||
| 	} | ||||
| 	entry.Logger.mu.Unlock() | ||||
|  | ||||
| 	err := tmpHooks.Fire(entry.Level, entry) | ||||
| 	if err != nil { | ||||
| 		fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (entry *Entry) write() { | ||||
| 	entry.Logger.mu.Lock() | ||||
| 	defer entry.Logger.mu.Unlock() | ||||
| 	serialized, err := entry.Logger.Formatter.Format(entry) | ||||
| 	if err != nil { | ||||
| 		fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) | ||||
| 		return | ||||
| 	} | ||||
| 	if _, err = entry.Logger.Out.Write(serialized); err != nil { | ||||
| 	entry.Logger.mu.Lock() | ||||
| 	defer entry.Logger.mu.Unlock() | ||||
| 	if _, err := entry.Logger.Out.Write(serialized); err != nil { | ||||
| 		fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) | ||||
| 	} | ||||
| } | ||||
| @@ -319,7 +329,6 @@ func (entry *Entry) Fatal(args ...interface{}) { | ||||
|  | ||||
| func (entry *Entry) Panic(args ...interface{}) { | ||||
| 	entry.Log(PanicLevel, args...) | ||||
| 	panic(fmt.Sprint(args...)) | ||||
| } | ||||
|  | ||||
| // Entry Printf family functions | ||||
|   | ||||
							
								
								
									
										2
									
								
								vendor/github.com/sirupsen/logrus/go.sum
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										2
									
								
								vendor/github.com/sirupsen/logrus/go.sum
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -4,7 +4,5 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb | ||||
| github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= | ||||
| github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= | ||||
| github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= | ||||
| golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc= | ||||
| golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4= | ||||
| golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
|   | ||||
							
								
								
									
										5
									
								
								vendor/github.com/sirupsen/logrus/json_formatter.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										5
									
								
								vendor/github.com/sirupsen/logrus/json_formatter.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -23,6 +23,9 @@ func (f FieldMap) resolve(key fieldKey) string { | ||||
| // JSONFormatter formats logs into parsable json | ||||
| type JSONFormatter struct { | ||||
| 	// TimestampFormat sets the format used for marshaling timestamps. | ||||
| 	// The format to use is the same than for time.Format or time.Parse from the standard | ||||
| 	// library. | ||||
| 	// The standard Library already provides a set of predefined format. | ||||
| 	TimestampFormat string | ||||
|  | ||||
| 	// DisableTimestamp allows disabling automatic timestamps in output | ||||
| @@ -118,7 +121,7 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { | ||||
| 		encoder.SetIndent("", "  ") | ||||
| 	} | ||||
| 	if err := encoder.Encode(data); err != nil { | ||||
| 		return nil, fmt.Errorf("failed to marshal fields to JSON, %v", err) | ||||
| 		return nil, fmt.Errorf("failed to marshal fields to JSON, %w", err) | ||||
| 	} | ||||
|  | ||||
| 	return b.Bytes(), nil | ||||
|   | ||||
							
								
								
									
										2
									
								
								vendor/github.com/sirupsen/logrus/terminal_check_unix.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										2
									
								
								vendor/github.com/sirupsen/logrus/terminal_check_unix.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -1,4 +1,4 @@ | ||||
| // +build linux aix | ||||
| // +build linux aix zos | ||||
| // +build !js | ||||
|  | ||||
| package logrus | ||||
|   | ||||
							
								
								
									
										7
									
								
								vendor/github.com/sirupsen/logrus/text_formatter.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										7
									
								
								vendor/github.com/sirupsen/logrus/text_formatter.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @@ -53,7 +53,10 @@ type TextFormatter struct { | ||||
| 	// the time passed since beginning of execution. | ||||
| 	FullTimestamp bool | ||||
|  | ||||
| 	// TimestampFormat to use for display when a full timestamp is printed | ||||
| 	// TimestampFormat to use for display when a full timestamp is printed. | ||||
| 	// The format to use is the same than for time.Format or time.Parse from the standard | ||||
| 	// library. | ||||
| 	// The standard Library already provides a set of predefined format. | ||||
| 	TimestampFormat string | ||||
|  | ||||
| 	// The fields are sorted by default for a consistent output. For applications | ||||
| @@ -235,6 +238,8 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin | ||||
| 		levelColor = yellow | ||||
| 	case ErrorLevel, FatalLevel, PanicLevel: | ||||
| 		levelColor = red | ||||
| 	case InfoLevel: | ||||
| 		levelColor = blue | ||||
| 	default: | ||||
| 		levelColor = blue | ||||
| 	} | ||||
|   | ||||
							
								
								
									
										29
									
								
								vendor/modules.txt
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										29
									
								
								vendor/modules.txt
									
									
									
									
										vendored
									
									
								
							| @@ -42,6 +42,13 @@ github.com/apparentlymart/go-textseg/v12/textseg | ||||
| github.com/beorn7/perks/quantile | ||||
| # github.com/cespare/xxhash/v2 v2.1.1 | ||||
| github.com/cespare/xxhash/v2 | ||||
| # github.com/compose-spec/compose-go v0.0.0-20210706130854-69459d4976b5 | ||||
| github.com/compose-spec/compose-go/errdefs | ||||
| github.com/compose-spec/compose-go/interpolation | ||||
| github.com/compose-spec/compose-go/loader | ||||
| github.com/compose-spec/compose-go/schema | ||||
| github.com/compose-spec/compose-go/template | ||||
| github.com/compose-spec/compose-go/types | ||||
| # github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68 | ||||
| github.com/containerd/cgroups/stats/v1 | ||||
| # github.com/containerd/console v1.0.1 | ||||
| @@ -75,16 +82,14 @@ github.com/containerd/continuity/sysx | ||||
| github.com/containerd/typeurl | ||||
| # github.com/davecgh/go-spew v1.1.1 | ||||
| github.com/davecgh/go-spew/spew | ||||
| # github.com/distribution/distribution/v3 v3.0.0-20210316161203-a01c71e2477e | ||||
| github.com/distribution/distribution/v3/digestset | ||||
| github.com/distribution/distribution/v3/reference | ||||
| # github.com/docker/cli v20.10.5+incompatible | ||||
| github.com/docker/cli/cli | ||||
| github.com/docker/cli/cli-plugins/manager | ||||
| github.com/docker/cli/cli-plugins/plugin | ||||
| github.com/docker/cli/cli/command | ||||
| github.com/docker/cli/cli/compose/interpolation | ||||
| github.com/docker/cli/cli/compose/loader | ||||
| github.com/docker/cli/cli/compose/schema | ||||
| github.com/docker/cli/cli/compose/template | ||||
| github.com/docker/cli/cli/compose/types | ||||
| github.com/docker/cli/cli/config | ||||
| github.com/docker/cli/cli/config/configfile | ||||
| github.com/docker/cli/cli/config/credentials | ||||
| @@ -195,7 +200,7 @@ github.com/golang/protobuf/ptypes/any | ||||
| github.com/golang/protobuf/ptypes/duration | ||||
| github.com/golang/protobuf/ptypes/struct | ||||
| github.com/golang/protobuf/ptypes/timestamp | ||||
| # github.com/google/go-cmp v0.5.4 | ||||
| # github.com/google/go-cmp v0.5.5 | ||||
| github.com/google/go-cmp/cmp | ||||
| github.com/google/go-cmp/cmp/internal/diff | ||||
| github.com/google/go-cmp/cmp/internal/flags | ||||
| @@ -232,12 +237,14 @@ github.com/hashicorp/hcl/v2/hclparse | ||||
| github.com/hashicorp/hcl/v2/hclsyntax | ||||
| github.com/hashicorp/hcl/v2/hclwrite | ||||
| github.com/hashicorp/hcl/v2/json | ||||
| # github.com/imdario/mergo v0.3.11 | ||||
| # github.com/imdario/mergo v0.3.12 | ||||
| github.com/imdario/mergo | ||||
| # github.com/inconshreveable/mousetrap v1.0.0 | ||||
| github.com/inconshreveable/mousetrap | ||||
| # github.com/jaguilar/vt100 v0.0.0-20150826170717-2703a27b14ea => github.com/tonistiigi/vt100 v0.0.0-20190402012908-ad4c4a574305 | ||||
| github.com/jaguilar/vt100 | ||||
| # github.com/joho/godotenv v1.3.0 | ||||
| github.com/joho/godotenv | ||||
| # github.com/json-iterator/go v1.1.10 | ||||
| github.com/json-iterator/go | ||||
| # github.com/klauspost/compress v1.11.3 | ||||
| @@ -246,13 +253,15 @@ github.com/klauspost/compress/huff0 | ||||
| github.com/klauspost/compress/snappy | ||||
| github.com/klauspost/compress/zstd | ||||
| github.com/klauspost/compress/zstd/internal/xxhash | ||||
| # github.com/mattn/go-shellwords v1.0.12 | ||||
| github.com/mattn/go-shellwords | ||||
| # github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 | ||||
| github.com/matttproud/golang_protobuf_extensions/pbutil | ||||
| # github.com/miekg/pkcs11 v1.0.3 | ||||
| github.com/miekg/pkcs11 | ||||
| # github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 | ||||
| github.com/mitchellh/go-wordwrap | ||||
| # github.com/mitchellh/mapstructure v1.1.2 | ||||
| # github.com/mitchellh/mapstructure v1.4.1 | ||||
| github.com/mitchellh/mapstructure | ||||
| # github.com/moby/buildkit v0.8.2-0.20210401015549-df49b648c8bf | ||||
| github.com/moby/buildkit/api/services/control | ||||
| @@ -337,7 +346,7 @@ github.com/prometheus/procfs/internal/fs | ||||
| github.com/prometheus/procfs/internal/util | ||||
| # github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 | ||||
| github.com/serialx/hashring | ||||
| # github.com/sirupsen/logrus v1.7.0 | ||||
| # github.com/sirupsen/logrus v1.8.1 | ||||
| github.com/sirupsen/logrus | ||||
| # github.com/spf13/cobra v1.1.1 | ||||
| github.com/spf13/cobra | ||||
| @@ -418,7 +427,7 @@ golang.org/x/oauth2/google | ||||
| golang.org/x/oauth2/internal | ||||
| golang.org/x/oauth2/jws | ||||
| golang.org/x/oauth2/jwt | ||||
| # golang.org/x/sync v0.0.0-20201207232520-09787c993a3a | ||||
| # golang.org/x/sync v0.0.0-20210220032951-036812b2e83c | ||||
| golang.org/x/sync/errgroup | ||||
| golang.org/x/sync/semaphore | ||||
| # golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4 | ||||
|   | ||||
		Reference in New Issue
	
	Block a user
	 CrazyMax
					CrazyMax