Bump buildkit to master and fix versions incompatible with go mod 1.13

Bump github.com/gogo/googleapis to v1.3.2
Bump github.com/docker/cli to master

Signed-off-by: Silvin Lubecki <silvin.lubecki@docker.com>
Silvin Lubecki
2020-03-03 16:46:38 +01:00
parent 54549235da
commit bbc902b4d6
1384 changed files with 186012 additions and 165455 deletions

View File

@ -6,11 +6,13 @@ import (
"os"
"os/exec"
"path/filepath"
"sort"
"strings"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/config"
"github.com/spf13/cobra"
"vbom.ml/util/sortorder"
)
// ReexecEnvvar is the name of an envvar which is set to the command
@ -141,6 +143,10 @@ func ListPlugins(dockerCli command.Cli, rootcmd *cobra.Command) ([]Plugin, error
}
}
sort.Slice(plugins, func(i, j int) bool {
return sortorder.NaturalLess(plugins[i].Name, plugins[j].Name)
})
return plugins, nil
}
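
For context, a minimal sketch (not part of the commit) of why the hunk above sorts with sortorder.NaturalLess rather than plain string comparison:

```go
package main

import (
	"fmt"

	"vbom.ml/util/sortorder"
)

func main() {
	// Natural ordering treats digit runs as numbers, so "plugin2" < "plugin10".
	fmt.Println(sortorder.NaturalLess("plugin2", "plugin10")) // true
	// Plain lexicographic comparison disagrees, because '2' sorts after '1'.
	fmt.Println("plugin2" < "plugin10") // false
}
```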

View File

@ -22,9 +22,8 @@ import (
"github.com/docker/cli/cli/streams"
"github.com/docker/cli/cli/trust"
"github.com/docker/cli/cli/version"
"github.com/docker/cli/internal/containerizedengine"
dopts "github.com/docker/cli/opts"
clitypes "github.com/docker/cli/types"
"github.com/docker/docker/api"
"github.com/docker/docker/api/types"
registrytypes "github.com/docker/docker/api/types/registry"
"github.com/docker/docker/client"
@ -60,7 +59,6 @@ type Cli interface {
ManifestStore() manifeststore.Store
RegistryClient(bool) registryclient.RegistryClient
ContentTrustEnabled() bool
NewContainerizedEngineClient(sockPath string) (clitypes.ContainerizedClient, error)
ContextStore() store.Store
CurrentContext() string
StackOrchestrator(flagValue string) (Orchestrator, error)
@ -70,24 +68,23 @@ type Cli interface {
// DockerCli is an instance of the docker command line client.
// Instances of the client can be returned from NewDockerCli.
type DockerCli struct {
configFile *configfile.ConfigFile
in *streams.In
out *streams.Out
err io.Writer
client client.APIClient
serverInfo ServerInfo
clientInfo ClientInfo
contentTrust bool
newContainerizeClient func(string) (clitypes.ContainerizedClient, error)
contextStore store.Store
currentContext string
dockerEndpoint docker.Endpoint
contextStoreConfig store.Config
configFile *configfile.ConfigFile
in *streams.In
out *streams.Out
err io.Writer
client client.APIClient
serverInfo ServerInfo
clientInfo *ClientInfo
contentTrust bool
contextStore store.Store
currentContext string
dockerEndpoint docker.Endpoint
contextStoreConfig store.Config
}
// DefaultVersion returns api.defaultVersion or DOCKER_API_VERSION if specified.
func (cli *DockerCli) DefaultVersion() string {
return cli.clientInfo.DefaultVersion
return cli.ClientInfo().DefaultVersion
}
// Client returns the APIClient
@ -126,9 +123,16 @@ func ShowHelp(err io.Writer) func(*cobra.Command, []string) error {
// ConfigFile returns the ConfigFile
func (cli *DockerCli) ConfigFile() *configfile.ConfigFile {
if cli.configFile == nil {
cli.loadConfigFile()
}
return cli.configFile
}
func (cli *DockerCli) loadConfigFile() {
cli.configFile = cliconfig.LoadDefaultConfigFile(cli.err)
}
// ServerInfo returns the server version details for the host this client is
// connected to
func (cli *DockerCli) ServerInfo() ServerInfo {
@ -137,7 +141,34 @@ func (cli *DockerCli) ServerInfo() ServerInfo {
// ClientInfo returns the client details for the cli
func (cli *DockerCli) ClientInfo() ClientInfo {
return cli.clientInfo
if cli.clientInfo == nil {
_ = cli.loadClientInfo()
}
return *cli.clientInfo
}
func (cli *DockerCli) loadClientInfo() error {
var experimentalValue string
// Environment variable always overrides configuration
if experimentalValue = os.Getenv("DOCKER_CLI_EXPERIMENTAL"); experimentalValue == "" {
experimentalValue = cli.ConfigFile().Experimental
}
hasExperimental, err := isEnabled(experimentalValue)
if err != nil {
return errors.Wrap(err, "Experimental field")
}
var v string
if cli.client != nil {
v = cli.client.ClientVersion()
} else {
v = api.DefaultVersion
}
cli.clientInfo = &ClientInfo{
DefaultVersion: v,
HasExperimental: hasExperimental,
}
return nil
}
// ContentTrustEnabled returns whether content trust has been enabled by an
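
The clientInfo field is now a pointer populated lazily on first access via loadClientInfo above. A reduced sketch of the precedence it implements (resolveExperimental is a hypothetical helper, not part of the commit):

```go
package main

import (
	"fmt"
	"os"
)

// resolveExperimental mirrors loadClientInfo's rule: the
// DOCKER_CLI_EXPERIMENTAL environment variable always overrides the
// Experimental value read from the config file.
func resolveExperimental(configValue string) string {
	if env := os.Getenv("DOCKER_CLI_EXPERIMENTAL"); env != "" {
		return env
	}
	return configValue
}

func main() {
	os.Setenv("DOCKER_CLI_EXPERIMENTAL", "enabled")
	fmt.Println(resolveExperimental("disabled")) // "enabled": env wins
}
```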
@ -207,7 +238,7 @@ func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions, ops ...Initialize
debug.Enable()
}
cli.configFile = cliconfig.LoadDefaultConfigFile(cli.err)
cli.loadConfigFile()
baseContextStore := store.New(cliconfig.ContextStoreDir(), cli.contextStoreConfig)
cli.contextStore = &ContextStoreWithDefault{
@ -239,18 +270,9 @@ func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions, ops ...Initialize
return err
}
}
var experimentalValue string
// Environment variable always overrides configuration
if experimentalValue = os.Getenv("DOCKER_CLI_EXPERIMENTAL"); experimentalValue == "" {
experimentalValue = cli.configFile.Experimental
}
hasExperimental, err := isEnabled(experimentalValue)
err = cli.loadClientInfo()
if err != nil {
return errors.Wrap(err, "Experimental field")
}
cli.clientInfo = ClientInfo{
DefaultVersion: cli.client.ClientVersion(),
HasExperimental: hasExperimental,
return err
}
cli.initializeFromClient()
return nil
@ -381,11 +403,6 @@ func (cli *DockerCli) NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions
return trust.GetNotaryRepository(cli.In(), cli.Out(), UserAgent(), imgRefAndAuth.RepoInfo(), imgRefAndAuth.AuthConfig(), actions...)
}
// NewContainerizedEngineClient returns a containerized engine client
func (cli *DockerCli) NewContainerizedEngineClient(sockPath string) (clitypes.ContainerizedClient, error) {
return cli.newContainerizeClient(sockPath)
}
// ContextStore returns the ContextStore
func (cli *DockerCli) ContextStore() store.Store {
return cli.contextStore
@ -445,13 +462,12 @@ type ClientInfo struct {
}
// NewDockerCli returns a DockerCli instance with all operators applied on it.
// It applies by default the standard streams, the content trust from
// environment and the default containerized client constructor operations.
// It applies by default the standard streams, and the content trust from
// environment.
func NewDockerCli(ops ...DockerCliOption) (*DockerCli, error) {
cli := &DockerCli{}
defaultOps := []DockerCliOption{
WithContentTrustFromEnv(),
WithContainerizedClient(containerizedengine.NewClient),
}
cli.contextStoreConfig = DefaultContextStoreConfig()
ops = append(defaultOps, ops...)

View File

@ -9,7 +9,6 @@ import (
"github.com/docker/cli/cli/context/docker"
"github.com/docker/cli/cli/context/store"
"github.com/docker/cli/cli/streams"
clitypes "github.com/docker/cli/types"
"github.com/docker/docker/pkg/term"
)
@ -83,14 +82,6 @@ func WithContentTrust(enabled bool) DockerCliOption {
}
}
// WithContainerizedClient sets the containerized client constructor on a cli.
func WithContainerizedClient(containerizedFn func(string) (clitypes.ContainerizedClient, error)) DockerCliOption {
return func(cli *DockerCli) error {
cli.newContainerizeClient = containerizedFn
return nil
}
}
// WithContextEndpointType add support for an additional typed endpoint in the context store
// Plugins should use this to store additional endpoints configuration in the context store
func WithContextEndpointType(endpointName string, endpointType store.TypeGetter) DockerCliOption {

View File

@ -78,7 +78,7 @@ func PromptForConfirmation(ins io.Reader, outs io.Writer, message string) bool {
}
message += " [y/N] "
fmt.Fprintf(outs, message)
_, _ = fmt.Fprint(outs, message)
// On Windows, force the use of the regular OS stdin stream.
if runtime.GOOS == "windows" {
@ -130,7 +130,7 @@ func AddPlatformFlag(flags *pflag.FlagSet, target *string) {
// ValidateOutputPath validates the output paths of the `export` and `save` commands.
func ValidateOutputPath(path string) error {
dir := filepath.Dir(path)
dir := filepath.Dir(filepath.Clean(path))
if dir != "" && dir != "." {
if _, err := os.Stat(dir); os.IsNotExist(err) {
return errors.Errorf("invalid output path: directory %q does not exist", dir)
@ -139,6 +139,10 @@ func ValidateOutputPath(path string) error {
// check whether `path` points to a regular file
// (if the path exists and doesn't point to a directory)
if fileInfo, err := os.Stat(path); !os.IsNotExist(err) {
if err != nil {
return err
}
if fileInfo.Mode().IsDir() || fileInfo.Mode().IsRegular() {
return nil
}
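
A plausible reading of the filepath.Clean change above (a sketch, not taken from the commit): without Clean, a trailing path separator makes filepath.Dir return the path itself, so the existence check would run against the wrong directory.

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// filepath.Dir keeps the file itself when the path ends in a separator;
	// cleaning first collapses the trailing "/" so Dir returns the parent.
	fmt.Println(filepath.Dir("archive.tar/"))                  // "archive.tar"
	fmt.Println(filepath.Dir(filepath.Clean("archive.tar/"))) // "."
}
```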

View File

@ -54,7 +54,6 @@ func Interpolate(config map[string]interface{}, opts Options) (map[string]interf
func recursiveInterpolate(value interface{}, path Path, opts Options) (interface{}, error) {
switch value := value.(type) {
case string:
newValue, err := opts.Substitute(value, template.Mapping(opts.LookupValue))
if err != nil || newValue == value {
@ -91,7 +90,6 @@ func recursiveInterpolate(value interface{}, path Path, opts Options) (interface
default:
return value, nil
}
}

View File

@ -19,6 +19,7 @@ var interpolateTypeCastMapping = map[interp.Path]interp.Cast{
servicePath("deploy", "rollback_config", "parallelism"): toInt,
servicePath("deploy", "rollback_config", "max_failure_ratio"): toFloat,
servicePath("deploy", "restart_policy", "max_attempts"): toInt,
servicePath("deploy", "placement", "max_replicas_per_node"): toInt,
servicePath("ports", interp.PathMatchList, "target"): toInt,
servicePath("ports", interp.PathMatchList, "published"): toInt,
servicePath("ulimits", interp.PathMatchAll): toInt,

View File

@ -297,10 +297,13 @@ func Transform(source interface{}, target interface{}, additionalTransformers ..
return decoder.Decode(source)
}
// TransformerFunc defines a function to perform the actual transformation
type TransformerFunc func(interface{}) (interface{}, error)
// Transformer defines a map to type transformer
type Transformer struct {
TypeOf reflect.Type
Func func(interface{}) (interface{}, error)
Func TransformerFunc
}
func createTransformHook(additionalTransformers ...Transformer) mapstructure.DecodeHookFuncType {
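
With the named TransformerFunc type, the transformers below can be declared as typed variables instead of plain functions. A hypothetical custom transformer in the same shape (transformSecondsSketch is not part of the commit; it assumes it lives alongside the TransformerFunc definition):

```go
package loader

import "time"

// transformSecondsSketch converts a bare integer into a time.Duration in
// seconds; any func(interface{}) (interface{}, error) satisfies TransformerFunc.
var transformSecondsSketch TransformerFunc = func(data interface{}) (interface{}, error) {
	if n, ok := data.(int); ok {
		return time.Duration(n) * time.Second, nil
	}
	return data, nil
}
```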
@ -684,7 +687,7 @@ func absPath(workingDir string, filePath string) string {
return filepath.Join(workingDir, filePath)
}
func transformMapStringString(data interface{}) (interface{}, error) {
var transformMapStringString TransformerFunc = func(data interface{}) (interface{}, error) {
switch value := data.(type) {
case map[string]interface{}:
return toMapStringString(value, false), nil
@ -695,7 +698,7 @@ func transformMapStringString(data interface{}) (interface{}, error) {
}
}
func transformExternal(data interface{}) (interface{}, error) {
var transformExternal TransformerFunc = func(data interface{}) (interface{}, error) {
switch value := data.(type) {
case bool:
return map[string]interface{}{"external": value}, nil
@ -706,7 +709,7 @@ func transformExternal(data interface{}) (interface{}, error) {
}
}
func transformServicePort(data interface{}) (interface{}, error) {
var transformServicePort TransformerFunc = func(data interface{}) (interface{}, error) {
switch entries := data.(type) {
case []interface{}:
// We process the list instead of individual items here.
@ -739,7 +742,7 @@ func transformServicePort(data interface{}) (interface{}, error) {
}
}
func transformStringSourceMap(data interface{}) (interface{}, error) {
var transformStringSourceMap TransformerFunc = func(data interface{}) (interface{}, error) {
switch value := data.(type) {
case string:
return map[string]interface{}{"source": value}, nil
@ -750,7 +753,7 @@ func transformStringSourceMap(data interface{}) (interface{}, error) {
}
}
func transformBuildConfig(data interface{}) (interface{}, error) {
var transformBuildConfig TransformerFunc = func(data interface{}) (interface{}, error) {
switch value := data.(type) {
case string:
return map[string]interface{}{"context": value}, nil
@ -761,7 +764,7 @@ func transformBuildConfig(data interface{}) (interface{}, error) {
}
}
func transformServiceVolumeConfig(data interface{}) (interface{}, error) {
var transformServiceVolumeConfig TransformerFunc = func(data interface{}) (interface{}, error) {
switch value := data.(type) {
case string:
return ParseVolume(value)
@ -772,7 +775,7 @@ func transformServiceVolumeConfig(data interface{}) (interface{}, error) {
}
}
func transformServiceNetworkMap(value interface{}) (interface{}, error) {
var transformServiceNetworkMap TransformerFunc = func(value interface{}) (interface{}, error) {
if list, ok := value.([]interface{}); ok {
mapValue := map[interface{}]interface{}{}
for _, name := range list {
@ -783,7 +786,7 @@ func transformServiceNetworkMap(value interface{}) (interface{}, error) {
return value, nil
}
func transformStringOrNumberList(value interface{}) (interface{}, error) {
var transformStringOrNumberList TransformerFunc = func(value interface{}) (interface{}, error) {
list := value.([]interface{})
result := make([]string, len(list))
for i, item := range list {
@ -792,7 +795,7 @@ func transformStringOrNumberList(value interface{}) (interface{}, error) {
return result, nil
}
func transformStringList(data interface{}) (interface{}, error) {
var transformStringList TransformerFunc = func(data interface{}) (interface{}, error) {
switch value := data.(type) {
case string:
return []string{value}, nil
@ -803,13 +806,13 @@ func transformStringList(data interface{}) (interface{}, error) {
}
}
func transformMappingOrListFunc(sep string, allowNil bool) func(interface{}) (interface{}, error) {
func transformMappingOrListFunc(sep string, allowNil bool) TransformerFunc {
return func(data interface{}) (interface{}, error) {
return transformMappingOrList(data, sep, allowNil), nil
}
}
func transformListOrMappingFunc(sep string, allowNil bool) func(interface{}) (interface{}, error) {
func transformListOrMappingFunc(sep string, allowNil bool) TransformerFunc {
return func(data interface{}) (interface{}, error) {
return transformListOrMapping(data, sep, allowNil), nil
}
@ -848,14 +851,14 @@ func transformMappingOrList(mappingOrList interface{}, sep string, allowNil bool
panic(errors.Errorf("expected a map or a list, got %T: %#v", mappingOrList, mappingOrList))
}
func transformShellCommand(value interface{}) (interface{}, error) {
var transformShellCommand TransformerFunc = func(value interface{}) (interface{}, error) {
if str, ok := value.(string); ok {
return shellwords.Parse(str)
}
return value, nil
}
func transformHealthCheckTest(data interface{}) (interface{}, error) {
var transformHealthCheckTest TransformerFunc = func(data interface{}) (interface{}, error) {
switch value := data.(type) {
case string:
return append([]string{"CMD-SHELL"}, value), nil
@ -866,7 +869,7 @@ func transformHealthCheckTest(data interface{}) (interface{}, error) {
}
}
func transformSize(value interface{}) (interface{}, error) {
var transformSize TransformerFunc = func(value interface{}) (interface{}, error) {
switch value := value.(type) {
case int:
return int64(value), nil
@ -876,7 +879,7 @@ func transformSize(value interface{}) (interface{}, error) {
panic(errors.Errorf("invalid type for size %T", value))
}
func transformStringToDuration(value interface{}) (interface{}, error) {
var transformStringToDuration TransformerFunc = func(value interface{}) (interface{}, error) {
switch value := value.(type) {
case string:
d, err := time.ParseDuration(value)

View File

@ -57,9 +57,12 @@ func mergeServices(base, override []types.ServiceConfig) ([]types.ServiceConfig,
reflect.TypeOf([]types.ServicePortConfig{}): mergeSlice(toServicePortConfigsMap, toServicePortConfigsSlice),
reflect.TypeOf([]types.ServiceSecretConfig{}): mergeSlice(toServiceSecretConfigsMap, toServiceSecretConfigsSlice),
reflect.TypeOf([]types.ServiceConfigObjConfig{}): mergeSlice(toServiceConfigObjConfigsMap, toSServiceConfigObjConfigsSlice),
reflect.TypeOf(&types.UlimitsConfig{}): mergeUlimitsConfig,
reflect.TypeOf(&types.ServiceNetworkConfig{}): mergeServiceNetworkConfig,
},
}
for name, overrideService := range overrideServices {
overrideService := overrideService
if baseService, ok := baseServices[name]; ok {
if err := mergo.Merge(&baseService, &overrideService, mergo.WithAppendSlice, mergo.WithOverride, mergo.WithTransformers(specials)); err != nil {
return base, errors.Wrapf(err, "cannot merge service %s", name)
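
The `overrideService := overrideService` line above re-declares the loop variable before its address is taken. A standalone sketch (not part of the commit) of the pre-Go 1.22 gotcha this avoids:

```go
package main

import "fmt"

func main() {
	// Under pre-Go 1.22 loop semantics, all iterations share one variable,
	// so taking its address without shadowing aliases the same storage.
	vals := []int{1, 2, 3}
	var ptrs []*int
	for _, v := range vals {
		v := v // without this line, every pointer would point at the final value
		ptrs = append(ptrs, &v)
	}
	fmt.Println(*ptrs[0], *ptrs[1], *ptrs[2]) // 1 2 3
}
```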
@ -200,6 +203,28 @@ func mergeLoggingConfig(dst, src reflect.Value) error {
return nil
}
//nolint: unparam
func mergeUlimitsConfig(dst, src reflect.Value) error {
if src.Interface() != reflect.Zero(reflect.TypeOf(src.Interface())).Interface() {
dst.Elem().Set(src.Elem())
}
return nil
}
//nolint: unparam
func mergeServiceNetworkConfig(dst, src reflect.Value) error {
if src.Interface() != reflect.Zero(reflect.TypeOf(src.Interface())).Interface() {
dst.Elem().FieldByName("Aliases").Set(src.Elem().FieldByName("Aliases"))
if ipv4 := src.Elem().FieldByName("Ipv4Address").Interface().(string); ipv4 != "" {
dst.Elem().FieldByName("Ipv4Address").SetString(ipv4)
}
if ipv6 := src.Elem().FieldByName("Ipv6Address").Interface().(string); ipv6 != "" {
dst.Elem().FieldByName("Ipv6Address").SetString(ipv6)
}
}
return nil
}
func getLoggingDriver(v reflect.Value) string {
return v.FieldByName("Driver").String()
}
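
Both new merge helpers share the same guard: the override is applied only when src is not the zero value of its type, i.e. when the override file actually set the field. Isolated as a sketch (isZeroSketch is not part of the commit):

```go
package loader

import "reflect"

// isZeroSketch reports whether src holds the zero value of its dynamic type,
// matching the comparison used by mergeUlimitsConfig and
// mergeServiceNetworkConfig above.
func isZeroSketch(src reflect.Value) bool {
	return src.Interface() == reflect.Zero(reflect.TypeOf(src.Interface())).Interface()
}
```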

View File

@ -111,6 +111,9 @@ func isFilePath(source string) bool {
case '.', '/', '~':
return true
}
if len([]rune(source)) == 1 {
return false
}
// windows named pipes
if strings.HasPrefix(source, `\\`) {

View File

@ -6,6 +6,8 @@ import (
"bytes"
"compress/gzip"
"encoding/base64"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
@ -100,7 +102,24 @@ func (f *_escFile) Close() error {
}
func (f *_escFile) Readdir(count int) ([]os.FileInfo, error) {
return nil, nil
if !f.isDir {
return nil, fmt.Errorf(" escFile.Readdir: '%s' is not directory", f.name)
}
fis, ok := _escDirs[f.local]
if !ok {
return nil, fmt.Errorf(" escFile.Readdir: '%s' is directory, but we have no info about content of this dir, local=%s", f.name, f.local)
}
limit := count
if count <= 0 || limit > len(fis) {
limit = len(fis)
}
if len(fis) == 0 && count > 0 {
return nil, io.EOF
}
return fis[0:limit], nil
}
func (f *_escFile) Stat() (os.FileInfo, error) {
@ -191,6 +210,7 @@ func _escFSMustString(useLocal bool, name string) string {
var _escData = map[string]*_escFile{
"/data/config_schema_v3.0.json": {
name: "config_schema_v3.0.json",
local: "data/config_schema_v3.0.json",
size: 11063,
modtime: 1518458244,
@ -226,6 +246,7 @@ xHv6XdkMqA34L74ys3aKw8XE5Pt4DNh+IZaN/DMhad9yDyAlGzbxc2F0fns2HUJ234BlbrgaD1QS+++Y
},
"/data/config_schema_v3.1.json": {
name: "config_schema_v3.1.json",
local: "data/config_schema_v3.1.json",
size: 12209,
modtime: 1518458244,
@ -262,6 +283,7 @@ f55aMuPsI9DxPLh9jLlw/TGcbUX23yn6OwAA//8cyfJJsS8AAA==
},
"/data/config_schema_v3.2.json": {
name: "config_schema_v3.2.json",
local: "data/config_schema_v3.2.json",
size: 13755,
modtime: 1518458244,
@ -300,6 +322,7 @@ ZbmZv+QaLHpS4rzkKyabuw8zkGHuhdMrnbUrtIOnbTqoMzZd83f41N8R/735o4f/lZziOLoU+2E3AJpH
},
"/data/config_schema_v3.3.json": {
name: "config_schema_v3.3.json",
local: "data/config_schema_v3.3.json",
size: 15491,
modtime: 1518458244,
@ -340,6 +363,7 @@ b/Iu7P/nxf8DAAD//7pHo+CDPAAA
},
"/data/config_schema_v3.4.json": {
name: "config_schema_v3.4.json",
local: "data/config_schema_v3.4.json",
size: 15874,
modtime: 1518458244,
@ -381,6 +405,7 @@ PS1sPmQbucDQbzovyv9fFv8LAAD//+uCPa4CPgAA
},
"/data/config_schema_v3.5.json": {
name: "config_schema_v3.5.json",
local: "data/config_schema_v3.5.json",
size: 16802,
modtime: 1518458244,
@ -423,6 +448,7 @@ Pum2n6FuR/KZkNgb9IOAvY0qfF0fuE7P2bsPTT1Xf8bV4ab+/7z5fwAAAP//yoGbgKJBAAA=
},
"/data/config_schema_v3.6.json": {
name: "config_schema_v3.6.json",
local: "data/config_schema_v3.6.json",
size: 17084,
modtime: 1518458244,
@ -466,6 +492,7 @@ oqbZ4ab+/7z5fwAAAP//nm8U9rxCAAA=
},
"/data/config_schema_v3.7.json": {
name: "config_schema_v3.7.json",
local: "data/config_schema_v3.7.json",
size: 17854,
modtime: 1518458244,
@ -509,6 +536,7 @@ bnBpPlHfjORjkTRf1wyAwiYqMXd9/G6313QfoXs6/sbZ66r6e179PwAA//8ZL3SpvkUAAA==
},
"/data/config_schema_v3.8.json": {
name: "config_schema_v3.8.json",
local: "data/config_schema_v3.8.json",
size: 18246,
modtime: 1518458244,
@ -553,56 +581,69 @@ ean7MQBPP+U4w19V/z+t/hsAAP//Fd/bF0ZHAAA=
},
"/data/config_schema_v3.9.json": {
name: "config_schema_v3.9.json",
local: "data/config_schema_v3.9.json",
size: 18246,
size: 18291,
modtime: 1518458244,
compressed: `
H4sIAAAAAAAC/+xcS4/juBG++1cI2r1tPwbIIsDOLcecknMaHoGmyja3KZJbpDztHfi/B3q2RJEibcvd
vUkHCHZaKj7qya+KJf9YJUn6s6Z7KEj6NUn3xqivj4+/aynum6cPEnePOZKtuf/y62Pz7Kf0rhrH8moI
lWLLdlnzJjv87eG3h2p4Q2KOCioiufkdqGmeIfxRMoRq8FN6ANRMinR9t6reKZQK0DDQ6dek2lyS9CTd
g8G02iATu7R+fKpnSJJUAx4YHczQb/Wnx9f5H3uyO3vWwWbr54oYAyj+Pd1b/frbE7n/8x/3//ly/9tD
dr/+5efR60q+CNtm+Ry2TDDDpOjXT3vKU/uvU78wyfOamPDR2lvCNYx5FmC+S3wO8dyTvRPP7foOnsfs
HCQvi6AGO6p3YqZZfhn9aaAIJmyyDdW7WWy1/DIMN1EjxHBH9U4MN8tfx/CqY9q9x/Tby33131M95+x8
zSyD/dVMjGKeS5yumOOXZy9QjyRzUFwe6527ZdYQFCBM2ospSdJNyXhuS10K+Fc1xdPgYZL8sMP7YJ76
/egvv1H07z289O+pFAZeTM3U/NKNCCR9BtwyDrEjCDaW7hEZZ9pkErOcUeMcz8kG+FUzUEL3kG1RFsFZ
tlnDiXZO1EXwSM4NwR1ES1bvi0yzP0dyfUqZMLADTO/6seuTNXYyWdgxbZ+u/rdeOSZMKVEZyfMREwSR
HKsdMQOFdvOXpKVgf5Twz5bEYAn2vDlKtfzEO5SlyhTBygvnZZ9SWRRELOWa5/ARIfnJITHy93aN4at+
tdG2PNwkEVbpCBeBcBMOOJWlyxJpbPw414+SJC1ZHk+8O4e4kPl436IsNoDpaUI8cdLR3+uV642lfUOY
AMwEKSBoxwg5CMMIz7QC6rMZh9Lm1NWaYIR40sgDIUXYMW3w6KRdeWJaXDwbyiMHBSLXWZM4nR/x0xz6
LGrR6JSLuZOsmaY6y6q9pdbATANBur9wvCwIEzG2BMLgUUnWRM8PFxZBHLLe2s4WA4gDQymK7myIQxSD
8S9Karg+Jvfne8v4XR9K1rZnSSxItdluba+XTC1vKMAhDxUSJzzjTDwvb+LwYpBke6nNJaAt3QPhZk/3
QJ9nhg+pRqOlNjFGzgqyCxMJNj51NlJyIGJMpGhwHi05MW0VZ47wYqibLqrKwbRyt6tIffY7SZ0ik44c
2QEwFhlL9ZrxueBBCJIEU+QR6beHJkOe8dH6X5xPobjr5Lef2Edi7OH2qpWC0AqTI2gdsqg2Y8kmwOWV
dkKsY+P+RYnU+QlslOqCVY4gHPZB3ngri4O/ndo5Ixr0dRnpIAodfo20CdfYv8+O9Qz1zhmffwamGuJs
zp0bWYeR9y3TYzXOHsaxoo4QQwdTEs2bJHSvceoVPjSLT3M8W91Rg26TGM5Eqbi0sKuWuAeocsOZ3kN+
zhiURlLJ4xzDWf+Kd4aZJPEipKeQHRiHncWxC8YgkDyTgh8jKLUhGCytaKAlMnPMpDKLY0x3rezV6vtS
2XhD1i3DZz3l/6eeoo+amsuwtTY5E5lUIIK+oY1U2Q4JhUwBMukUxSjA5iU2qcFkGs12gvCQm5lCbS8s
KRgTdvaSs4L5ncZZUAritQaruSHaDDyLCtkzGcJ8ghCRGewJnnF01I659ZxPq0gMNO4XqOe7azeydtKf
Bb3sbay96MftVKUOJnE1jdBZxNHuuPj+a0TokY5q8vVFcbxdKTJ23jrqRyOCccFYM21A0GP8Qhs2uYE5
N++Ky7pqKrLzl2LcuUm0r7Y9EW/CipBUKo9qrmSjP1Juz0WH4fzJqR05Z/LYgglWlEX6Nfniy1jjJXNj
aG/VgGYAvS/2fpf4XJ3sOcM5Wz7Nd4mMOzDObGOxSrVzvRdD0mA/y3wfSKhHg2mysS6jnHVbYQAPboAV
RmgIBpl1P9Rh1yHEAv0xb1EMK0CW5lJ4StCcD3DtbrdBS013HzNnQgNK24KeehPqyi5BM4nBIyDy+h4s
CrwgKM4o0SGAeEWRHyXnG0Kfs9d72SVueRVBwjlwposYdJvmwMnxIstpLrQI4yVCRmjElUirK8GMxMuX
LMhL1i1bkwT8tvFTzMG3Joj6nLHxZeMZ91uG2jRlCKnav8bhf8Gr7lLlxMCnSXyaxLBCV+cGeilzcBYB
luk+VGXsfUVaQCHDnSPXlvwnDSu6ggm+C8iPIgAH9Q4EIKPZyBo8R86U9ka3KNdbdoM9JGdNirlQm1Oz
j5jIc2Woq+JOBcQLZXRUaP3ORC6/nw+zFpC24oSCBc2uFbQ2SJgwZ/cq2GJRCFtAEBRm3XJaM5qpGy1X
kFcIJH+HKyOXtXXAtALsmbCRrKsieYnZXPE1hDNQzWUC0wGTlHKsd4e+/Xr267fKLSmCgX5lV7dlyIbm
7Sd9bqthwRCfHggvI25PLuo38VUdIgafnB9nhXTakS2Q2sX0f0U1ILVUmVTL34CEm4zW4fo7U6RYKjZH
t2SlzlTjI0TdciM8Be4bR93ljtyuN9Oj1ae+lHXXy2odrWKvYyy3/7qqZl9buspvxBhC91GVujMLJm9Q
+JwU+p0hraX6jGhnRLS/uv1/PFttv1sNfhtZU4U/Nb3CQiO+EfkA+l9Crf9zblnlq5wYyGbYeQNbniAP
py23VJ+2vLQtfxArsFqaBtYwvVqbU1B03/VqeJPWb8Mmc/xChy8L9W7KdxFsLdrqZp7zBYPIwy8zaH/u
+4gbweQFmkndOrUKVKu+ddT+gQF/6OnGT35uoOJTHCdXvz/G7UPNTwWsR/KxSJpvlwZRex1VvHD9CIHd
vNT9GICnn3Kc4a+q/59W/w0AAP//CCwovkZHAAA=
vUkHCHZaKj6K9eBXD/nHKknSnzXdQ0HSr0m6N0Z9fXz8XUtx3zx9kLh7zJFszf2XXx+bZz+ld9U4lldD
qBRbtsuaN9nhbw+/PVTDGxJzVFARyc3vQE3zDOGPkiFUg5/SA6BmUqTru1X1TqFUgIaBTr8m1eaSpCfp
Hgym1QaZ2KX141M9Q5KkGvDA6GCGfqs/Pb7O/9iT3dmzDjZbP1fEGEDx7+ne6tffnsj9n/+4/8+X+98e
svv1Lz+PXlfni7Btls9hywQzTIp+/bSnPLX/OvULkzyviQkfrb0lXMOYZwHmu8TnEM892Tvx3K7v4HnM
zkHysghKsKN6J2aa5ZeRnwaKYMIq21C9m8ZWyy/DcOM1Qgx3VO/EcLP8dQyvOqbde0y/vdxX/z3Vc87O
18wy2F/NxMjnuY7T5XP859kfqOckc1BcHuudu8+sIShAmLQ/piRJNyXjuX3qUsC/qimeBg+T5Ift3gfz
1O9Hf/mVon/v4aV/T6Uw8GJqpuaXbo5A0mfALeMQO4Jgo+meI+NMm0xiljNqnOM52QC/agZK6B6yLcoi
OMs2azjRzok6Dx7JuSG4g+iT1fsi0+zP0bk+pUwY2AGmd/3Y9ckaO5ksbJi2TVf/W68cE6aUqIzk+YgJ
gkiO1Y6YgUK7+UvSUrA/SvhnS2KwBHveHKVafuIdylIJnRUyD+loS5wpgpXJhohlURCxlB2fw3SEmCY3
ysg5tGsMX/Wrjbbl4SaJUGGHbwn4prB3qsxClkhjnc25RpckacnyeOLdOcQTBRRlsQFMTxPiiUWP/l6v
XG8s6RvCBGAmSBFWeoQchGGEZ1oB9emMQ2hz4mpVMOJ40sjbI0XYMW3w6KRdeRxgnPMbnkcOCkSusybK
Ov96SHPoQ65FXVku5q69Zprq4qv2lloDMw0E6f7C8bIgTMToEgiDRyVZ4z0/nFsEcch6bTv7GEAcGEpR
dHdDHPwYjH9RUsP1PrkHAy3jd70rWduWJbEg1Wa7tb1WMtW84QEOeahgO+EZZ+J5eRWHF4Mk20ttLkF4
6R4IN3u6B/o8M3xINRottYlRclaQXZhIsPGts5GSAxFjIkWD82jJiWlTPnOEF+PidFFRDqaVu11F6tPf
SZwVGaHkyA6AsTBaqtfw0AUPQpAkGE+PSL89NOH0jI3W/+J8ittdN7/9xL4SYy+3V6kUhFYAHkHrkEa1
4c0ccp4Q61i/f1HUdX60GyW6YEokCId9kDdey+Lgbyd2zogGfV34OvBCh18jdcI19u+zYz1DvXPGB6uB
qYY4m3PnRtZh5H3LWFqNo4exr6g9xNDAlETzJgHdq596hQ/N4tMYzxZ31KDbBIYR8f18WNilVtwDVLnh
TO8hP2cMSiOp5HGG4UyWxRvDTJB4EdJTyA6Mw87i2AVjEEieScGPEZTaEAymVjTQEpk5ZlKZxTGmO7H2
qvV9Xm28Iask8ZlP+f/Jp+ijpuYybK1NzkQmFYigbWgjVbZDQiFTgEw6j2LkYPMSm9BgMo1mO0F4yMxM
obYXphSMCRt7yVnB/EbjTCgF8VqD1dwQbQaeRbnsmQhhPkCIiAz2BM+4OmrD3Hrup1UkBho3F9Tz3bUb
WTvpz4Je9jbWXvTjNqpSB4O4miYydT+tkv81PPRIRjX5+iI/3q4U6Ttv7fWjEcE4YayZNiDoMX6hDZtU
YM6Nu+KirpqK7PypGHdsEm2rbQPFm7AiJJXKI5or2eivlNtz0WE4f3Bqe86ZOLZgghVlkX5Nvvgi1viT
uTG0t3JAM4De53u/S3yubvac4Zwun+ZbSsbtGmf2vFip2rlGjSFpsPllvmkk1NDBNNlYxShn3lYYwIMb
YIURGoJBZtWHOuw6hFigP2YVxbACZGkuhacEzfkA126NG/TfdPWYORUaUNoa9NSrUJd2CapJDB4Bkdd1
sCjwgqA4o0SHAOIVSX6UnG8Ifc5e67JLVHkVQcI5cKaLGHSb5sDJ8SLNaQpahPESISM0oiTSykowI/Hy
JQvyknXL1iQBu23sFHPwrQmivmdsfNlYxv2WoTZNGkKq9q+x+1+w1F2qnBj4VIlPlRhm6OrYQC+lDs4k
wDKtiqqMrVekBRQy3Dlybcp/0rCiK5jgK0B+lANwUO9AADKajbTBc+VMaW9URblesxvsITlrQsyF2pya
fcR4nitdXeV3KiBeKKOjXOt3JnL5/XyYtcBpK04oWNDs2oPWBgkT5uxeBftYFMIWEASFWbOc5oxm8kbL
JeQVAsnfoWTk0rYOmFaAPRM2knVlJC9Rmys+nXA6qrlIYDpgElKO5e6Qt1/OfvlWsSVFMNCv7Oq2DOnQ
vP6kz202LOji0wPhZUT15KJ+E1/WIWLwyfklV0imHdkCoV1M/1dUA1JLlUm1fAUk3GS0DuffmSLFUr45
uiUrdYYaH8HrlhvhSXDf2Osud+V2vZkeqT71qay7/qzW0SL2GsZy+6+zanbZ0pV+I8YQuo/K1J2ZMHmD
xOck0e90aS3Vp0c7w6P91fX/4+lq+5Fr8EPKmir8XeoVGhrxjcgHkP8SYv2fM8sqXuXEQDbDzhvo8gR5
OHW5pfrU5aV1+YNogdXSNNCGaWltTkDRfderYSWt34ZN5vg5D18U6t2UrxBsLdrKZp7zBZ3Iwy8zaH/u
+4gbweQFmkndMrUSVKu+ddT+NQK/6+nGT36boOJTHCel3x/j9qHmdwXWo/OxSJpvlwZeex2VvHD9YoHd
vNT9coCnn3Ic4a+q/59W/w0AAP//UTBYfXNHAAA=
`,
},
"/": {
isDir: true,
local: "",
},
"/data": {
name: "data",
local: `data`,
isDir: true,
local: "data",
},
}
var _escDirs = map[string][]os.FileInfo{
"data": {
_escData["/data/config_schema_v3.0.json"],
_escData["/data/config_schema_v3.1.json"],
_escData["/data/config_schema_v3.2.json"],
_escData["/data/config_schema_v3.3.json"],
_escData["/data/config_schema_v3.4.json"],
_escData["/data/config_schema_v3.5.json"],
_escData["/data/config_schema_v3.6.json"],
_escData["/data/config_schema_v3.7.json"],
_escData["/data/config_schema_v3.8.json"],
_escData["/data/config_schema_v3.9.json"],
},
}

View File

@ -11,6 +11,7 @@ var UnsupportedProperties = []string{
"build",
"cap_add",
"cap_drop",
"cgroupns_mode",
"cgroup_parent",
"devices",
"domainname",
@ -103,7 +104,7 @@ type Config struct {
Volumes map[string]VolumeConfig `yaml:",omitempty" json:"volumes,omitempty"`
Secrets map[string]SecretConfig `yaml:",omitempty" json:"secrets,omitempty"`
Configs map[string]ConfigObjConfig `yaml:",omitempty" json:"configs,omitempty"`
Extras map[string]interface{} `yaml:",inline", json:"-"`
Extras map[string]interface{} `yaml:",inline" json:"-"`
}
// MarshalJSON makes Config implement json.Marshaler
@ -159,6 +160,7 @@ type ServiceConfig struct {
Build BuildConfig `yaml:",omitempty" json:"build,omitempty"`
CapAdd []string `mapstructure:"cap_add" yaml:"cap_add,omitempty" json:"cap_add,omitempty"`
CapDrop []string `mapstructure:"cap_drop" yaml:"cap_drop,omitempty" json:"cap_drop,omitempty"`
CgroupNSMode string `mapstructure:"cgroupns_mode" yaml:"cgroupns_mode,omitempty" json:"cgroupns_mode,omitempty"`
CgroupParent string `mapstructure:"cgroup_parent" yaml:"cgroup_parent,omitempty" json:"cgroup_parent,omitempty"`
Command ShellCommand `yaml:",omitempty" json:"command,omitempty"`
Configs []ServiceConfigObjConfig `yaml:",omitempty" json:"configs,omitempty"`

View File

@ -10,6 +10,7 @@ import (
"github.com/docker/cli/cli/config/configfile"
"github.com/docker/cli/cli/config/credentials"
"github.com/docker/cli/cli/config/types"
"github.com/docker/docker/pkg/homedir"
"github.com/pkg/errors"
)
@ -27,10 +28,7 @@ var (
func init() {
if configDir == "" {
homedir, err := os.UserHomeDir()
if err == nil {
configDir = filepath.Join(homedir, configFileDir)
}
configDir = filepath.Join(homedir.Get(), configFileDir)
}
}
@ -114,7 +112,7 @@ func Load(configDir string) (*configfile.ConfigFile, error) {
}
confFile := filepath.Join(homedir, oldConfigfile)
if _, err := os.Stat(confFile); err != nil {
return configFile, nil //missing file is not an error
return configFile, nil // missing file is not an error
}
file, err := os.Open(confFile)
if err != nil {

View File

@ -123,9 +123,11 @@ func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error {
}
var err error
for addr, ac := range configFile.AuthConfigs {
ac.Username, ac.Password, err = decodeAuth(ac.Auth)
if err != nil {
return err
if ac.Auth != "" {
ac.Username, ac.Password, err = decodeAuth(ac.Auth)
if err != nil {
return err
}
}
ac.Auth = ""
ac.ServerAddress = addr
@ -194,6 +196,9 @@ func (configFile *ConfigFile) Save() error {
os.Remove(temp.Name())
return err
}
// Try copying the current config file (if any) ownership and permissions
copyFilePermissions(configFile.Filename, temp.Name())
return os.Rename(temp.Name(), configFile.Filename)
}
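
The Auth guard above skips decoding for entries that never stored credentials. A sketch of the on-disk format this assumes (encodeAuthSketch is hypothetical): the Auth field is base64(username + ":" + password).

```go
package configfile

import "encoding/base64"

// encodeAuthSketch shows the assumed Auth encoding; an empty Auth string
// therefore means "no stored credentials" rather than a decode error.
func encodeAuthSketch(username, password string) string {
	return base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
}
```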

View File

@ -0,0 +1,35 @@
// +build !windows
package configfile
import (
"os"
"syscall"
)
// copyFilePermissions copies file ownership and permissions from "src" to "dst",
// ignoring any error during the process.
func copyFilePermissions(src, dst string) {
var (
mode os.FileMode = 0600
uid, gid int
)
fi, err := os.Stat(src)
if err != nil {
return
}
if fi.Mode().IsRegular() {
mode = fi.Mode()
}
if err := os.Chmod(dst, mode); err != nil {
return
}
uid = int(fi.Sys().(*syscall.Stat_t).Uid)
gid = int(fi.Sys().(*syscall.Stat_t).Gid)
if uid > 0 && gid > 0 {
_ = os.Chown(dst, uid, gid)
}
}

View File

@ -0,0 +1,5 @@
package configfile
func copyFilePermissions(src, dst string) {
// TODO implement for Windows
}

View File

@ -21,6 +21,13 @@ type EndpointMeta struct {
DefaultNamespace string `json:",omitempty"`
AuthProvider *clientcmdapi.AuthProviderConfig `json:",omitempty"`
Exec *clientcmdapi.ExecConfig `json:",omitempty"`
UsernamePassword *UsernamePassword `json:"usernamePassword,omitempty"`
}
// UsernamePassword contains username/password auth info
type UsernamePassword struct {
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
}
var _ command.EndpointDefaultResolver = &EndpointMeta{}
@ -62,6 +69,10 @@ func (c *Endpoint) KubernetesConfig() clientcmd.ClientConfig {
authInfo.ClientCertificateData = c.TLSData.Cert
authInfo.ClientKeyData = c.TLSData.Key
}
if c.UsernamePassword != nil {
authInfo.Username = c.UsernamePassword.Username
authInfo.Password = c.UsernamePassword.Password
}
authInfo.AuthProvider = c.AuthProvider
authInfo.Exec = c.Exec
cfg.Clusters["cluster"] = cluster

View File

@ -39,6 +39,13 @@ func FromKubeConfig(kubeconfig, kubeContext, namespaceOverride string) (Endpoint
Key: key,
}
}
var usernamePassword *UsernamePassword
if clientcfg.Username != "" || clientcfg.Password != "" {
usernamePassword = &UsernamePassword{
Username: clientcfg.Username,
Password: clientcfg.Password,
}
}
return Endpoint{
EndpointMeta: EndpointMeta{
EndpointMetaBase: context.EndpointMetaBase{
@ -48,6 +55,7 @@ func FromKubeConfig(kubeconfig, kubeContext, namespaceOverride string) (Endpoint
DefaultNamespace: ns,
AuthProvider: clientcfg.AuthProvider,
Exec: clientcfg.ExecProvider,
UsernamePassword: usernamePassword,
},
TLSData: tlsData,
}, nil

View File

@ -11,7 +11,7 @@ import (
"github.com/docker/distribution/manifest/schema2"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/distribution/registry/api/v2"
v2 "github.com/docker/distribution/registry/api/v2"
distclient "github.com/docker/distribution/registry/client"
"github.com/docker/docker/registry"
digest "github.com/opencontainers/go-digest"
@ -103,9 +103,6 @@ func pullManifestSchemaV2ImageConfig(ctx context.Context, dgst digest.Digest, re
}
verifier := dgst.Verifier()
if err != nil {
return nil, err
}
if _, err := verifier.Write(configJSON); err != nil {
return nil, err
}
@ -212,7 +209,6 @@ func (c *client) iterateEndpoints(ctx context.Context, namedRef reference.Named,
confirmedTLSRegistries := make(map[string]bool)
for _, endpoint := range endpoints {
if endpoint.Version == registry.APIVersion1 {
logrus.Debugf("skipping v1 endpoint %s", endpoint.URL)
continue

View File

@ -99,6 +99,7 @@ func ExactArgs(number int) cobra.PositionalArgs {
}
}
//nolint: unparam
func pluralize(word string, number int) string {
if number == 1 {
return word

View File

@ -1,78 +0,0 @@
package containerizedengine
import (
"context"
"io"
"github.com/containerd/containerd"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/remotes/docker"
clitypes "github.com/docker/cli/types"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/jsonmessage"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// NewClient returns a new containerizedengine client
// This client can be used to manage the lifecycle of
// dockerd running as a container on containerd.
func NewClient(sockPath string) (clitypes.ContainerizedClient, error) {
if sockPath == "" {
sockPath = containerdSockPath
}
cclient, err := containerd.New(sockPath)
if err != nil {
return nil, err
}
return &baseClient{
cclient: cclient,
}, nil
}
// Close will close the underlying clients
func (c *baseClient) Close() error {
return c.cclient.Close()
}
func (c *baseClient) pullWithAuth(ctx context.Context, imageName string, out clitypes.OutStream,
authConfig *types.AuthConfig) (containerd.Image, error) {
resolver := docker.NewResolver(docker.ResolverOptions{
Credentials: func(string) (string, string, error) {
return authConfig.Username, authConfig.Password, nil
},
})
ongoing := newJobs(imageName)
pctx, stopProgress := context.WithCancel(ctx)
progress := make(chan struct{})
bufin, bufout := io.Pipe()
go func() {
showProgress(pctx, ongoing, c.cclient.ContentStore(), bufout)
}()
go func() {
jsonmessage.DisplayJSONMessagesToStream(bufin, out, nil)
close(progress)
}()
h := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
if desc.MediaType != images.MediaTypeDockerSchema1Manifest {
ongoing.add(desc)
}
return nil, nil
})
image, err := c.cclient.Pull(ctx, imageName,
containerd.WithResolver(resolver),
containerd.WithImageHandler(h),
containerd.WithPullUnpack)
stopProgress()
if err != nil {
return nil, err
}
<-progress
return image, nil
}

View File

@ -1,215 +0,0 @@
package containerizedengine
import (
"context"
"encoding/json"
"fmt"
"io"
"strings"
"sync"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/remotes"
"github.com/docker/docker/pkg/jsonmessage"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
)
func showProgress(ctx context.Context, ongoing *jobs, cs content.Store, out io.WriteCloser) {
var (
ticker = time.NewTicker(100 * time.Millisecond)
start = time.Now()
enc = json.NewEncoder(out)
statuses = map[string]statusInfo{}
done bool
)
defer ticker.Stop()
outer:
for {
select {
case <-ticker.C:
resolved := "resolved"
if !ongoing.isResolved() {
resolved = "resolving"
}
statuses[ongoing.name] = statusInfo{
Ref: ongoing.name,
Status: resolved,
}
keys := []string{ongoing.name}
activeSeen := map[string]struct{}{}
if !done {
active, err := cs.ListStatuses(ctx, "")
if err != nil {
logrus.Debugf("active check failed: %s", err)
continue
}
// update status of active entries!
for _, active := range active {
statuses[active.Ref] = statusInfo{
Ref: active.Ref,
Status: "downloading",
Offset: active.Offset,
Total: active.Total,
StartedAt: active.StartedAt,
UpdatedAt: active.UpdatedAt,
}
activeSeen[active.Ref] = struct{}{}
}
}
err := updateNonActive(ctx, ongoing, cs, statuses, &keys, activeSeen, &done, start)
if err != nil {
continue outer
}
var ordered []statusInfo
for _, key := range keys {
ordered = append(ordered, statuses[key])
}
for _, si := range ordered {
jm := si.JSONMessage()
err := enc.Encode(jm)
if err != nil {
logrus.Debugf("failed to encode progress message: %s", err)
}
}
if done {
out.Close()
return
}
case <-ctx.Done():
done = true // allow ui to update once more
}
}
}
func updateNonActive(ctx context.Context, ongoing *jobs, cs content.Store, statuses map[string]statusInfo, keys *[]string, activeSeen map[string]struct{}, done *bool, start time.Time) error {
for _, j := range ongoing.jobs() {
key := remotes.MakeRefKey(ctx, j)
*keys = append(*keys, key)
if _, ok := activeSeen[key]; ok {
continue
}
status, ok := statuses[key]
if !*done && (!ok || status.Status == "downloading") {
info, err := cs.Info(ctx, j.Digest)
if err != nil {
if !errdefs.IsNotFound(err) {
logrus.Debugf("failed to get content info: %s", err)
return err
}
statuses[key] = statusInfo{
Ref: key,
Status: "waiting",
}
} else if info.CreatedAt.After(start) {
statuses[key] = statusInfo{
Ref: key,
Status: "done",
Offset: info.Size,
Total: info.Size,
UpdatedAt: info.CreatedAt,
}
} else {
statuses[key] = statusInfo{
Ref: key,
Status: "exists",
}
}
} else if *done {
if ok {
if status.Status != "done" && status.Status != "exists" {
status.Status = "done"
statuses[key] = status
}
} else {
statuses[key] = statusInfo{
Ref: key,
Status: "done",
}
}
}
}
return nil
}
type jobs struct {
name string
added map[digest.Digest]struct{}
descs []ocispec.Descriptor
mu sync.Mutex
resolved bool
}
func newJobs(name string) *jobs {
return &jobs{
name: name,
added: map[digest.Digest]struct{}{},
}
}
func (j *jobs) add(desc ocispec.Descriptor) {
j.mu.Lock()
defer j.mu.Unlock()
j.resolved = true
if _, ok := j.added[desc.Digest]; ok {
return
}
j.descs = append(j.descs, desc)
j.added[desc.Digest] = struct{}{}
}
func (j *jobs) jobs() []ocispec.Descriptor {
j.mu.Lock()
defer j.mu.Unlock()
var descs []ocispec.Descriptor
return append(descs, j.descs...)
}
func (j *jobs) isResolved() bool {
j.mu.Lock()
defer j.mu.Unlock()
return j.resolved
}
// statusInfo holds the status info for an upload or download
type statusInfo struct {
Ref string
Status string
Offset int64
Total int64
StartedAt time.Time
UpdatedAt time.Time
}
func (s statusInfo) JSONMessage() jsonmessage.JSONMessage {
// Shorten the ID to use up less width on the display
id := s.Ref
if strings.Contains(id, ":") {
split := strings.SplitN(id, ":", 2)
id = split[1]
}
id = fmt.Sprintf("%.12s", id)
return jsonmessage.JSONMessage{
ID: id,
Status: s.Status,
Progress: &jsonmessage.JSONProgress{
Current: s.Offset,
Total: s.Total,
},
}
}

View File

@ -1,49 +0,0 @@
package containerizedengine
import (
"context"
"errors"
"github.com/containerd/containerd"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/content"
)
const (
containerdSockPath = "/run/containerd/containerd.sock"
engineNamespace = "com.docker"
)
var (
// ErrEngineAlreadyPresent returned when engine already present and should not be
ErrEngineAlreadyPresent = errors.New("engine already present, use the update command to change versions")
// ErrEngineNotPresent returned when the engine is not present and should be
ErrEngineNotPresent = errors.New("engine not present")
// ErrMalformedConfigFileParam returned if the engine config file parameter is malformed
ErrMalformedConfigFileParam = errors.New("malformed --config-file param on engine")
// ErrEngineConfigLookupFailure returned if unable to lookup existing engine configuration
ErrEngineConfigLookupFailure = errors.New("unable to lookup existing engine configuration")
// ErrEngineShutdownTimeout returned if the engine failed to shutdown in time
ErrEngineShutdownTimeout = errors.New("timeout waiting for engine to exit")
)
type baseClient struct {
cclient containerdClient
}
// containerdClient abstracts the containerd client to aid in testability
type containerdClient interface {
Containers(ctx context.Context, filters ...string) ([]containerd.Container, error)
NewContainer(ctx context.Context, id string, opts ...containerd.NewContainerOpts) (containerd.Container, error)
Pull(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error)
GetImage(ctx context.Context, ref string) (containerd.Image, error)
Close() error
ContentStore() content.Store
ContainerService() containers.Store
Install(context.Context, containerd.Image, ...containerd.InstallOpts) error
Version(ctx context.Context) (containerd.Version, error)
}

View File

@ -1,183 +0,0 @@
package containerizedengine
import (
"context"
"encoding/json"
"fmt"
"strings"
"github.com/containerd/containerd"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/namespaces"
"github.com/docker/cli/internal/versions"
clitypes "github.com/docker/cli/types"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
ver "github.com/hashicorp/go-version"
"github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// ActivateEngine will switch the image from the CE to EE image
func (c *baseClient) ActivateEngine(ctx context.Context, opts clitypes.EngineInitOptions, out clitypes.OutStream,
authConfig *types.AuthConfig) error {
// If the user didn't specify an image, determine the correct enterprise image to use
if opts.EngineImage == "" {
localMetadata, err := versions.GetCurrentRuntimeMetadata(opts.RuntimeMetadataDir)
if err != nil {
return errors.Wrap(err, "unable to determine the installed engine version. Specify which engine image to update with --engine-image")
}
engineImage := localMetadata.EngineImage
if engineImage == clitypes.EnterpriseEngineImage || engineImage == clitypes.CommunityEngineImage {
opts.EngineImage = clitypes.EnterpriseEngineImage
} else {
// Chop off the standard prefix and retain any trailing OS specific image details
// e.g., engine-community-dm -> engine-enterprise-dm
engineImage = strings.TrimPrefix(engineImage, clitypes.EnterpriseEngineImage)
engineImage = strings.TrimPrefix(engineImage, clitypes.CommunityEngineImage)
opts.EngineImage = clitypes.EnterpriseEngineImage + engineImage
}
}
ctx = namespaces.WithNamespace(ctx, engineNamespace)
return c.DoUpdate(ctx, opts, out, authConfig)
}
// DoUpdate performs the underlying engine update
func (c *baseClient) DoUpdate(ctx context.Context, opts clitypes.EngineInitOptions, out clitypes.OutStream,
authConfig *types.AuthConfig) error {
ctx = namespaces.WithNamespace(ctx, engineNamespace)
if opts.EngineVersion == "" {
// TODO - Future enhancement: This could be improved to be
// smart about figuring out the latest patch rev for the
// current engine version and automatically apply it so users
// could stay in sync by simply having a scheduled
// `docker engine update`
return fmt.Errorf("pick the version you want to update to with --version")
}
var localMetadata *clitypes.RuntimeMetadata
if opts.EngineImage == "" {
var err error
localMetadata, err = versions.GetCurrentRuntimeMetadata(opts.RuntimeMetadataDir)
if err != nil {
return errors.Wrap(err, "unable to determine the installed engine version. Specify which engine image to update with --engine-image set to 'engine-community' or 'engine-enterprise'")
}
opts.EngineImage = localMetadata.EngineImage
}
imageName := fmt.Sprintf("%s/%s:%s", opts.RegistryPrefix, opts.EngineImage, opts.EngineVersion)
// Look for desired image
image, err := c.cclient.GetImage(ctx, imageName)
if err != nil {
if errdefs.IsNotFound(err) {
image, err = c.pullWithAuth(ctx, imageName, out, authConfig)
if err != nil {
return errors.Wrapf(err, "unable to pull image %s", imageName)
}
} else {
return errors.Wrapf(err, "unable to check for image %s", imageName)
}
}
// Make sure we're safe to proceed
newMetadata, err := c.PreflightCheck(ctx, image)
if err != nil {
return err
}
if localMetadata != nil {
if localMetadata.Platform != newMetadata.Platform {
fmt.Fprintf(out, "\nNotice: you have switched to \"%s\". Refer to %s for update instructions.\n\n", newMetadata.Platform, getReleaseNotesURL(imageName))
}
}
if err := c.cclient.Install(ctx, image, containerd.WithInstallReplace, containerd.WithInstallPath("/usr")); err != nil {
return err
}
return versions.WriteRuntimeMetadata(opts.RuntimeMetadataDir, newMetadata)
}
// PreflightCheck verifies the specified image is compatible with the local system before proceeding to update/activate
// If things look good, the RuntimeMetadata for the new image is returned and can be written out to the host
func (c *baseClient) PreflightCheck(ctx context.Context, image containerd.Image) (*clitypes.RuntimeMetadata, error) {
var metadata clitypes.RuntimeMetadata
ic, err := image.Config(ctx)
if err != nil {
return nil, err
}
var (
ociimage v1.Image
config v1.ImageConfig
)
switch ic.MediaType {
case v1.MediaTypeImageConfig, images.MediaTypeDockerSchema2Config:
p, err := content.ReadBlob(ctx, image.ContentStore(), ic)
if err != nil {
return nil, err
}
if err := json.Unmarshal(p, &ociimage); err != nil {
return nil, err
}
config = ociimage.Config
default:
return nil, fmt.Errorf("unknown image %s config media type %s", image.Name(), ic.MediaType)
}
metadataString, ok := config.Labels["com.docker."+clitypes.RuntimeMetadataName]
if !ok {
return nil, fmt.Errorf("image %s does not contain runtime metadata label %s", image.Name(), clitypes.RuntimeMetadataName)
}
err = json.Unmarshal([]byte(metadataString), &metadata)
if err != nil {
return nil, errors.Wrapf(err, "malformed runtime metadata file in %s", image.Name())
}
// Current CLI only supports host install runtime
if metadata.Runtime != "host_install" {
return nil, fmt.Errorf("unsupported daemon image: %s\nConsult the release notes at %s for upgrade instructions", metadata.Runtime, getReleaseNotesURL(image.Name()))
}
// Verify local containerd is new enough
localVersion, err := c.cclient.Version(ctx)
if err != nil {
return nil, err
}
if metadata.ContainerdMinVersion != "" {
lv, err := ver.NewVersion(localVersion.Version)
if err != nil {
return nil, err
}
mv, err := ver.NewVersion(metadata.ContainerdMinVersion)
if err != nil {
return nil, err
}
if lv.LessThan(mv) {
return nil, fmt.Errorf("local containerd is too old: %s - this engine version requires %s or newer.\nConsult the release notes at %s for upgrade instructions",
localVersion.Version, metadata.ContainerdMinVersion, getReleaseNotesURL(image.Name()))
}
} // If omitted on metadata, no hard dependency on containerd version beyond 18.09 baseline
// All checks look OK, proceed with update
return &metadata, nil
}
// getReleaseNotesURL returns a release notes url
// If the image name does not contain a version tag, the base release notes URL is returned
func getReleaseNotesURL(imageName string) string {
versionTag := ""
distributionRef, err := reference.ParseNormalizedNamed(imageName)
if err == nil {
taggedRef, ok := distributionRef.(reference.NamedTagged)
if ok {
versionTag = taggedRef.Tag()
}
}
return fmt.Sprintf("%s/%s", clitypes.ReleaseNotePrefix, versionTag)
}

View File

@ -1,127 +0,0 @@
package versions
import (
"context"
"encoding/json"
"io/ioutil"
"os"
"path"
"path/filepath"
"sort"
registryclient "github.com/docker/cli/cli/registry/client"
clitypes "github.com/docker/cli/types"
"github.com/docker/distribution/reference"
ver "github.com/hashicorp/go-version"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
const (
// defaultRuntimeMetadataDir is the location where the metadata file is stored
defaultRuntimeMetadataDir = "/var/lib/docker-engine"
)
// GetEngineVersions reports the versions of the engine that are available
func GetEngineVersions(ctx context.Context, registryClient registryclient.RegistryClient, registryPrefix, imageName, versionString string) (clitypes.AvailableVersions, error) {
if imageName == "" {
var err error
localMetadata, err := GetCurrentRuntimeMetadata("")
if err != nil {
return clitypes.AvailableVersions{}, err
}
imageName = localMetadata.EngineImage
}
imageRef, err := reference.ParseNormalizedNamed(path.Join(registryPrefix, imageName))
if err != nil {
return clitypes.AvailableVersions{}, err
}
tags, err := registryClient.GetTags(ctx, imageRef)
if err != nil {
return clitypes.AvailableVersions{}, err
}
return parseTags(tags, versionString)
}
func parseTags(tags []string, currentVersion string) (clitypes.AvailableVersions, error) {
var ret clitypes.AvailableVersions
currentVer, err := ver.NewVersion(currentVersion)
if err != nil {
return ret, errors.Wrapf(err, "failed to parse existing version %s", currentVersion)
}
downgrades := []clitypes.DockerVersion{}
patches := []clitypes.DockerVersion{}
upgrades := []clitypes.DockerVersion{}
currentSegments := currentVer.Segments()
for _, tag := range tags {
tmp, err := ver.NewVersion(tag)
if err != nil {
logrus.Debugf("Unable to parse %s: %s", tag, err)
continue
}
testVersion := clitypes.DockerVersion{Version: *tmp, Tag: tag}
if testVersion.LessThan(currentVer) {
downgrades = append(downgrades, testVersion)
continue
}
testSegments := testVersion.Segments()
// lib always provides min 3 segments
if testSegments[0] == currentSegments[0] &&
testSegments[1] == currentSegments[1] {
patches = append(patches, testVersion)
} else {
upgrades = append(upgrades, testVersion)
}
}
sort.Slice(downgrades, func(i, j int) bool {
return downgrades[i].Version.LessThan(&downgrades[j].Version)
})
sort.Slice(patches, func(i, j int) bool {
return patches[i].Version.LessThan(&patches[j].Version)
})
sort.Slice(upgrades, func(i, j int) bool {
return upgrades[i].Version.LessThan(&upgrades[j].Version)
})
ret.Downgrades = downgrades
ret.Patches = patches
ret.Upgrades = upgrades
return ret, nil
}
// GetCurrentRuntimeMetadata loads the current daemon runtime metadata information from the local host
func GetCurrentRuntimeMetadata(metadataDir string) (*clitypes.RuntimeMetadata, error) {
if metadataDir == "" {
metadataDir = defaultRuntimeMetadataDir
}
filename := filepath.Join(metadataDir, clitypes.RuntimeMetadataName+".json")
data, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
var res clitypes.RuntimeMetadata
err = json.Unmarshal(data, &res)
if err != nil {
return nil, errors.Wrapf(err, "malformed runtime metadata file %s", filename)
}
return &res, nil
}
// WriteRuntimeMetadata stores the metadata on the local system
func WriteRuntimeMetadata(metadataDir string, metadata *clitypes.RuntimeMetadata) error {
if metadataDir == "" {
metadataDir = defaultRuntimeMetadataDir
}
filename := filepath.Join(metadataDir, clitypes.RuntimeMetadataName+".json")
data, err := json.Marshal(metadata)
if err != nil {
return err
}
os.Remove(filename)
return ioutil.WriteFile(filename, data, 0644)
}

View File

@ -32,7 +32,7 @@ func (o *ConfigOpt) Set(value string) error {
}
// support a simple syntax of --config foo
if len(fields) == 1 {
if len(fields) == 1 && !strings.Contains(fields[0], "=") {
options.File.Name = fields[0]
options.ConfigName = fields[0]
o.values = append(o.values, options)
@ -72,6 +72,9 @@ func (o *ConfigOpt) Set(value string) error {
if options.ConfigName == "" {
return fmt.Errorf("source is required")
}
if options.File.Name == "" {
options.File.Name = options.ConfigName
}
o.values = append(o.values, options)
return nil
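
Hypothetical usage covering both behaviours touched by this hunk; the same change is applied to SecretOpt further down:

```go
package main

import "github.com/docker/cli/opts"

func main() {
	var o opts.ConfigOpt
	_ = o.Set("myconfig")                    // simple syntax: bare name, no "="
	_ = o.Set("source=myconfig,target=/cfg") // CSV syntax with explicit target
	_ = o.Set("source=myconfig")             // File.Name now defaults to the source name
}
```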

View File

@ -24,6 +24,12 @@ var (
DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort)
// DefaultNamedPipe defines the default named pipe used by docker on Windows
DefaultNamedPipe = `//./pipe/docker_engine`
// hostGatewayName defines a special string which users can append to --add-host
// to add an extra entry in /etc/hosts that maps host.docker.internal to the host IP
// TODO Consider moving the HostGatewayName constant defined in docker at
// github.com/docker/docker/daemon/network/constants.go outside of the "daemon"
// package, so that the CLI can consume it.
hostGatewayName = "host-gateway"
)
// ValidateHost validates that the specified string is a valid host and returns it.
@ -160,8 +166,11 @@ func ValidateExtraHost(val string) (string, error) {
if len(arr) != 2 || len(arr[0]) == 0 {
return "", fmt.Errorf("bad format for add-host: %q", val)
}
if _, err := ValidateIPAddress(arr[1]); err != nil {
return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1])
// Skip IPaddr validation for "host-gateway" string
if arr[1] != hostGatewayName {
if _, err := ValidateIPAddress(arr[1]); err != nil {
return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1])
}
}
return val, nil
}
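
A quick sketch of the resulting behaviour, assuming the flag values reach ValidateExtraHost unmodified:

```go
package main

import (
	"fmt"

	"github.com/docker/cli/opts"
)

func main() {
	// The "host-gateway" string now bypasses IP validation...
	fmt.Println(opts.ValidateExtraHost("host.docker.internal:host-gateway"))
	// ...while anything else must still be a valid IP address.
	fmt.Println(opts.ValidateExtraHost("myhost:not-an-ip"))
}
```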

View File

@ -8,9 +8,11 @@ import (
)
const (
networkOptName = "name"
networkOptAlias = "alias"
driverOpt = "driver-opt"
networkOptName = "name"
networkOptAlias = "alias"
networkOptIPv4Address = "ip"
networkOptIPv6Address = "ip6"
driverOpt = "driver-opt"
)
// NetworkAttachmentOpts represents the network options for endpoint creation
@ -19,8 +21,8 @@ type NetworkAttachmentOpts struct {
Aliases []string
DriverOpts map[string]string
Links []string // TODO add support for links in the csv notation of `--network`
IPv4Address string // TODO add support for IPv4-address in the csv notation of `--network`
IPv6Address string // TODO add support for IPv6-address in the csv notation of `--network`
IPv4Address string
IPv6Address string
LinkLocalIPs []string // TODO add support for LinkLocalIPs in the csv notation of `--network` ?
}
@ -60,6 +62,10 @@ func (n *NetworkOpt) Set(value string) error {
netOpt.Target = value
case networkOptAlias:
netOpt.Aliases = append(netOpt.Aliases, value)
case networkOptIPv4Address:
netOpt.IPv4Address = value
case networkOptIPv6Address:
netOpt.IPv6Address = value
case driverOpt:
key, value, err = parseDriverOpt(value)
if err == nil {
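
A hypothetical --network value exercising the new "ip" and "ip6" keys parsed above:

```go
package main

import "github.com/docker/cli/opts"

func main() {
	var n opts.NetworkOpt
	// CSV form now understood by Set; the error is ignored for brevity.
	_ = n.Set("name=mynet,alias=web,ip=172.20.0.5,ip6=2001:db8::5")
}
```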

View File

@ -160,7 +160,7 @@ func ConvertPortToPortConfig(
for i := startHostPort; i <= endHostPort; i++ {
ports = append(ports, swarm.PortConfig{
//TODO Name: ?
// TODO Name: ?
Protocol: swarm.PortConfigProtocol(strings.ToLower(port.Proto())),
TargetPort: uint32(port.Int()),
PublishedPort: uint32(i),

View File

@ -32,7 +32,7 @@ func (o *SecretOpt) Set(value string) error {
}
// support a simple syntax of --secret foo
if len(fields) == 1 {
if len(fields) == 1 && !strings.Contains(fields[0], "=") {
options.File.Name = fields[0]
options.SecretName = fields[0]
o.values = append(o.values, options)
@ -72,6 +72,9 @@ func (o *SecretOpt) Set(value string) error {
if options.SecretName == "" {
return fmt.Errorf("source is required")
}
if options.File.Name == "" {
options.File.Name = options.SecretName
}
o.values = append(o.values, options)
return nil

View File

@ -48,9 +48,6 @@ func ValidateThrottleIOpsDevice(val string) (*blkiodev.ThrottleDevice, error) {
if err != nil {
return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>. Number must be a positive integer", val)
}
if rate < 0 {
return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>. Number must be a positive integer", val)
}
return &blkiodev.ThrottleDevice{Path: split[0], Rate: rate}, nil
}

View File

@ -1,88 +0,0 @@
package types
import (
"context"
"io"
"github.com/docker/docker/api/types"
ver "github.com/hashicorp/go-version"
)
const (
// CommunityEngineImage is the repo name for the community engine
CommunityEngineImage = "engine-community"
// EnterpriseEngineImage is the repo name for the enterprise engine
EnterpriseEngineImage = "engine-enterprise"
// RegistryPrefix is the default prefix used to pull engine images
RegistryPrefix = "docker.io/store/docker"
// ReleaseNotePrefix is where to point users to for release notes
ReleaseNotePrefix = "https://docs.docker.com/releasenotes"
// RuntimeMetadataName is the name of the runtime metadata file
// When stored as a label on the container it is prefixed by "com.docker."
RuntimeMetadataName = "distribution_based_engine"
)
// ContainerizedClient can be used to manage the lifecycle of
// dockerd running as a container on containerd.
type ContainerizedClient interface {
Close() error
ActivateEngine(ctx context.Context,
opts EngineInitOptions,
out OutStream,
authConfig *types.AuthConfig) error
DoUpdate(ctx context.Context,
opts EngineInitOptions,
out OutStream,
authConfig *types.AuthConfig) error
}
// EngineInitOptions contains the configuration settings
// use during initialization of a containerized docker engine
type EngineInitOptions struct {
RegistryPrefix string
EngineImage string
EngineVersion string
ConfigFile string
RuntimeMetadataDir string
}
// AvailableVersions groups the available versions which were discovered
type AvailableVersions struct {
Downgrades []DockerVersion
Patches []DockerVersion
Upgrades []DockerVersion
}
// DockerVersion wraps a semantic version to retain the original tag
// since the docker date based versions don't strictly follow semantic
// versioning (leading zeros, etc.)
type DockerVersion struct {
ver.Version
Tag string
}
// Update stores available updates for rendering in a table
type Update struct {
Type string
Version string
Notes string
}
// OutStream is an output stream used to write normal program output.
type OutStream interface {
io.Writer
FD() uintptr
IsTerminal() bool
}
// RuntimeMetadata holds platform information about the daemon
type RuntimeMetadata struct {
Platform string `json:"platform"`
ContainerdMinVersion string `json:"containerd_min_version"`
Runtime string `json:"runtime"`
EngineImage string `json:"engine_image"`
}

View File

@ -65,7 +65,7 @@ info:
# Authentication
Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as `X-Registry-Auth` header as a Base64 encoded (JSON) string with the following structure:
Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5) (JSON) string with the following structure:
```
{
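For illustration, a minimal Go sketch of producing that header value with only the standard library (the credentials are placeholders; note the URL-safe alphabet from RFC 4648 section 5, not standard base64):

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// authConfig mirrors the JSON structure expected in X-Registry-Auth.
type authConfig struct {
	Username      string `json:"username"`
	Password      string `json:"password"`
	ServerAddress string `json:"serveraddress"`
}

func main() {
	buf, _ := json.Marshal(authConfig{
		Username:      "hannibal",
		Password:      "xxxx",
		ServerAddress: "https://index.docker.io/v1/",
	})
	// base64url encoding, as required by the endpoints above.
	fmt.Println("X-Registry-Auth:", base64.URLEncoding.EncodeToString(buf))
}
```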
@ -618,6 +618,71 @@ definitions:
description: "Start period for the container to initialize before starting health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit."
type: "integer"
Health:
description: |
Health stores information about the container's healthcheck results.
type: "object"
properties:
Status:
description: |
Status is one of `none`, `starting`, `healthy` or `unhealthy`
- "none" Indicates there is no healthcheck
- "starting" Starting indicates that the container is not yet ready
- "healthy" Healthy indicates that the container is running correctly
- "unhealthy" Unhealthy indicates that the container has a problem
type: "string"
enum:
- "none"
- "starting"
- "healthy"
- "unhealthy"
example: "healthy"
FailingStreak:
description: "FailingStreak is the number of consecutive failures"
type: "integer"
example: 0
Log:
type: "array"
description: |
Log contains the last few results (oldest first)
items:
x-nullable: true
$ref: "#/definitions/HealthcheckResult"
HealthcheckResult:
description: |
HealthcheckResult stores information about a single run of a healthcheck probe
type: "object"
properties:
Start:
description: |
Date and time at which this check started in
[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
type: "string"
format: "date-time"
example: "2020-01-04T10:44:24.496525531Z"
End:
description: |
Date and time at which this check ended in
[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
type: "string"
format: "dateTime"
example: "2020-01-04T10:45:21.364524523Z"
ExitCode:
description: |
ExitCode meanings:
- `0` healthy
- `1` unhealthy
- `2` reserved (considered unhealthy)
- other values: error running probe
type: "integer"
example: 0
Output:
description: "Output from last check"
type: "string"
HostConfig:
description: "Container configuration that depends on the host we are running on"
allOf:
@ -628,12 +693,44 @@ definitions:
Binds:
type: "array"
description: |
A list of volume bindings for this container. Each volume binding is a string in one of these forms:
A list of volume bindings for this container. Each volume binding
is a string in one of these forms:
- `host-src:container-dest` to bind-mount a host path into the container. Both `host-src`, and `container-dest` must be an _absolute_ path.
- `host-src:container-dest:ro` to make the bind mount read-only inside the container. Both `host-src`, and `container-dest` must be an _absolute_ path.
- `volume-name:container-dest` to bind-mount a volume managed by a volume driver into the container. `container-dest` must be an _absolute_ path.
- `volume-name:container-dest:ro` to mount the volume read-only inside the container. `container-dest` must be an _absolute_ path.
- `host-src:container-dest[:options]` to bind-mount a host path
into the container. Both `host-src`, and `container-dest` must
be an _absolute_ path.
- `volume-name:container-dest[:options]` to bind-mount a volume
managed by a volume driver into the container. `container-dest`
must be an _absolute_ path.
`options` is an optional, comma-delimited list of:
- `nocopy` disables automatic copying of data from the container
path to the volume. The `nocopy` flag only applies to named volumes.
- `[ro|rw]` mounts a volume read-only or read-write, respectively.
If omitted or set to `rw`, volumes are mounted read-write.
- `[z|Z]` applies SELinux labels to allow or deny multiple containers
to read and write to the same volume.
- `z`: a _shared_ content label is applied to the content. This
label indicates that multiple containers can share the volume
content, for both reading and writing.
- `Z`: a _private unshared_ label is applied to the content.
This label indicates that only the current container can use
a private volume. Labeling systems such as SELinux require
proper labels to be placed on volume content that is mounted
into a container. Without a label, the security system can
prevent a container's processes from using the content. By
default, the labels set by the host operating system are not
modified.
- `[[r]shared|[r]slave|[r]private]` specifies mount
[propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt).
This only applies to bind-mounted volumes, not internal volumes
or named volumes. Mount propagation requires the source mount
point (the location where the source directory is mounted in the
host operating system) to have the correct propagation properties.
For shared volumes, the source mount point must be set to `shared`.
For slave volumes, the mount must be set to either `shared` or
`slave`.
items:
type: "string"
ContainerIDFile:
@ -2949,7 +3046,27 @@ definitions:
type: "object"
properties:
Constraints:
description: "An array of constraints."
description: |
An array of constraint expressions to limit the set of nodes where
a task can be scheduled. Constraint expressions can either use a
_match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find
nodes that satisfy every expression (AND match). Constraints can
match node or Docker Engine labels as follows:
node attribute | matches | example
---------------------|--------------------------------|-----------------------------------------------
`node.id` | Node ID | `node.id==2ivku8v2gvtg4`
`node.hostname` | Node hostname | `node.hostname!=node-2`
`node.role` | Node role (`manager`/`worker`) | `node.role==manager`
`node.platform.os` | Node operating system | `node.platform.os==windows`
`node.platform.arch` | Node architecture | `node.platform.arch==x86_64`
`node.labels` | User-defined node labels | `node.labels.security==high`
`engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-14.04`
`engine.labels` apply to Docker Engine labels like operating system,
drivers, etc. Swarm administrators add `node.labels` for operational
purposes by using the [`node update endpoint`](#operation/NodeUpdate).
type: "array"
items:
type: "string"
@ -2957,6 +3074,8 @@ definitions:
- "node.hostname!=node3.corp.example.com"
- "node.role!=manager"
- "node.labels.type==production"
- "node.platform.os==linux"
- "node.platform.arch==x86_64"
Preferences:
description: "Preferences provide a way to make the scheduler aware of factors such as topology. They are provided in order from highest to lowest precedence."
type: "array"
@ -3086,6 +3205,12 @@ definitions:
type: "integer"
DesiredState:
$ref: "#/definitions/TaskState"
JobIteration:
description: |
If the Service this Task belongs to is a job-mode service, contains
the JobIteration of the Service this Task was created for. Absent if
the Task was created for a Replicated or Global Service.
$ref: "#/definitions/ObjectVersion"
example:
ID: "0kzzo1i0y4jz6027t0k7aezc7"
Version:
@ -3178,6 +3303,22 @@ definitions:
format: "int64"
Global:
type: "object"
ReplicatedJob:
description: "The mode used for services with a finite number of tasks that run to a completed state."
type: "object"
properties:
MaxConcurrent:
description: "The maximum number of replicas to run simultaneously."
type: "integer"
format: "int64"
default: 1
TotalCompletions:
description: "The total number of replicas desired to reach the Completed state. If unset, will default to the value of MaxConcurrent"
type: "integer"
format: "int64"
GlobalJob:
description: "The mode used for services which run a task to the completed state on each valid node."
type: "object"
UpdateConfig:
description: "Specification for the update strategy of the service."
type: "object"
@ -3378,6 +3519,37 @@ definitions:
type: "integer"
format: "uint64"
example: 10
CompletedTasks:
description: |
The number of tasks for a job that are in the Completed state.
This field must be cross-referenced with the service type, as the
value of 0 may mean the service is not in a job mode, or it may
mean the job-mode service has not yet completed any tasks.
type: "integer"
format: "uint64"
JobStatus:
description: |
The status of the service when it is in one of ReplicatedJob or
GlobalJob modes. Absent on Replicated and Global mode services. The
JobIteration is an ObjectVersion, but unlike the Service's version,
does not need to be sent with an update request.
type: "object"
properties:
JobIteration:
description: |
JobIteration is a value increased each time a Job is executed,
successfully or otherwise. "Executed", in this case, means the
job as a whole has been started, not that an individual Task has
been launched. A job is "Executed" when its ServiceSpec is
updated. JobIteration can be used to disambiguate Tasks belonging
to different executions of a job. Though JobIteration will
increase with each subsequent execution, it may not necessarily
increase by 1, and so JobIteration should not be used to keep track
of the number of times a job has been executed.
$ref: "#/definitions/ObjectVersion"
LastExecution:
description: "The last time, as observed by the server, that this job was started"
type: "string"
format: "dateTime"
example:
ID: "9mnpnzenvg8p8tdbtq4wvbkcz"
Version:
@ -3566,7 +3738,7 @@ definitions:
com.example.some-other-label: "some-other-value"
Data:
description: |
Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2))
Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5))
data to store as secret.
This field is only used to _create_ a secret, and is not returned by
@ -3616,7 +3788,7 @@ definitions:
type: "string"
Data:
description: |
Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2))
Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5))
config data.
type: "string"
Templating:
@ -3643,6 +3815,70 @@ definitions:
Spec:
$ref: "#/definitions/ConfigSpec"
ContainerState:
description: |
ContainerState stores the container's running state. It's part of ContainerJSONBase
and will be returned by the "inspect" command.
type: "object"
properties:
Status:
description: |
String representation of the container state. Can be one of "created",
"running", "paused", "restarting", "removing", "exited", or "dead".
type: "string"
enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"]
example: "running"
Running:
description: |
Whether this container is running.
Note that a running container can be _paused_. The `Running` and `Paused`
booleans are not mutually exclusive:
When pausing a container (on Linux), the freezer cgroup is used to suspend
all processes in the container. Freezing the process requires the process to
be running. As a result, paused containers are both `Running` _and_ `Paused`.
Use the `Status` field instead to determine if a container's state is "running".
type: "boolean"
example: true
Paused:
description: "Whether this container is paused."
type: "boolean"
example: false
Restarting:
description: "Whether this container is restarting."
type: "boolean"
example: false
OOMKilled:
description: "Whether this container has been killed because it ran out of memory."
type: "boolean"
example: false
Dead:
type: "boolean"
example: false
Pid:
description: "The process ID of this container"
type: "integer"
example: 1234
ExitCode:
description: "The last exit code of this container"
type: "integer"
example: 0
Error:
type: "string"
StartedAt:
description: "The time when this container was last started."
type: "string"
example: "2020-01-06T09:06:59.461876391Z"
FinishedAt:
description: "The time when this container last exited."
type: "string"
example: "2020-01-06T09:07:59.461876391Z"
Health:
x-nullable: true
$ref: "#/definitions/Health"
SystemInfo:
type: "object"
properties:
@ -3717,44 +3953,6 @@ definitions:
on Windows.
type: "string"
example: "/var/lib/docker"
SystemStatus:
description: |
Status information about this node (standalone Swarm API).
<p><br /></p>
> **Note**: The information returned in this field is only propagated
> by the Swarm standalone API, and is empty (`null`) when using
> built-in swarm mode.
type: "array"
items:
type: "array"
items:
type: "string"
example:
- ["Role", "primary"]
- ["State", "Healthy"]
- ["Strategy", "spread"]
- ["Filters", "health, port, containerslots, dependency, affinity, constraint, whitelist"]
- ["Nodes", "2"]
- [" swarm-agent-00", "192.168.99.102:2376"]
- [" └ ID", "5CT6:FBGO:RVGO:CZL4:PB2K:WCYN:2JSV:KSHH:GGFW:QOPG:6J5Q:IOZ2|192.168.99.102:2376"]
- [" └ Status", "Healthy"]
- [" └ Containers", "1 (1 Running, 0 Paused, 0 Stopped)"]
- [" └ Reserved CPUs", "0 / 1"]
- [" └ Reserved Memory", "0 B / 1.021 GiB"]
- [" └ Labels", "kernelversion=4.4.74-boot2docker, operatingsystem=Boot2Docker 17.06.0-ce (TCL 7.2); HEAD : 0672754 - Thu Jun 29 00:06:31 UTC 2017, ostype=linux, provider=virtualbox, storagedriver=aufs"]
- [" └ UpdatedAt", "2017-08-09T10:03:46Z"]
- [" └ ServerVersion", "17.06.0-ce"]
- [" swarm-manager", "192.168.99.101:2376"]
- [" └ ID", "TAMD:7LL3:SEF7:LW2W:4Q2X:WVFH:RTXX:JSYS:XY2P:JEHL:ZMJK:JGIW|192.168.99.101:2376"]
- [" └ Status", "Healthy"]
- [" └ Containers", "2 (2 Running, 0 Paused, 0 Stopped)"]
- [" └ Reserved CPUs", "0 / 1"]
- [" └ Reserved Memory", "0 B / 1.021 GiB"]
- [" └ Labels", "kernelversion=4.4.74-boot2docker, operatingsystem=Boot2Docker 17.06.0-ce (TCL 7.2); HEAD : 0672754 - Thu Jun 29 00:06:31 UTC 2017, ostype=linux, provider=virtualbox, storagedriver=aufs"]
- [" └ UpdatedAt", "2017-08-09T10:04:11Z"]
- [" └ ServerVersion", "17.06.0-ce"]
Plugins:
$ref: "#/definitions/PluginsInfo"
MemoryLimit:
@ -4885,52 +5083,8 @@ paths:
items:
type: "string"
State:
description: "The state of the container."
type: "object"
properties:
Status:
description: |
The status of the container. For example, `"running"` or `"exited"`.
type: "string"
enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"]
Running:
description: |
Whether this container is running.
Note that a running container can be _paused_. The `Running` and `Paused`
booleans are not mutually exclusive:
When pausing a container (on Linux), the freezer cgroup is used to suspend
all processes in the container. Freezing the process requires the process to
be running. As a result, paused containers are both `Running` _and_ `Paused`.
Use the `Status` field instead to determine if a container's state is "running".
type: "boolean"
Paused:
description: "Whether this container is paused."
type: "boolean"
Restarting:
description: "Whether this container is restarting."
type: "boolean"
OOMKilled:
description: "Whether this container has been killed because it ran out of memory."
type: "boolean"
Dead:
type: "boolean"
Pid:
description: "The process ID of this container"
type: "integer"
ExitCode:
description: "The last exit code of this container"
type: "integer"
Error:
type: "string"
StartedAt:
description: "The time when this container was last started."
type: "string"
FinishedAt:
description: "The time when this container last exited."
type: "string"
x-nullable: true
$ref: "#/definitions/ContainerState"
Image:
description: "The container's image"
type: "string"
@ -4942,9 +5096,6 @@ paths:
type: "string"
LogPath:
type: "string"
Node:
description: "TODO"
type: "object"
Name:
type: "string"
RestartCount:
@ -5002,6 +5153,8 @@ paths:
Domainname: ""
Env:
- "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
Healthcheck:
Test: ["CMD-SHELL", "exit 0"]
Hostname: "ba033ac44011"
Image: "ubuntu"
Labels:
@ -5113,6 +5266,14 @@ paths:
Error: ""
ExitCode: 9
FinishedAt: "2015-01-06T15:47:32.080254511Z"
Health:
Status: "healthy"
FailingStreak: 0
Log:
- Start: "2019-12-22T10:59:05.6385933Z"
End: "2019-12-22T10:59:05.8078452Z"
ExitCode: 0
Output: ""
OOMKilled: false
Dead: false
Paused: false
@ -6667,6 +6828,10 @@ paths:
in: "query"
description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled."
type: "string"
- name: "message"
in: "query"
description: "Set commit message for imported image."
type: "string"
- name: "inputImage"
in: "body"
description: "Image content if the value `-` has been specified in fromSrc query parameter"
@ -6675,7 +6840,7 @@ paths:
required: false
- name: "X-Registry-Auth"
in: "header"
description: "A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)"
description: "A base64url-encoded auth configuration. [See the authentication section for details.](#section/Authentication)"
type: "string"
- name: "platform"
in: "query"
@ -6903,7 +7068,7 @@ paths:
type: "string"
- name: "X-Registry-Auth"
in: "header"
description: "A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)"
description: "A base64url-encoded auth configuration. [See the authentication section for details.](#section/Authentication)"
type: "string"
required: true
tags: ["Image"]
@ -8609,7 +8774,7 @@ paths:
type: "string"
- name: "X-Registry-Auth"
in: "header"
description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)"
description: "A base64url-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)"
type: "string"
- name: "body"
in: "body"
@ -8774,7 +8939,7 @@ paths:
type: "string"
- name: "X-Registry-Auth"
in: "header"
description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)"
description: "A base64url-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)"
type: "string"
- name: "body"
in: "body"
@ -9471,7 +9636,7 @@ paths:
foo: "bar"
- name: "X-Registry-Auth"
in: "header"
description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)"
description: "A base64url-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)"
type: "string"
tags: ["Service"]
/services/{id}:
@ -9630,7 +9795,7 @@ paths:
type: "string"
- name: "X-Registry-Auth"
in: "header"
description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)"
description: "A base64url-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)"
type: "string"
tags: ["Service"]

View File

@ -50,7 +50,7 @@ type ContainerCommitOptions struct {
// ContainerExecInspect holds information returned by exec inspect.
type ContainerExecInspect struct {
ExecID string
ExecID string `json:"ID"`
ContainerID string
Running bool
ExitCode int
@ -205,7 +205,7 @@ const (
// BuilderV1 is the first generation builder in docker daemon
BuilderV1 BuilderVersion = "1"
// BuilderBuildKit is builder based on moby/buildkit project
BuilderBuildKit = "2"
BuilderBuildKit BuilderVersion = "2"
)
// ImageBuildResponse holds information
@ -265,7 +265,7 @@ type ImagePullOptions struct {
// if the privilege request fails.
type RequestPrivilegeFunc func() (string, error)
//ImagePushOptions holds information to push images.
// ImagePushOptions holds information to push images.
type ImagePushOptions ImagePullOptions
// ImageRemoveOptions holds parameters to remove images.

View File

@ -145,7 +145,7 @@ func (n NetworkMode) ConnectedContainer() string {
return ""
}
//UserDefined indicates user-created network
// UserDefined indicates user-created network
func (n NetworkMode) UserDefined() string {
if n.IsUserDefined() {
return string(n)

View File

@ -154,7 +154,7 @@ func (args Args) Len() int {
func (args Args) MatchKVList(key string, sources map[string]string) bool {
fieldValues := args.fields[key]
//do not filter if there is no filter set or cannot determine filter
// do not filter if there is no filter set or cannot determine filter
if len(fieldValues) == 0 {
return true
}
@ -200,7 +200,7 @@ func (args Args) Match(field, source string) bool {
// ExactMatch returns true if the source matches exactly one of the values.
func (args Args) ExactMatch(key, source string) bool {
fieldValues, ok := args.fields[key]
//do not filter if there is no filter set or cannot determine filter
// do not filter if there is no filter set or cannot determine filter
if !ok || len(fieldValues) == 0 {
return true
}
@ -213,7 +213,7 @@ func (args Args) ExactMatch(key, source string) bool {
// matches exactly the value.
func (args Args) UniqueExactMatch(key, source string) bool {
fieldValues := args.fields[key]
//do not filter if there is no filter set or cannot determine filter
// do not filter if there is no filter set or cannot determine filter
if len(fieldValues) == 0 {
return true
}

View File

@ -13,7 +13,7 @@ type Address struct {
// IPAM represents IP Address Management
type IPAM struct {
Driver string
Options map[string]string //Per network IPAM driver options
Options map[string]string // Per network IPAM driver options
Config []IPAMConfig
}

View File

@ -17,6 +17,10 @@ type Service struct {
// listing all tasks for a service, an operation that could be
// computationally and network expensive.
ServiceStatus *ServiceStatus `json:",omitempty"`
// JobStatus is the status of a Service which is in one of ReplicatedJob or
// GlobalJob modes. It is absent on Replicated and Global services.
JobStatus *JobStatus `json:",omitempty"`
}
// ServiceSpec represents the spec of a service.
@ -39,8 +43,10 @@ type ServiceSpec struct {
// ServiceMode represents the mode of a service.
type ServiceMode struct {
Replicated *ReplicatedService `json:",omitempty"`
Global *GlobalService `json:",omitempty"`
Replicated *ReplicatedService `json:",omitempty"`
Global *GlobalService `json:",omitempty"`
ReplicatedJob *ReplicatedJob `json:",omitempty"`
GlobalJob *GlobalJob `json:",omitempty"`
}
// UpdateState is the state of a service update.
@ -77,6 +83,32 @@ type ReplicatedService struct {
// GlobalService is a kind of ServiceMode.
type GlobalService struct{}
// ReplicatedJob is a type of Service which executes defined Tasks
// in parallel until the specified number of Tasks have succeeded.
type ReplicatedJob struct {
// MaxConcurrent indicates the maximum number of Tasks that should be
// executing simultaneously for this job at any given time. There may be
// fewer Tasks than MaxConcurrent executing simultaneously; for example, if
// there are fewer than MaxConcurrent tasks needed to reach
// TotalCompletions.
//
// If this field is empty, it will default to a max concurrency of 1.
MaxConcurrent *uint64 `json:",omitempty"`
// TotalCompletions is the total number of Tasks desired to run to
// completion.
//
// If this field is empty, the value of MaxConcurrent will be used.
TotalCompletions *uint64 `json:",omitempty"`
}
// GlobalJob is the type of a Service which executes a Task on every Node
// matching the Service's placement constraints. These tasks run to completion
// and then exit.
//
// This type is deliberately empty.
type GlobalJob struct{}
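A hedged sketch of requesting the new job modes through these types (the service name, image, and counts are illustrative; `uint64Ptr` is a local helper):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func uint64Ptr(v uint64) *uint64 { return &v }

func main() {
	// Run 10 one-shot tasks, at most 2 at a time.
	spec := swarm.ServiceSpec{
		Annotations: swarm.Annotations{Name: "batch-job"},
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: &swarm.ContainerSpec{Image: "alpine", Command: []string{"true"}},
		},
		Mode: swarm.ServiceMode{
			ReplicatedJob: &swarm.ReplicatedJob{
				MaxConcurrent:    uint64Ptr(2),
				TotalCompletions: uint64Ptr(10),
			},
		},
	}
	fmt.Println(*spec.Mode.ReplicatedJob.TotalCompletions)
}
```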
const (
// UpdateFailureActionPause PAUSE
UpdateFailureActionPause = "pause"
@ -142,4 +174,29 @@ type ServiceStatus struct {
// services, this is computed by taking the number of tasks with desired
// state of not-Shutdown.
DesiredTasks uint64
// CompletedTasks is the number of tasks in the state Completed, if this
// service is in ReplicatedJob or GlobalJob mode. This field must be
// cross-referenced with the service type, because the default value of 0
// may mean that a service is not in a job mode, or it may mean that the
// job has yet to complete any tasks.
CompletedTasks uint64
}
// JobStatus is the status of a job-type service.
type JobStatus struct {
// JobIteration is a value increased each time a Job is executed,
// successfully or otherwise. "Executed", in this case, means the job as a
// whole has been started, not that an individual Task has been launched. A
// job is "Executed" when its ServiceSpec is updated. JobIteration can be
// used to disambiguate Tasks belonging to different executions of a job.
//
// Though JobIteration will increase with each subsequent execution, it may
// not necessarily increase by 1, and so JobIteration should not be used to
// keep track of the number of times a job has been executed.
JobIteration Version
// LastExecution is the time that the job was last executed, as observed by
// the Swarm manager.
LastExecution time.Time `json:",omitempty"`
}

View File

@ -56,6 +56,12 @@ type Task struct {
DesiredState TaskState `json:",omitempty"`
NetworksAttachments []NetworkAttachment `json:",omitempty"`
GenericResources []GenericResource `json:",omitempty"`
// JobIteration is the JobIteration of the Service that this Task was
// spawned from, if the Service is a ReplicatedJob or GlobalJob. This is
// used to determine which Tasks belong to which run of the job. This field
// is absent if the Service mode is Replicated or Global.
JobIteration *Version `json:",omitempty"`
}
// TaskSpec represents the spec of a task.

View File

@ -154,7 +154,7 @@ type Info struct {
Images int
Driver string
DriverStatus [][2]string
SystemStatus [][2]string
SystemStatus [][2]string `json:",omitempty"` // SystemStatus is only propagated by the Swarm standalone API
Plugins PluginsInfo
MemoryLimit bool
SwapLimit bool
@ -318,7 +318,7 @@ type ContainerState struct {
}
// ContainerNode stores information about the node that a container
// is running on. It's only available in Docker Swarm
// is running on. It's only used by the Docker Swarm standalone API
type ContainerNode struct {
ID string
IPAddress string `json:"IP"`
@ -342,7 +342,7 @@ type ContainerJSONBase struct {
HostnamePath string
HostsPath string
LogPath string
Node *ContainerNode `json:",omitempty"`
Node *ContainerNode `json:",omitempty"` // Node is only propagated by Docker Swarm standalone API
Name string
RestartCount int
Driver string

View File

@ -1,4 +1,4 @@
// +build linux freebsd openbsd darwin
// +build linux freebsd openbsd darwin solaris illumos
package client // import "github.com/docker/docker/client"

View File

@ -14,7 +14,7 @@ import (
// It returns the JSON content in the response body.
func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) {
if ref != "" {
//Check if the given image name can be resolved
// Check if the given image name can be resolved
if _, err := reference.ParseNormalizedNamed(ref); err != nil {
return nil, err
}

View File

@ -25,15 +25,14 @@ func (cli *Client) ImagePush(ctx context.Context, image string, options types.Im
return nil, errors.New("cannot push a digest reference")
}
tag := ""
name := reference.FamiliarName(ref)
if nameTaggedRef, isNamedTagged := ref.(reference.NamedTagged); isNamedTagged {
tag = nameTaggedRef.Tag()
}
query := url.Values{}
query.Set("tag", tag)
if !options.All {
ref = reference.TagNameOnly(ref)
if tagged, ok := ref.(reference.Tagged); ok {
query.Set("tag", tagged.Tag())
}
}
resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth)
if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil {
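With this change, a caller pushing an untagged reference now pushes `:latest` rather than all tags unless `All` is set; a hedged usage sketch (the reference and auth value are placeholders):

```go
package main

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	// No tag given: the client now pushes "example.com/app:latest"
	// instead of asking the daemon to push every tag.
	rc, err := cli.ImagePush(context.Background(), "example.com/app", types.ImagePushOptions{
		RegistryAuth: "<base64url auth>", // see the Authentication section
	})
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	_, _ = io.Copy(os.Stdout, rc)
}
```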

View File

@ -17,7 +17,7 @@ func (cli *Client) Ping(ctx context.Context) (types.Ping, error) {
var ping types.Ping
// Using cli.buildRequest() + cli.doRequest() instead of cli.sendRequest()
// because ping requests are used during API version negotiation, so we want
// because ping requests are used during API version negotiation, so we want
// to hit the non-versioned /_ping endpoint, not /v1.xx/_ping
req, err := cli.buildRequest(http.MethodHead, path.Join(cli.basePath, "/_ping"), nil, nil)
if err != nil {

View File

@ -178,7 +178,7 @@ func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error {
clearLine(out)
endl = "\r"
fmt.Fprint(out, endl)
} else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal
} else if jm.Progress != nil && jm.Progress.String() != "" { // disable progressbar in non-terminal
return nil
}
if jm.TimeNano != 0 {

View File

@ -13,7 +13,7 @@ import (
"unsafe"
)
//parseMountTable returns information about mounted filesystems
// parseMountTable returns information about mounted filesystems
func parseMountTable(filter FilterFunc) ([]*Info, error) {
var rawEntries *C.struct_statfs

View File

@ -231,6 +231,9 @@ var (
// Dame Mary Lucy Cartwright - British mathematician who was one of the first to study what is now known as chaos theory. Also known for Cartwright's theorem which finds applications in signal processing. https://en.wikipedia.org/wiki/Mary_Cartwright
"cartwright",
// George Washington Carver - American agricultural scientist and inventor. He was the most prominent black scientist of the early 20th century. https://en.wikipedia.org/wiki/George_Washington_Carver
"carver",
// Vinton Gray Cerf - American Internet pioneer, recognised as one of "the fathers of the Internet". With Robert Elliot Kahn, he designed TCP and IP, the primary data communication protocols of the Internet and other computer networks. https://en.wikipedia.org/wiki/Vint_Cerf
"cerf",
@ -452,6 +455,9 @@ var (
// Yeong-Sil Jang was a Korean scientist and astronomer during the Joseon Dynasty; he invented the first metal printing press and water gauge. https://en.wikipedia.org/wiki/Jang_Yeong-sil
"jang",
// Mae Carol Jemison - is an American engineer, physician, and former NASA astronaut. She became the first black woman to travel in space when she served as a mission specialist aboard the Space Shuttle Endeavour - https://en.wikipedia.org/wiki/Mae_Jemison
"jemison",
// Betty Jennings - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Jean_Bartik
"jennings",

View File

@ -6,9 +6,9 @@ import (
"time"
)
//setCTime will set the create time on a file. On Unix, the create
//time is updated as a side effect of setting the modified time, so
//no action is required.
// setCTime will set the create time on a file. On Unix, the create
// time is updated as a side effect of setting the modified time, so
// no action is required.
func setCTime(path string, ctime time.Time) error {
return nil
}

View File

@ -6,8 +6,8 @@ import (
"golang.org/x/sys/windows"
)
//setCTime will set the create time on a file. On Windows, this requires
//calling SetFileTime and explicitly including the create time.
// setCTime will set the create time on a file. On Windows, this requires
// calling SetFileTime and explicitly including the create time.
func setCTime(path string, ctime time.Time) error {
ctimespec := windows.NsecToTimespec(ctime.UnixNano())
pathp, e := windows.UTF16PtrFromString(path)

View File

@ -235,7 +235,7 @@ func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle,
createmode = windows.OPEN_EXISTING
}
// Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang.
//https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx
const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN
h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0)
return h, e

View File

@ -10,36 +10,36 @@ import (
)
const (
OWNER_SECURITY_INFORMATION = 0x00000001
GROUP_SECURITY_INFORMATION = 0x00000002
DACL_SECURITY_INFORMATION = 0x00000004
SACL_SECURITY_INFORMATION = 0x00000008
LABEL_SECURITY_INFORMATION = 0x00000010
ATTRIBUTE_SECURITY_INFORMATION = 0x00000020
SCOPE_SECURITY_INFORMATION = 0x00000040
OWNER_SECURITY_INFORMATION = windows.OWNER_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.OWNER_SECURITY_INFORMATION
GROUP_SECURITY_INFORMATION = windows.GROUP_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.GROUP_SECURITY_INFORMATION
DACL_SECURITY_INFORMATION = windows.DACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.DACL_SECURITY_INFORMATION
SACL_SECURITY_INFORMATION = windows.SACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.SACL_SECURITY_INFORMATION
LABEL_SECURITY_INFORMATION = windows.LABEL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.LABEL_SECURITY_INFORMATION
ATTRIBUTE_SECURITY_INFORMATION = windows.ATTRIBUTE_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.ATTRIBUTE_SECURITY_INFORMATION
SCOPE_SECURITY_INFORMATION = windows.SCOPE_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.SCOPE_SECURITY_INFORMATION
PROCESS_TRUST_LABEL_SECURITY_INFORMATION = 0x00000080
ACCESS_FILTER_SECURITY_INFORMATION = 0x00000100
BACKUP_SECURITY_INFORMATION = 0x00010000
PROTECTED_DACL_SECURITY_INFORMATION = 0x80000000
PROTECTED_SACL_SECURITY_INFORMATION = 0x40000000
UNPROTECTED_DACL_SECURITY_INFORMATION = 0x20000000
UNPROTECTED_SACL_SECURITY_INFORMATION = 0x10000000
BACKUP_SECURITY_INFORMATION = windows.BACKUP_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.BACKUP_SECURITY_INFORMATION
PROTECTED_DACL_SECURITY_INFORMATION = windows.PROTECTED_DACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.PROTECTED_DACL_SECURITY_INFORMATION
PROTECTED_SACL_SECURITY_INFORMATION = windows.PROTECTED_SACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.PROTECTED_SACL_SECURITY_INFORMATION
UNPROTECTED_DACL_SECURITY_INFORMATION = windows.UNPROTECTED_DACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.UNPROTECTED_DACL_SECURITY_INFORMATION
UNPROTECTED_SACL_SECURITY_INFORMATION = windows.UNPROTECTED_SACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.UNPROTECTED_SACL_SECURITY_INFORMATION
)
const (
SE_UNKNOWN_OBJECT_TYPE = iota
SE_FILE_OBJECT
SE_SERVICE
SE_PRINTER
SE_REGISTRY_KEY
SE_LMSHARE
SE_KERNEL_OBJECT
SE_WINDOW_OBJECT
SE_DS_OBJECT
SE_DS_OBJECT_ALL
SE_PROVIDER_DEFINED_OBJECT
SE_WMIGUID_OBJECT
SE_REGISTRY_WOW64_32KEY
SE_UNKNOWN_OBJECT_TYPE = windows.SE_UNKNOWN_OBJECT_TYPE // Deprecated: use golang.org/x/sys/windows.SE_UNKNOWN_OBJECT_TYPE
SE_FILE_OBJECT = windows.SE_FILE_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_FILE_OBJECT
SE_SERVICE = windows.SE_SERVICE // Deprecated: use golang.org/x/sys/windows.SE_SERVICE
SE_PRINTER = windows.SE_PRINTER // Deprecated: use golang.org/x/sys/windows.SE_PRINTER
SE_REGISTRY_KEY = windows.SE_REGISTRY_KEY // Deprecated: use golang.org/x/sys/windows.SE_REGISTRY_KEY
SE_LMSHARE = windows.SE_LMSHARE // Deprecated: use golang.org/x/sys/windows.SE_LMSHARE
SE_KERNEL_OBJECT = windows.SE_KERNEL_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_KERNEL_OBJECT
SE_WINDOW_OBJECT = windows.SE_WINDOW_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_WINDOW_OBJECT
SE_DS_OBJECT = windows.SE_DS_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_DS_OBJECT
SE_DS_OBJECT_ALL = windows.SE_DS_OBJECT_ALL // Deprecated: use golang.org/x/sys/windows.SE_DS_OBJECT_ALL
SE_PROVIDER_DEFINED_OBJECT = windows.SE_PROVIDER_DEFINED_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_PROVIDER_DEFINED_OBJECT
SE_WMIGUID_OBJECT = windows.SE_WMIGUID_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_WMIGUID_OBJECT
SE_REGISTRY_WOW64_32KEY = windows.SE_REGISTRY_WOW64_32KEY // Deprecated: use golang.org/x/sys/windows.SE_REGISTRY_WOW64_32KEY
)
const (
@ -64,6 +64,7 @@ var (
type OSVersion = osversion.OSVersion
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx
// TODO: use golang.org/x/sys/windows.OsVersionInfoEx (needs OSVersionInfoSize to be exported)
type osVersionInfoEx struct {
OSVersionInfoSize uint32
MajorVersion uint32
@ -86,8 +87,6 @@ func GetOSVersion() OSVersion {
}
// IsWindowsClient returns true if the SKU is client
// @engine maintainers - this function should not be removed or modified as it
// is used to enforce licensing restrictions on Windows.
func IsWindowsClient() bool {
osviex := &osVersionInfoEx{OSVersionInfoSize: 284}
r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex)))
@ -101,7 +100,7 @@ func IsWindowsClient() bool {
// Unmount is a platform-specific helper function to call
// the unmount syscall. Not supported on Windows
func Unmount(dest string) error {
func Unmount(_ string) error {
return nil
}
@ -122,7 +121,7 @@ func CommandLineToArgv(commandLine string) ([]string, error) {
newArgs := make([]string, argc)
for i, v := range (*argv)[:argc] {
newArgs[i] = string(windows.UTF16ToString((*v)[:]))
newArgs[i] = windows.UTF16ToString((*v)[:])
}
return newArgs, nil
@ -149,7 +148,7 @@ func GetSecurityDescriptorDacl(securityDescriptor *byte, daclPresent *uint32, da
r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(securityDescriptor)), uintptr(unsafe.Pointer(daclPresent)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclDefaulted)), 0, 0)
if r1 == 0 {
if e1 != 0 {
result = syscall.Errno(e1)
result = e1
} else {
result = syscall.EINVAL
}

View File

@ -10,24 +10,23 @@ func Lgetxattr(path string, attr string) ([]byte, error) {
dest := make([]byte, 128)
sz, errno := unix.Lgetxattr(path, attr, dest)
switch {
case errno == unix.ENODATA:
return nil, nil
case errno == unix.ERANGE:
// 128 byte array might just not be good enough. A dummy buffer is used
// to get the real size of the xattrs on disk
for errno == unix.ERANGE {
// Buffer too small, use zero-sized buffer to get the actual size
sz, errno = unix.Lgetxattr(path, attr, []byte{})
if errno != nil {
return nil, errno
}
dest = make([]byte, sz)
sz, errno = unix.Lgetxattr(path, attr, dest)
if errno != nil {
return nil, errno
}
}
switch {
case errno == unix.ENODATA:
return nil, nil
case errno != nil:
return nil, errno
}
return dest[:sz], nil
}

View File

@ -14,11 +14,10 @@ import (
"time"
"github.com/docker/distribution/registry/client/transport"
"github.com/docker/go-connections/tlsconfig"
"github.com/sirupsen/logrus"
"github.com/docker/docker/pkg/homedir"
"github.com/docker/docker/rootless"
"github.com/docker/go-connections/tlsconfig"
"github.com/sirupsen/logrus"
)
var (
@ -70,7 +69,7 @@ func hasFile(files []os.FileInfo, name string) bool {
// provided TLS configuration.
func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error {
fs, err := ioutil.ReadDir(directory)
if err != nil && !os.IsNotExist(err) && !os.IsPermission(err) {
if err != nil && !os.IsNotExist(err) {
return err
}

View File

@ -1,24 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof

View File

@ -1,70 +0,0 @@
# Contributing to Docker open source projects
Want to hack on go-events? Awesome! Here are instructions to get you started.
go-events is part of the [Docker](https://www.docker.com) project, and
follows the same rules and principles. If you're already familiar with the way
Docker does things, you'll feel right at home.
Otherwise, go read Docker's
[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md),
[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md),
[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and
[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md).
For an in-depth description of our contribution process, visit the
contributors guide: [Understand how to contribute](https://docs.docker.com/opensource/workflow/make-a-contribution/)
### Sign your work
The sign-off is a simple line at the end of the explanation for the patch. Your
signature certifies that you wrote the patch or otherwise have the right to pass
it on as an open-source patch. The rules are pretty simple: if you can certify
the below (from [developercertificate.org](http://developercertificate.org/)):
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```
Then you just add a line to every git commit message:
Signed-off-by: Joe Smith <joe.smith@email.com>
Use your real name (sorry, no pseudonyms or anonymous contributions.)
If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.

View File

@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,46 +0,0 @@
# go-events maintainers file
#
# This file describes who runs the docker/go-events project and how.
# This is a living document - if you see something out of date or missing, speak up!
#
# It is structured to be consumable by both humans and programs.
# To extract its contents programmatically, use any TOML-compliant parser.
#
# This file is compiled into the MAINTAINERS file in docker/opensource.
#
[Org]
[Org."Core maintainers"]
people = [
"aaronlehmann",
"aluzzardi",
"lk4d4",
"stevvooe",
]
[people]
# A reference list of all people associated with the project.
# All other sections should refer to people by their canonical key
# in the people section.
# ADD YOURSELF HERE IN ALPHABETICAL ORDER
[people.aaronlehmann]
Name = "Aaron Lehmann"
Email = "aaron.lehmann@docker.com"
GitHub = "aaronlehmann"
[people.aluzzardi]
Name = "Andrea Luzzardi"
Email = "al@docker.com"
GitHub = "aluzzardi"
[people.lk4d4]
Name = "Alexander Morozov"
Email = "lk4d4@docker.com"
GitHub = "lk4d4"
[people.stevvooe]
Name = "Stephen Day"
Email = "stephen.day@docker.com"
GitHub = "stevvooe"

View File

@ -1,117 +0,0 @@
# Docker Events Package
[![GoDoc](https://godoc.org/github.com/docker/go-events?status.svg)](https://godoc.org/github.com/docker/go-events)
[![Circle CI](https://circleci.com/gh/docker/go-events.svg?style=shield)](https://circleci.com/gh/docker/go-events)
The Docker `events` package implements composable event distribution for Go.
Originally created to implement the [notifications in Docker Registry
2](https://github.com/docker/distribution/blob/master/docs/notifications.md),
we've found the pattern to be useful in other applications. This package is
most of the same code with slightly updated interfaces. Much of the internals
have been made available.
## Usage
The `events` package centers around a `Sink` type. Events are written with
calls to `Sink.Write(event Event)`. Sinks can be wired up in various
configurations to achieve interesting behavior.
The canonical example is that employed by the
[docker/distribution/notifications](https://godoc.org/github.com/docker/distribution/notifications)
package. Let's say we have a type `httpSink` where we'd like to queue
notifications. As a rule, it should send a single http request and return an
error if it fails:
```go
func (h *httpSink) Write(event Event) error {
p, err := json.Marshal(event)
if err != nil {
return err
}
body := bytes.NewReader(p)
resp, err := h.client.Post(h.url, "application/json", body)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return errors.New("unexpected status")
}
return nil
}
// implement (*httpSink).Close()
```
With just that, we can start using components from this package. One can call
`(*httpSink).Write` to send events as the body of a post request to a
configured URL.
### Retries
HTTP can be unreliable. The first feature we'd like is to have some retry:
```go
hs := newHTTPSink(/*...*/)
retry := NewRetryingSink(hs, NewBreaker(5, time.Second))
```
We now have a sink that will retry events against the `httpSink` until they
succeed. The retry will back off for one second after 5 consecutive failures
using the breaker strategy.
### Queues
This isn't quite enough. We want a sink that doesn't block while we are
waiting for events to be sent. Let's add a `Queue`:
```go
queue := NewQueue(retry)
```
Now, we have an unbounded queue that will work through all events sent with
`(*Queue).Write`. Events can be added asynchronously to the queue without
blocking the current execution path. This is ideal for use in an http request.
### Broadcast
It usually turns out that you want to send to more than one listener. We can
use `Broadcaster` to support this:
```go
var broadcast = NewBroadcaster() // make it available somewhere in your application.
broadcast.Add(queue) // add your queue!
broadcast.Add(queue2) // and another!
```
With the above, we can now call `broadcast.Write` in our http handlers and have
all the events distributed to each queue. Because the events are queued, no
listener blocks another.
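For instance, a minimal handler sketch (assuming the `broadcast` variable
above; `newEventFromRequest` is a hypothetical helper for building your event
type, not part of this package):

```go
func handleThing(w http.ResponseWriter, r *http.Request) {
	// ... do the real work of the handler ...

	// newEventFromRequest is a hypothetical constructor for your event type.
	if err := broadcast.Write(newEventFromRequest(r)); err != nil {
		// Broadcaster.Write only fails once the broadcaster has been closed.
		http.Error(w, "event system unavailable", http.StatusServiceUnavailable)
		return
	}
	w.WriteHeader(http.StatusAccepted)
}
```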
### Extending
For the most part, the above is sufficient for a lot of applications. However,
the functionality can be extended by implementing your own `Sink`. The
behavior and semantics of the sink can be completely dependent on the
application requirements. The interface is provided below for reference:
```go
type Sink interface {
Write(Event) error
Close() error
}
```
Application behavior can be controlled by how `Write` behaves. The examples
above are designed to queue the message and return as quickly as possible.
Other implementations may block until the event is committed to durable
storage.
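As an illustration, here is a minimal sketch of a custom `Sink` that commits
each event to an `io.Writer` before returning (the `writerSink` name and the
JSON encoding are assumptions for this example, not part of the package):

```go
// writerSink blocks in Write until the event has been encoded to w.
type writerSink struct {
	mu     sync.Mutex
	w      io.Writer
	closed bool
}

func (s *writerSink) Write(event Event) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.closed {
		return ErrSinkClosed
	}
	return json.NewEncoder(s.w).Encode(event)
}

// Close marks the sink closed; subsequent writes fail with ErrSinkClosed.
func (s *writerSink) Close() error {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.closed = true
	return nil
}
```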
## Copyright and license
Copyright © 2016 Docker, Inc. go-events is licensed under the Apache License,
Version 2.0. See [LICENSE](LICENSE) for the full license text.

View File

@ -1,178 +0,0 @@
package events
import (
"fmt"
"sync"
"github.com/sirupsen/logrus"
)
// Broadcaster sends events to multiple, reliable Sinks. The goal of this
// component is to dispatch events to configured endpoints. Reliability can be
// provided by wrapping incoming sinks.
type Broadcaster struct {
sinks []Sink
events chan Event
adds chan configureRequest
removes chan configureRequest
shutdown chan struct{}
closed chan struct{}
once sync.Once
}
// NewBroadcaster appends one or more sinks to the list of sinks. The
// broadcaster behavior will be affected by the properties of the sink.
// Generally, the sink should accept all messages and deal with reliability on
// its own. Queue and RetryingSink are appropriate wrappers here.
func NewBroadcaster(sinks ...Sink) *Broadcaster {
b := Broadcaster{
sinks: sinks,
events: make(chan Event),
adds: make(chan configureRequest),
removes: make(chan configureRequest),
shutdown: make(chan struct{}),
closed: make(chan struct{}),
}
// Start the broadcaster
go b.run()
return &b
}
// Write accepts an event to be dispatched to all sinks. This method will never
// fail and should never block (hopefully!). The caller cedes the memory to the
// broadcaster and should not modify it after calling write.
func (b *Broadcaster) Write(event Event) error {
select {
case b.events <- event:
case <-b.closed:
return ErrSinkClosed
}
return nil
}
// Add the sink to the broadcaster.
//
// The provided sink must be comparable with equality. Typically, this just
// works with a regular pointer type.
func (b *Broadcaster) Add(sink Sink) error {
return b.configure(b.adds, sink)
}
// Remove the provided sink.
func (b *Broadcaster) Remove(sink Sink) error {
return b.configure(b.removes, sink)
}
type configureRequest struct {
sink Sink
response chan error
}
func (b *Broadcaster) configure(ch chan configureRequest, sink Sink) error {
response := make(chan error, 1)
for {
select {
case ch <- configureRequest{
sink: sink,
response: response}:
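// Setting ch to nil disables this case on later iterations (sends on a
// nil channel block forever), so once the request has been accepted we
// only wait for the response or for the broadcaster to close.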
ch = nil
case err := <-response:
return err
case <-b.closed:
return ErrSinkClosed
}
}
}
// Close the broadcaster, ensuring that all messages are flushed to the
// underlying sink before returning.
func (b *Broadcaster) Close() error {
b.once.Do(func() {
close(b.shutdown)
})
<-b.closed
return nil
}
// run is the main broadcast loop, started when the broadcaster is created.
// Under normal conditions, it waits for events on the event channel. After
// Close is called, this goroutine will exit.
func (b *Broadcaster) run() {
defer close(b.closed)
remove := func(target Sink) {
for i, sink := range b.sinks {
if sink == target {
b.sinks = append(b.sinks[:i], b.sinks[i+1:]...)
break
}
}
}
for {
select {
case event := <-b.events:
for _, sink := range b.sinks {
if err := sink.Write(event); err != nil {
if err == ErrSinkClosed {
// remove closed sinks
remove(sink)
continue
}
logrus.WithField("event", event).WithField("events.sink", sink).WithError(err).
Errorf("broadcaster: dropping event")
}
}
case request := <-b.adds:
// Add/remove require a linear scan, but keeping sinks in a slice
// keeps the common send path fast.
var found bool
for _, sink := range b.sinks {
if request.sink == sink {
found = true
break
}
}
if !found {
b.sinks = append(b.sinks, request.sink)
}
request.response <- nil
case request := <-b.removes:
remove(request.sink)
request.response <- nil
case <-b.shutdown:
// close all the underlying sinks
for _, sink := range b.sinks {
if err := sink.Close(); err != nil && err != ErrSinkClosed {
logrus.WithField("events.sink", sink).WithError(err).
Errorf("broadcaster: closing sink failed")
}
}
return
}
}
}
func (b *Broadcaster) String() string {
// Serialize copy of this broadcaster without the sync.Once, to avoid
// a data race.
b2 := map[string]interface{}{
"sinks": b.sinks,
"events": b.events,
"adds": b.adds,
"removes": b.removes,
"shutdown": b.shutdown,
"closed": b.closed,
}
return fmt.Sprint(b2)
}

View File

@ -1,61 +0,0 @@
package events
import (
"fmt"
"sync"
)
// Channel provides a sink that can be listened on. The writer and channel
// listener must operate in separate goroutines.
//
// Consumers should listen on Channel.C until Closed is closed.
type Channel struct {
C chan Event
closed chan struct{}
once sync.Once
}
// NewChannel returns a channel. If buffer is zero, the channel is
// unbuffered.
func NewChannel(buffer int) *Channel {
return &Channel{
C: make(chan Event, buffer),
closed: make(chan struct{}),
}
}
// Done returns a channel that will always proceed once the sink is closed.
func (ch *Channel) Done() chan struct{} {
return ch.closed
}
// Write the event to the channel. Must be called in a separate goroutine from
// the listener.
func (ch *Channel) Write(event Event) error {
select {
case ch.C <- event:
return nil
case <-ch.closed:
return ErrSinkClosed
}
}
// Close the channel sink.
func (ch *Channel) Close() error {
ch.once.Do(func() {
close(ch.closed)
})
return nil
}
func (ch *Channel) String() string {
// Serialize a copy of the Channel that doesn't contain the sync.Once,
// to avoid a data race.
ch2 := map[string]interface{}{
"C": ch.C,
"closed": ch.closed,
}
return fmt.Sprint(ch2)
}

View File

@ -1,10 +0,0 @@
package events
import "fmt"
var (
// ErrSinkClosed is returned if a write is issued to a sink that has been
// closed. If encountered, the error should be considered terminal and
// retries will not be successful.
ErrSinkClosed = fmt.Errorf("events: sink closed")
)

View File

@ -1,15 +0,0 @@
package events
// Event marks items that can be sent as events.
type Event interface{}
// Sink accepts and sends events.
type Sink interface {
// Write an event to the Sink. If no error is returned, the caller will
// assume that all events have been committed to the sink. If an error is
// received, the caller may retry sending the event.
Write(event Event) error
// Close the sink, possibly waiting for pending events to flush.
Close() error
}

View File

@ -1,52 +0,0 @@
package events
// Matcher matches events.
type Matcher interface {
Match(event Event) bool
}
// MatcherFunc implements matcher with just a function.
type MatcherFunc func(event Event) bool
// Match calls the wrapped function.
func (fn MatcherFunc) Match(event Event) bool {
return fn(event)
}
// Filter provides an event sink that sends only events that are accepted by a
// Matcher. No methods on filter are goroutine safe.
type Filter struct {
dst Sink
matcher Matcher
closed bool
}
// NewFilter returns a new filter that sends to dst only the events for which
// the Matcher returns true.
func NewFilter(dst Sink, matcher Matcher) Sink {
return &Filter{dst: dst, matcher: matcher}
}
// Write an event to the filter.
func (f *Filter) Write(event Event) error {
if f.closed {
return ErrSinkClosed
}
if f.matcher.Match(event) {
return f.dst.Write(event)
}
return nil
}
// Close the filter and allow no more events to pass through.
func (f *Filter) Close() error {
// TODO(stevvooe): Not all sinks should have Close.
if f.closed {
return nil
}
f.closed = true
return f.dst.Close()
}

View File

@ -1,111 +0,0 @@
package events
import (
"container/list"
"sync"
"github.com/sirupsen/logrus"
)
// Queue accepts all messages into a queue for asynchronous consumption
// by a sink. It is unbounded and thread safe but the sink must be reliable or
// events will be dropped.
type Queue struct {
dst Sink
events *list.List
cond *sync.Cond
mu sync.Mutex
closed bool
}
// NewQueue returns a queue to the provided Sink dst.
func NewQueue(dst Sink) *Queue {
eq := Queue{
dst: dst,
events: list.New(),
}
eq.cond = sync.NewCond(&eq.mu)
go eq.run()
return &eq
}
// Write accepts the events into the queue, only failing if the queue has
// been closed.
func (eq *Queue) Write(event Event) error {
eq.mu.Lock()
defer eq.mu.Unlock()
if eq.closed {
return ErrSinkClosed
}
eq.events.PushBack(event)
eq.cond.Signal() // signal waiters
return nil
}
// Close shuts down the event queue, flushing any pending events.
func (eq *Queue) Close() error {
eq.mu.Lock()
defer eq.mu.Unlock()
if eq.closed {
return nil
}
// set closed flag
eq.closed = true
eq.cond.Signal() // signal flushes queue
eq.cond.Wait() // wait for signal from last flush
return eq.dst.Close()
}
// run is the main goroutine to flush events to the target sink.
func (eq *Queue) run() {
for {
event := eq.next()
if event == nil {
return // nil means the event queue is closed.
}
if err := eq.dst.Write(event); err != nil {
// TODO(aaronl): Dropping events could be bad depending
// on the application. We should have a way of
// communicating this condition. However, logging
// at a log level above debug may not be appropriate.
// Eventually, go-events should not use logrus at all,
// and should bubble up conditions like this through
// error values.
logrus.WithFields(logrus.Fields{
"event": event,
"sink": eq.dst,
}).WithError(err).Debug("eventqueue: dropped event")
}
}
}
// next encompasses the critical section of the run loop. When the queue is
// empty, it will block on the condition. If new data arrives, it will wake
// and return an event. When closed, nil is returned.
func (eq *Queue) next() Event {
eq.mu.Lock()
defer eq.mu.Unlock()
for eq.events.Len() < 1 {
if eq.closed {
eq.cond.Broadcast()
return nil
}
eq.cond.Wait()
}
front := eq.events.Front()
block := front.Value.(Event)
eq.events.Remove(front)
return block
}

View File

@ -1,260 +0,0 @@
package events
import (
"fmt"
"math/rand"
"sync"
"sync/atomic"
"time"
"github.com/sirupsen/logrus"
)
// RetryingSink retries the write until success or an ErrSinkClosed is
// returned. The underlying sink must have a nonzero probability of
// succeeding or the sink will block. Retry is configured with a
// RetryStrategy. Concurrent calls to a
// retrying sink are serialized through the sink, meaning that if one is
// in-flight, another will not proceed.
type RetryingSink struct {
sink Sink
strategy RetryStrategy
closed chan struct{}
once sync.Once
}
// NewRetryingSink returns a sink that will retry writes to a sink, backing
// off on failure. Parameters threshold and backoff adjust the behavior of the
// circuit breaker.
func NewRetryingSink(sink Sink, strategy RetryStrategy) *RetryingSink {
rs := &RetryingSink{
sink: sink,
strategy: strategy,
closed: make(chan struct{}),
}
return rs
}
// Write attempts to flush the events to the downstream sink until it succeeds
// or the sink is closed.
func (rs *RetryingSink) Write(event Event) error {
logger := logrus.WithField("event", event)
retry:
select {
case <-rs.closed:
return ErrSinkClosed
default:
}
if backoff := rs.strategy.Proceed(event); backoff > 0 {
select {
case <-time.After(backoff):
// TODO(stevvooe): This branch holds up the next try. Before, we
// would simply break to the "retry" label and then possibly wait
// again. However, this requires all retry strategies to have a
// large probability of probing the sink for success, rather than
// just backing off and sending the request.
case <-rs.closed:
return ErrSinkClosed
}
}
if err := rs.sink.Write(event); err != nil {
if err == ErrSinkClosed {
// terminal!
return err
}
logger := logger.WithError(err) // shadow!!
if rs.strategy.Failure(event, err) {
logger.Errorf("retryingsink: dropped event")
return nil
}
logger.Errorf("retryingsink: error writing event, retrying")
goto retry
}
rs.strategy.Success(event)
return nil
}
// Close closes the sink and the underlying sink.
func (rs *RetryingSink) Close() error {
rs.once.Do(func() {
close(rs.closed)
})
return nil
}
func (rs *RetryingSink) String() string {
// Serialize a copy of the RetryingSink without the sync.Once, to avoid
// a data race.
rs2 := map[string]interface{}{
"sink": rs.sink,
"strategy": rs.strategy,
"closed": rs.closed,
}
return fmt.Sprint(rs2)
}
// RetryStrategy defines a strategy for retrying event sink writes.
//
// All methods should be goroutine safe.
type RetryStrategy interface {
// Proceed is called before every event send. If proceed returns a
// positive, non-zero duration, the retryer will back off by the provided
// duration.
//
// An event is provided, but may be ignored.
Proceed(event Event) time.Duration
// Failure reports a failure to the strategy. If this method returns true,
// the event should be dropped.
Failure(event Event, err error) bool
// Success should be called when an event is sent successfully.
Success(event Event)
}
// Breaker implements a circuit breaker retry strategy.
//
// The current implementation never drops events.
type Breaker struct {
threshold int
recent int
last time.Time
backoff time.Duration // time after which we retry after failure.
mu sync.Mutex
}
var _ RetryStrategy = &Breaker{}
// NewBreaker returns a breaker that will back off after the threshold has been
// tripped. A Breaker is thread safe and may be shared by many goroutines.
func NewBreaker(threshold int, backoff time.Duration) *Breaker {
return &Breaker{
threshold: threshold,
backoff: backoff,
}
}
// Proceed checks the failures against the threshold.
func (b *Breaker) Proceed(event Event) time.Duration {
b.mu.Lock()
defer b.mu.Unlock()
if b.recent < b.threshold {
return 0
}
return time.Until(b.last.Add(b.backoff))
}
// Success resets the breaker.
func (b *Breaker) Success(event Event) {
b.mu.Lock()
defer b.mu.Unlock()
b.recent = 0
b.last = time.Time{}
}
// Failure records the failure and latest failure time.
func (b *Breaker) Failure(event Event, err error) bool {
b.mu.Lock()
defer b.mu.Unlock()
b.recent++
b.last = time.Now().UTC()
return false // never drop events.
}
var (
// DefaultExponentialBackoffConfig provides a default configuration for
// exponential backoff.
DefaultExponentialBackoffConfig = ExponentialBackoffConfig{
Base: time.Second,
Factor: time.Second,
Max: 20 * time.Second,
}
)
// ExponentialBackoffConfig configures backoff parameters.
//
// Note that these parameters operate on the upper bound for choosing a random
// value. For example, at Base=1s, a random value in [0,1s) will be chosen for
// the backoff value.
type ExponentialBackoffConfig struct {
// Base is the minimum bound for backing off after failure.
Base time.Duration
// Factor sets the amount of time by which the backoff grows with each
// failure.
Factor time.Duration
// Max is the absolute maximum bound for a single backoff.
Max time.Duration
}
// ExponentialBackoff implements random backoff with exponentially increasing
// bounds as the number of consecutive failures increases.
type ExponentialBackoff struct {
config ExponentialBackoffConfig
failures uint64 // consecutive failure counter.
}
// NewExponentialBackoff returns an exponential backoff strategy with the
// desired config. If config is nil, the default is returned.
func NewExponentialBackoff(config ExponentialBackoffConfig) *ExponentialBackoff {
return &ExponentialBackoff{
config: config,
}
}
// Proceed returns the next randomly bound exponential backoff time.
func (b *ExponentialBackoff) Proceed(event Event) time.Duration {
return b.backoff(atomic.LoadUint64(&b.failures))
}
// Success resets the failures counter.
func (b *ExponentialBackoff) Success(event Event) {
atomic.StoreUint64(&b.failures, 0)
}
// Failure increments the failure counter.
func (b *ExponentialBackoff) Failure(event Event, err error) bool {
atomic.AddUint64(&b.failures, 1)
return false
}
// backoff calculates the amount of time to wait based on the number of
// consecutive failures.
func (b *ExponentialBackoff) backoff(failures uint64) time.Duration {
if failures <= 0 {
// proceed normally when there are no failures.
return 0
}
factor := b.config.Factor
if factor <= 0 {
factor = DefaultExponentialBackoffConfig.Factor
}
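// For example, with the defaults (Base=1s, Factor=1s, Max=20s) the bound
// grows 2s, 3s, 5s, 9s, 17s, then stays capped at 20s for further failures.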
backoff := b.config.Base + factor*time.Duration(1<<(failures-1))
max := b.config.Max
if max <= 0 {
max = DefaultExponentialBackoffConfig.Max
}
if backoff > max || backoff < 0 {
backoff = max
}
// Choose a uniformly distributed value from [0, backoff).
return time.Duration(rand.Int63n(int64(backoff)))
}

View File

@ -68,9 +68,21 @@ If you need to use a unit but it is not defined in the package please open a PR
Package documentation can be found [here](https://godoc.org/github.com/docker/go-metrics).
## HTTP Metrics
To instrument an HTTP handler, you can wrap the code like this:
```go
namespace := metrics.NewNamespace("docker_distribution", "http", metrics.Labels{"handler": "your_http_handler_name"})
httpMetrics := namespace.NewDefaultHttpMetrics("your_http_handler_name")
metrics.Register(namespace)
instrumentedHandler := metrics.InstrumentHandler(httpMetrics, unInstrumentedHandler)
```
Note: The `handler` label must be provided when a new namespace is created.
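A short sketch of wiring things up (the mux paths, the address, and
`unInstrumentedHandler` are placeholders; `metrics.Handler()` serves the
Prometheus text format):

```go
mux := http.NewServeMux()
mux.Handle("/your/endpoint", instrumentedHandler) // the handler wrapped above
mux.Handle("/metrics", metrics.Handler())         // Prometheus scrape endpoint
log.Fatal(http.ListenAndServe(":8080", mux))
```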
## Additional Metrics
Additional metrics are also defined here that are not avaliable in the prometheus client.
Additional metrics are also defined here that are not available in the prometheus client.
If you need a custom metric and it is generic enough to be used by multiple projects, define it here.

View File

@ -4,10 +4,71 @@ import (
"net/http"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
// HTTPHandlerOpts describes a set of configurable options of http metrics
type HTTPHandlerOpts struct {
DurationBuckets []float64
RequestSizeBuckets []float64
ResponseSizeBuckets []float64
}
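// Instrumentation kinds recorded in HTTPMetric.handlerType; InstrumentHandler
// uses them to pick the matching promhttp middleware.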
const (
InstrumentHandlerResponseSize = iota
InstrumentHandlerRequestSize
InstrumentHandlerDuration
InstrumentHandlerCounter
InstrumentHandlerInFlight
)
type HTTPMetric struct {
prometheus.Collector
handlerType int
}
var (
defaultDurationBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 25, 60}
defaultRequestSizeBuckets = prometheus.ExponentialBuckets(1024, 2, 22) //1K to 4G
defaultResponseSizeBuckets = defaultRequestSizeBuckets
)
// Handler returns the global http.Handler that provides the prometheus
// metrics format on GET requests
// metrics format on GET requests. This handler is no longer instrumented.
func Handler() http.Handler {
return prometheus.Handler()
return promhttp.Handler()
}
func InstrumentHandler(metrics []*HTTPMetric, handler http.Handler) http.HandlerFunc {
return InstrumentHandlerFunc(metrics, handler.ServeHTTP)
}
func InstrumentHandlerFunc(metrics []*HTTPMetric, handlerFunc http.HandlerFunc) http.HandlerFunc {
var handler http.Handler
handler = http.HandlerFunc(handlerFunc)
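// Chain one promhttp middleware around the handler per metric, selected
// by the handlerType recorded when the metric was constructed; collectors
// of an unexpected type are skipped.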
for _, metric := range metrics {
switch metric.handlerType {
case InstrumentHandlerResponseSize:
if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
handler = promhttp.InstrumentHandlerResponseSize(collector, handler)
}
case InstrumentHandlerRequestSize:
if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
handler = promhttp.InstrumentHandlerRequestSize(collector, handler)
}
case InstrumentHandlerDuration:
if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
handler = promhttp.InstrumentHandlerDuration(collector, handler)
}
case InstrumentHandlerCounter:
if collector, ok := metric.Collector.(*prometheus.CounterVec); ok {
handler = promhttp.InstrumentHandlerCounter(collector, handler)
}
case InstrumentHandlerInFlight:
if collector, ok := metric.Collector.(prometheus.Gauge); ok {
handler = promhttp.InstrumentHandlerInFlight(collector, handler)
}
}
}
return handler.ServeHTTP
}

View File

@ -179,3 +179,137 @@ func makeName(name string, unit Unit) string {
return fmt.Sprintf("%s_%s", name, unit)
}
func (n *Namespace) NewDefaultHttpMetrics(handlerName string) []*HTTPMetric {
return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{
DurationBuckets: defaultDurationBuckets,
RequestSizeBuckets: defaultRequestSizeBuckets,
ResponseSizeBuckets: defaultResponseSizeBuckets,
})
}
func (n *Namespace) NewHttpMetrics(handlerName string, durationBuckets, requestSizeBuckets, responseSizeBuckets []float64) []*HTTPMetric {
return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{
DurationBuckets: durationBuckets,
RequestSizeBuckets: requestSizeBuckets,
ResponseSizeBuckets: responseSizeBuckets,
})
}
func (n *Namespace) NewHttpMetricsWithOpts(handlerName string, opts HTTPHandlerOpts) []*HTTPMetric {
var httpMetrics []*HTTPMetric
inFlightMetric := n.NewInFlightGaugeMetric(handlerName)
requestTotalMetric := n.NewRequestTotalMetric(handlerName)
requestDurationMetric := n.NewRequestDurationMetric(handlerName, opts.DurationBuckets)
requestSizeMetric := n.NewRequestSizeMetric(handlerName, opts.RequestSizeBuckets)
responseSizeMetric := n.NewResponseSizeMetric(handlerName, opts.ResponseSizeBuckets)
httpMetrics = append(httpMetrics, inFlightMetric, requestDurationMetric, requestTotalMetric, requestSizeMetric, responseSizeMetric)
return httpMetrics
}
func (n *Namespace) NewInFlightGaugeMetric(handlerName string) *HTTPMetric {
labels := prometheus.Labels(n.labels)
labels["handler"] = handlerName
metric := prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: n.name,
Subsystem: n.subsystem,
Name: "in_flight_requests",
Help: "The in-flight HTTP requests",
ConstLabels: prometheus.Labels(labels),
})
httpMetric := &HTTPMetric{
Collector: metric,
handlerType: InstrumentHandlerInFlight,
}
n.Add(httpMetric)
return httpMetric
}
func (n *Namespace) NewRequestTotalMetric(handlerName string) *HTTPMetric {
labels := prometheus.Labels(n.labels)
labels["handler"] = handlerName
metric := prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: n.name,
Subsystem: n.subsystem,
Name: "requests_total",
Help: "Total number of HTTP requests made.",
ConstLabels: prometheus.Labels(labels),
},
[]string{"code", "method"},
)
httpMetric := &HTTPMetric{
Collector: metric,
handlerType: InstrumentHandlerCounter,
}
n.Add(httpMetric)
return httpMetric
}
func (n *Namespace) NewRequestDurationMetric(handlerName string, buckets []float64) *HTTPMetric {
if len(buckets) == 0 {
panic("DurationBuckets must be provided")
}
labels := prometheus.Labels(n.labels)
labels["handler"] = handlerName
opts := prometheus.HistogramOpts{
Namespace: n.name,
Subsystem: n.subsystem,
Name: "request_duration_seconds",
Help: "The HTTP request latencies in seconds.",
Buckets: buckets,
ConstLabels: prometheus.Labels(labels),
}
metric := prometheus.NewHistogramVec(opts, []string{"method"})
httpMetric := &HTTPMetric{
Collector: metric,
handlerType: InstrumentHandlerDuration,
}
n.Add(httpMetric)
return httpMetric
}
func (n *Namespace) NewRequestSizeMetric(handlerName string, buckets []float64) *HTTPMetric {
if len(buckets) == 0 {
panic("RequestSizeBuckets must be provided")
}
labels := prometheus.Labels(n.labels)
labels["handler"] = handlerName
opts := prometheus.HistogramOpts{
Namespace: n.name,
Subsystem: n.subsystem,
Name: "request_size_bytes",
Help: "The HTTP request sizes in bytes.",
Buckets: buckets,
ConstLabels: prometheus.Labels(labels),
}
metric := prometheus.NewHistogramVec(opts, []string{})
httpMetric := &HTTPMetric{
Collector: metric,
handlerType: InstrumentHandlerRequestSize,
}
n.Add(httpMetric)
return httpMetric
}
func (n *Namespace) NewResponseSizeMetric(handlerName string, buckets []float64) *HTTPMetric {
if len(buckets) == 0 {
panic("ResponseSizeBuckets must be provided")
}
labels := prometheus.Labels(n.labels)
labels["handler"] = handlerName
opts := prometheus.HistogramOpts{
Namespace: n.name,
Subsystem: n.subsystem,
Name: "response_size_bytes",
Help: "The HTTP response sizes in bytes.",
Buckets: buckets,
ConstLabels: prometheus.Labels(labels),
}
metrics := prometheus.NewHistogramVec(opts, []string{})
httpMetric := &HTTPMetric{
Collector: metrics,
handlerType: InstrumentHandlerResponseSize,
}
n.Add(httpMetric)
return httpMetric
}

View File

@ -28,15 +28,27 @@ type Timer interface {
// LabeledTimer is a timer that must have label values populated before use.
type LabeledTimer interface {
WithValues(labels ...string) Timer
WithValues(labels ...string) *labeledTimerObserver
}
type labeledTimer struct {
m *prometheus.HistogramVec
}
func (lt *labeledTimer) WithValues(labels ...string) Timer {
return &timer{m: lt.m.WithLabelValues(labels...)}
type labeledTimerObserver struct {
m prometheus.Observer
}
func (lbo *labeledTimerObserver) Update(duration time.Duration) {
lbo.m.Observe(duration.Seconds())
}
func (lbo *labeledTimerObserver) UpdateSince(since time.Time) {
lbo.m.Observe(time.Since(since).Seconds())
}
func (lt *labeledTimer) WithValues(labels ...string) *labeledTimerObserver {
return &labeledTimerObserver{m: lt.m.WithLabelValues(labels...)}
}
func (lt *labeledTimer) Describe(c chan<- *prometheus.Desc) {
@ -48,7 +60,7 @@ func (lt *labeledTimer) Collect(c chan<- prometheus.Metric) {
}
type timer struct {
m prometheus.Histogram
m prometheus.Observer
}
func (t *timer) Update(duration time.Duration) {
@ -60,9 +72,14 @@ func (t *timer) UpdateSince(since time.Time) {
}
func (t *timer) Describe(c chan<- *prometheus.Desc) {
t.m.Describe(c)
c <- t.m.(prometheus.Metric).Desc()
}
func (t *timer) Collect(c chan<- prometheus.Metric) {
t.m.Collect(c)
// Are there any observers that don't implement Collector? It is really
// unclear what the point of the upstream change was, but we'll let this
// panic if we get an observer that doesn't implement collector. In this
// case, we should almost always see metricVec objects, so this should
// never panic.
t.m.(prometheus.Collector).Collect(c)
}