diff --git a/commands/history/inspect.go b/commands/history/inspect.go
index b79eb401..0dbfe21e 100644
--- a/commands/history/inspect.go
+++ b/commands/history/inspect.go
@@ -13,6 +13,7 @@ import (
     "strconv"
     "strings"
     "text/tabwriter"
+    "text/template"
     "time"
 
     "github.com/containerd/containerd/v2/core/content"
@@ -25,6 +26,7 @@ import (
     "github.com/docker/buildx/util/confutil"
     "github.com/docker/buildx/util/desktop"
     "github.com/docker/cli/cli/command"
+    "github.com/docker/cli/cli/command/formatter"
    "github.com/docker/cli/cli/debug"
     slsa "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common"
     slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2"
@@ -45,9 +47,114 @@ import (
     proto "google.golang.org/protobuf/proto"
 )
 
+type statusT string
+
+const (
+    statusComplete statusT = "completed"
+    statusRunning  statusT = "running"
+    statusError    statusT = "failed"
+    statusCanceled statusT = "canceled"
+)
+
 type inspectOptions struct {
     builder string
     ref     string
+    format  string
+}
+
+type inspectOutput struct {
+    Name string `json:",omitempty"`
+    Ref  string
+
+    Context       string   `json:",omitempty"`
+    Dockerfile    string   `json:",omitempty"`
+    VCSRepository string   `json:",omitempty"`
+    VCSRevision   string   `json:",omitempty"`
+    Target        string   `json:",omitempty"`
+    Platform      []string `json:",omitempty"`
+    KeepGitDir    bool     `json:",omitempty"`
+
+    NamedContexts []keyValueOutput `json:",omitempty"`
+
+    StartedAt   *time.Time    `json:",omitempty"`
+    CompletedAt *time.Time    `json:",omitempty"`
+    Duration    time.Duration `json:",omitempty"`
+    Status      statusT       `json:",omitempty"`
+    Error       *errorOutput  `json:",omitempty"`
+
+    NumCompletedSteps int32
+    NumTotalSteps     int32
+    NumCachedSteps    int32
+
+    BuildArgs []keyValueOutput `json:",omitempty"`
+    Labels    []keyValueOutput `json:",omitempty"`
+
+    Config configOutput `json:",omitempty"`
+
+    Materials   []materialOutput   `json:",omitempty"`
+    Attachments []attachmentOutput `json:",omitempty"`
+
+    Errors []string `json:",omitempty"`
+}
+
+type configOutput struct {
+    Network          string   `json:",omitempty"`
+    ExtraHosts       []string `json:",omitempty"`
+    Hostname         string   `json:",omitempty"`
+    CgroupParent     string   `json:",omitempty"`
+    ImageResolveMode string   `json:",omitempty"`
+    MultiPlatform    bool     `json:",omitempty"`
+    NoCache          bool     `json:",omitempty"`
+    NoCacheFilter    []string `json:",omitempty"`
+
+    ShmSize               string `json:",omitempty"`
+    Ulimit                string `json:",omitempty"`
+    CacheMountNS          string `json:",omitempty"`
+    DockerfileCheckConfig string `json:",omitempty"`
+    SourceDateEpoch       string `json:",omitempty"`
+    SandboxHostname       string `json:",omitempty"`
+
+    RestRaw []keyValueOutput `json:",omitempty"`
+}
+
+type materialOutput struct {
+    URI     string   `json:",omitempty"`
+    Digests []string `json:",omitempty"`
+}
+
+type attachmentOutput struct {
+    Digest   string `json:",omitempty"`
+    Platform string `json:",omitempty"`
+    Type     string `json:",omitempty"`
+}
+
+type errorOutput struct {
+    Code    int      `json:",omitempty"`
+    Message string   `json:",omitempty"`
+    Name    string   `json:",omitempty"`
+    Logs    []string `json:",omitempty"`
+    Sources []byte   `json:",omitempty"`
+    Stack   []byte   `json:",omitempty"`
+}
+
+type keyValueOutput struct {
+    Name  string `json:",omitempty"`
+    Value string `json:",omitempty"`
+}
+
+func readAttr[T any](attrs map[string]string, k string, dest *T, f func(v string) (T, bool)) {
+    if sv, ok := attrs[k]; ok {
+        if f != nil {
+            v, ok := f(sv)
+            if ok {
+                *dest = v
+            }
+        }
+        if d, ok := any(dest).(*string); ok {
+            *d = sv
+        }
+    }
+    delete(attrs, k)
 }
 
 func runInspect(ctx context.Context, dockerCli command.Cli, opts inspectOptions) error {
@@ -86,28 +193,36 @@ func runInspect(ctx context.Context, dockerCli command.Cli, opts inspectOptions) error {
 
     rec := &recs[0]
 
+    c, err := rec.node.Driver.Client(ctx)
+    if err != nil {
+        return err
+    }
+
+    store := proxy.NewContentStore(c.ContentClient())
+
+    var defaultPlatform string
+    workers, err := c.ListWorkers(ctx)
+    if err != nil {
+        return errors.Wrap(err, "failed to list workers")
+    }
+workers0:
+    for _, w := range workers {
+        for _, p := range w.Platforms {
+            defaultPlatform = platforms.FormatAll(platforms.Normalize(p))
+            break workers0
+        }
+    }
+
     ls, err := localstate.New(confutil.NewConfig(dockerCli))
     if err != nil {
         return err
     }
     st, _ := ls.ReadRef(rec.node.Builder, rec.node.Name, rec.Ref)
 
-    tw := tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)
-
     attrs := rec.FrontendAttrs
     delete(attrs, "frontend.caps")
 
-    writeAttr := func(k, name string, f func(v string) (string, bool)) {
-        if v, ok := attrs[k]; ok {
-            if f != nil {
-                v, ok = f(v)
-            }
-            if ok {
-                fmt.Fprintf(tw, "%s:\t%s\n", name, v)
-            }
-        }
-        delete(attrs, k)
-    }
+    var out inspectOutput
 
     var context string
     var dockerfile string
@@ -146,131 +261,171 @@ func runInspect(ctx context.Context, dockerCli command.Cli, opts inspectOptions)
     }
     delete(attrs, "filename")
 
-    if context != "" {
-        fmt.Fprintf(tw, "Context:\t%s\n", context)
-    }
-    if dockerfile != "" {
-        fmt.Fprintf(tw, "Dockerfile:\t%s\n", dockerfile)
-    }
+    out.Name = buildName(rec.FrontendAttrs, st)
+    out.Ref = rec.Ref
+
+    out.Context = context
+    out.Dockerfile = dockerfile
+
     if _, ok := attrs["context"]; !ok {
         if src, ok := attrs["vcs:source"]; ok {
-            fmt.Fprintf(tw, "VCS Repository:\t%s\n", src)
+            out.VCSRepository = src
         }
         if rev, ok := attrs["vcs:revision"]; ok {
-            fmt.Fprintf(tw, "VCS Revision:\t%s\n", rev)
+            out.VCSRevision = rev
         }
     }
 
-    writeAttr("target", "Target", nil)
-    writeAttr("platform", "Platform", func(v string) (string, bool) {
-        return tryParseValue(v, func(v string) (string, error) {
+    readAttr(attrs, "target", &out.Target, nil)
+
+    readAttr(attrs, "platform", &out.Platform, func(v string) ([]string, bool) {
+        return tryParseValue(v, &out.Errors, func(v string) ([]string, error) {
             var pp []string
             for _, v := range strings.Split(v, ",") {
                 p, err := platforms.Parse(v)
                 if err != nil {
-                    return "", err
+                    return nil, err
                 }
                 pp = append(pp, platforms.FormatAll(platforms.Normalize(p)))
             }
-            return strings.Join(pp, ", "), nil
-        }), true
-    })
-    writeAttr("build-arg:BUILDKIT_CONTEXT_KEEP_GIT_DIR", "Keep Git Dir", func(v string) (string, bool) {
-        return tryParseValue(v, func(v string) (string, error) {
-            b, err := strconv.ParseBool(v)
-            if err != nil {
-                return "", err
+            if len(pp) == 0 {
+                pp = append(pp, defaultPlatform)
             }
-            return strconv.FormatBool(b), nil
-        }), true
+            return pp, nil
+        })
     })
 
-    tw.Flush()
+    readAttr(attrs, "build-arg:BUILDKIT_CONTEXT_KEEP_GIT_DIR", &out.KeepGitDir, func(v string) (bool, bool) {
+        return tryParseValue(v, &out.Errors, strconv.ParseBool)
+    })
 
-    fmt.Fprintln(dockerCli.Out())
+    out.NamedContexts = readKeyValues(attrs, "context:")
 
-    printTable(dockerCli.Out(), attrs, "context:", "Named Context")
-
-    tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)
-
-    fmt.Fprintf(tw, "Started:\t%s\n", rec.CreatedAt.AsTime().Local().Format("2006-01-02 15:04:05"))
-    var duration time.Duration
-    var statusStr string
-    if rec.CompletedAt != nil {
-        duration = rec.CompletedAt.AsTime().Sub(rec.CreatedAt.AsTime())
-    } else {
-        duration = rec.currentTimestamp.Sub(rec.CreatedAt.AsTime())
statusStr = " (running)" + if rec.CreatedAt != nil { + tm := rec.CreatedAt.AsTime().Local() + out.StartedAt = &tm } - fmt.Fprintf(tw, "Duration:\t%s%s\n", formatDuration(duration), statusStr) - if rec.Error != nil { - if codes.Code(rec.Error.Code) == codes.Canceled { - fmt.Fprintf(tw, "Status:\tCanceled\n") - } else { - fmt.Fprintf(tw, "Error:\t%s %s\n", codes.Code(rec.Error.Code).String(), rec.Error.Message) + out.Status = statusRunning + + if rec.CompletedAt != nil { + tm := rec.CompletedAt.AsTime().Local() + out.CompletedAt = &tm + out.Status = statusComplete + } + + if rec.Error != nil || rec.ExternalError != nil { + out.Error = &errorOutput{} + if rec.Error != nil { + if codes.Code(rec.Error.Code) == codes.Canceled { + out.Status = statusCanceled + } else { + out.Status = statusError + } + out.Error.Code = int(codes.Code(rec.Error.Code)) + out.Error.Message = rec.Error.Message + } + if rec.ExternalError != nil { + dt, err := content.ReadBlob(ctx, store, ociDesc(rec.ExternalError)) + if err != nil { + return errors.Wrapf(err, "failed to read external error %s", rec.ExternalError.Digest) + } + var st spb.Status + if err := proto.Unmarshal(dt, &st); err != nil { + return errors.Wrapf(err, "failed to unmarshal external error %s", rec.ExternalError.Digest) + } + retErr := grpcerrors.FromGRPC(status.ErrorProto(&st)) + var errsources bytes.Buffer + for _, s := range errdefs.Sources(retErr) { + s.Print(&errsources) + errsources.WriteString("\n") + } + out.Error.Sources = errsources.Bytes() + var ve *errdefs.VertexError + if errors.As(retErr, &ve) { + dgst, err := digest.Parse(ve.Vertex.Digest) + if err != nil { + return errors.Wrapf(err, "failed to parse vertex digest %s", ve.Vertex.Digest) + } + name, logs, err := loadVertexLogs(ctx, c, rec.Ref, dgst, 16) + if err != nil { + return errors.Wrapf(err, "failed to load vertex logs %s", dgst) + } + out.Error.Name = name + out.Error.Logs = logs + } + out.Error.Stack = []byte(fmt.Sprintf("%+v", stack.Formatter(retErr))) } } - fmt.Fprintf(tw, "Build Steps:\t%d/%d (%.0f%% cached)\n", rec.NumCompletedSteps, rec.NumTotalSteps, float64(rec.NumCachedSteps)/float64(rec.NumTotalSteps)*100) - tw.Flush() - fmt.Fprintln(dockerCli.Out()) + if out.StartedAt != nil { + if out.CompletedAt != nil { + out.Duration = out.CompletedAt.Sub(*out.StartedAt) + } else { + out.Duration = rec.currentTimestamp.Sub(*out.StartedAt) + } + } - tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0) + out.NumCompletedSteps = rec.NumCompletedSteps + out.NumTotalSteps = rec.NumTotalSteps + out.NumCachedSteps = rec.NumCachedSteps - writeAttr("force-network-mode", "Network", nil) - writeAttr("hostname", "Hostname", nil) - writeAttr("add-hosts", "Extra Hosts", func(v string) (string, bool) { - return tryParseValue(v, func(v string) (string, error) { + out.BuildArgs = readKeyValues(attrs, "build-arg:") + out.Labels = readKeyValues(attrs, "label:") + + readAttr(attrs, "force-network-mode", &out.Config.Network, nil) + readAttr(attrs, "hostname", &out.Config.Hostname, nil) + readAttr(attrs, "cgroup-parent", &out.Config.CgroupParent, nil) + readAttr(attrs, "image-resolve-mode", &out.Config.ImageResolveMode, nil) + readAttr(attrs, "build-arg:BUILDKIT_MULTI_PLATFORM", &out.Config.MultiPlatform, func(v string) (bool, bool) { + return tryParseValue(v, &out.Errors, strconv.ParseBool) + }) + readAttr(attrs, "multi-platform", &out.Config.MultiPlatform, func(v string) (bool, bool) { + return tryParseValue(v, &out.Errors, strconv.ParseBool) + }) + readAttr(attrs, "no-cache", &out.Config.NoCache, 
+        if v == "" {
+            return true, true
+        }
+        return false, false
+    })
+    readAttr(attrs, "no-cache", &out.Config.NoCacheFilter, func(v string) ([]string, bool) {
+        if v == "" {
+            return nil, false
+        }
+        return strings.Split(v, ","), true
+    })
+
+    readAttr(attrs, "add-hosts", &out.Config.ExtraHosts, func(v string) ([]string, bool) {
+        return tryParseValue(v, &out.Errors, func(v string) ([]string, error) {
             fields, err := csvvalue.Fields(v, nil)
             if err != nil {
-                return "", err
+                return nil, err
             }
-            return strings.Join(fields, ", "), nil
-        }), true
+            return fields, nil
+        })
     })
-    writeAttr("cgroup-parent", "Cgroup Parent", nil)
-    writeAttr("image-resolve-mode", "Image Resolve Mode", nil)
-    writeAttr("multi-platform", "Force Multi-Platform", nil)
-    writeAttr("build-arg:BUILDKIT_MULTI_PLATFORM", "Force Multi-Platform", nil)
-    writeAttr("no-cache", "Disable Cache", func(v string) (string, bool) {
-        if v == "" {
-            return "true", true
-        }
-        return v, true
-    })
-    writeAttr("shm-size", "Shm Size", nil)
-    writeAttr("ulimit", "Resource Limits", nil)
-    writeAttr("build-arg:BUILDKIT_CACHE_MOUNT_NS", "Cache Mount Namespace", nil)
-    writeAttr("build-arg:BUILDKIT_DOCKERFILE_CHECK", "Dockerfile Check Config", nil)
-    writeAttr("build-arg:SOURCE_DATE_EPOCH", "Source Date Epoch", nil)
-    writeAttr("build-arg:SANDBOX_HOSTNAME", "Sandbox Hostname", nil)
-    var unusedAttrs []string
+    readAttr(attrs, "shm-size", &out.Config.ShmSize, nil)
+    readAttr(attrs, "ulimit", &out.Config.Ulimit, nil)
+    readAttr(attrs, "build-arg:BUILDKIT_CACHE_MOUNT_NS", &out.Config.CacheMountNS, nil)
+    readAttr(attrs, "build-arg:BUILDKIT_DOCKERFILE_CHECK", &out.Config.DockerfileCheckConfig, nil)
+    readAttr(attrs, "build-arg:SOURCE_DATE_EPOCH", &out.Config.SourceDateEpoch, nil)
+    readAttr(attrs, "build-arg:SANDBOX_HOSTNAME", &out.Config.SandboxHostname, nil)
+
+    var unusedAttrs []keyValueOutput
     for k := range attrs {
         if strings.HasPrefix(k, "vcs:") || strings.HasPrefix(k, "build-arg:") || strings.HasPrefix(k, "label:") || strings.HasPrefix(k, "context:") || strings.HasPrefix(k, "attest:") {
             continue
         }
-        unusedAttrs = append(unusedAttrs, k)
+        unusedAttrs = append(unusedAttrs, keyValueOutput{
+            Name:  k,
+            Value: attrs[k],
+        })
     }
-    slices.Sort(unusedAttrs)
-
-    for _, k := range unusedAttrs {
-        fmt.Fprintf(tw, "%s:\t%s\n", k, attrs[k])
-    }
-
-    tw.Flush()
-
-    fmt.Fprintln(dockerCli.Out())
-
-    printTable(dockerCli.Out(), attrs, "build-arg:", "Build Arg")
-    printTable(dockerCli.Out(), attrs, "label:", "Label")
-
-    c, err := rec.node.Driver.Client(ctx)
-    if err != nil {
-        return err
-    }
-
-    store := proxy.NewContentStore(c.ContentClient())
+    slices.SortFunc(unusedAttrs, func(a, b keyValueOutput) int {
+        return cmp.Compare(a.Name, b.Name)
+    })
+    out.Config.RestRaw = unusedAttrs
 
     attachments, err := allAttachments(ctx, store, *rec)
     if err != nil {
@@ -282,81 +437,209 @@ func runInspect(ctx context.Context, dockerCli command.Cli, opts inspectOptions)
     })
     if provIndex != -1 {
         prov := attachments[provIndex]
-
         dt, err := content.ReadBlob(ctx, store, prov.descr)
         if err != nil {
             return errors.Errorf("failed to read provenance %s: %v", prov.descr.Digest, err)
         }
-
         var pred provenancetypes.ProvenancePredicate
         if err := json.Unmarshal(dt, &pred); err != nil {
             return errors.Errorf("failed to unmarshal provenance %s: %v", prov.descr.Digest, err)
         }
-
-        fmt.Fprintln(dockerCli.Out(), "Materials:")
-        tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)
-        fmt.Fprintf(tw, "URI\tDIGEST\n")
         for _, m := range pred.Materials {
-            fmt.Fprintf(tw, "%s\t%s\n", m.URI, strings.Join(digestSetToDigests(m.Digest), ", "))
"%s\t%s\n", m.URI, strings.Join(digestSetToDigests(m.Digest), ", ")) + out.Materials = append(out.Materials, materialOutput{ + URI: m.URI, + Digests: digestSetToDigests(m.Digest), + }) } - tw.Flush() - fmt.Fprintln(dockerCli.Out()) } if len(attachments) > 0 { - fmt.Fprintf(tw, "Attachments:\n") - tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0) - fmt.Fprintf(tw, "DIGEST\tPLATFORM\tTYPE\n") for _, a := range attachments { p := "" if a.platform != nil { p = platforms.FormatAll(*a.platform) } - fmt.Fprintf(tw, "%s\t%s\t%s\n", a.descr.Digest, p, descrType(a.descr)) + out.Attachments = append(out.Attachments, attachmentOutput{ + Digest: a.descr.Digest.String(), + Platform: p, + Type: descrType(a.descr), + }) + } + } + + if opts.format == formatter.JSONFormatKey { + enc := json.NewEncoder(dockerCli.Out()) + enc.SetIndent("", " ") + return enc.Encode(out) + } else if opts.format != formatter.PrettyFormatKey { + tmpl, err := template.New("inspect").Parse(opts.format) + if err != nil { + return errors.Wrapf(err, "failed to parse format template") + } + var buf bytes.Buffer + if err := tmpl.Execute(&buf, out); err != nil { + return errors.Wrapf(err, "failed to execute format template") + } + fmt.Fprintln(dockerCli.Out(), buf.String()) + return nil + } + + tw := tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0) + + if out.Name != "" { + fmt.Fprintf(tw, "Name:\t%s\n", out.Name) + } + if opts.ref == "" && out.Ref != "" { + fmt.Fprintf(tw, "Ref:\t%s\n", out.Ref) + } + if out.Context != "" { + fmt.Fprintf(tw, "Context:\t%s\n", out.Context) + } + if out.Dockerfile != "" { + fmt.Fprintf(tw, "Dockerfile:\t%s\n", out.Dockerfile) + } + if out.VCSRepository != "" { + fmt.Fprintf(tw, "VCS Repository:\t%s\n", out.VCSRepository) + } + if out.VCSRevision != "" { + fmt.Fprintf(tw, "VCS Revision:\t%s\n", out.VCSRevision) + } + + if out.Target != "" { + fmt.Fprintf(tw, "Target:\t%s\n", out.Target) + } + + if len(out.Platform) > 0 { + fmt.Fprintf(tw, "Platforms:\t%s\n", strings.Join(out.Platform, ", ")) + } + + if out.KeepGitDir { + fmt.Fprintf(tw, "Keep Git Dir:\t%s\n", strconv.FormatBool(out.KeepGitDir)) + } + + tw.Flush() + + fmt.Fprintln(dockerCli.Out()) + + printTable(dockerCli.Out(), out.NamedContexts, "Named Context") + + tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0) + + fmt.Fprintf(tw, "Started:\t%s\n", out.StartedAt.Format("2006-01-02 15:04:05")) + var statusStr string + if out.Status == statusRunning { + statusStr = " (running)" + } + fmt.Fprintf(tw, "Duration:\t%s%s\n", formatDuration(out.Duration), statusStr) + + if out.Status == statusError { + fmt.Fprintf(tw, "Error:\t%s %s\n", codes.Code(rec.Error.Code).String(), rec.Error.Message) + } else if out.Status == statusCanceled { + fmt.Fprintf(tw, "Status:\tCanceled\n") + } + + fmt.Fprintf(tw, "Build Steps:\t%d/%d (%.0f%% cached)\n", out.NumCompletedSteps, out.NumTotalSteps, float64(out.NumCachedSteps)/float64(out.NumTotalSteps)*100) + tw.Flush() + + fmt.Fprintln(dockerCli.Out()) + + tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0) + + if out.Config.Network != "" { + fmt.Fprintf(tw, "Network:\t%s\n", out.Config.Network) + } + if out.Config.Hostname != "" { + fmt.Fprintf(tw, "Hostname:\t%s\n", out.Config.Hostname) + } + if len(out.Config.ExtraHosts) > 0 { + fmt.Fprintf(tw, "Extra Hosts:\t%s\n", strings.Join(out.Config.ExtraHosts, ", ")) + } + if out.Config.CgroupParent != "" { + fmt.Fprintf(tw, "Cgroup Parent:\t%s\n", out.Config.CgroupParent) + } + if out.Config.ImageResolveMode != "" { + fmt.Fprintf(tw, "Image Resolve 
Mode:\t%s\n", out.Config.ImageResolveMode) + } + if out.Config.MultiPlatform { + fmt.Fprintf(tw, "Multi-Platform:\t%s\n", strconv.FormatBool(out.Config.MultiPlatform)) + } + if out.Config.NoCache { + fmt.Fprintf(tw, "No Cache:\t%s\n", strconv.FormatBool(out.Config.NoCache)) + } + if len(out.Config.NoCacheFilter) > 0 { + fmt.Fprintf(tw, "No Cache Filter:\t%s\n", strings.Join(out.Config.NoCacheFilter, ", ")) + } + + if out.Config.ShmSize != "" { + fmt.Fprintf(tw, "Shm Size:\t%s\n", out.Config.ShmSize) + } + if out.Config.Ulimit != "" { + fmt.Fprintf(tw, "Resource Limits:\t%s\n", out.Config.Ulimit) + } + if out.Config.CacheMountNS != "" { + fmt.Fprintf(tw, "Cache Mount Namespace:\t%s\n", out.Config.CacheMountNS) + } + if out.Config.DockerfileCheckConfig != "" { + fmt.Fprintf(tw, "Dockerfile Check Config:\t%s\n", out.Config.DockerfileCheckConfig) + } + if out.Config.SourceDateEpoch != "" { + fmt.Fprintf(tw, "Source Date Epoch:\t%s\n", out.Config.SourceDateEpoch) + } + if out.Config.SandboxHostname != "" { + fmt.Fprintf(tw, "Sandbox Hostname:\t%s\n", out.Config.SandboxHostname) + } + + for _, kv := range out.Config.RestRaw { + fmt.Fprintf(tw, "%s:\t%s\n", kv.Name, kv.Value) + } + + tw.Flush() + + fmt.Fprintln(dockerCli.Out()) + + printTable(dockerCli.Out(), out.BuildArgs, "Build Arg") + printTable(dockerCli.Out(), out.Labels, "Label") + + if len(out.Materials) > 0 { + fmt.Fprintln(dockerCli.Out(), "Materials:") + tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0) + fmt.Fprintf(tw, "URI\tDIGEST\n") + for _, m := range out.Materials { + fmt.Fprintf(tw, "%s\t%s\n", m.URI, strings.Join(m.Digests, ", ")) } tw.Flush() fmt.Fprintln(dockerCli.Out()) } - if rec.ExternalError != nil { - dt, err := content.ReadBlob(ctx, store, ociDesc(rec.ExternalError)) - if err != nil { - return errors.Wrapf(err, "failed to read external error %s", rec.ExternalError.Digest) - } - var st spb.Status - if err := proto.Unmarshal(dt, &st); err != nil { - return errors.Wrapf(err, "failed to unmarshal external error %s", rec.ExternalError.Digest) - } - retErr := grpcerrors.FromGRPC(status.ErrorProto(&st)) - for _, s := range errdefs.Sources(retErr) { - s.Print(dockerCli.Out()) + if len(out.Attachments) > 0 { + fmt.Fprintf(tw, "Attachments:\n") + tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0) + fmt.Fprintf(tw, "DIGEST\tPLATFORM\tTYPE\n") + for _, a := range out.Attachments { + fmt.Fprintf(tw, "%s\t%s\t%s\n", a.Digest, a.Platform, a.Type) } + tw.Flush() fmt.Fprintln(dockerCli.Out()) + } - var ve *errdefs.VertexError - if errors.As(retErr, &ve) { - dgst, err := digest.Parse(ve.Vertex.Digest) - if err != nil { - return errors.Wrapf(err, "failed to parse vertex digest %s", ve.Vertex.Digest) - } - name, logs, err := loadVertexLogs(ctx, c, rec.Ref, dgst, 16) - if err != nil { - return errors.Wrapf(err, "failed to load vertex logs %s", dgst) - } - if len(logs) > 0 { - fmt.Fprintln(dockerCli.Out(), "Logs:") - fmt.Fprintf(dockerCli.Out(), "> => %s:\n", name) - for _, l := range logs { - fmt.Fprintln(dockerCli.Out(), "> "+l) - } - fmt.Fprintln(dockerCli.Out()) - } + if out.Error != nil { + if out.Error.Sources != nil { + fmt.Fprint(dockerCli.Out(), string(out.Error.Sources)) } - - if debug.IsEnabled() { - fmt.Fprintf(dockerCli.Out(), "\n%+v\n", stack.Formatter(retErr)) - } else if len(stack.Traces(retErr)) > 0 { - fmt.Fprintf(dockerCli.Out(), "Enable --debug to see stack traces for error\n") + if len(out.Error.Logs) > 0 { + fmt.Fprintln(dockerCli.Out(), "Logs:") + fmt.Fprintf(dockerCli.Out(), "> => %s:\n", 
+            for _, l := range out.Error.Logs {
+                fmt.Fprintln(dockerCli.Out(), "> "+l)
+            }
+            fmt.Fprintln(dockerCli.Out())
+        }
+        if len(out.Error.Stack) > 0 {
+            if debug.IsEnabled() {
+                fmt.Fprintf(dockerCli.Out(), "\n%s\n", out.Error.Stack)
+            } else {
+                fmt.Fprintf(dockerCli.Out(), "Enable --debug to see stack traces for error\n")
+            }
         }
     }
 
@@ -388,7 +671,8 @@ func inspectCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
         attachmentCmd(dockerCli, rootOpts),
     )
 
-    // flags := cmd.Flags()
+    flags := cmd.Flags()
+    flags.StringVar(&options.format, "format", formatter.PrettyFormatKey, "Format the output")
 
     return cmd
 }
@@ -565,36 +849,48 @@ func descrType(desc ocispecs.Descriptor) string {
     return desc.MediaType
 }
 
-func tryParseValue(s string, f func(string) (string, error)) string {
+func tryParseValue[T any](s string, errs *[]string, f func(string) (T, error)) (T, bool) {
     v, err := f(s)
     if err != nil {
-        return fmt.Sprintf("%s (%v)", s, err)
+        errStr := fmt.Sprintf("failed to parse %s: (%v)", s, err)
+        *errs = append(*errs, errStr)
     }
-    return v
+    return v, true
 }
 
-func printTable(w io.Writer, attrs map[string]string, prefix, title string) {
-    var keys []string
-    for k := range attrs {
-        if strings.HasPrefix(k, prefix) {
-            keys = append(keys, strings.TrimPrefix(k, prefix))
-        }
-    }
-    slices.Sort(keys)
-
-    if len(keys) == 0 {
+func printTable(w io.Writer, kvs []keyValueOutput, title string) {
+    if len(kvs) == 0 {
         return
     }
 
     tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
     fmt.Fprintf(tw, "%s\tVALUE\n", strings.ToUpper(title))
-    for _, k := range keys {
-        fmt.Fprintf(tw, "%s\t%s\n", k, attrs[prefix+k])
+    for _, k := range kvs {
+        fmt.Fprintf(tw, "%s\t%s\n", k.Name, k.Value)
     }
     tw.Flush()
     fmt.Fprintln(w)
 }
 
+func readKeyValues(attrs map[string]string, prefix string) []keyValueOutput {
+    var out []keyValueOutput
+    for k, v := range attrs {
+        if strings.HasPrefix(k, prefix) {
+            out = append(out, keyValueOutput{
+                Name:  strings.TrimPrefix(k, prefix),
+                Value: v,
+            })
+        }
+    }
+    if len(out) == 0 {
+        return nil
+    }
+    slices.SortFunc(out, func(a, b keyValueOutput) int {
+        return cmp.Compare(a.Name, b.Name)
+    })
+    return out
+}
+
 func digestSetToDigests(ds slsa.DigestSet) []string {
     var out []string
     for k, v := range ds {
diff --git a/docs/reference/buildx_history_inspect.md b/docs/reference/buildx_history_inspect.md
index d3d6637a..bfad9661 100644
--- a/docs/reference/buildx_history_inspect.md
+++ b/docs/reference/buildx_history_inspect.md
@@ -12,11 +12,106 @@ Inspect a build
 
 ### Options
 
-| Name            | Type     | Default | Description                               |
-|:----------------|:---------|:--------|:-----------------------------------------|
-| `--builder`     | `string` |         | Override the configured builder instance |
-| `-D`, `--debug` | `bool`   |         | Enable debug logging                      |
+| Name                  | Type     | Default  | Description                               |
+|:----------------------|:---------|:---------|:-----------------------------------------|
+| `--builder`           | `string` |          | Override the configured builder instance |
+| `-D`, `--debug`       | `bool`   |          | Enable debug logging                      |
+| [`--format`](#format) | `string` | `pretty` | Format the output                         |
 
+## Examples
+
+### Format the output (--format)
+
+The formatting option (`--format`) prints the output as `pretty` (default),
+`json`, or using a Go template.
+
+```console
+$ docker buildx history inspect
+Name:            buildx (binaries)
+Context:         .
+Dockerfile:      Dockerfile
+VCS Repository:  https://github.com/crazy-max/buildx.git
+VCS Revision:    f15eaa1ee324ffbbab29605600d27a84cab86361
+Target:          binaries
+Platforms:       linux/amd64
+Keep Git Dir:    true
+
+Started:      2025-02-07 11:56:24
+Duration:     1m 1s
+Build Steps:  16/16 (25% cached)
+
+Image Resolve Mode:  local
+
+Materials:
+URI                                                       DIGEST
+pkg:docker/docker/dockerfile@1                            sha256:93bfd3b68c109427185cd78b4779fc82b484b0b7618e36d0f104d4d801e66d25
+pkg:docker/golang@1.23-alpine3.21?platform=linux%2Famd64  sha256:2c49857f2295e89b23b28386e57e018a86620a8fede5003900f2d138ba9c4037
+pkg:docker/tonistiigi/xx@1.6.1?platform=linux%2Famd64     sha256:923441d7c25f1e2eb5789f82d987693c47b8ed987c4ab3b075d6ed2b5d6779a3
+
+Attachments:
+DIGEST                                                                   PLATFORM  TYPE
+sha256:217329d2af959d4f02e3a96dcbe62bf100cab1feb8006a047ddfe51a5397f7e3            https://slsa.dev/provenance/v0.2
+
+Print build logs: docker buildx history logs g9808bwrjrlkbhdamxklx660b
+```
+
+```console
+$ docker buildx history inspect --format json
+{
+  "Name": "buildx (binaries)",
+  "Ref": "5w7vkqfi0rf59hw4hnmn627r9",
+  "Context": ".",
+  "Dockerfile": "Dockerfile",
+  "VCSRepository": "https://github.com/crazy-max/buildx.git",
+  "VCSRevision": "f15eaa1ee324ffbbab29605600d27a84cab86361",
+  "Target": "binaries",
+  "Platform": [
+    "linux/amd64"
+  ],
+  "KeepGitDir": true,
+  "StartedAt": "2025-02-07T12:01:05.75807272+01:00",
+  "CompletedAt": "2025-02-07T12:02:07.991778875+01:00",
+  "Duration": 62233706155,
+  "Status": "completed",
+  "NumCompletedSteps": 16,
+  "NumTotalSteps": 16,
+  "NumCachedSteps": 4,
+  "Config": {
+    "ImageResolveMode": "local"
+  },
+  "Materials": [
+    {
+      "URI": "pkg:docker/docker/dockerfile@1",
+      "Digests": [
+        "sha256:93bfd3b68c109427185cd78b4779fc82b484b0b7618e36d0f104d4d801e66d25"
+      ]
+    },
+    {
+      "URI": "pkg:docker/golang@1.23-alpine3.21?platform=linux%2Famd64",
+      "Digests": [
+        "sha256:2c49857f2295e89b23b28386e57e018a86620a8fede5003900f2d138ba9c4037"
+      ]
+    },
+    {
+      "URI": "pkg:docker/tonistiigi/xx@1.6.1?platform=linux%2Famd64",
+      "Digests": [
+        "sha256:923441d7c25f1e2eb5789f82d987693c47b8ed987c4ab3b075d6ed2b5d6779a3"
+      ]
+    }
+  ],
+  "Attachments": [
+    {
+      "Digest": "sha256:450fdd2e6b868fecd69e9891c2c404ba461aa38a47663b4805edeb8d2baf80b1",
+      "Type": "https://slsa.dev/provenance/v0.2"
+    }
+  ]
+}
```

+```console
+$ docker buildx history inspect --format "{{.Name}}: {{.VCSRepository}} ({{.VCSRevision}})"
+buildx (binaries): https://github.com/crazy-max/buildx.git (f15eaa1ee324ffbbab29605600d27a84cab86361)
+```
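+
+Because the template is executed with Go's `text/template` package against the
+same structure shown in the JSON output above, nested and repeated fields can
+also be accessed. For example, iterating over the recorded materials (output
+abbreviated; values are illustrative for the build shown above):
+
+```console
+$ docker buildx history inspect --format '{{range .Materials}}{{.URI}}{{"\n"}}{{end}}'
+pkg:docker/docker/dockerfile@1
+pkg:docker/golang@1.23-alpine3.21?platform=linux%2Famd64
+pkg:docker/tonistiigi/xx@1.6.1?platform=linux%2Famd64
+```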