Mirror of https://gitea.com/Lydanne/buildx.git (synced 2025-08-14 15:55:54 +08:00)
Compare commits: v0.16...v0.17.0-rc (114 commits)
Commits in this range (SHA1 only; author avatars and dates were not captured):

40ef3446f5, 7213b2a814, 9cfa25ab40, 6db3444a25, 15e930b691, abc5eaed88, ad9a5196b3, db117855da,
ecfe98df6f, 479177eaf9, 194f523fe1, 29d367bdd4, ed341bafd0, c887c2c62a, 7c481aae20, f0f8876902,
fa1d19bb1e, 7bea00f3dd, 83d5c0c61b, e58a1d35d1, b920b08ad3, f369377d74, b7486e5cd5, 5ecff53e0c,
48faab5890, f77866f5b4, 203fd8aee5, 806ccd3545, d6e030eda7, 96eb69aea4, d1d8d6e19c, dc7f679ab1,
e403ab2d63, b6a2c96926, 7a7a9c8e01, fa8f859159, 8411a763d9, 6c5279da54, 0e64eb4f8b, adbcc2225e,
e00efeb399, d03c13b947, 4787b5c046, 1c66f293c7, 246a36d463, a4adae3d6b, 36cd88f8ca, 07a85a544b,
f64b85afe6, 4b27fb3022, 38a8261f05, a3e6f4be15, 6467a86427, 58571ff6d6, 71174c3041, 16860e6dd2,
8e02b1a2f7, 531c6d4ff1, 238a3e03dd, 9a0c320588, acf0216292, 5a50d13641, 2810f20f3a, e2f6808457,
39bbb9e478, 771f0139ac, 6034c58285, 199890ff51, d391b1d3e6, f4da6b8f69, 386d599309, d130f8ef0a,
b691a10379, e628f9ea14, 0fb0b6db0d, 6efb1d7cdc, bc2748da59, d4c4632cf6, cdd46af015, b62d64b2b5,
64171cb13e, f28dff7598, 3d542f3d31, 30dbdcfa3e, 16518091cd, 897fc91802, c4d3011a98, a47f761c55,
aa35c954f3, 56df4e98a0, 9f00a9eafa, 56cb197c0a, 466006849a, 738f5ee9db, 9b49cf3ae6, bd0b425734,
7823a2dc01, cedbc5d68d, 12d431d1b4, ca452c47d8, d8f26f79ed, 4304d388ef, 96509847b9, 52bb668085,
85cf3bace9, b92bfb53d2, 6c929a45c7, d296d5d46a, 6e433da23f, 3005743f7c, d64d3a4caf, 0d37d68efd,
03a691a0a5, fa392a2dca
.github/dependabot.yml (2 lines changed, vendored)

@@ -11,5 +11,5 @@ updates:
      # trigger a new version: https://github.com/docker/buildx/pull/2222#issuecomment-1919092153
      - dependency-name: "docker/docs"
    labels:
      - "dependencies"
      - "area/dependencies"
      - "bot"
.github/workflows/build.yml (65 lines changed, vendored)

@@ -21,6 +21,7 @@ on:
env:
  BUILDX_VERSION: "latest"
  BUILDKIT_IMAGE: "moby/buildkit:latest"
  SCOUT_VERSION: "1.11.0"
  REPO_SLUG: "docker/buildx-bin"
  DESTDIR: "./bin"
  TEST_CACHE_SCOPE: "test"

@@ -214,6 +215,36 @@ jobs:
          name: test-reports-${{ env.TESTREPORTS_NAME }}
          path: ${{ env.TESTREPORTS_BASEDIR }}

  govulncheck:
    runs-on: ubuntu-24.04
    permissions:
      # required to write sarif report
      security-events: write
    steps:
      -
        name: Checkout
        uses: actions/checkout@v4
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          version: ${{ env.BUILDX_VERSION }}
          driver-opts: image=${{ env.BUILDKIT_IMAGE }}
          buildkitd-flags: --debug
      -
        name: Run
        uses: docker/bake-action@v5
        with:
          targets: govulncheck
        env:
          GOVULNCHECK_FORMAT: sarif
      -
        name: Upload SARIF report
        if: ${{ github.ref == 'refs/heads/master' && github.repository == 'docker/buildx' }}
        uses: github/codeql-action/upload-sarif@v3
        with:
          sarif_file: ${{ env.DESTDIR }}/govulncheck.out

  prepare-binaries:
    runs-on: ubuntu-24.04
    outputs:

@@ -328,6 +359,38 @@ jobs:
            *.cache-from=type=gha,scope=bin-image
            *.cache-to=type=gha,scope=bin-image,mode=max

  scout:
    runs-on: ubuntu-24.04
    if: ${{ github.ref == 'refs/heads/master' && github.repository == 'docker/buildx' }}
    permissions:
      # required to write sarif report
      security-events: write
    needs:
      - bin-image
    steps:
      -
        name: Checkout
        uses: actions/checkout@v4
      -
        name: Login to DockerHub
        uses: docker/login-action@v3
        with:
          username: ${{ vars.DOCKERPUBLICBOT_USERNAME }}
          password: ${{ secrets.DOCKERPUBLICBOT_WRITE_PAT }}
      -
        name: Scout
        id: scout
        uses: crazy-max/.github/.github/actions/docker-scout@ccae1c98f1237b5c19e4ef77ace44fa68b3bc7e4
        with:
          version: ${{ env.SCOUT_VERSION }}
          format: sarif
          image: registry://${{ env.REPO_SLUG }}:master
      -
        name: Upload SARIF report
        uses: github/codeql-action/upload-sarif@v3
        with:
          sarif_file: ${{ steps.scout.outputs.result-file }}

  release:
    runs-on: ubuntu-24.04
    needs:

@@ -359,7 +422,7 @@ jobs:
      -
        name: GitHub Release
        if: startsWith(github.ref, 'refs/tags/v')
-       uses: softprops/action-gh-release@a74c6b72af54cfa997e81df42d94703d6313a2d0 # v2.0.6
+       uses: softprops/action-gh-release@c062e08bd532815e2082a85e87e3ef29c3e6d191 # v2.0.8
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
.github/workflows/docs-release.yml (2 lines changed, vendored)

@@ -57,7 +57,7 @@ jobs:
          VENDOR_MODULE: github.com/docker/buildx@${{ env.RELEASE_NAME }}
      -
        name: Create PR on docs repo
-       uses: peter-evans/create-pull-request@c5a7806660adbe173f04e3e038b0ccdcd758773c # v6.1.0
+       uses: peter-evans/create-pull-request@8867c4aba1b742c39f8d0ba35429c2dfa4b6cb20 # v7.0.1
        with:
          token: ${{ secrets.GHPAT_DOCS_DISPATCH }}
          push-to-fork: docker-tools-robot/docker.github.io
.github/workflows/labeler.yml (2 lines changed, vendored)

@@ -17,3 +17,5 @@ jobs:
      -
        name: Run
        uses: actions/labeler@v5
+       with:
+         sync-labels: true
@@ -1,12 +1,8 @@
run:
  timeout: 30m
-  skip-files:
-    - ".*\\.pb\\.go$"

  modules-download-mode: vendor

  build-tags:

linters:
  enable:
    - gofmt

@@ -57,6 +53,8 @@ linters-settings:
      G306: "0644"

issues:
+  exclude-files:
+    - ".*\\.pb\\.go$"
  exclude-rules:
    - linters:
        - revive

@@ -77,6 +75,6 @@ issues:
        - revive
      text: "if-return"

  # show all
  max-issues-per-linter: 0
  max-same-issues: 0
  # show all
  max-issues-per-linter: 0
  max-same-issues: 0
.mailmap (14 lines changed)

@@ -1,11 +1,25 @@
# This file lists all individuals having contributed content to the repository.
# For how it is generated, see hack/dockerfiles/authors.Dockerfile.

Batuhan Apaydın <batuhan.apaydin@trendyol.com>
Batuhan Apaydın <batuhan.apaydin@trendyol.com> <developerguy2@gmail.com>
CrazyMax <github@crazymax.dev>
CrazyMax <github@crazymax.dev> <1951866+crazy-max@users.noreply.github.com>
CrazyMax <github@crazymax.dev> <crazy-max@users.noreply.github.com>
David Karlsson <david.karlsson@docker.com>
David Karlsson <david.karlsson@docker.com> <35727626+dvdksn@users.noreply.github.com>
jaihwan104 <jaihwan104@woowahan.com>
jaihwan104 <jaihwan104@woowahan.com> <42341126+jaihwan104@users.noreply.github.com>
Kenyon Ralph <kenyon@kenyonralph.com>
Kenyon Ralph <kenyon@kenyonralph.com> <quic_kralph@quicinc.com>
Sebastiaan van Stijn <github@gone.nl>
Sebastiaan van Stijn <github@gone.nl> <thaJeztah@users.noreply.github.com>
Shaun Thompson <shaun.thompson@docker.com>
Shaun Thompson <shaun.thompson@docker.com> <shaun.b.thompson@gmail.com>
Silvin Lubecki <silvin.lubecki@docker.com>
Silvin Lubecki <silvin.lubecki@docker.com> <31478878+silvin-lubecki@users.noreply.github.com>
Talon Bowler <talon.bowler@docker.com>
Talon Bowler <talon.bowler@docker.com> <nolat301@gmail.com>
Tibor Vass <tibor@docker.com>
Tibor Vass <tibor@docker.com> <tiborvass@users.noreply.github.com>
Tõnis Tiigi <tonistiigi@gmail.com>
69
AUTHORS
69
AUTHORS
@@ -1,45 +1,112 @@
|
||||
# This file lists all individuals having contributed content to the repository.
|
||||
# For how it is generated, see hack/dockerfiles/authors.Dockerfile.
|
||||
|
||||
accetto <34798830+accetto@users.noreply.github.com>
|
||||
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
|
||||
Aleksa Sarai <cyphar@cyphar.com>
|
||||
Alex Couture-Beil <alex@earthly.dev>
|
||||
Andrew Haines <andrew.haines@zencargo.com>
|
||||
Andy Caldwell <andrew.caldwell@metaswitch.com>
|
||||
Andy MacKinlay <admackin@users.noreply.github.com>
|
||||
Anthony Poschen <zanven42@gmail.com>
|
||||
Arnold Sobanski <arnold@l4g.dev>
|
||||
Artur Klauser <Artur.Klauser@computer.org>
|
||||
Batuhan Apaydın <developerguy2@gmail.com>
|
||||
Avi Deitcher <avi@deitcher.net>
|
||||
Batuhan Apaydın <batuhan.apaydin@trendyol.com>
|
||||
Ben Peachey <potherca@gmail.com>
|
||||
Bertrand Paquet <bertrand.paquet@gmail.com>
|
||||
Bin Du <bindu@microsoft.com>
|
||||
Brandon Philips <brandon@ifup.org>
|
||||
Brian Goff <cpuguy83@gmail.com>
|
||||
Bryce Lampe <bryce@pulumi.com>
|
||||
Cameron Adams <pnzreba@gmail.com>
|
||||
Christian Dupuis <cd@atomist.com>
|
||||
Cory Snider <csnider@mirantis.com>
|
||||
CrazyMax <github@crazymax.dev>
|
||||
David Gageot <david.gageot@docker.com>
|
||||
David Karlsson <david.karlsson@docker.com>
|
||||
David Scott <dave@recoil.org>
|
||||
dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
|
||||
Devin Bayer <dev@doubly.so>
|
||||
Djordje Lukic <djordje.lukic@docker.com>
|
||||
Dmitry Makovey <dmakovey@gitlab.com>
|
||||
Dmytro Makovey <dmytro.makovey@docker.com>
|
||||
Donghui Wang <977675308@qq.com>
|
||||
Doug Borg <dougborg@apple.com>
|
||||
Edgar Lee <edgarl@netflix.com>
|
||||
Eli Treuherz <et@arenko.group>
|
||||
Eliott Wiener <eliottwiener@gmail.com>
|
||||
Elran Shefer <elran.shefer@velocity.tech>
|
||||
faust <faustin@fala.red>
|
||||
Felipe Santos <felipecassiors@gmail.com>
|
||||
Felix de Souza <fdesouza@palantir.com>
|
||||
Fernando Miguel <github@FernandoMiguel.net>
|
||||
gfrancesco <gfrancesco@users.noreply.github.com>
|
||||
gracenoah <gracenoahgh@gmail.com>
|
||||
Guillaume Lours <705411+glours@users.noreply.github.com>
|
||||
guoguangwu <guoguangwu@magic-shield.com>
|
||||
Hollow Man <hollowman@hollowman.ml>
|
||||
Ian King'ori <kingorim.ian@gmail.com>
|
||||
idnandre <andre@idntimes.com>
|
||||
Ilya Dmitrichenko <errordeveloper@gmail.com>
|
||||
Isaac Gaskin <isaac.gaskin@circle.com>
|
||||
Jack Laxson <jackjrabbit@gmail.com>
|
||||
jaihwan104 <jaihwan104@woowahan.com>
|
||||
Jean-Yves Gastaud <jygastaud@gmail.com>
|
||||
Jhan S. Álvarez <51450231+yastanotheruser@users.noreply.github.com>
|
||||
Jonathan A. Sternberg <jonathan.sternberg@docker.com>
|
||||
Jonathan Piché <jpiche@coveo.com>
|
||||
Justin Chadwell <me@jedevc.com>
|
||||
Kenyon Ralph <kenyon@kenyonralph.com>
|
||||
khs1994 <khs1994@khs1994.com>
|
||||
Kijima Daigo <norimaking777@gmail.com>
|
||||
Kohei Tokunaga <ktokunaga.mail@gmail.com>
|
||||
Kotaro Adachi <k33asby@gmail.com>
|
||||
Kushagra Mansingh <12158241+kushmansingh@users.noreply.github.com>
|
||||
l00397676 <lujingxiao@huawei.com>
|
||||
Laura Brehm <laurabrehm@hey.com>
|
||||
Laurent Goderre <laurent.goderre@docker.com>
|
||||
Mark Hildreth <113933455+markhildreth-gravity@users.noreply.github.com>
|
||||
Mayeul Blanzat <mayeul.blanzat@datadoghq.com>
|
||||
Michal Augustyn <michal.augustyn@mail.com>
|
||||
Milas Bowman <milas.bowman@docker.com>
|
||||
Mitsuru Kariya <mitsuru.kariya@nttdata.com>
|
||||
Moleus <fafufuburr@gmail.com>
|
||||
Nick Santos <nick.santos@docker.com>
|
||||
Nick Sieger <nick@nicksieger.com>
|
||||
Nicolas De Loof <nicolas.deloof@gmail.com>
|
||||
Niklas Gehlen <niklas@namespacelabs.com>
|
||||
Patrick Van Stee <patrick@vanstee.me>
|
||||
Paweł Gronowski <pawel.gronowski@docker.com>
|
||||
Phong Tran <tran.pho@northeastern.edu>
|
||||
Qasim Sarfraz <qasimsarfraz@microsoft.com>
|
||||
Rob Murray <rob.murray@docker.com>
|
||||
robertlestak <robert.lestak@umusic.com>
|
||||
Saul Shanabrook <s.shanabrook@gmail.com>
|
||||
Sean P. Kane <spkane00@gmail.com>
|
||||
Sebastiaan van Stijn <github@gone.nl>
|
||||
Shaun Thompson <shaun.thompson@docker.com>
|
||||
SHIMA Tatsuya <ts1s1andn@gmail.com>
|
||||
Silvin Lubecki <silvin.lubecki@docker.com>
|
||||
Simon A. Eugster <simon.eu@gmail.com>
|
||||
Solomon Hykes <sh.github.6811@hykes.org>
|
||||
Sumner Warren <sumner.warren@gmail.com>
|
||||
Sune Keller <absukl@almbrand.dk>
|
||||
Talon Bowler <talon.bowler@docker.com>
|
||||
Tianon Gravi <admwiggin@gmail.com>
|
||||
Tibor Vass <tibor@docker.com>
|
||||
Tim Smith <tismith@rvohealth.com>
|
||||
Timofey Kirillov <timofey.kirillov@flant.com>
|
||||
Tyler Smith <tylerlwsmith@gmail.com>
|
||||
Tõnis Tiigi <tonistiigi@gmail.com>
|
||||
Ulysses Souza <ulyssessouza@gmail.com>
|
||||
Usual Coder <34403413+Usual-Coder@users.noreply.github.com>
|
||||
Wang Jinglei <morlay.null@gmail.com>
|
||||
Wei <daviseago@gmail.com>
|
||||
Wojciech M <wmiedzybrodzki@outlook.com>
|
||||
Xiang Dai <764524258@qq.com>
|
||||
Zachary Povey <zachary.povey@autotrader.co.uk>
|
||||
zelahi <elahi.zuhayr@gmail.com>
|
||||
Zero <tobewhatwewant@gmail.com>
|
||||
zhyon404 <zhyong4@gmail.com>
|
||||
Zsolt <zsolt.szeberenyi@figured.com>
|
||||
|
@@ -4,7 +4,8 @@ ARG GO_VERSION=1.22
ARG XX_VERSION=1.4.0

# for testing
-ARG DOCKER_VERSION=27.0.3
+ARG DOCKER_VERSION=27.1.1
+ARG DOCKER_CLI_VERSION=${DOCKER_VERSION}
ARG GOTESTSUM_VERSION=v1.9.0
ARG REGISTRY_VERSION=2.8.0
ARG BUILDKIT_VERSION=v0.14.1

@@ -13,7 +14,7 @@ ARG UNDOCK_VERSION=0.7.0
FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS golatest
FROM moby/moby-bin:$DOCKER_VERSION AS docker-engine
-FROM dockereng/cli-bin:$DOCKER_VERSION AS docker-cli
+FROM dockereng/cli-bin:$DOCKER_CLI_VERSION AS docker-cli
FROM registry:$REGISTRY_VERSION AS registry
FROM moby/buildkit:$BUILDKIT_VERSION AS buildkit
FROM crazymax/undock:$UNDOCK_VERSION AS undock
453
PROJECT.md
Normal file
453
PROJECT.md
Normal file
@@ -0,0 +1,453 @@
|
||||
# Project processing guide <!-- omit from toc -->
|
||||
|
||||
- [Project scope](#project-scope)
|
||||
- [Labels](#labels)
|
||||
- [Global](#global)
|
||||
- [`area/`](#area)
|
||||
- [`exp/`](#exp)
|
||||
- [`impact/`](#impact)
|
||||
- [`kind/`](#kind)
|
||||
- [`needs/`](#needs)
|
||||
- [`priority/`](#priority)
|
||||
- [`status/`](#status)
|
||||
- [Types of releases](#types-of-releases)
|
||||
- [Feature releases](#feature-releases)
|
||||
- [Release Candidates](#release-candidates)
|
||||
- [Support Policy](#support-policy)
|
||||
- [Contributing to Releases](#contributing-to-releases)
|
||||
- [Patch releases](#patch-releases)
|
||||
- [Milestones](#milestones)
|
||||
- [Triage process](#triage-process)
|
||||
- [Verify essential information](#verify-essential-information)
|
||||
- [Classify the issue](#classify-the-issue)
|
||||
- [Prioritization guidelines for `kind/bug`](#prioritization-guidelines-for-kindbug)
|
||||
- [Issue lifecyle](#issue-lifecyle)
|
||||
- [Examples](#examples)
|
||||
- [Submitting a bug](#submitting-a-bug)
|
||||
- [Pull request review process](#pull-request-review-process)
|
||||
- [Handling stalled issues and pull requests](#handling-stalled-issues-and-pull-requests)
|
||||
- [Moving to a discussion](#moving-to-a-discussion)
|
||||
- [Workflow automation](#workflow-automation)
|
||||
- [Exempting an issue/PR from stale bot processing](#exempting-an-issuepr-from-stale-bot-processing)
|
||||
- [Updating dependencies](#updating-dependencies)
|
||||
|
||||
---
|
||||
|
||||
## Project scope
|
||||
|
||||
**Docker Buildx** is a Docker CLI plugin designed to extend build capabilities using BuildKit. It provides advanced features for building container images, supporting multiple builder instances, multi-node builds, and high-level build constructs. Buildx enhances the Docker build process, making it more efficient and flexible, and is compatible with both Docker and Kubernetes environments. Key features include:
|
||||
|
||||
- **Familiar user experience:** Buildx offers a user experience similar to legacy docker build, ensuring a smooth transition from legacy commands
|
||||
- **Full BuildKit capabilities:** Leverage the full feature set of [`moby/buildkit`](https://github.com/moby/buildkit) when using the container driver
|
||||
- **Multiple builder instances:** Supports the use of multiple builder instances, allowing concurrent builds and effective management and monitoring of these builders.
|
||||
- **Multi-node builds:** Use multiple nodes to build cross-platform images
|
||||
- **Compose integration:** Build complex, multi-services files as defined in compose
|
||||
- **High-level build constructs via `bake`:** Introduces high-level build constructs for more complex build workflows
|
||||
- **In-container driver support:** Support in-container drivers for both Docker and Kubernetes environments to support isolation/security.
|
||||
|
||||
## Labels
|
||||
|
||||
Below are common groups, labels, and their intended usage to support issues, pull requests, and discussion processing.
|
||||
|
||||
### Global
|
||||
|
||||
General attributes that can apply to nearly any issue or pull request.
|
||||
|
||||
| Label | Applies to | Description |
|
||||
| ------------------- | ----------- | ------------------------------------------------------------------------- |
|
||||
| `bot` | Issues, PRs | Created by a bot |
|
||||
| `good first issue ` | Issues | Suitable for first-time contributors |
|
||||
| `help wanted` | Issues, PRs | Assistance requested |
|
||||
| `lgtm` | PRs | “Looks good to me” approval |
|
||||
| `stale` | Issues, PRs | The issue/PR has not had activity for a while |
|
||||
| `rotten` | Issues, PRs | The issue/PR has not had activity since being marked stale and was closed |
|
||||
| `frozen` | Issues, PRs | The issue/PR should be skipped by the stale-bot |
|
||||
| `dco/no` | PRs | The PR is missing a developer certificate of origin sign-off |
|
||||
|
||||
### `area/`
|
||||
|
||||
Area or component of the project affected. Please note that the table below may not be inclusive of all current options.
|
||||
|
||||
| Label | Applies to | Description |
|
||||
| ------------------------------ | ---------- | -------------------------- |
|
||||
| `area/bake` | Any | `bake` |
|
||||
| `area/bake/compose` | Any | `bake/compose` |
|
||||
| `area/build` | Any | `build` |
|
||||
| `area/builder` | Any | `builder` |
|
||||
| `area/buildkit` | Any | Relates to `moby/buildkit` |
|
||||
| `area/cache` | Any | `cache` |
|
||||
| `area/checks` | Any | `checks` |
|
||||
| `area/ci` | Any | Project CI |
|
||||
| `area/cli` | Any | `cli` |
|
||||
| `area/controller` | Any | `controller` |
|
||||
| `area/debug` | Any | `debug` |
|
||||
| `area/dependencies` | Any | Project dependencies |
|
||||
| `area/dockerfile` | Any | `dockerfile` |
|
||||
| `area/docs` | Any | `docs` |
|
||||
| `area/driver` | Any | `driver` |
|
||||
| `area/driver/docker` | Any | `driver/docker` |
|
||||
| `area/driver/docker-container` | Any | `driver/docker-container` |
|
||||
| `area/driver/kubernetes` | Any | `driver/kubernetes` |
|
||||
| `area/driver/remote` | Any | `driver/remote` |
|
||||
| `area/feature-parity` | Any | `feature-parity` |
|
||||
| `area/github-actions` | Any | `github-actions` |
|
||||
| `area/hack` | Any | Project hack/support |
|
||||
| `area/imagetools` | Any | `imagetools` |
|
||||
| `area/metrics` | Any | `metrics` |
|
||||
| `area/moby` | Any | Relates to `moby/moby` |
|
||||
| `area/project` | Any | Project support |
|
||||
| `area/qemu` | Any | `qemu` |
|
||||
| `area/tests` | Any | Project testing |
|
||||
| `area/windows` | Any | `windows` |
|
||||
|
||||
### `exp/`
|
||||
|
||||
Estimated experience level to complete the item
|
||||
|
||||
| Label | Applies to | Description |
|
||||
| ------------------ | ---------- | ------------------------------------------------------------------------------- |
|
||||
| `exp/beginner` | Issue | Suitable for contributors new to the project or technology stack |
|
||||
| `exp/intermediate` | Issue | Requires some familiarity with the project and technology |
|
||||
| `exp/expert` | Issue | Requires deep understanding and advanced skills with the project and technology |
|
||||
|
||||
### `impact/`
|
||||
|
||||
Potential impact areas of the issue or pull request.
|
||||
|
||||
| Label | Applies to | Description |
|
||||
| -------------------- | ---------- | -------------------------------------------------- |
|
||||
| `impact/breaking` | PR | Change is API-breaking |
|
||||
| `impact/changelog` | PR | When complete, the item should be in the changelog |
|
||||
| `impact/deprecation` | PR | Change is a deprecation of a feature |
|
||||
|
||||
|
||||
### `kind/`
|
||||
|
||||
The type of issue, pull request or discussion
|
||||
|
||||
| Label | Applies to | Description |
|
||||
| ------------------ | ----------------- | ------------------------------------------------------- |
|
||||
| `kind/bug` | Issue, PR | Confirmed bug |
|
||||
| `kind/chore` | Issue, PR | Project support tasks |
|
||||
| `kind/docs` | Issue, PR | Additions or modifications to the documentation |
|
||||
| `kind/duplicate` | Any | Duplicate of another item |
|
||||
| `kind/enhancement` | Any | Enhancement of an existing feature |
|
||||
| `kind/feature` | Any | A brand new feature |
|
||||
| `kind/maybe-bug` | Issue, PR | Unconfirmed bug, turns into kind/bug when confirmed |
|
||||
| `kind/proposal` | Issue, Discussion | A proposed major change |
|
||||
| `kind/refactor` | Issue, PR | Refactor of existing code |
|
||||
| `kind/support` | Any | A question, discussion, or other user support item |
|
||||
| `kind/tests` | Issue, PR | Additions or modifications to the project testing suite |
|
||||
|
||||
### `needs/`
|
||||
|
||||
Actions or missing requirements needed by the issue or pull request.
|
||||
|
||||
| Label | Applies to | Description |
|
||||
| --------------------------- | ---------- | ----------------------------------------------------- |
|
||||
| `needs/assignee` | Issue, PR | Needs an assignee |
|
||||
| `needs/code-review` | PR | Needs review of code |
|
||||
| `needs/design-review` | Issue, PR | Needs review of design |
|
||||
| `needs/docs-review` | Issue, PR | Needs review by the documentation team |
|
||||
| `needs/docs-update` | Issue, PR | Needs an update to the docs |
|
||||
| `needs/follow-on-work` | Issue, PR | Needs follow-on work/PR |
|
||||
| `needs/issue` | PR | Needs an issue |
|
||||
| `needs/maintainer-decision` | Issue, PR | Needs maintainer discussion/decision before advancing |
|
||||
| `needs/milestone` | Issue, PR | Needs milestone assignment |
|
||||
| `needs/more-info` | Any | Needs more information from the author |
|
||||
| `needs/more-investigation` | Issue, PR | Needs further investigation |
|
||||
| `needs/priority` | Issue, PR | Needs priority assignment |
|
||||
| `needs/pull-request` | Issue | Needs a pull request |
|
||||
| `needs/rebase` | PR | Needs rebase to target branch |
|
||||
| `needs/reproduction` | Issue, PR | Needs reproduction steps |
|
||||
|
||||
### `priority/`
|
||||
|
||||
Level of urgency of a `kind/bug` issue or pull request.
|
||||
|
||||
| Label | Applies to | Description |
|
||||
| ------------- | ---------- | ----------------------------------------------------------------------- |
|
||||
| `priority/P0` | Issue, PR | Urgent: Security, critical bugs, blocking issues. |
|
||||
| `priority/P1` | Issue, PR | Important: This is a top priority and a must-have for the next release. |
|
||||
| `priority/P2` | Issue, PR | Normal: Default priority |
|
||||
|
||||
### `status/`
|
||||
|
||||
Current lifecycle state of the issue or pull request.
|
||||
|
||||
| Label | Applies to | Description |
|
||||
| --------------------- | ---------- | ---------------------------------------------------------------------- |
|
||||
| `status/accepted` | Issue, PR | The issue has been reviewed and accepted for implementation |
|
||||
| `status/active` | PR | The PR is actively being worked on by a maintainer or community member |
|
||||
| `status/blocked` | Issue, PR | The issue/PR is blocked from advancing to another status |
|
||||
| `status/do-not-merge` | PR | Should not be merged pending further review or changes |
|
||||
| `status/transfer` | Any | Transferred to another project |
|
||||
| `status/triage` | Any | The item needs to be sorted by maintainers |
|
||||
| `status/wontfix` | Issue, PR | The issue/PR will not be fixed or addressed as described |
|
||||
|
||||
## Types of releases
|
||||
|
||||
This project has feature releases, patch releases, and security releases.
|
||||
|
||||
### Feature releases
|
||||
|
||||
Feature releases are made from the development branch, followed by cutting a release branch for future patch releases, which may also occur during the code freeze period.
|
||||
|
||||
#### Release Candidates
|
||||
|
||||
Users can expect 2-3 release candidate (RC) test releases prior to a feature release. The first RC is typically released about one to two weeks before the final release.
|
||||
|
||||
#### Support Policy
|
||||
|
||||
Once a new feature release is cut, support for the previous feature release is discontinued. An exception may be made for urgent security releases that occur shortly after a new feature release. Buildx does not offer LTS (Long-Term Support) releases.
|
||||
|
||||
#### Contributing to Releases
|
||||
|
||||
Anyone can request that an issue or PR be included in the next feature or patch release milestone, provided it meets the necessary requirements.
|
||||
|
||||
### Patch releases
|
||||
|
||||
Patch releases should only include the most critical patches. Stability is vital, so everyone should always use the latest patch release.
|
||||
|
||||
If a fix is needed but does not qualify for a patch release because of its code size or other criteria that make it too unpredictable, we will prioritize cutting a new feature release sooner rather than making an exception for backporting.
|
||||
|
||||
Following PRs are included in patch releases
|
||||
|
||||
- `priority/P0` fixes
|
||||
- `priority/P1` fixes, assuming maintainers don’t object because of the patch size
|
||||
- `priority/P2` fixes, only if (both required)
|
||||
- proposed by maintainer
|
||||
- the patch is trivial and self-contained
|
||||
- Documentation-only patches
|
||||
- Vendored dependency updates, only if:
|
||||
- Fixing (qualifying) bug or security issue in Buildx
|
||||
- The patch is small, else a forked version of the dependency with only the patches required
|
||||
|
||||
New features do not qualify for patch release.
|
||||
|
||||
## Milestones
|
||||
|
||||
Milestones are used to help identify what releases a contribution will be in.
|
||||
|
||||
- The `v0.next` milestone collects unblocked items planned for the next 2-3 feature releases but not yet assigned to a specific version milestone.
|
||||
- The `v0.backlog` milestone gathers all triaged items considered for the long-term (beyond the next 3 feature releases) or currently unfit for a future release due to certain conditions. These items may be blocked and need to be unblocked before progressing.
|
||||
|
||||
## Triage process
|
||||
|
||||
Triage provides an important way to contribute to an open-source project. When submitted without an issue this process applies to Pull Requests as well. Triage helps ensure work items are resolved quickly by:
|
||||
|
||||
- Ensuring the issue's intent and purpose are described precisely. This is necessary because it can be difficult for an issue to explain how an end user experiences a problem and what actions they took to arrive at the problem.
|
||||
- Giving a contributor the information they need before they commit to resolving an issue.
|
||||
- Lowering the issue count by preventing duplicate issues.
|
||||
- Streamlining the development process by preventing duplicate discussions.
|
||||
|
||||
If you don't have time to code, consider helping with triage. The community will thank you for saving them time by spending some of yours. The same basic process should be applied upon receipt of a new issue.
|
||||
|
||||
1. Verify essential information
|
||||
2. Classify the issue
|
||||
3. Prioritizing the issue
|
||||
|
||||
### Verify essential information
|
||||
|
||||
Before advancing the triage process, ensure the issue contains all necessary information to be properly understood and assessed. The required information may vary by issue type, but typically includes the system environment, version numbers, reproduction steps, expected outcomes, and actual results.
|
||||
|
||||
- **Exercising Judgment**: Use your best judgment to assess the issue description’s completeness.
|
||||
- **Communicating Needs**: If the information provided is insufficient, kindly request additional details from the author. Explain that this information is crucial for clarity and resolution of the issue, and apply the `needs/more-information` label to indicate a response from the author is required.
|
||||
|
||||
### Classify the issue
|
||||
|
||||
An issue will typically have multiple labels. These are used to help communicate key information about context, requirements, and status. At a minimum, a properly classified issue should have:
|
||||
|
||||
- (Required) One or more [`area/*`](#area) labels
|
||||
- (Required) One [`kind/*`](#kind) label to indicate the type of issue
|
||||
- (Required if `kind/bug`) A [`priority/*`](#priority) label
|
||||
|
||||
When assigning a decision the following labels should be present:
|
||||
|
||||
- (Required) One [`status/*`](#status) label to indicate lifecycle status
|
||||
|
||||
Additional labels can provide more clarity:
|
||||
|
||||
- Zero or more [`needs/*`](#needs) labels to indicate missing items
|
||||
- Zero or more [`impact/*`](#impact) labels
|
||||
- One [`exp/*`](#exp) label
|
||||
|
||||
## Prioritization guidelines for `kind/bug`
|
||||
|
||||
When an issue or pull request of `kind/bug` is correctly categorized and attached to a milestone, the labels indicate the urgency with which it should be completed.
|
||||
|
||||
**priority/P0**
|
||||
|
||||
Fixing this item is the highest priority. A patch release will follow as soon as a patch is available and verified. This level is used exclusively for bugs.
|
||||
|
||||
Examples:
|
||||
|
||||
- Regression in a critical code path
|
||||
- Panic in a critical code path
|
||||
- Corruption in critical code path or rest of the system
|
||||
- Leaked zero-day critical security
|
||||
|
||||
**priority/P1**
|
||||
|
||||
Items with this label should be fixed with high priority and almost always included in a patch release. Unless waiting for another issue, patch releases should happen within a week. This level is not used for features or enhancements.
|
||||
|
||||
Examples:
|
||||
|
||||
- Any regression, panic
|
||||
- Measurable performance regression
|
||||
- A major bug in a new feature in the latest release
|
||||
- Incompatibility with upgraded external dependency
|
||||
|
||||
**priority/P2**
|
||||
|
||||
This is the default priority and is implied in the absence of a `priority/` label. Bugs with this priority should be included in the next feature release but may land in a patch release if they are ready and unlikely to impact other functionality adversely. Non-bug issues with this priority should also be included in the next feature release if they are available and ready.
|
||||
|
||||
Examples:
|
||||
|
||||
- Confirmed bugs
|
||||
- Bugs in non-default configurations
|
||||
- Most enhancements
|
||||
|
||||
## Issue lifecyle
|
||||
|
||||
```mermaid
|
||||
flowchart LR
|
||||
create([New issue]) --> triage
|
||||
subgraph triage[Triage Loop]
|
||||
review[Review]
|
||||
end
|
||||
subgraph decision[Decision]
|
||||
accept[Accept]
|
||||
close[Close]
|
||||
end
|
||||
triage -- if accepted --> accept[Assign status, milestone]
|
||||
triage -- if rejected --> close[Assign status, close issue]
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
#### Submitting a bug
|
||||
|
||||
To help illustrate the issue life cycle let’s walk through submitting an issue as a potential bug in CI that enters a feedback loop and is eventually accepted as P2 priority and placed on the backlog.
|
||||
|
||||
```mermaid
|
||||
flowchart LR
|
||||
|
||||
new([New issue])
|
||||
|
||||
subgraph triage[Triage]
|
||||
direction LR
|
||||
|
||||
create["Action: Submit issue via Bug form\nLabels: kind/maybe-bug, status/triage"]
|
||||
style create text-align:left
|
||||
|
||||
subgraph review[Review]
|
||||
direction TB
|
||||
classify["Action: Maintainer reviews issue, requests more info\nLabels: kind/maybe-bug, status/triage, needs/more-info, area/*"]
|
||||
style classify text-align:left
|
||||
|
||||
update["Action: Author updates issue\nLabels: kind/maybe-bug, status/triage, needs/more-info, area/*"]
|
||||
style update text-align:left
|
||||
|
||||
classify --> update
|
||||
update --> classify
|
||||
end
|
||||
|
||||
create --> review
|
||||
end
|
||||
|
||||
subgraph decision[Decision]
|
||||
accept["Action: Maintainer reviews updates, accepts, assigns milestone\nLabels: kind/bug, priority/P2, status/accepted, area/*, impact/*"]
|
||||
style accept text-align: left
|
||||
end
|
||||
|
||||
new --> triage
|
||||
triage --> decision
|
||||
```
|
||||
|
||||
## Pull request review process
|
||||
|
||||
A thorough and timely review process for pull requests (PRs) is crucial for maintaining the integrity and quality of the project while fostering a collaborative environment.
|
||||
|
||||
- **Labeling**: Most labels should be inherited from a linked issue. If no issue is linked an extended review process may be required.
|
||||
- **Continuous Integration**: With few exceptions, it is crucial that all Continuous Integration (CI) workflows pass successfully.
|
||||
- **Draft Status**: Incomplete or long-running PRs should be placed in "Draft" status. They may revert to "Draft" status upon initial review if significant rework is required.
|
||||
|
||||
```mermaid
|
||||
flowchart LR
|
||||
triage([Triage])
|
||||
draft[Draft PR]
|
||||
review[PR Review]
|
||||
closed{{Close PR}}
|
||||
merge{{Merge PR}}
|
||||
|
||||
subgraph feedback1[Feedback Loop]
|
||||
draft
|
||||
end
|
||||
subgraph feedback2[Feedback Loop]
|
||||
review
|
||||
end
|
||||
|
||||
triage --> draft
|
||||
draft --> review
|
||||
review --> closed
|
||||
review --> draft
|
||||
review --> merge
|
||||
```
|
||||
|
||||
## Handling stalled issues and pull requests
|
||||
|
||||
Unfortunately, some issues or pull requests can remain inactive for extended periods. To mitigate this, automation is employed to prompt both the author and maintainers, ensuring that all contributions receive appropriate attention.
|
||||
|
||||
**For Authors:**
|
||||
|
||||
- **Closure of Inactive Items**: If your issue or PR becomes irrelevant or is no longer needed, please close it to help keep the project clean.
|
||||
- **Prompt Responses**: If additional information is requested, please respond promptly to facilitate progress.
|
||||
|
||||
**For Maintainers:**
|
||||
|
||||
- **Timely Responses**: Endeavor to address issues and PRs within a reasonable timeframe to keep the community actively engaged.
|
||||
- **Engagement with Stale Issues**: If an issue becomes stale due to maintainer inaction, re-engage with the author to reassess and revitalize the discussion.
|
||||
|
||||
**Stale and Rotten Policy:**
|
||||
|
||||
- An issue or PR will be labeled as **`stale`** after 14 calendar days of inactivity. If it remains inactive for another 30 days, it will be labeled as **`rotten`** and closed.
|
||||
- Authors whose issues or PRs have been closed are welcome to re-open them or create new ones and link to the original.
|
||||
|
||||
**Skipping Stale Processing:**
|
||||
|
||||
- To prevent an issue or PR from being marked as stale, label it as **`frozen`**.
|
||||
|
||||
**Exceptions to Stale Processing:**
|
||||
|
||||
- Issues or PRs marked as **`frozen`**.
|
||||
- Issues or PRs assigned to a milestone.
|
||||
|
||||
## Moving to a discussion
|
||||
|
||||
Sometimes, an issue or pull request may not be the appropriate medium for what is essentially a discussion. In such cases, the issue or PR will either be converted to a discussion or a new discussion will be created. The original item will then be labeled appropriately (**`kind/discussion`** or **`kind/question`**) and closed.
|
||||
|
||||
If you believe this conversion was made in error, please express your concerns in the new discussion thread. If necessary, a reversal to the original issue or PR format can be facilitated.
|
||||
|
||||
## Workflow automation
|
||||
|
||||
To help expedite common operations, avoid errors and reduce toil some workflow automation is used by the project. This can include:
|
||||
|
||||
- Stale issue or pull request processing
|
||||
- Auto-labeling actions
|
||||
- Auto-response actions
|
||||
- Label carry over from issue to pull request
|
||||
|
||||
### Exempting an issue/PR from stale bot processing
|
||||
|
||||
The stale item handling is configured in the [repository](link-to-config-file). To exempt an issue or PR from stale processing you can:
|
||||
|
||||
- Add the item to a milestone
|
||||
- Add the `frozen` label to the item
|
||||
|
||||
## Updating dependencies
|
||||
|
||||
- **Runtime Dependencies**: Use the latest stable release available when the first Release Candidate (RC) of a new feature release is cut. For patch releases, update to the latest corresponding patch release of the dependency.
|
||||
- **Other Dependencies**: Always permitted to update to the latest patch release in the development branch. Updates to a new feature release require justification, unless the dependency is outdated. Prefer tagged versions of dependencies unless a specific untagged commit is needed. Go modules should specify the lowest compatible version; there is no requirement to update all dependencies to their latest versions before cutting a new Buildx feature release.
|
||||
- **Patch Releases**: Vendored dependency updates are considered for patch releases, except in the rare cases specified previously.
|
||||
- **Security Considerations**: A security scanner report indicating a non-exploitable issue via Buildx does not justify backports.
|
@@ -56,8 +56,7 @@ For more information on how to use Buildx, see

Using `buildx` with Docker requires Docker engine 19.03 or newer.

-> **Warning**
->
+> [!WARNING]
> Using an incompatible version of Docker may result in unexpected behavior,
> and will likely cause issues, especially when using Buildx builders with more
> recent versions of BuildKit.

@@ -75,8 +74,7 @@ Docker Engine package repositories contain Docker Buildx packages when installed

## Manual download

-> **Important**
->
+> [!IMPORTANT]
> This section is for unattended installation of the buildx component. These
> instructions are mostly suitable for testing purposes. We do not recommend
> installing buildx using manual download in production environments as they

@@ -107,8 +105,7 @@ On Windows:

* `C:\ProgramData\Docker\cli-plugins`
* `C:\Program Files\Docker\cli-plugins`

-> **Note**
->
+> [!NOTE]
> On Unix environments, it may also be necessary to make it executable with `chmod +x`:
> ```shell
> $ chmod +x ~/.docker/cli-plugins/docker-buildx
bake/bake.go (25 lines changed)

@@ -25,6 +25,7 @@ import (
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/client/llb"
	"github.com/moby/buildkit/session/auth/authprovider"
+	"github.com/moby/buildkit/util/entitlements"
	"github.com/pkg/errors"
	"github.com/tonistiigi/go-csvvalue"
	"github.com/zclconf/go-cty/cty"

@@ -542,7 +543,7 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
		o := t[kk[1]]

		switch keys[1] {
-		case "output", "cache-to", "cache-from", "tags", "platform", "secrets", "ssh", "attest":
+		case "output", "cache-to", "cache-from", "tags", "platform", "secrets", "ssh", "attest", "entitlements", "network":
			if len(parts) == 2 {
				o.ArrValue = append(o.ArrValue, parts[1])
			}

@@ -703,11 +704,12 @@ type Target struct {
	Outputs       []string `json:"output,omitempty" hcl:"output,optional" cty:"output"`
	Pull          *bool    `json:"pull,omitempty" hcl:"pull,optional" cty:"pull"`
	NoCache       *bool    `json:"no-cache,omitempty" hcl:"no-cache,optional" cty:"no-cache"`
-	NetworkMode   *string  `json:"-" hcl:"-" cty:"-"`
+	NetworkMode   *string  `json:"network" hcl:"network" cty:"network"`
	NoCacheFilter []string `json:"no-cache-filter,omitempty" hcl:"no-cache-filter,optional" cty:"no-cache-filter"`
	ShmSize       *string  `json:"shm-size,omitempty" hcl:"shm-size,optional"`
	Ulimits       []string `json:"ulimits,omitempty" hcl:"ulimits,optional"`
	Call          *string  `json:"call,omitempty" hcl:"call,optional" cty:"call"`
+	Entitlements  []string `json:"entitlements,omitempty" hcl:"entitlements,optional" cty:"entitlements"`
	// IMPORTANT: if you add more fields here, do not forget to update newOverrides/AddOverrides and docs/bake-reference.md.

	// linked is a private field to mark a target used as a linked one

@@ -732,6 +734,12 @@ func (t *Target) normalize() {
	t.NoCacheFilter = removeDupes(t.NoCacheFilter)
	t.Ulimits = removeDupes(t.Ulimits)

+	if t.NetworkMode != nil && *t.NetworkMode == "host" {
+		t.Entitlements = append(t.Entitlements, "network.host")
+	}
+
+	t.Entitlements = removeDupes(t.Entitlements)
+
	for k, v := range t.Contexts {
		if v == "" {
			delete(t.Contexts, k)

@@ -831,6 +839,9 @@ func (t *Target) Merge(t2 *Target) {
	if t2.Description != "" {
		t.Description = t2.Description
	}
+	if t2.Entitlements != nil { // merge
+		t.Entitlements = append(t.Entitlements, t2.Entitlements...)
+	}
	t.Inherits = append(t.Inherits, t2.Inherits...)
}

@@ -885,6 +896,8 @@ func (t *Target) AddOverrides(overrides map[string]Override) error {
			t.Platforms = o.ArrValue
		case "output":
			t.Outputs = o.ArrValue
+		case "entitlements":
+			t.Entitlements = append(t.Entitlements, o.ArrValue...)
		case "annotations":
			t.Annotations = append(t.Annotations, o.ArrValue...)
		case "attest":

@@ -901,6 +914,8 @@ func (t *Target) AddOverrides(overrides map[string]Override) error {
			t.ShmSize = &value
		case "ulimits":
			t.Ulimits = o.ArrValue
+		case "network":
+			t.NetworkMode = &value
		case "pull":
			pull, err := strconv.ParseBool(value)
			if err != nil {

@@ -1313,7 +1328,7 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
	}

	if t.Call != nil {
-		bo.PrintFunc = &build.PrintFunc{
+		bo.CallFunc = &build.CallFunc{
			Name: *t.Call,
		}
	}

@@ -1368,6 +1383,10 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
	}
	bo.Ulimits = ulimits

+	for _, ent := range t.Entitlements {
+		bo.Allow = append(bo.Allow, entitlements.Entitlement(ent))
+	}
+
	return bo, nil
}
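Taken together, the bake.go changes above expose two new per-target keys in Bake files: `network` (the previously internal-only `NetworkMode`) and `entitlements`, and `normalize()` makes `network = "host"` imply the `network.host` entitlement. A minimal sketch of a Bake file using the new keys, modeled on the HCL fixtures quoted in the tests below (the target name and Dockerfile are placeholders):

```hcl
# Illustrative docker-bake.hcl; "app" and app.Dockerfile are placeholders.
target "app" {
  dockerfile   = "app.Dockerfile"
  network      = "host"                                # new "network" key (maps to NetworkMode)
  entitlements = ["network.host", "security.insecure"] # forwarded to build.Options.Allow
}
```

Both keys are also listed in the array-valued override switch in `newOverrides` and handled in `AddOverrides`, so they can be overridden per target without editing the file.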
@@ -8,6 +8,7 @@ import (
	"strings"
	"testing"

+	"github.com/moby/buildkit/util/entitlements"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

@@ -1726,3 +1727,132 @@ func TestAnnotations(t *testing.T) {
	require.Len(t, bo["app"].Exports, 1)
	require.Equal(t, "bar", bo["app"].Exports[0].Attrs["annotation-manifest[linux/amd64].foo"])
}

func TestHCLEntitlements(t *testing.T) {
	fp := File{
		Name: "docker-bake.hcl",
		Data: []byte(
			`target "app" {
				entitlements = ["security.insecure", "network.host"]
			}`),
	}
	ctx := context.TODO()
	m, g, err := ReadTargets(ctx, []File{fp}, []string{"app"}, nil, nil)
	require.NoError(t, err)

	bo, err := TargetsToBuildOpt(m, &Input{})
	require.NoError(t, err)

	require.Equal(t, 1, len(g))
	require.Equal(t, []string{"app"}, g["default"].Targets)

	require.Equal(t, 1, len(m))
	require.Contains(t, m, "app")
	require.Len(t, m["app"].Entitlements, 2)
	require.Equal(t, "security.insecure", m["app"].Entitlements[0])
	require.Equal(t, "network.host", m["app"].Entitlements[1])

	require.Len(t, bo["app"].Allow, 2)
	require.Equal(t, entitlements.EntitlementSecurityInsecure, bo["app"].Allow[0])
	require.Equal(t, entitlements.EntitlementNetworkHost, bo["app"].Allow[1])
}

func TestEntitlementsForNetHostCompose(t *testing.T) {
	fp := File{
		Name: "docker-bake.hcl",
		Data: []byte(
			`target "app" {
				dockerfile = "app.Dockerfile"
			}`),
	}

	fp2 := File{
		Name: "docker-compose.yml",
		Data: []byte(
			`services:
  app:
    build:
      network: "host"
`),
	}

	ctx := context.TODO()
	m, g, err := ReadTargets(ctx, []File{fp, fp2}, []string{"app"}, nil, nil)
	require.NoError(t, err)

	bo, err := TargetsToBuildOpt(m, &Input{})
	require.NoError(t, err)

	require.Equal(t, 1, len(g))
	require.Equal(t, []string{"app"}, g["default"].Targets)

	require.Equal(t, 1, len(m))
	require.Contains(t, m, "app")
	require.Len(t, m["app"].Entitlements, 1)
	require.Equal(t, "network.host", m["app"].Entitlements[0])
	require.Equal(t, "host", *m["app"].NetworkMode)

	require.Len(t, bo["app"].Allow, 1)
	require.Equal(t, entitlements.EntitlementNetworkHost, bo["app"].Allow[0])
	require.Equal(t, "host", bo["app"].NetworkMode)
}

func TestEntitlementsForNetHost(t *testing.T) {
	fp := File{
		Name: "docker-bake.hcl",
		Data: []byte(
			`target "app" {
				dockerfile = "app.Dockerfile"
				network = "host"
			}`),
	}

	ctx := context.TODO()
	m, g, err := ReadTargets(ctx, []File{fp}, []string{"app"}, nil, nil)
	require.NoError(t, err)

	bo, err := TargetsToBuildOpt(m, &Input{})
	require.NoError(t, err)

	require.Equal(t, 1, len(g))
	require.Equal(t, []string{"app"}, g["default"].Targets)

	require.Equal(t, 1, len(m))
	require.Contains(t, m, "app")
	require.Len(t, m["app"].Entitlements, 1)
	require.Equal(t, "network.host", m["app"].Entitlements[0])
	require.Equal(t, "host", *m["app"].NetworkMode)

	require.Len(t, bo["app"].Allow, 1)
	require.Equal(t, entitlements.EntitlementNetworkHost, bo["app"].Allow[0])
	require.Equal(t, "host", bo["app"].NetworkMode)
}

func TestNetNone(t *testing.T) {
	fp := File{
		Name: "docker-bake.hcl",
		Data: []byte(
			`target "app" {
				dockerfile = "app.Dockerfile"
				network = "none"
			}`),
	}

	ctx := context.TODO()
	m, g, err := ReadTargets(ctx, []File{fp}, []string{"app"}, nil, nil)
	require.NoError(t, err)

	bo, err := TargetsToBuildOpt(m, &Input{})
	require.NoError(t, err)

	require.Equal(t, 1, len(g))
	require.Equal(t, []string{"app"}, g["default"].Targets)

	require.Equal(t, 1, len(m))
	require.Contains(t, m, "app")
	require.Len(t, m["app"].Entitlements, 0)
	require.Equal(t, "none", *m["app"].NetworkMode)

	require.Len(t, bo["app"].Allow, 0)
	require.Equal(t, "none", bo["app"].NetworkMode)
}
bake/entitlements.go (new file, 175 lines)

@@ -0,0 +1,175 @@
package bake

import (
	"bufio"
	"context"
	"fmt"
	"io"
	"os"
	"slices"
	"strings"

	"github.com/containerd/console"
	"github.com/docker/buildx/build"
	"github.com/moby/buildkit/util/entitlements"
	"github.com/pkg/errors"
)

type EntitlementKey string

const (
	EntitlementKeyNetworkHost      EntitlementKey = "network.host"
	EntitlementKeySecurityInsecure EntitlementKey = "security.insecure"
	EntitlementKeyFSRead           EntitlementKey = "fs.read"
	EntitlementKeyFSWrite          EntitlementKey = "fs.write"
	EntitlementKeyFS               EntitlementKey = "fs"
	EntitlementKeyImagePush        EntitlementKey = "image.push"
	EntitlementKeyImageLoad        EntitlementKey = "image.load"
	EntitlementKeyImage            EntitlementKey = "image"
	EntitlementKeySSH              EntitlementKey = "ssh"
)

type EntitlementConf struct {
	NetworkHost      bool
	SecurityInsecure bool
	FSRead           []string
	FSWrite          []string
	ImagePush        []string
	ImageLoad        []string
	SSH              bool
}

func ParseEntitlements(in []string) (EntitlementConf, error) {
	var conf EntitlementConf
	for _, e := range in {
		switch e {
		case string(EntitlementKeyNetworkHost):
			conf.NetworkHost = true
		case string(EntitlementKeySecurityInsecure):
			conf.SecurityInsecure = true
		case string(EntitlementKeySSH):
			conf.SSH = true
		default:
			k, v, _ := strings.Cut(e, "=")
			switch k {
			case string(EntitlementKeyFSRead):
				conf.FSRead = append(conf.FSRead, v)
			case string(EntitlementKeyFSWrite):
				conf.FSWrite = append(conf.FSWrite, v)
			case string(EntitlementKeyFS):
				conf.FSRead = append(conf.FSRead, v)
				conf.FSWrite = append(conf.FSWrite, v)
			case string(EntitlementKeyImagePush):
				conf.ImagePush = append(conf.ImagePush, v)
			case string(EntitlementKeyImageLoad):
				conf.ImageLoad = append(conf.ImageLoad, v)
			case string(EntitlementKeyImage):
				conf.ImagePush = append(conf.ImagePush, v)
				conf.ImageLoad = append(conf.ImageLoad, v)
			default:
				return conf, errors.Errorf("uknown entitlement key %q", k)
			}

			// TODO: dedupe slices and parent paths
		}
	}
	return conf, nil
}

func (c EntitlementConf) Validate(m map[string]build.Options) (EntitlementConf, error) {
	var expected EntitlementConf

	for _, v := range m {
		if err := c.check(v, &expected); err != nil {
			return EntitlementConf{}, err
		}
	}

	return expected, nil
}

func (c EntitlementConf) check(bo build.Options, expected *EntitlementConf) error {
	for _, e := range bo.Allow {
		switch e {
		case entitlements.EntitlementNetworkHost:
			if !c.NetworkHost {
				expected.NetworkHost = true
			}
		case entitlements.EntitlementSecurityInsecure:
			if !c.SecurityInsecure {
				expected.SecurityInsecure = true
			}
		}
	}
	return nil
}

func (c EntitlementConf) Prompt(ctx context.Context, out io.Writer) error {
	var term bool
	if _, err := console.ConsoleFromFile(os.Stdin); err == nil {
		term = true
	}

	var msgs []string
	var flags []string

	if c.NetworkHost {
		msgs = append(msgs, " - Running build containers that can access host network")
		flags = append(flags, "network.host")
	}
	if c.SecurityInsecure {
		msgs = append(msgs, " - Running privileged containers that can make system changes")
		flags = append(flags, "security.insecure")
	}

	if len(msgs) == 0 {
		return nil
	}

	fmt.Fprintf(out, "Your build is requesting privileges for following possibly insecure capabilities:\n\n")
	for _, m := range msgs {
		fmt.Fprintf(out, "%s\n", m)
	}

	for i, f := range flags {
		flags[i] = "--allow=" + f
	}

	if term {
		fmt.Fprintf(out, "\nIn order to not see this message in the future pass %q to grant requested privileges.\n", strings.Join(flags, " "))
	} else {
		fmt.Fprintf(out, "\nPass %q to grant requested privileges.\n", strings.Join(flags, " "))
	}

	args := append([]string(nil), os.Args...)
	if v, ok := os.LookupEnv("DOCKER_CLI_PLUGIN_ORIGINAL_CLI_COMMAND"); ok && v != "" {
		args[0] = v
	}
	idx := slices.Index(args, "bake")

	if idx != -1 {
		fmt.Fprintf(out, "\nYour full command with requested privileges:\n\n")
		fmt.Fprintf(out, "%s %s %s\n\n", strings.Join(args[:idx+1], " "), strings.Join(flags, " "), strings.Join(args[idx+1:], " "))
	}

	if term {
		fmt.Fprintf(out, "Do you want to grant requested privileges and continue? [y/N] ")
		reader := bufio.NewReader(os.Stdin)
		answerCh := make(chan string, 1)
		go func() {
			answer, _, _ := reader.ReadLine()
			answerCh <- string(answer)
			close(answerCh)
		}()

		select {
		case <-ctx.Done():
		case answer := <-answerCh:
			if strings.ToLower(string(answer)) == "y" {
				return nil
			}
		}
	}

	return errors.Errorf("additional privileges requested")
}
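For orientation, a hedged sketch of how a caller could wire the exported pieces of this new file together: `ParseEntitlements` for the privileges the user already granted, `Validate` against the per-target `build.Options`, and `Prompt` for anything still outstanding. The package and wiring below are assumptions for illustration, not code from this diff:

```go
// Sketch only: assumed wiring around bake.EntitlementConf (not part of this diff).
package example

import (
	"context"
	"os"

	"github.com/docker/buildx/bake"
	"github.com/docker/buildx/build"
)

// checkEntitlements parses the privileges the user has already granted (for
// example from --allow flags), compares them with what the resolved targets
// request in build.Options.Allow, and prompts for anything still missing.
func checkEntitlements(ctx context.Context, allow []string, opts map[string]build.Options) error {
	granted, err := bake.ParseEntitlements(allow) // e.g. []string{"network.host"}
	if err != nil {
		return err
	}
	missing, err := granted.Validate(opts)
	if err != nil {
		return err
	}
	return missing.Prompt(ctx, os.Stderr)
}
```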
@@ -75,7 +75,12 @@ type WithGetName interface {
	GetName(ectx *hcl.EvalContext, block *hcl.Block, loadDeps func(hcl.Expression) hcl.Diagnostics) (string, error)
}

-var errUndefined = errors.New("undefined")
+// errUndefined is returned when a variable or function is not defined.
+type errUndefined struct{}
+
+func (errUndefined) Error() string {
+	return "undefined"
+}

func (p *parser) loadDeps(ectx *hcl.EvalContext, exp hcl.Expression, exclude map[string]struct{}, allowMissing bool) hcl.Diagnostics {
	fns, hcldiags := funcCalls(exp)

@@ -85,7 +90,7 @@ func (p *parser) loadDeps(ectx *hcl.EvalContext, exp hcl.Expression, exclude map

	for _, fn := range fns {
		if err := p.resolveFunction(ectx, fn); err != nil {
-			if allowMissing && errors.Is(err, errUndefined) {
+			if allowMissing && errors.Is(err, errUndefined{}) {
				continue
			}
			return wrapErrorDiagnostic("Invalid expression", err, exp.Range().Ptr(), exp.Range().Ptr())

@@ -139,7 +144,7 @@ func (p *parser) loadDeps(ectx *hcl.EvalContext, exp hcl.Expression, exclude map
	}
	for _, block := range blocks {
		if err := p.resolveBlock(block, target); err != nil {
-			if allowMissing && errors.Is(err, errUndefined) {
+			if allowMissing && errors.Is(err, errUndefined{}) {
				continue
			}
			return wrapErrorDiagnostic("Invalid expression", err, exp.Range().Ptr(), exp.Range().Ptr())

@@ -147,7 +152,7 @@ func (p *parser) loadDeps(ectx *hcl.EvalContext, exp hcl.Expression, exclude map
	}
	} else {
		if err := p.resolveValue(ectx, v.RootName()); err != nil {
-			if allowMissing && errors.Is(err, errUndefined) {
+			if allowMissing && errors.Is(err, errUndefined{}) {
				continue
			}
			return wrapErrorDiagnostic("Invalid expression", err, exp.Range().Ptr(), exp.Range().Ptr())

@@ -169,7 +174,7 @@ func (p *parser) resolveFunction(ectx *hcl.EvalContext, name string) error {
	}
	f, ok := p.funcs[name]
	if !ok {
-		return errors.Wrapf(errUndefined, "function %q does not exist", name)
+		return errors.Wrapf(errUndefined{}, "function %q does not exist", name)
	}
	if _, ok := p.progressF[key(ectx, name)]; ok {
		return errors.Errorf("function cycle not allowed for %s", name)

@@ -259,7 +264,7 @@ func (p *parser) resolveValue(ectx *hcl.EvalContext, name string) (err error) {
	if _, builtin := p.opt.Vars[name]; !ok && !builtin {
		vr, ok := p.vars[name]
		if !ok {
-			return errors.Wrapf(errUndefined, "variable %q does not exist", name)
+			return errors.Wrapf(errUndefined{}, "variable %q does not exist", name)
		}
		def = vr.Default
		ectx = p.ectx
@@ -1,6 +1,9 @@
|
||||
package hclparser
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/go-cty-funcs/cidr"
|
||||
@@ -9,174 +12,251 @@ import (
|
||||
"github.com/hashicorp/go-cty-funcs/uuid"
|
||||
"github.com/hashicorp/hcl/v2/ext/tryfunc"
|
||||
"github.com/hashicorp/hcl/v2/ext/typeexpr"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
"github.com/zclconf/go-cty/cty/function"
|
||||
"github.com/zclconf/go-cty/cty/function/stdlib"
|
||||
)
|
||||
|
||||
var stdlibFunctions = map[string]function.Function{
|
||||
"absolute": stdlib.AbsoluteFunc,
|
||||
"add": stdlib.AddFunc,
|
||||
"and": stdlib.AndFunc,
|
||||
"base64decode": encoding.Base64DecodeFunc,
|
||||
"base64encode": encoding.Base64EncodeFunc,
|
||||
"bcrypt": crypto.BcryptFunc,
|
||||
"byteslen": stdlib.BytesLenFunc,
|
||||
"bytesslice": stdlib.BytesSliceFunc,
|
||||
"can": tryfunc.CanFunc,
|
||||
"ceil": stdlib.CeilFunc,
|
||||
"chomp": stdlib.ChompFunc,
|
||||
"chunklist": stdlib.ChunklistFunc,
|
||||
"cidrhost": cidr.HostFunc,
|
||||
"cidrnetmask": cidr.NetmaskFunc,
|
||||
"cidrsubnet": cidr.SubnetFunc,
|
||||
"cidrsubnets": cidr.SubnetsFunc,
|
||||
"coalesce": stdlib.CoalesceFunc,
|
||||
"coalescelist": stdlib.CoalesceListFunc,
|
||||
"compact": stdlib.CompactFunc,
|
||||
"concat": stdlib.ConcatFunc,
|
||||
"contains": stdlib.ContainsFunc,
|
||||
"convert": typeexpr.ConvertFunc,
|
||||
"csvdecode": stdlib.CSVDecodeFunc,
|
||||
"distinct": stdlib.DistinctFunc,
|
||||
"divide": stdlib.DivideFunc,
|
||||
"element": stdlib.ElementFunc,
|
||||
"equal": stdlib.EqualFunc,
|
||||
"flatten": stdlib.FlattenFunc,
|
||||
"floor": stdlib.FloorFunc,
|
||||
"format": stdlib.FormatFunc,
|
||||
"formatdate": stdlib.FormatDateFunc,
|
||||
"formatlist": stdlib.FormatListFunc,
|
||||
"greaterthan": stdlib.GreaterThanFunc,
|
||||
"greaterthanorequalto": stdlib.GreaterThanOrEqualToFunc,
|
||||
"hasindex": stdlib.HasIndexFunc,
|
||||
"indent": stdlib.IndentFunc,
|
||||
"index": stdlib.IndexFunc,
|
||||
"indexof": indexOfFunc,
|
||||
"int": stdlib.IntFunc,
|
||||
"join": stdlib.JoinFunc,
|
||||
"jsondecode": stdlib.JSONDecodeFunc,
|
||||
"jsonencode": stdlib.JSONEncodeFunc,
|
||||
"keys": stdlib.KeysFunc,
|
||||
"length": stdlib.LengthFunc,
|
||||
"lessthan": stdlib.LessThanFunc,
|
||||
"lessthanorequalto": stdlib.LessThanOrEqualToFunc,
|
||||
"log": stdlib.LogFunc,
|
||||
"lookup": stdlib.LookupFunc,
|
||||
"lower": stdlib.LowerFunc,
|
||||
"max": stdlib.MaxFunc,
|
||||
"md5": crypto.Md5Func,
|
||||
"merge": stdlib.MergeFunc,
|
||||
"min": stdlib.MinFunc,
|
||||
"modulo": stdlib.ModuloFunc,
|
||||
"multiply": stdlib.MultiplyFunc,
|
||||
"negate": stdlib.NegateFunc,
|
||||
"not": stdlib.NotFunc,
|
||||
"notequal": stdlib.NotEqualFunc,
|
||||
"or": stdlib.OrFunc,
|
||||
"parseint": stdlib.ParseIntFunc,
|
||||
"pow": stdlib.PowFunc,
|
||||
"range": stdlib.RangeFunc,
|
||||
"regex_replace": stdlib.RegexReplaceFunc,
|
||||
"regex": stdlib.RegexFunc,
|
||||
"regexall": stdlib.RegexAllFunc,
|
||||
"replace": stdlib.ReplaceFunc,
|
||||
"reverse": stdlib.ReverseFunc,
|
||||
"reverselist": stdlib.ReverseListFunc,
|
||||
"rsadecrypt": crypto.RsaDecryptFunc,
|
||||
"sethaselement": stdlib.SetHasElementFunc,
|
||||
"setintersection": stdlib.SetIntersectionFunc,
|
||||
"setproduct": stdlib.SetProductFunc,
|
||||
"setsubtract": stdlib.SetSubtractFunc,
|
||||
"setsymmetricdifference": stdlib.SetSymmetricDifferenceFunc,
|
||||
"setunion": stdlib.SetUnionFunc,
|
||||
"sha1": crypto.Sha1Func,
|
||||
"sha256": crypto.Sha256Func,
|
||||
"sha512": crypto.Sha512Func,
|
||||
"signum": stdlib.SignumFunc,
|
||||
"slice": stdlib.SliceFunc,
|
||||
"sort": stdlib.SortFunc,
|
||||
"split": stdlib.SplitFunc,
|
||||
"strlen": stdlib.StrlenFunc,
|
||||
"substr": stdlib.SubstrFunc,
|
||||
"subtract": stdlib.SubtractFunc,
|
||||
"timeadd": stdlib.TimeAddFunc,
|
||||
"timestamp": timestampFunc,
|
||||
"title": stdlib.TitleFunc,
|
||||
"trim": stdlib.TrimFunc,
|
||||
"trimprefix": stdlib.TrimPrefixFunc,
|
||||
"trimspace": stdlib.TrimSpaceFunc,
|
||||
"trimsuffix": stdlib.TrimSuffixFunc,
|
||||
"try": tryfunc.TryFunc,
|
||||
"upper": stdlib.UpperFunc,
|
||||
"urlencode": encoding.URLEncodeFunc,
|
||||
"uuidv4": uuid.V4Func,
|
||||
"uuidv5": uuid.V5Func,
|
||||
"values": stdlib.ValuesFunc,
|
||||
"zipmap": stdlib.ZipmapFunc,
|
||||
type funcDef struct {
|
||||
name string
|
||||
fn function.Function
|
||||
factory func() function.Function
|
||||
}
|
||||
|
||||
var stdlibFunctions = []funcDef{
|
||||
{name: "absolute", fn: stdlib.AbsoluteFunc},
|
||||
{name: "add", fn: stdlib.AddFunc},
|
||||
{name: "and", fn: stdlib.AndFunc},
|
||||
{name: "base64decode", fn: encoding.Base64DecodeFunc},
|
||||
{name: "base64encode", fn: encoding.Base64EncodeFunc},
|
||||
{name: "basename", factory: basenameFunc},
|
||||
{name: "bcrypt", fn: crypto.BcryptFunc},
|
||||
{name: "byteslen", fn: stdlib.BytesLenFunc},
|
||||
{name: "bytesslice", fn: stdlib.BytesSliceFunc},
|
||||
{name: "can", fn: tryfunc.CanFunc},
|
||||
{name: "ceil", fn: stdlib.CeilFunc},
|
||||
{name: "chomp", fn: stdlib.ChompFunc},
|
||||
{name: "chunklist", fn: stdlib.ChunklistFunc},
|
||||
{name: "cidrhost", fn: cidr.HostFunc},
|
||||
{name: "cidrnetmask", fn: cidr.NetmaskFunc},
|
||||
{name: "cidrsubnet", fn: cidr.SubnetFunc},
|
||||
{name: "cidrsubnets", fn: cidr.SubnetsFunc},
|
||||
{name: "coalesce", fn: stdlib.CoalesceFunc},
|
||||
{name: "coalescelist", fn: stdlib.CoalesceListFunc},
|
||||
{name: "compact", fn: stdlib.CompactFunc},
|
||||
{name: "concat", fn: stdlib.ConcatFunc},
|
||||
{name: "contains", fn: stdlib.ContainsFunc},
|
||||
{name: "convert", fn: typeexpr.ConvertFunc},
|
||||
{name: "csvdecode", fn: stdlib.CSVDecodeFunc},
|
||||
{name: "dirname", factory: dirnameFunc},
|
||||
{name: "distinct", fn: stdlib.DistinctFunc},
|
||||
{name: "divide", fn: stdlib.DivideFunc},
|
||||
{name: "element", fn: stdlib.ElementFunc},
|
||||
{name: "equal", fn: stdlib.EqualFunc},
|
||||
{name: "flatten", fn: stdlib.FlattenFunc},
|
||||
{name: "floor", fn: stdlib.FloorFunc},
|
||||
{name: "format", fn: stdlib.FormatFunc},
|
||||
{name: "formatdate", fn: stdlib.FormatDateFunc},
|
||||
{name: "formatlist", fn: stdlib.FormatListFunc},
|
||||
{name: "greaterthan", fn: stdlib.GreaterThanFunc},
|
||||
{name: "greaterthanorequalto", fn: stdlib.GreaterThanOrEqualToFunc},
|
||||
{name: "hasindex", fn: stdlib.HasIndexFunc},
|
||||
{name: "indent", fn: stdlib.IndentFunc},
|
||||
{name: "index", fn: stdlib.IndexFunc},
|
||||
{name: "indexof", factory: indexOfFunc},
|
||||
{name: "int", fn: stdlib.IntFunc},
|
||||
{name: "join", fn: stdlib.JoinFunc},
|
||||
{name: "jsondecode", fn: stdlib.JSONDecodeFunc},
|
||||
{name: "jsonencode", fn: stdlib.JSONEncodeFunc},
|
||||
{name: "keys", fn: stdlib.KeysFunc},
|
||||
{name: "length", fn: stdlib.LengthFunc},
|
||||
{name: "lessthan", fn: stdlib.LessThanFunc},
|
||||
{name: "lessthanorequalto", fn: stdlib.LessThanOrEqualToFunc},
|
||||
{name: "log", fn: stdlib.LogFunc},
|
||||
{name: "lookup", fn: stdlib.LookupFunc},
|
||||
{name: "lower", fn: stdlib.LowerFunc},
|
||||
{name: "max", fn: stdlib.MaxFunc},
|
||||
{name: "md5", fn: crypto.Md5Func},
|
||||
{name: "merge", fn: stdlib.MergeFunc},
|
||||
{name: "min", fn: stdlib.MinFunc},
|
||||
{name: "modulo", fn: stdlib.ModuloFunc},
|
||||
{name: "multiply", fn: stdlib.MultiplyFunc},
|
||||
{name: "negate", fn: stdlib.NegateFunc},
|
||||
{name: "not", fn: stdlib.NotFunc},
|
||||
{name: "notequal", fn: stdlib.NotEqualFunc},
|
||||
{name: "or", fn: stdlib.OrFunc},
|
||||
{name: "parseint", fn: stdlib.ParseIntFunc},
|
||||
{name: "pow", fn: stdlib.PowFunc},
|
||||
{name: "range", fn: stdlib.RangeFunc},
|
||||
{name: "regex_replace", fn: stdlib.RegexReplaceFunc},
|
||||
{name: "regex", fn: stdlib.RegexFunc},
|
||||
{name: "regexall", fn: stdlib.RegexAllFunc},
|
||||
{name: "replace", fn: stdlib.ReplaceFunc},
|
||||
{name: "reverse", fn: stdlib.ReverseFunc},
|
||||
{name: "reverselist", fn: stdlib.ReverseListFunc},
|
||||
{name: "rsadecrypt", fn: crypto.RsaDecryptFunc},
|
||||
{name: "sanitize", factory: sanitizeFunc},
|
||||
{name: "sethaselement", fn: stdlib.SetHasElementFunc},
|
||||
{name: "setintersection", fn: stdlib.SetIntersectionFunc},
|
||||
{name: "setproduct", fn: stdlib.SetProductFunc},
|
||||
{name: "setsubtract", fn: stdlib.SetSubtractFunc},
|
||||
{name: "setsymmetricdifference", fn: stdlib.SetSymmetricDifferenceFunc},
|
||||
{name: "setunion", fn: stdlib.SetUnionFunc},
|
||||
{name: "sha1", fn: crypto.Sha1Func},
|
||||
{name: "sha256", fn: crypto.Sha256Func},
|
||||
{name: "sha512", fn: crypto.Sha512Func},
|
||||
{name: "signum", fn: stdlib.SignumFunc},
|
||||
{name: "slice", fn: stdlib.SliceFunc},
|
||||
{name: "sort", fn: stdlib.SortFunc},
|
||||
{name: "split", fn: stdlib.SplitFunc},
|
||||
{name: "strlen", fn: stdlib.StrlenFunc},
|
||||
{name: "substr", fn: stdlib.SubstrFunc},
|
||||
{name: "subtract", fn: stdlib.SubtractFunc},
|
||||
{name: "timeadd", fn: stdlib.TimeAddFunc},
|
||||
{name: "timestamp", factory: timestampFunc},
|
||||
{name: "title", fn: stdlib.TitleFunc},
|
||||
{name: "trim", fn: stdlib.TrimFunc},
|
||||
{name: "trimprefix", fn: stdlib.TrimPrefixFunc},
|
||||
{name: "trimspace", fn: stdlib.TrimSpaceFunc},
|
||||
{name: "trimsuffix", fn: stdlib.TrimSuffixFunc},
|
||||
{name: "try", fn: tryfunc.TryFunc},
|
||||
{name: "upper", fn: stdlib.UpperFunc},
|
||||
{name: "urlencode", fn: encoding.URLEncodeFunc},
|
||||
{name: "uuidv4", fn: uuid.V4Func},
|
||||
{name: "uuidv5", fn: uuid.V5Func},
|
||||
{name: "values", fn: stdlib.ValuesFunc},
|
||||
{name: "zipmap", fn: stdlib.ZipmapFunc},
|
||||
}
|
||||
|
||||
// indexOfFunc constructs a function that finds the element index for a given
|
||||
// value in a list.
|
||||
var indexOfFunc = function.New(&function.Spec{
|
||||
Params: []function.Parameter{
|
||||
{
|
||||
Name: "list",
|
||||
Type: cty.DynamicPseudoType,
|
||||
func indexOfFunc() function.Function {
|
||||
return function.New(&function.Spec{
|
||||
Params: []function.Parameter{
|
||||
{
|
||||
Name: "list",
|
||||
Type: cty.DynamicPseudoType,
|
||||
},
|
||||
{
|
||||
Name: "value",
|
||||
Type: cty.DynamicPseudoType,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "value",
|
||||
Type: cty.DynamicPseudoType,
|
||||
},
|
||||
},
|
||||
Type: function.StaticReturnType(cty.Number),
|
||||
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
|
||||
if !(args[0].Type().IsListType() || args[0].Type().IsTupleType()) {
|
||||
return cty.NilVal, errors.New("argument must be a list or tuple")
|
||||
}
|
||||
|
||||
if !args[0].IsKnown() {
|
||||
return cty.UnknownVal(cty.Number), nil
|
||||
}
|
||||
|
||||
if args[0].LengthInt() == 0 { // Easy path
|
||||
return cty.NilVal, errors.New("cannot search an empty list")
|
||||
}
|
||||
|
||||
for it := args[0].ElementIterator(); it.Next(); {
|
||||
i, v := it.Element()
|
||||
eq, err := stdlib.Equal(v, args[1])
|
||||
if err != nil {
|
||||
return cty.NilVal, err
|
||||
Type: function.StaticReturnType(cty.Number),
|
||||
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
|
||||
if !(args[0].Type().IsListType() || args[0].Type().IsTupleType()) {
|
||||
return cty.NilVal, errors.New("argument must be a list or tuple")
|
||||
}
|
||||
if !eq.IsKnown() {
|
||||
|
||||
if !args[0].IsKnown() {
|
||||
return cty.UnknownVal(cty.Number), nil
|
||||
}
|
||||
if eq.True() {
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
return cty.NilVal, errors.New("item not found")
|
||||
|
||||
},
|
||||
})
|
||||
if args[0].LengthInt() == 0 { // Easy path
|
||||
return cty.NilVal, errors.New("cannot search an empty list")
|
||||
}
|
||||
|
||||
for it := args[0].ElementIterator(); it.Next(); {
|
||||
i, v := it.Element()
|
||||
eq, err := stdlib.Equal(v, args[1])
|
||||
if err != nil {
|
||||
return cty.NilVal, err
|
||||
}
|
||||
if !eq.IsKnown() {
|
||||
return cty.UnknownVal(cty.Number), nil
|
||||
}
|
||||
if eq.True() {
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
return cty.NilVal, errors.New("item not found")
|
||||
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// basenameFunc constructs a function that returns the last element of a path.
|
||||
func basenameFunc() function.Function {
|
||||
return function.New(&function.Spec{
|
||||
Params: []function.Parameter{
|
||||
{
|
||||
Name: "path",
|
||||
Type: cty.String,
|
||||
},
|
||||
},
|
||||
Type: function.StaticReturnType(cty.String),
|
||||
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
|
||||
in := args[0].AsString()
|
||||
return cty.StringVal(path.Base(in)), nil
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// dirnameFunc constructs a function that returns the directory of a path.
|
||||
func dirnameFunc() function.Function {
|
||||
return function.New(&function.Spec{
|
||||
Params: []function.Parameter{
|
||||
{
|
||||
Name: "path",
|
||||
Type: cty.String,
|
||||
},
|
||||
},
|
||||
Type: function.StaticReturnType(cty.String),
|
||||
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
|
||||
in := args[0].AsString()
|
||||
return cty.StringVal(path.Dir(in)), nil
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// sanitizeFunc constructs a function that replaces all characters outside [a-zA-Z0-9_-] with an underscore,
|
||||
// leaving only characters that are valid for a Bake target name.
|
||||
func sanitizeFunc() function.Function {
|
||||
return function.New(&function.Spec{
|
||||
Params: []function.Parameter{
|
||||
{
|
||||
Name: "name",
|
||||
Type: cty.String,
|
||||
},
|
||||
},
|
||||
Type: function.StaticReturnType(cty.String),
|
||||
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
|
||||
in := args[0].AsString()
|
||||
// only [a-zA-Z0-9_-]+ is allowed
|
||||
var b strings.Builder
|
||||
for _, r := range in {
|
||||
if r >= 'a' && r <= 'z' || r >= 'A' && r <= 'Z' || r >= '0' && r <= '9' || r == '_' || r == '-' {
|
||||
b.WriteRune(r)
|
||||
} else {
|
||||
b.WriteRune('_')
|
||||
}
|
||||
}
|
||||
return cty.StringVal(b.String()), nil
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// timestampFunc constructs a function that returns a string representation of the current date and time.
|
||||
//
|
||||
// This function was imported from terraform's datetime utilities.
|
||||
var timestampFunc = function.New(&function.Spec{
|
||||
Params: []function.Parameter{},
|
||||
Type: function.StaticReturnType(cty.String),
|
||||
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
|
||||
return cty.StringVal(time.Now().UTC().Format(time.RFC3339)), nil
|
||||
},
|
||||
})
|
||||
func timestampFunc() function.Function {
|
||||
return function.New(&function.Spec{
|
||||
Params: []function.Parameter{},
|
||||
Type: function.StaticReturnType(cty.String),
|
||||
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
|
||||
return cty.StringVal(time.Now().UTC().Format(time.RFC3339)), nil
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func Stdlib() map[string]function.Function {
|
||||
funcs := make(map[string]function.Function, len(stdlibFunctions))
|
||||
for k, v := range stdlibFunctions {
|
||||
funcs[k] = v
|
||||
for _, v := range stdlibFunctions {
|
||||
if v.factory != nil {
|
||||
funcs[v.name] = v.factory()
|
||||
} else {
|
||||
funcs[v.name] = v.fn
|
||||
}
|
||||
}
|
||||
return funcs
|
||||
}
|
||||
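For reference, the fn/factory split above can be exercised on its own. The sketch below rebuilds a tiny function map the same way Stdlib does and calls one entry; only the two listed functions are registered here, and dirnameFunc is re-created locally so the example runs without importing buildx:

package main

import (
	"fmt"
	"path"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

// funcDef mirrors the pattern in the hunk above: a definition either carries a
// ready-made function value or a factory that builds a fresh one per call.
type funcDef struct {
	name    string
	fn      function.Function
	factory func() function.Function
}

func dirnameFunc() function.Function {
	return function.New(&function.Spec{
		Params: []function.Parameter{{Name: "path", Type: cty.String}},
		Type:   function.StaticReturnType(cty.String),
		Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
			return cty.StringVal(path.Dir(args[0].AsString())), nil
		},
	})
}

// buildFuncs resolves factories into concrete functions, like Stdlib above.
func buildFuncs(defs []funcDef) map[string]function.Function {
	out := make(map[string]function.Function, len(defs))
	for _, d := range defs {
		if d.factory != nil {
			out[d.name] = d.factory()
		} else {
			out[d.name] = d.fn
		}
	}
	return out
}

func main() {
	funcs := buildFuncs([]funcDef{
		{name: "lower", fn: stdlib.LowerFunc},
		{name: "dirname", factory: dirnameFunc},
	})
	v, err := funcs["dirname"].Call([]cty.Value{cty.StringVal("/src/app/Dockerfile")})
	if err != nil {
		panic(err)
	}
	fmt.Println(v.AsString()) // "/src/app"
}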
|
@@ -3,6 +3,7 @@ package hclparser
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
@@ -34,16 +35,165 @@ func TestIndexOf(t *testing.T) {
|
||||
for name, test := range tests {
|
||||
name, test := name, test
|
||||
t.Run(name, func(t *testing.T) {
|
||||
got, err := indexOfFunc.Call([]cty.Value{test.input, test.key})
|
||||
if err != nil {
|
||||
if test.wantErr {
|
||||
return
|
||||
}
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if !got.RawEquals(test.want) {
|
||||
t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.want)
|
||||
got, err := indexOfFunc().Call([]cty.Value{test.input, test.key})
|
||||
if test.wantErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, test.want, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBasename(t *testing.T) {
|
||||
type testCase struct {
|
||||
input cty.Value
|
||||
want cty.Value
|
||||
wantErr bool
|
||||
}
|
||||
tests := map[string]testCase{
|
||||
"empty": {
|
||||
input: cty.StringVal(""),
|
||||
want: cty.StringVal("."),
|
||||
},
|
||||
"slash": {
|
||||
input: cty.StringVal("/"),
|
||||
want: cty.StringVal("/"),
|
||||
},
|
||||
"simple": {
|
||||
input: cty.StringVal("/foo/bar"),
|
||||
want: cty.StringVal("bar"),
|
||||
},
|
||||
"simple no slash": {
|
||||
input: cty.StringVal("foo/bar"),
|
||||
want: cty.StringVal("bar"),
|
||||
},
|
||||
"dot": {
|
||||
input: cty.StringVal("/foo/bar."),
|
||||
want: cty.StringVal("bar."),
|
||||
},
|
||||
"dotdot": {
|
||||
input: cty.StringVal("/foo/bar.."),
|
||||
want: cty.StringVal("bar.."),
|
||||
},
|
||||
"dotdotdot": {
|
||||
input: cty.StringVal("/foo/bar..."),
|
||||
want: cty.StringVal("bar..."),
|
||||
},
|
||||
}
|
||||
|
||||
for name, test := range tests {
|
||||
name, test := name, test
|
||||
t.Run(name, func(t *testing.T) {
|
||||
got, err := basenameFunc().Call([]cty.Value{test.input})
|
||||
if test.wantErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, test.want, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDirname(t *testing.T) {
|
||||
type testCase struct {
|
||||
input cty.Value
|
||||
want cty.Value
|
||||
wantErr bool
|
||||
}
|
||||
tests := map[string]testCase{
|
||||
"empty": {
|
||||
input: cty.StringVal(""),
|
||||
want: cty.StringVal("."),
|
||||
},
|
||||
"slash": {
|
||||
input: cty.StringVal("/"),
|
||||
want: cty.StringVal("/"),
|
||||
},
|
||||
"simple": {
|
||||
input: cty.StringVal("/foo/bar"),
|
||||
want: cty.StringVal("/foo"),
|
||||
},
|
||||
"simple no slash": {
|
||||
input: cty.StringVal("foo/bar"),
|
||||
want: cty.StringVal("foo"),
|
||||
},
|
||||
"dot": {
|
||||
input: cty.StringVal("/foo/bar."),
|
||||
want: cty.StringVal("/foo"),
|
||||
},
|
||||
"dotdot": {
|
||||
input: cty.StringVal("/foo/bar.."),
|
||||
want: cty.StringVal("/foo"),
|
||||
},
|
||||
"dotdotdot": {
|
||||
input: cty.StringVal("/foo/bar..."),
|
||||
want: cty.StringVal("/foo"),
|
||||
},
|
||||
}
|
||||
|
||||
for name, test := range tests {
|
||||
name, test := name, test
|
||||
t.Run(name, func(t *testing.T) {
|
||||
got, err := dirnameFunc().Call([]cty.Value{test.input})
|
||||
if test.wantErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, test.want, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSanitize(t *testing.T) {
|
||||
type testCase struct {
|
||||
input cty.Value
|
||||
want cty.Value
|
||||
}
|
||||
tests := map[string]testCase{
|
||||
"empty": {
|
||||
input: cty.StringVal(""),
|
||||
want: cty.StringVal(""),
|
||||
},
|
||||
"simple": {
|
||||
input: cty.StringVal("foo/bar"),
|
||||
want: cty.StringVal("foo_bar"),
|
||||
},
|
||||
"simple no slash": {
|
||||
input: cty.StringVal("foobar"),
|
||||
want: cty.StringVal("foobar"),
|
||||
},
|
||||
"dot": {
|
||||
input: cty.StringVal("foo/bar."),
|
||||
want: cty.StringVal("foo_bar_"),
|
||||
},
|
||||
"dotdot": {
|
||||
input: cty.StringVal("foo/bar.."),
|
||||
want: cty.StringVal("foo_bar__"),
|
||||
},
|
||||
"dotdotdot": {
|
||||
input: cty.StringVal("foo/bar..."),
|
||||
want: cty.StringVal("foo_bar___"),
|
||||
},
|
||||
"utf8": {
|
||||
input: cty.StringVal("foo/🍕bar"),
|
||||
want: cty.StringVal("foo__bar"),
|
||||
},
|
||||
"symbols": {
|
||||
input: cty.StringVal("foo/bar!@(ba+z)"),
|
||||
want: cty.StringVal("foo_bar___ba_z_"),
|
||||
},
|
||||
}
|
||||
|
||||
for name, test := range tests {
|
||||
name, test := name, test
|
||||
t.Run(name, func(t *testing.T) {
|
||||
got, err := sanitizeFunc().Call([]cty.Value{test.input})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, test.want, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
220
build/build.go
@@ -9,6 +9,7 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -34,6 +35,7 @@ import (
|
||||
gateway "github.com/moby/buildkit/frontend/gateway/client"
|
||||
"github.com/moby/buildkit/identity"
|
||||
"github.com/moby/buildkit/session"
|
||||
"github.com/moby/buildkit/session/filesync"
|
||||
"github.com/moby/buildkit/solver/errdefs"
|
||||
"github.com/moby/buildkit/solver/pb"
|
||||
spb "github.com/moby/buildkit/sourcepolicy/pb"
|
||||
@@ -44,15 +46,12 @@ import (
|
||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/tonistiigi/fsutil"
|
||||
fstypes "github.com/tonistiigi/fsutil/types"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
var (
|
||||
errStdinConflict = errors.New("invalid argument: can't use stdin for both build context and dockerfile")
|
||||
errDockerfileConflict = errors.New("ambiguous Dockerfile source: both stdin and flag correspond to Dockerfiles")
|
||||
)
|
||||
|
||||
const (
|
||||
printFallbackImage = "docker/dockerfile:1.5@sha256:dbbd5e059e8a07ff7ea6233b213b36aa516b4c53c645f1817a4dd18b83cbea56"
|
||||
printLintFallbackImage = "docker.io/docker/dockerfile-upstream:1.8.1@sha256:e87caa74dcb7d46cd820352bfea12591f3dba3ddc4285e19c7dcd13359f7cefd"
|
||||
@@ -83,13 +82,13 @@ type Options struct {
|
||||
|
||||
Session []session.Attachable
|
||||
Linked bool // Linked marks this target as exclusively linked (not requested by the user).
|
||||
PrintFunc *PrintFunc
|
||||
CallFunc *CallFunc
|
||||
ProvenanceResponseMode confutil.MetadataProvenanceMode
|
||||
SourcePolicy *spb.Policy
|
||||
GroupRef string
|
||||
}
|
||||
|
||||
type PrintFunc struct {
|
||||
type CallFunc struct {
|
||||
Name string
|
||||
Format string
|
||||
IgnoreStatus bool
|
||||
@@ -98,7 +97,7 @@ type PrintFunc struct {
|
||||
type Inputs struct {
|
||||
ContextPath string
|
||||
DockerfilePath string
|
||||
InStream io.Reader
|
||||
InStream *SyncMultiReader
|
||||
ContextState *llb.State
|
||||
DockerfileInline string
|
||||
NamedContexts map[string]NamedContext
|
||||
@@ -170,7 +169,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
|
||||
}
|
||||
}
|
||||
|
||||
if noMobyDriver != nil && !noDefaultLoad() && noPrintFunc(opt) {
|
||||
if noMobyDriver != nil && !noDefaultLoad() && noCallFunc(opt) {
|
||||
var noOutputTargets []string
|
||||
for name, opt := range opt {
|
||||
if noMobyDriver.Features(ctx)[driver.DefaultLoad] {
|
||||
@@ -213,7 +212,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
|
||||
for k, opt := range opt {
|
||||
multiDriver := len(drivers[k]) > 1
|
||||
hasMobyDriver := false
|
||||
gitattrs, addVCSLocalDir, err := getGitAttributes(ctx, opt.Inputs.ContextPath, opt.Inputs.DockerfilePath)
|
||||
addGitAttrs, err := getGitAttributes(ctx, opt.Inputs.ContextPath, opt.Inputs.DockerfilePath)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Warn("current commit information was not captured by the build")
|
||||
}
|
||||
@@ -230,16 +229,14 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
so, release, err := toSolveOpt(ctx, np.Node(), multiDriver, opt, gatewayOpts, configDir, addVCSLocalDir, w, docker)
|
||||
so, release, err := toSolveOpt(ctx, np.Node(), multiDriver, opt, gatewayOpts, configDir, w, docker)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := saveLocalState(so, k, opt, np.Node(), configDir); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for k, v := range gitattrs {
|
||||
so.FrontendAttrs[k] = v
|
||||
}
|
||||
addGitAttrs(so)
|
||||
defers = append(defers, release)
|
||||
reqn = append(reqn, &reqForNode{
|
||||
resolvedNode: np,
|
||||
@@ -298,6 +295,12 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
|
||||
}
|
||||
}
|
||||
|
||||
sharedSessions, err := detectSharedMounts(ctx, reqForNodes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sharedSessionsWG := map[string]*sync.WaitGroup{}
|
||||
|
||||
resp = map[string]*client.SolveResponse{}
|
||||
var respMu sync.Mutex
|
||||
results := waitmap.New()
|
||||
@@ -306,7 +309,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
|
||||
childTargets := calculateChildTargets(reqForNodes, opt)
|
||||
|
||||
for k, opt := range opt {
|
||||
err := func(k string) error {
|
||||
err := func(k string) (err error) {
|
||||
opt := opt
|
||||
dps := drivers[k]
|
||||
multiDriver := len(drivers[k]) > 1
|
||||
@@ -318,6 +321,12 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
|
||||
}
|
||||
baseCtx := ctx
|
||||
|
||||
if multiTarget {
|
||||
defer func() {
|
||||
err = errors.Wrapf(err, "target %s", k)
|
||||
}()
|
||||
}
|
||||
|
||||
res := make([]*client.SolveResponse, len(dps))
|
||||
eg2, ctx := errgroup.WithContext(ctx)
|
||||
|
||||
@@ -362,7 +371,37 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var done func()
|
||||
if sessions, ok := sharedSessions[node.Name]; ok {
|
||||
wg, ok := sharedSessionsWG[node.Name]
|
||||
if ok {
|
||||
wg.Add(1)
|
||||
} else {
|
||||
wg = &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
sharedSessionsWG[node.Name] = wg
|
||||
for _, s := range sessions {
|
||||
s := s
|
||||
eg.Go(func() error {
|
||||
return s.Run(baseCtx, c.Dialer())
|
||||
})
|
||||
}
|
||||
go func() {
|
||||
wg.Wait()
|
||||
for _, s := range sessions {
|
||||
s.Close()
|
||||
}
|
||||
}()
|
||||
}
|
||||
done = wg.Done
|
||||
}
|
||||
|
||||
eg2.Go(func() error {
|
||||
if done != nil {
|
||||
defer done()
|
||||
}
|
||||
|
||||
pw = progress.ResetTime(pw)
|
||||
|
||||
if err := waitContextDeps(ctx, dp.driverIndex, results, so); err != nil {
|
||||
@@ -393,15 +432,15 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
|
||||
defer func() { <-done }()
|
||||
|
||||
cc := c
|
||||
var printRes map[string][]byte
|
||||
var callRes map[string][]byte
|
||||
buildFunc := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
|
||||
if opt.PrintFunc != nil {
|
||||
if opt.CallFunc != nil {
|
||||
if _, ok := req.FrontendOpt["frontend.caps"]; !ok {
|
||||
req.FrontendOpt["frontend.caps"] = "moby.buildkit.frontend.subrequests+forward"
|
||||
} else {
|
||||
req.FrontendOpt["frontend.caps"] += ",moby.buildkit.frontend.subrequests+forward"
|
||||
}
|
||||
req.FrontendOpt["requestid"] = "frontend." + opt.PrintFunc.Name
|
||||
req.FrontendOpt["requestid"] = "frontend." + opt.CallFunc.Name
|
||||
}
|
||||
|
||||
res, err := c.Solve(ctx, req)
|
||||
@@ -417,8 +456,8 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if opt.PrintFunc != nil {
|
||||
printRes = res.Metadata
|
||||
if opt.CallFunc != nil {
|
||||
callRes = res.Metadata
|
||||
}
|
||||
|
||||
rKey := resultKey(dp.driverIndex, k)
|
||||
@@ -474,13 +513,15 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
|
||||
if rr.ExporterResponse == nil {
|
||||
rr.ExporterResponse = map[string]string{}
|
||||
}
|
||||
for k, v := range printRes {
|
||||
for k, v := range callRes {
|
||||
rr.ExporterResponse[k] = string(v)
|
||||
}
|
||||
rr.ExporterResponse["buildx.build.ref"] = buildRef
|
||||
if node.Driver.HistoryAPISupported(ctx) {
|
||||
if err := setRecordProvenance(ctx, c, rr, so.Ref, opt.ProvenanceResponseMode, pw); err != nil {
|
||||
return err
|
||||
if opt.CallFunc == nil {
|
||||
rr.ExporterResponse["buildx.build.ref"] = buildRef
|
||||
if node.Driver.HistoryAPISupported(ctx) {
|
||||
if err := setRecordProvenance(ctx, c, rr, so.Ref, opt.ProvenanceResponseMode, pw); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -530,6 +571,13 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
|
||||
tracing.FinishWithError(span, err)
|
||||
}
|
||||
}()
|
||||
|
||||
if multiTarget {
|
||||
defer func() {
|
||||
err = errors.Wrapf(err, "target %s", k)
|
||||
}()
|
||||
}
|
||||
|
||||
pw := progress.WithPrefix(w, "default", false)
|
||||
if err := eg2.Wait(); err != nil {
|
||||
return err
|
||||
@@ -793,6 +841,124 @@ func resultKey(index int, name string) string {
|
||||
return fmt.Sprintf("%d-%s", index, name)
|
||||
}
|
||||
|
||||
// detectSharedMounts looks for the same local mounts used by multiple requests to the same node
|
||||
// and creates a separate session that will be used by all detected requests.
|
||||
func detectSharedMounts(ctx context.Context, reqs map[string][]*reqForNode) (_ map[string][]*session.Session, err error) {
|
||||
type fsTracker struct {
|
||||
fs fsutil.FS
|
||||
so []*client.SolveOpt
|
||||
}
|
||||
type fsKey struct {
|
||||
name string
|
||||
dir string
|
||||
}
|
||||
|
||||
m := map[string]map[fsKey]*fsTracker{}
|
||||
for _, reqs := range reqs {
|
||||
for _, req := range reqs {
|
||||
nodeName := req.resolvedNode.Node().Name
|
||||
if _, ok := m[nodeName]; !ok {
|
||||
m[nodeName] = map[fsKey]*fsTracker{}
|
||||
}
|
||||
fsMap := m[nodeName]
|
||||
for name, m := range req.so.LocalMounts {
|
||||
fs, ok := m.(*fs)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
key := fsKey{name: name, dir: fs.dir}
|
||||
if _, ok := fsMap[key]; !ok {
|
||||
fsMap[key] = &fsTracker{fs: fs.FS}
|
||||
}
|
||||
fsMap[key].so = append(fsMap[key].so, req.so)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type sharedSession struct {
|
||||
*session.Session
|
||||
fsMap map[string]fsutil.FS
|
||||
}
|
||||
|
||||
sessionMap := map[string][]*sharedSession{}
|
||||
|
||||
defer func() {
|
||||
if err != nil {
|
||||
for _, sessions := range sessionMap {
|
||||
for _, s := range sessions {
|
||||
s.Close()
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
for node, fsMap := range m {
|
||||
for key, fs := range fsMap {
|
||||
if len(fs.so) <= 1 {
|
||||
continue
|
||||
}
|
||||
|
||||
sessions := sessionMap[node]
|
||||
|
||||
// find session that doesn't have the fs name reserved
|
||||
idx := slices.IndexFunc(sessions, func(s *sharedSession) bool {
|
||||
_, ok := s.fsMap[key.name]
|
||||
return !ok
|
||||
})
|
||||
|
||||
var ss *sharedSession
|
||||
if idx == -1 {
|
||||
s, err := session.NewSession(ctx, fs.so[0].SharedKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ss = &sharedSession{Session: s, fsMap: map[string]fsutil.FS{}}
|
||||
sessions = append(sessions, ss)
|
||||
sessionMap[node] = sessions
|
||||
} else {
|
||||
ss = sessions[idx]
|
||||
}
|
||||
|
||||
ss.fsMap[key.name] = fs.fs
|
||||
for _, so := range fs.so {
|
||||
if so.FrontendAttrs == nil {
|
||||
so.FrontendAttrs = map[string]string{}
|
||||
}
|
||||
so.FrontendAttrs["local-sessionid:"+key.name] = ss.ID()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resetUIDAndGID := func(p string, st *fstypes.Stat) fsutil.MapResult {
|
||||
st.Uid = 0
|
||||
st.Gid = 0
|
||||
return fsutil.MapResultKeep
|
||||
}
|
||||
|
||||
// convert back to regular sessions
|
||||
sessions := map[string][]*session.Session{}
|
||||
for n, ss := range sessionMap {
|
||||
arr := make([]*session.Session, 0, len(ss))
|
||||
for _, s := range ss {
|
||||
arr = append(arr, s.Session)
|
||||
|
||||
src := make(filesync.StaticDirSource, len(s.fsMap))
|
||||
for name, fs := range s.fsMap {
|
||||
fs, err := fsutil.NewFilterFS(fs, &fsutil.FilterOpt{
|
||||
Map: resetUIDAndGID,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
src[name] = fs
|
||||
}
|
||||
s.Allow(filesync.NewFSSyncProvider(src))
|
||||
}
|
||||
sessions[n] = arr
|
||||
}
|
||||
return sessions, nil
|
||||
}
|
||||
|
||||
// calculateChildTargets returns all the targets that depend on the current target, for use as a reverse index
|
||||
func calculateChildTargets(reqs map[string][]*reqForNode, opt map[string]Options) map[string][]string {
|
||||
out := make(map[string][]string)
|
||||
@@ -940,9 +1106,9 @@ func fallbackPrintError(err error, req gateway.SolveRequest) (gateway.SolveReque
|
||||
return req, false
|
||||
}
|
||||
|
||||
func noPrintFunc(opt map[string]Options) bool {
|
||||
func noCallFunc(opt map[string]Options) bool {
|
||||
for _, v := range opt {
|
||||
if v.PrintFunc != nil {
|
||||
if v.CallFunc != nil {
|
||||
return false
|
||||
}
|
||||
}
|
||||
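The `err := func(k string) (err error)` change above leans on a named return value plus a deferred Wrapf, so every error path out of the closure gets the target name attached. A minimal sketch of that pattern:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// run uses the named-return pattern from the hunk above: a deferred wrapper
// adds the target name to whatever error the body eventually returns.
// errors.Wrapf(nil, ...) returns nil, so the success path is unaffected.
func run(target string) (err error) {
	defer func() {
		err = errors.Wrapf(err, "target %s", target)
	}()
	return errors.New("exit status 1")
}

func main() {
	fmt.Println(run("app")) // "target app: exit status 1"
}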
|
71
build/git.go
@@ -17,10 +17,19 @@ import (
|
||||
|
||||
const DockerfileLabel = "com.docker.image.source.entrypoint"
|
||||
|
||||
func getGitAttributes(ctx context.Context, contextPath string, dockerfilePath string) (map[string]string, func(key, dir string, so *client.SolveOpt), error) {
|
||||
res := make(map[string]string)
|
||||
type gitAttrsAppendFunc func(so *client.SolveOpt)
|
||||
|
||||
func gitAppendNoneFunc(_ *client.SolveOpt) {}
|
||||
|
||||
func getGitAttributes(ctx context.Context, contextPath, dockerfilePath string) (f gitAttrsAppendFunc, err error) {
|
||||
defer func() {
|
||||
if f == nil {
|
||||
f = gitAppendNoneFunc
|
||||
}
|
||||
}()
|
||||
|
||||
if contextPath == "" {
|
||||
return nil, nil, nil
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
setGitLabels := false
|
||||
@@ -39,7 +48,7 @@ func getGitAttributes(ctx context.Context, contextPath string, dockerfilePath st
|
||||
}
|
||||
|
||||
if !setGitLabels && !setGitInfo {
|
||||
return nil, nil, nil
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// figure out which directory the git command needs to run in
|
||||
@@ -54,25 +63,27 @@ func getGitAttributes(ctx context.Context, contextPath string, dockerfilePath st
|
||||
gitc, err := gitutil.New(gitutil.WithContext(ctx), gitutil.WithWorkingDir(wd))
|
||||
if err != nil {
|
||||
if st, err1 := os.Stat(path.Join(wd, ".git")); err1 == nil && st.IsDir() {
|
||||
return res, nil, errors.Wrap(err, "git was not found in the system")
|
||||
return nil, errors.Wrap(err, "git was not found in the system")
|
||||
}
|
||||
return nil, nil, nil
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if !gitc.IsInsideWorkTree() {
|
||||
if st, err := os.Stat(path.Join(wd, ".git")); err == nil && st.IsDir() {
|
||||
return res, nil, errors.New("failed to read current commit information with git rev-parse --is-inside-work-tree")
|
||||
return nil, errors.New("failed to read current commit information with git rev-parse --is-inside-work-tree")
|
||||
}
|
||||
return nil, nil, nil
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
root, err := gitc.RootDir()
|
||||
if err != nil {
|
||||
return res, nil, errors.Wrap(err, "failed to get git root dir")
|
||||
return nil, errors.Wrap(err, "failed to get git root dir")
|
||||
}
|
||||
|
||||
res := make(map[string]string)
|
||||
|
||||
if sha, err := gitc.FullCommit(); err != nil && !gitutil.IsUnknownRevision(err) {
|
||||
return res, nil, errors.Wrap(err, "failed to get git commit")
|
||||
return nil, errors.Wrap(err, "failed to get git commit")
|
||||
} else if sha != "" {
|
||||
checkDirty := false
|
||||
if v, ok := os.LookupEnv("BUILDX_GIT_CHECK_DIRTY"); ok {
|
||||
@@ -112,20 +123,38 @@ func getGitAttributes(ctx context.Context, contextPath string, dockerfilePath st
|
||||
}
|
||||
}
|
||||
|
||||
return res, func(key, dir string, so *client.SolveOpt) {
|
||||
return func(so *client.SolveOpt) {
|
||||
if so.FrontendAttrs == nil {
|
||||
so.FrontendAttrs = make(map[string]string)
|
||||
}
|
||||
for k, v := range res {
|
||||
so.FrontendAttrs[k] = v
|
||||
}
|
||||
|
||||
if !setGitInfo || root == "" {
|
||||
return
|
||||
}
|
||||
dir, err := filepath.Abs(dir)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if lp, err := osutil.GetLongPathName(dir); err == nil {
|
||||
dir = lp
|
||||
}
|
||||
dir = osutil.SanitizePath(dir)
|
||||
if r, err := filepath.Rel(root, dir); err == nil && !strings.HasPrefix(r, "..") {
|
||||
so.FrontendAttrs["vcs:localdir:"+key] = r
|
||||
|
||||
for key, mount := range so.LocalMounts {
|
||||
fs, ok := mount.(*fs)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
dir, err := filepath.EvalSymlinks(fs.dir) // keep same behavior as fsutil.NewFS
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
dir, err = filepath.Abs(dir)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if lp, err := osutil.GetLongPathName(dir); err == nil {
|
||||
dir = lp
|
||||
}
|
||||
dir = osutil.SanitizePath(dir)
|
||||
if r, err := filepath.Rel(root, dir); err == nil && !strings.HasPrefix(r, "..") {
|
||||
so.FrontendAttrs["vcs:localdir:"+key] = r
|
||||
}
|
||||
}
|
||||
}, nil
|
||||
}
|
||||
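The refactor above replaces the (map, callback) pair with a single closure that mutates the SolveOpt. A rough sketch of the shape, with a stand-in struct instead of buildkit's client.SolveOpt and made-up attribute values:

package main

import "fmt"

// solveOpt stands in for buildkit's client.SolveOpt in this sketch.
type solveOpt struct {
	FrontendAttrs map[string]string
}

type attrsAppendFunc func(so *solveOpt)

// getAttrs gathers attributes once and returns a closure that applies them,
// so callers no longer juggle a map plus a separate per-mount callback.
func getAttrs() (attrsAppendFunc, error) {
	res := map[string]string{
		"vcs:source":   "git@example.com:org/repo.git", // hypothetical values
		"vcs:revision": "deadbeef",
	}
	return func(so *solveOpt) {
		if so.FrontendAttrs == nil {
			so.FrontendAttrs = make(map[string]string)
		}
		for k, v := range res {
			so.FrontendAttrs[k] = v
		}
	}, nil
}

func main() {
	addGitAttrs, err := getAttrs()
	if err != nil {
		panic(err)
	}
	var so solveOpt
	addGitAttrs(&so)
	fmt.Println(so.FrontendAttrs["vcs:revision"]) // "deadbeef"
}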
|
@@ -31,7 +31,7 @@ func setupTest(tb testing.TB) {
|
||||
}
|
||||
|
||||
func TestGetGitAttributesNotGitRepo(t *testing.T) {
|
||||
_, _, err := getGitAttributes(context.Background(), t.TempDir(), "Dockerfile")
|
||||
_, err := getGitAttributes(context.Background(), t.TempDir(), "Dockerfile")
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -39,16 +39,18 @@ func TestGetGitAttributesBadGitRepo(t *testing.T) {
|
||||
tmp := t.TempDir()
|
||||
require.NoError(t, os.MkdirAll(path.Join(tmp, ".git"), 0755))
|
||||
|
||||
_, _, err := getGitAttributes(context.Background(), tmp, "Dockerfile")
|
||||
_, err := getGitAttributes(context.Background(), tmp, "Dockerfile")
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestGetGitAttributesNoContext(t *testing.T) {
|
||||
setupTest(t)
|
||||
|
||||
gitattrs, _, err := getGitAttributes(context.Background(), "", "Dockerfile")
|
||||
addGitAttrs, err := getGitAttributes(context.Background(), "", "Dockerfile")
|
||||
assert.NoError(t, err)
|
||||
assert.Empty(t, gitattrs)
|
||||
var so client.SolveOpt
|
||||
addGitAttrs(&so)
|
||||
assert.Empty(t, so.FrontendAttrs)
|
||||
}
|
||||
|
||||
func TestGetGitAttributes(t *testing.T) {
|
||||
@@ -115,15 +117,17 @@ func TestGetGitAttributes(t *testing.T) {
|
||||
if tt.envGitInfo != "" {
|
||||
t.Setenv("BUILDX_GIT_INFO", tt.envGitInfo)
|
||||
}
|
||||
gitattrs, _, err := getGitAttributes(context.Background(), ".", "Dockerfile")
|
||||
addGitAttrs, err := getGitAttributes(context.Background(), ".", "Dockerfile")
|
||||
require.NoError(t, err)
|
||||
var so client.SolveOpt
|
||||
addGitAttrs(&so)
|
||||
for _, e := range tt.expected {
|
||||
assert.Contains(t, gitattrs, e)
|
||||
assert.NotEmpty(t, gitattrs[e])
|
||||
assert.Contains(t, so.FrontendAttrs, e)
|
||||
assert.NotEmpty(t, so.FrontendAttrs[e])
|
||||
if e == "label:"+DockerfileLabel {
|
||||
assert.Equal(t, "Dockerfile", gitattrs[e])
|
||||
assert.Equal(t, "Dockerfile", so.FrontendAttrs[e])
|
||||
} else if e == "label:"+specs.AnnotationSource || e == "vcs:source" {
|
||||
assert.Equal(t, "git@github.com:docker/buildx.git", gitattrs[e])
|
||||
assert.Equal(t, "git@github.com:docker/buildx.git", so.FrontendAttrs[e])
|
||||
}
|
||||
}
|
||||
})
|
||||
@@ -140,20 +144,25 @@ func TestGetGitAttributesDirty(t *testing.T) {
|
||||
require.NoError(t, os.WriteFile(filepath.Join("dir", "Dockerfile"), df, 0644))
|
||||
|
||||
t.Setenv("BUILDX_GIT_LABELS", "true")
|
||||
gitattrs, _, _ := getGitAttributes(context.Background(), ".", "Dockerfile")
|
||||
assert.Equal(t, 5, len(gitattrs))
|
||||
addGitAttrs, err := getGitAttributes(context.Background(), ".", "Dockerfile")
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, gitattrs, "label:"+DockerfileLabel)
|
||||
assert.Equal(t, "Dockerfile", gitattrs["label:"+DockerfileLabel])
|
||||
assert.Contains(t, gitattrs, "label:"+specs.AnnotationSource)
|
||||
assert.Equal(t, "git@github.com:docker/buildx.git", gitattrs["label:"+specs.AnnotationSource])
|
||||
assert.Contains(t, gitattrs, "label:"+specs.AnnotationRevision)
|
||||
assert.True(t, strings.HasSuffix(gitattrs["label:"+specs.AnnotationRevision], "-dirty"))
|
||||
var so client.SolveOpt
|
||||
addGitAttrs(&so)
|
||||
|
||||
assert.Contains(t, gitattrs, "vcs:source")
|
||||
assert.Equal(t, "git@github.com:docker/buildx.git", gitattrs["vcs:source"])
|
||||
assert.Contains(t, gitattrs, "vcs:revision")
|
||||
assert.True(t, strings.HasSuffix(gitattrs["vcs:revision"], "-dirty"))
|
||||
assert.Equal(t, 5, len(so.FrontendAttrs))
|
||||
|
||||
assert.Contains(t, so.FrontendAttrs, "label:"+DockerfileLabel)
|
||||
assert.Equal(t, "Dockerfile", so.FrontendAttrs["label:"+DockerfileLabel])
|
||||
assert.Contains(t, so.FrontendAttrs, "label:"+specs.AnnotationSource)
|
||||
assert.Equal(t, "git@github.com:docker/buildx.git", so.FrontendAttrs["label:"+specs.AnnotationSource])
|
||||
assert.Contains(t, so.FrontendAttrs, "label:"+specs.AnnotationRevision)
|
||||
assert.True(t, strings.HasSuffix(so.FrontendAttrs["label:"+specs.AnnotationRevision], "-dirty"))
|
||||
|
||||
assert.Contains(t, so.FrontendAttrs, "vcs:source")
|
||||
assert.Equal(t, "git@github.com:docker/buildx.git", so.FrontendAttrs["vcs:source"])
|
||||
assert.Contains(t, so.FrontendAttrs, "vcs:revision")
|
||||
assert.True(t, strings.HasSuffix(so.FrontendAttrs["vcs:revision"], "-dirty"))
|
||||
}
|
||||
|
||||
func TestLocalDirs(t *testing.T) {
|
||||
@@ -163,15 +172,17 @@ func TestLocalDirs(t *testing.T) {
|
||||
FrontendAttrs: map[string]string{},
|
||||
}
|
||||
|
||||
_, addVCSLocalDir, err := getGitAttributes(context.Background(), ".", "Dockerfile")
|
||||
addGitAttrs, err := getGitAttributes(context.Background(), ".", "Dockerfile")
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, addVCSLocalDir)
|
||||
|
||||
require.NoError(t, setLocalMount("context", ".", so, addVCSLocalDir))
|
||||
require.NoError(t, setLocalMount("context", ".", so))
|
||||
require.NoError(t, setLocalMount("dockerfile", ".", so))
|
||||
|
||||
addGitAttrs(so)
|
||||
|
||||
require.Contains(t, so.FrontendAttrs, "vcs:localdir:context")
|
||||
assert.Equal(t, ".", so.FrontendAttrs["vcs:localdir:context"])
|
||||
|
||||
require.NoError(t, setLocalMount("dockerfile", ".", so, addVCSLocalDir))
|
||||
require.Contains(t, so.FrontendAttrs, "vcs:localdir:dockerfile")
|
||||
assert.Equal(t, ".", so.FrontendAttrs["vcs:localdir:dockerfile"])
|
||||
}
|
||||
@@ -194,16 +205,17 @@ func TestLocalDirsSub(t *testing.T) {
|
||||
so := &client.SolveOpt{
|
||||
FrontendAttrs: map[string]string{},
|
||||
}
|
||||
require.NoError(t, setLocalMount("context", ".", so))
|
||||
require.NoError(t, setLocalMount("dockerfile", "app", so))
|
||||
|
||||
_, addVCSLocalDir, err := getGitAttributes(context.Background(), ".", "app/Dockerfile")
|
||||
addGitAttrs, err := getGitAttributes(context.Background(), ".", "app/Dockerfile")
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, addVCSLocalDir)
|
||||
|
||||
require.NoError(t, setLocalMount("context", ".", so, addVCSLocalDir))
|
||||
addGitAttrs(so)
|
||||
|
||||
require.Contains(t, so.FrontendAttrs, "vcs:localdir:context")
|
||||
assert.Equal(t, ".", so.FrontendAttrs["vcs:localdir:context"])
|
||||
|
||||
require.NoError(t, setLocalMount("dockerfile", "app", so, addVCSLocalDir))
|
||||
require.Contains(t, so.FrontendAttrs, "vcs:localdir:dockerfile")
|
||||
assert.Equal(t, "app", so.FrontendAttrs["vcs:localdir:dockerfile"])
|
||||
}
|
||||
|
79
build/opt.go
@@ -1,11 +1,12 @@
|
||||
package build
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
@@ -34,7 +35,7 @@ import (
|
||||
"github.com/tonistiigi/fsutil"
|
||||
)
|
||||
|
||||
func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Options, bopts gateway.BuildOpts, configDir string, addVCSLocalDir func(key, dir string, so *client.SolveOpt), pw progress.Writer, docker *dockerutil.Client) (_ *client.SolveOpt, release func(), err error) {
|
||||
func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Options, bopts gateway.BuildOpts, configDir string, pw progress.Writer, docker *dockerutil.Client) (_ *client.SolveOpt, release func(), err error) {
|
||||
nodeDriver := node.Driver
|
||||
defers := make([]func(), 0, 2)
|
||||
releaseF := func() {
|
||||
@@ -157,7 +158,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
|
||||
case 1:
|
||||
// valid
|
||||
case 0:
|
||||
if !noDefaultLoad() && opt.PrintFunc == nil {
|
||||
if !noDefaultLoad() && opt.CallFunc == nil {
|
||||
if nodeDriver.IsMobyDriver() {
|
||||
// backwards compat for docker driver only:
|
||||
// this ensures the build results in a docker image.
|
||||
@@ -260,9 +261,9 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
|
||||
}
|
||||
|
||||
so.Exports = opt.Exports
|
||||
so.Session = opt.Session
|
||||
so.Session = slices.Clone(opt.Session)
|
||||
|
||||
releaseLoad, err := loadInputs(ctx, nodeDriver, opt.Inputs, addVCSLocalDir, pw, &so)
|
||||
releaseLoad, err := loadInputs(ctx, nodeDriver, opt.Inputs, pw, &so)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -347,15 +348,15 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
|
||||
so.FrontendAttrs["ulimit"] = ulimits
|
||||
}
|
||||
|
||||
// mark info request as internal
|
||||
if opt.PrintFunc != nil {
|
||||
// mark call request as internal
|
||||
if opt.CallFunc != nil {
|
||||
so.Internal = true
|
||||
}
|
||||
|
||||
return &so, releaseF, nil
|
||||
}
|
||||
|
||||
func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, addVCSLocalDir func(key, dir string, so *client.SolveOpt), pw progress.Writer, target *client.SolveOpt) (func(), error) {
|
||||
func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw progress.Writer, target *client.SolveOpt) (func(), error) {
|
||||
if inp.ContextPath == "" {
|
||||
return nil, errors.New("please specify build context (e.g. \".\" for the current directory)")
|
||||
}
|
||||
@@ -364,7 +365,7 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, addVCSL
|
||||
|
||||
var (
|
||||
err error
|
||||
dockerfileReader io.Reader
|
||||
dockerfileReader io.ReadCloser
|
||||
dockerfileDir string
|
||||
dockerfileName = inp.DockerfilePath
|
||||
toRemove []string
|
||||
@@ -379,11 +380,11 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, addVCSL
|
||||
target.FrontendInputs["dockerfile"] = *inp.ContextState
|
||||
case inp.ContextPath == "-":
|
||||
if inp.DockerfilePath == "-" {
|
||||
return nil, errStdinConflict
|
||||
return nil, errors.Errorf("invalid argument: can't use stdin for both build context and dockerfile")
|
||||
}
|
||||
|
||||
buf := bufio.NewReader(inp.InStream)
|
||||
magic, err := buf.Peek(archiveHeaderSize * 2)
|
||||
rc := inp.InStream.NewReadCloser()
|
||||
magic, err := inp.InStream.Peek(archiveHeaderSize * 2)
|
||||
if err != nil && err != io.EOF {
|
||||
return nil, errors.Wrap(err, "failed to peek context header from STDIN")
|
||||
}
|
||||
@@ -391,23 +392,23 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, addVCSL
|
||||
if isArchive(magic) {
|
||||
// stdin is context
|
||||
up := uploadprovider.New()
|
||||
target.FrontendAttrs["context"] = up.Add(buf)
|
||||
target.FrontendAttrs["context"] = up.Add(rc)
|
||||
target.Session = append(target.Session, up)
|
||||
} else {
|
||||
if inp.DockerfilePath != "" {
|
||||
return nil, errDockerfileConflict
|
||||
return nil, errors.Errorf("ambiguous Dockerfile source: both stdin and flag correspond to Dockerfiles")
|
||||
}
|
||||
// stdin is dockerfile
|
||||
dockerfileReader = buf
|
||||
dockerfileReader = rc
|
||||
inp.ContextPath, _ = os.MkdirTemp("", "empty-dir")
|
||||
toRemove = append(toRemove, inp.ContextPath)
|
||||
if err := setLocalMount("context", inp.ContextPath, target, addVCSLocalDir); err != nil {
|
||||
if err := setLocalMount("context", inp.ContextPath, target); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
case osutil.IsLocalDir(inp.ContextPath):
|
||||
if err := setLocalMount("context", inp.ContextPath, target, addVCSLocalDir); err != nil {
|
||||
if err := setLocalMount("context", inp.ContextPath, target); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sharedKey := inp.ContextPath
|
||||
@@ -417,7 +418,7 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, addVCSL
|
||||
target.SharedKey = sharedKey
|
||||
switch inp.DockerfilePath {
|
||||
case "-":
|
||||
dockerfileReader = inp.InStream
|
||||
dockerfileReader = inp.InStream.NewReadCloser()
|
||||
case "":
|
||||
dockerfileDir = inp.ContextPath
|
||||
default:
|
||||
@@ -426,7 +427,7 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, addVCSL
|
||||
}
|
||||
case IsRemoteURL(inp.ContextPath):
|
||||
if inp.DockerfilePath == "-" {
|
||||
dockerfileReader = inp.InStream
|
||||
dockerfileReader = inp.InStream.NewReadCloser()
|
||||
} else if filepath.IsAbs(inp.DockerfilePath) {
|
||||
dockerfileDir = filepath.Dir(inp.DockerfilePath)
|
||||
dockerfileName = filepath.Base(inp.DockerfilePath)
|
||||
@@ -438,11 +439,11 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, addVCSL
|
||||
}
|
||||
|
||||
if inp.DockerfileInline != "" {
|
||||
dockerfileReader = strings.NewReader(inp.DockerfileInline)
|
||||
dockerfileReader = io.NopCloser(strings.NewReader(inp.DockerfileInline))
|
||||
}
|
||||
|
||||
if dockerfileReader != nil {
|
||||
dockerfileDir, err = createTempDockerfile(dockerfileReader)
|
||||
dockerfileDir, err = createTempDockerfile(dockerfileReader, inp.InStream)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -466,7 +467,7 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, addVCSL
|
||||
}
|
||||
|
||||
if dockerfileDir != "" {
|
||||
if err := setLocalMount("dockerfile", dockerfileDir, target, addVCSLocalDir); err != nil {
|
||||
if err := setLocalMount("dockerfile", dockerfileDir, target); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dockerfileName = handleLowercaseDockerfile(dockerfileDir, dockerfileName)
|
||||
@@ -528,7 +529,7 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, addVCSL
|
||||
if k == "context" || k == "dockerfile" {
|
||||
localName = "_" + k // underscore to avoid collisions
|
||||
}
|
||||
if err := setLocalMount(localName, v.Path, target, addVCSLocalDir); err != nil {
|
||||
if err := setLocalMount(localName, v.Path, target); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
target.FrontendAttrs["context:"+k] = "local:" + localName
|
||||
@@ -570,26 +571,19 @@ func resolveDigest(localPath, tag string) (dig string, _ error) {
|
||||
return dig, nil
|
||||
}
|
||||
|
||||
func setLocalMount(name, root string, so *client.SolveOpt, addVCSLocalDir func(key, dir string, so *client.SolveOpt)) error {
|
||||
lm, err := fsutil.NewFS(root)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
root, err = filepath.EvalSymlinks(root) // keep same behavior as fsutil.NewFS
|
||||
func setLocalMount(name, dir string, so *client.SolveOpt) error {
|
||||
lm, err := fsutil.NewFS(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if so.LocalMounts == nil {
|
||||
so.LocalMounts = map[string]fsutil.FS{}
|
||||
}
|
||||
so.LocalMounts[name] = lm
|
||||
if addVCSLocalDir != nil {
|
||||
addVCSLocalDir(name, root, so)
|
||||
}
|
||||
so.LocalMounts[name] = &fs{FS: lm, dir: dir}
|
||||
return nil
|
||||
}
|
||||
|
||||
func createTempDockerfile(r io.Reader) (string, error) {
|
||||
func createTempDockerfile(r io.Reader, multiReader *SyncMultiReader) (string, error) {
|
||||
dir, err := os.MkdirTemp("", "dockerfile")
|
||||
if err != nil {
|
||||
return "", err
|
||||
@@ -599,6 +593,16 @@ func createTempDockerfile(r io.Reader) (string, error) {
|
||||
return "", err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
if multiReader != nil {
|
||||
dt, err := io.ReadAll(r)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
multiReader.Reset(dt)
|
||||
r = bytes.NewReader(dt)
|
||||
}
|
||||
|
||||
if _, err := io.Copy(f, r); err != nil {
|
||||
return "", err
|
||||
}
|
||||
@@ -635,3 +639,10 @@ func handleLowercaseDockerfile(dir, p string) string {
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
type fs struct {
|
||||
fsutil.FS
|
||||
dir string
|
||||
}
|
||||
|
||||
var _ fsutil.FS = &fs{}
|
||||
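The small fs wrapper above keeps the original directory next to the fsutil.FS, so later steps (git metadata, shared-mount detection) can recover the path without re-deriving it. A sketch of constructing one, assuming the tonistiigi/fsutil module:

package main

import (
	"fmt"

	"github.com/tonistiigi/fsutil"
)

// fs wraps an fsutil.FS and remembers the directory it was created from.
type fs struct {
	fsutil.FS
	dir string
}

func newLocalMount(dir string) (*fs, error) {
	lm, err := fsutil.NewFS(dir)
	if err != nil {
		return nil, err
	}
	return &fs{FS: lm, dir: dir}, nil
}

func main() {
	m, err := newLocalMount(".")
	if err != nil {
		panic(err)
	}
	fmt.Println(m.dir) // "."
}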
|
164
build/replicatedstream.go
Normal file
@@ -0,0 +1,164 @@
|
||||
package build
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type SyncMultiReader struct {
|
||||
source *bufio.Reader
|
||||
buffer []byte
|
||||
static []byte
|
||||
mu sync.Mutex
|
||||
cond *sync.Cond
|
||||
readers []*syncReader
|
||||
err error
|
||||
offset int
|
||||
}
|
||||
|
||||
type syncReader struct {
|
||||
mr *SyncMultiReader
|
||||
offset int
|
||||
closed bool
|
||||
}
|
||||
|
||||
func NewSyncMultiReader(source io.Reader) *SyncMultiReader {
|
||||
mr := &SyncMultiReader{
|
||||
source: bufio.NewReader(source),
|
||||
buffer: make([]byte, 0, 32*1024),
|
||||
}
|
||||
mr.cond = sync.NewCond(&mr.mu)
|
||||
return mr
|
||||
}
|
||||
|
||||
func (mr *SyncMultiReader) Peek(n int) ([]byte, error) {
|
||||
mr.mu.Lock()
|
||||
defer mr.mu.Unlock()
|
||||
|
||||
if mr.static != nil {
|
||||
return mr.static[:min(n, len(mr.static))], nil
|
||||
}
|
||||
|
||||
return mr.source.Peek(n)
|
||||
}
|
||||
|
||||
func (mr *SyncMultiReader) Reset(dt []byte) {
|
||||
mr.mu.Lock()
|
||||
defer mr.mu.Unlock()
|
||||
|
||||
mr.static = dt
|
||||
}
|
||||
|
||||
func (mr *SyncMultiReader) NewReadCloser() io.ReadCloser {
|
||||
mr.mu.Lock()
|
||||
defer mr.mu.Unlock()
|
||||
|
||||
if mr.static != nil {
|
||||
return io.NopCloser(bytes.NewReader(mr.static))
|
||||
}
|
||||
|
||||
reader := &syncReader{
|
||||
mr: mr,
|
||||
}
|
||||
mr.readers = append(mr.readers, reader)
|
||||
return reader
|
||||
}
|
||||
|
||||
func (sr *syncReader) Read(p []byte) (int, error) {
|
||||
sr.mr.mu.Lock()
|
||||
defer sr.mr.mu.Unlock()
|
||||
|
||||
return sr.read(p)
|
||||
}
|
||||
|
||||
func (sr *syncReader) read(p []byte) (int, error) {
|
||||
end := sr.mr.offset + len(sr.mr.buffer)
|
||||
|
||||
loop0:
|
||||
for {
|
||||
if sr.closed {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
end := sr.mr.offset + len(sr.mr.buffer)
|
||||
|
||||
if sr.mr.err != nil && sr.offset == end {
|
||||
return 0, sr.mr.err
|
||||
}
|
||||
|
||||
start := sr.offset - sr.mr.offset
|
||||
|
||||
dt := sr.mr.buffer[start:]
|
||||
|
||||
if len(dt) > 0 {
|
||||
n := copy(p, dt)
|
||||
sr.offset += n
|
||||
sr.mr.cond.Broadcast()
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// check for readers that have not caught up
|
||||
hasOpen := false
|
||||
for _, r := range sr.mr.readers {
|
||||
if !r.closed {
|
||||
hasOpen = true
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
if r.offset < end {
|
||||
sr.mr.cond.Wait()
|
||||
continue loop0
|
||||
}
|
||||
}
|
||||
|
||||
if !hasOpen {
|
||||
return 0, io.EOF
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
last := sr.mr.offset + len(sr.mr.buffer)
|
||||
// another reader has already updated the buffer
|
||||
if last > end || sr.mr.err != nil {
|
||||
return sr.read(p)
|
||||
}
|
||||
|
||||
sr.mr.offset += len(sr.mr.buffer)
|
||||
|
||||
sr.mr.buffer = sr.mr.buffer[:cap(sr.mr.buffer)]
|
||||
n, err := sr.mr.source.Read(sr.mr.buffer)
|
||||
if n >= 0 {
|
||||
sr.mr.buffer = sr.mr.buffer[:n]
|
||||
} else {
|
||||
sr.mr.buffer = sr.mr.buffer[:0]
|
||||
}
|
||||
|
||||
sr.mr.cond.Broadcast()
|
||||
|
||||
if err != nil {
|
||||
sr.mr.err = err
|
||||
return 0, err
|
||||
}
|
||||
|
||||
nn := copy(p, sr.mr.buffer)
|
||||
sr.offset += nn
|
||||
|
||||
return nn, nil
|
||||
}
|
||||
|
||||
func (sr *syncReader) Close() error {
|
||||
sr.mr.mu.Lock()
|
||||
defer sr.mr.mu.Unlock()
|
||||
|
||||
if sr.closed {
|
||||
return nil
|
||||
}
|
||||
|
||||
sr.closed = true
|
||||
|
||||
sr.mr.cond.Broadcast()
|
||||
|
||||
return nil
|
||||
}
|
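A minimal usage sketch (not part of this diff) of how SyncMultiReader fans one input stream out to several consumers that must all observe identical bytes; the import path and the sample input are assumptions made for illustration only:

// sketch: replicate one reader to two concurrent consumers (assumed import path)
package main

import (
	"fmt"
	"io"
	"strings"
	"sync"

	"github.com/docker/buildx/build"
)

func main() {
	mr := build.NewSyncMultiReader(strings.NewReader("FROM alpine\n"))
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		r := mr.NewReadCloser()
		wg.Add(1)
		go func(id int, r io.ReadCloser) {
			defer wg.Done()
			defer r.Close()
			dt, err := io.ReadAll(r) // every reader sees the full stream
			if err != nil {
				fmt.Println("reader", id, "error:", err)
				return
			}
			fmt.Printf("reader %d got %d bytes\n", id, len(dt))
		}(i, r)
	}
	wg.Wait()
}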
77
build/replicatedstream_test.go
Normal file
@@ -0,0 +1,77 @@
package build

import (
	"bytes"
	"crypto/rand"
	"io"
	mathrand "math/rand"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func generateRandomData(size int) []byte {
	data := make([]byte, size)
	rand.Read(data)
	return data
}

func TestSyncMultiReaderParallel(t *testing.T) {
	data := generateRandomData(1024 * 1024)
	source := bytes.NewReader(data)
	mr := NewSyncMultiReader(source)

	var wg sync.WaitGroup
	numReaders := 10
	bufferSize := 4096 * 4

	readers := make([]io.ReadCloser, numReaders)

	for i := 0; i < numReaders; i++ {
		readers[i] = mr.NewReadCloser()
	}

	for i := 0; i < numReaders; i++ {
		wg.Add(1)
		go func(readerId int) {
			defer wg.Done()
			reader := readers[readerId]
			defer reader.Close()

			totalRead := 0
			buf := make([]byte, bufferSize)
			for totalRead < len(data) {
				// Simulate random read sizes
				readSize := mathrand.Intn(bufferSize) //nolint:gosec
				n, err := reader.Read(buf[:readSize])

				if n > 0 {
					assert.Equal(t, data[totalRead:totalRead+n], buf[:n], "Reader %d mismatch", readerId)
					totalRead += n
				}

				if err == io.EOF {
					assert.Equal(t, len(data), totalRead, "Reader %d EOF mismatch", readerId)
					return
				}

				require.NoError(t, err, "Reader %d error", readerId)

				if mathrand.Intn(1000) == 0 { //nolint:gosec
					t.Logf("Reader %d closing", readerId)
					// Simulate random close
					return
				}

				// Simulate random timing between reads
				time.Sleep(time.Millisecond * time.Duration(mathrand.Intn(5))) //nolint:gosec
			}

			assert.Equal(t, len(data), totalRead, "Reader %d total read mismatch", readerId)
		}(i)
	}

	wg.Wait()
}
@@ -8,7 +8,6 @@ import (
|
||||
|
||||
"github.com/containerd/platforms"
|
||||
"github.com/docker/buildx/driver"
|
||||
ctxkube "github.com/docker/buildx/driver/kubernetes/context"
|
||||
"github.com/docker/buildx/store"
|
||||
"github.com/docker/buildx/store/storeutil"
|
||||
"github.com/docker/buildx/util/dockerutil"
|
||||
@@ -18,7 +17,6 @@ import (
|
||||
"github.com/moby/buildkit/util/grpcerrors"
|
||||
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"google.golang.org/grpc/codes"
|
||||
)
|
||||
@@ -119,37 +117,19 @@ func (b *Builder) LoadNodes(ctx context.Context, opts ...LoadNodesOption) (_ []N
|
||||
return nil
|
||||
}
|
||||
|
||||
contextStore := b.opts.dockerCli.ContextStore()
|
||||
|
||||
var kcc driver.KubeClientConfig
|
||||
kcc, err = ctxkube.ConfigFromEndpoint(n.Endpoint, contextStore)
|
||||
if err != nil {
|
||||
// err is returned if n.Endpoint is non-context name like "unix:///var/run/docker.sock".
|
||||
// try again with name="default".
|
||||
// FIXME(@AkihiroSuda): n should retain real context name.
|
||||
kcc, err = ctxkube.ConfigFromEndpoint("default", contextStore)
|
||||
if err != nil {
|
||||
logrus.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
tryToUseKubeConfigInCluster := false
|
||||
if kcc == nil {
|
||||
tryToUseKubeConfigInCluster = true
|
||||
} else {
|
||||
if _, err := kcc.ClientConfig(); err != nil {
|
||||
tryToUseKubeConfigInCluster = true
|
||||
}
|
||||
}
|
||||
if tryToUseKubeConfigInCluster {
|
||||
kccInCluster := driver.KubeClientConfigInCluster{}
|
||||
if _, err := kccInCluster.ClientConfig(); err == nil {
|
||||
logrus.Debug("using kube config in cluster")
|
||||
kcc = kccInCluster
|
||||
}
|
||||
}
|
||||
|
||||
d, err := driver.GetDriver(ctx, driver.BuilderName(n.Name), factory, n.Endpoint, dockerapi, imageopt.Auth, kcc, n.BuildkitdFlags, n.Files, n.DriverOpts, n.Platforms, b.opts.contextPathHash, lno.dialMeta)
|
||||
d, err := driver.GetDriver(ctx, factory, driver.InitConfig{
|
||||
Name: driver.BuilderName(n.Name),
|
||||
EndpointAddr: n.Endpoint,
|
||||
DockerAPI: dockerapi,
|
||||
ContextStore: b.opts.dockerCli.ContextStore(),
|
||||
BuildkitdFlags: n.BuildkitdFlags,
|
||||
Files: n.Files,
|
||||
DriverOpts: n.DriverOpts,
|
||||
Auth: imageopt.Auth,
|
||||
Platforms: n.Platforms,
|
||||
ContextPathHash: b.opts.contextPathHash,
|
||||
DialMeta: lno.dialMeta,
|
||||
})
|
||||
if err != nil {
|
||||
node.Err = err
|
||||
return nil
|
||||
|
311
commands/bake.go
@@ -4,12 +4,16 @@ import (
|
||||
"bytes"
|
||||
"cmp"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/containerd/console"
|
||||
@@ -26,14 +30,15 @@ import (
|
||||
"github.com/docker/buildx/util/confutil"
|
||||
"github.com/docker/buildx/util/desktop"
|
||||
"github.com/docker/buildx/util/dockerutil"
|
||||
"github.com/docker/buildx/util/osutil"
|
||||
"github.com/docker/buildx/util/progress"
|
||||
"github.com/docker/buildx/util/tracing"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/moby/buildkit/client"
|
||||
"github.com/moby/buildkit/identity"
|
||||
"github.com/moby/buildkit/util/progress/progressui"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
)
|
||||
|
||||
type bakeOptions struct {
|
||||
@@ -44,6 +49,7 @@ type bakeOptions struct {
|
||||
listVars bool
|
||||
sbom string
|
||||
provenance string
|
||||
allow []string
|
||||
|
||||
builder string
|
||||
metadataFile string
|
||||
@@ -53,6 +59,8 @@ type bakeOptions struct {
|
||||
}
|
||||
|
||||
func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in bakeOptions, cFlags commonFlags) (err error) {
|
||||
mp := dockerCli.MeterProvider()
|
||||
|
||||
ctx, end, err := tracing.TraceCurrentCommand(ctx, "bake")
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -61,27 +69,12 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
|
||||
end(err)
|
||||
}()
|
||||
|
||||
var url string
|
||||
cmdContext := "cwd://"
|
||||
|
||||
if len(targets) > 0 {
|
||||
if build.IsRemoteURL(targets[0]) {
|
||||
url = targets[0]
|
||||
targets = targets[1:]
|
||||
if len(targets) > 0 {
|
||||
if build.IsRemoteURL(targets[0]) {
|
||||
cmdContext = targets[0]
|
||||
targets = targets[1:]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
url, cmdContext, targets := bakeArgs(targets)
|
||||
if len(targets) == 0 {
|
||||
targets = []string{"default"}
|
||||
}
|
||||
|
||||
callFunc, err := buildflags.ParsePrintFunc(in.callFunc)
|
||||
callFunc, err := buildflags.ParseCallFunc(in.callFunc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -110,6 +103,11 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
|
||||
}
|
||||
contextPathHash, _ := os.Getwd()
|
||||
|
||||
ent, err := bake.ParseEntitlements(in.allow)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx2, cancel := context.WithCancel(context.TODO())
|
||||
defer cancel()
|
||||
|
||||
@@ -117,6 +115,7 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
|
||||
var progressConsoleDesc, progressTextDesc string
|
||||
|
||||
// instance only needed for reading remote bake files or building
|
||||
var driverType string
|
||||
if url != "" || !in.printOnly {
|
||||
b, err := builder.New(dockerCli,
|
||||
builder.WithName(in.builder),
|
||||
@@ -134,53 +133,33 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
|
||||
}
|
||||
progressConsoleDesc = fmt.Sprintf("%s:%s", b.Driver, b.Name)
|
||||
progressTextDesc = fmt.Sprintf("building with %q instance using %s driver", b.Name, b.Driver)
|
||||
driverType = b.Driver
|
||||
}
|
||||
|
||||
var term bool
|
||||
if _, err := console.ConsoleFromFile(os.Stderr); err == nil {
|
||||
term = true
|
||||
}
|
||||
attributes := bakeMetricAttributes(dockerCli, driverType, url, cmdContext, targets, &in)
|
||||
|
||||
progressMode := progressui.DisplayMode(cFlags.progress)
|
||||
var printer *progress.Printer
|
||||
printer, err = progress.NewPrinter(ctx2, os.Stderr, progressMode,
|
||||
progress.WithDesc(progressTextDesc, progressConsoleDesc),
|
||||
progress.WithOnClose(func() {
|
||||
if p := printer; p != nil {
|
||||
printWarnings(os.Stderr, p.Warnings(), progressMode)
|
||||
}
|
||||
}),
|
||||
)
|
||||
if err != nil {
|
||||
|
||||
makePrinter := func() error {
|
||||
var err error
|
||||
printer, err = progress.NewPrinter(ctx2, os.Stderr, progressMode,
|
||||
progress.WithDesc(progressTextDesc, progressConsoleDesc),
|
||||
progress.WithMetrics(mp, attributes),
|
||||
progress.WithOnClose(func() {
|
||||
printWarnings(os.Stderr, printer.Warnings(), progressMode)
|
||||
}),
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
var resp map[string]*client.SolveResponse
|
||||
|
||||
defer func() {
|
||||
if printer != nil {
|
||||
err1 := printer.Wait()
|
||||
if err == nil {
|
||||
err = err1
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if progressMode != progressui.QuietMode && progressMode != progressui.RawJSONMode {
|
||||
desktop.PrintBuildDetails(os.Stderr, printer.BuildRefs(), term)
|
||||
}
|
||||
if resp != nil && len(in.metadataFile) > 0 {
|
||||
dt := make(map[string]interface{})
|
||||
for t, r := range resp {
|
||||
dt[t] = decodeExporterResponse(r.ExporterResponse)
|
||||
}
|
||||
if warnings := printer.Warnings(); len(warnings) > 0 && confutil.MetadataWarningsEnabled() {
|
||||
dt["buildx.build.warnings"] = warnings
|
||||
}
|
||||
err = writeMetadataFile(in.metadataFile, dt)
|
||||
}
|
||||
}
|
||||
}()
|
||||
if err := makePrinter(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
files, inp, err := readBakeFiles(ctx, nodes, url, in.files, dockerCli.In(), printer)
|
||||
if err != nil {
|
||||
@@ -203,10 +182,7 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = printer.Wait()
|
||||
printer = nil
|
||||
if err != nil {
|
||||
if err = printer.Wait(); err != nil {
|
||||
return err
|
||||
}
|
||||
if in.listTargets {
|
||||
@@ -249,68 +225,79 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
|
||||
}
|
||||
|
||||
if in.printOnly {
|
||||
dt, err := json.MarshalIndent(def, "", " ")
|
||||
if err = printer.Wait(); err != nil {
|
||||
return err
|
||||
}
|
||||
dtdef, err := json.MarshalIndent(def, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = printer.Wait()
|
||||
printer = nil
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(dockerCli.Out(), string(dt))
|
||||
return nil
|
||||
_, err = fmt.Fprintln(dockerCli.Out(), string(dtdef))
|
||||
return err
|
||||
}
|
||||
|
||||
for _, opt := range bo {
|
||||
if opt.PrintFunc != nil {
|
||||
cf, err := buildflags.ParsePrintFunc(opt.PrintFunc.Name)
|
||||
if opt.CallFunc != nil {
|
||||
cf, err := buildflags.ParseCallFunc(opt.CallFunc.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
opt.PrintFunc.Name = cf.Name
|
||||
opt.CallFunc.Name = cf.Name
|
||||
}
|
||||
}
|
||||
|
||||
prm := confutil.MetadataProvenance()
|
||||
if len(in.metadataFile) == 0 {
|
||||
prm = confutil.MetadataProvenanceModeDisabled
|
||||
}
|
||||
|
||||
groupRef := identity.NewID()
|
||||
var refs []string
|
||||
for k, b := range bo {
|
||||
b.Ref = identity.NewID()
|
||||
b.GroupRef = groupRef
|
||||
b.ProvenanceResponseMode = prm
|
||||
refs = append(refs, b.Ref)
|
||||
bo[k] = b
|
||||
}
|
||||
dt, err := json.Marshal(def)
|
||||
exp, err := ent.Validate(bo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := saveLocalStateGroup(dockerCli, groupRef, localstate.StateGroup{
|
||||
Definition: dt,
|
||||
Targets: targets,
|
||||
Inputs: overrides,
|
||||
Refs: refs,
|
||||
}); err != nil {
|
||||
if err := exp.Prompt(ctx, &syncWriter{w: dockerCli.Err(), wait: printer.Wait}); err != nil {
|
||||
return err
|
||||
}
|
||||
if printer.IsDone() {
|
||||
// init new printer as old one was stopped to show the prompt
|
||||
if err := makePrinter(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := saveLocalStateGroup(dockerCli, in, targets, bo, overrides, def); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resp, err = build.Build(ctx, nodes, bo, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), printer)
|
||||
if err != nil {
|
||||
return wrapBuildError(err, true)
|
||||
done := timeBuildCommand(mp, attributes)
|
||||
resp, retErr := build.Build(ctx, nodes, bo, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), printer)
|
||||
if err := printer.Wait(); retErr == nil {
|
||||
retErr = err
|
||||
}
|
||||
if retErr != nil {
|
||||
err = wrapBuildError(retErr, true)
|
||||
}
|
||||
done(err)
|
||||
|
||||
err = printer.Wait()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if progressMode != progressui.QuietMode && progressMode != progressui.RawJSONMode {
|
||||
desktop.PrintBuildDetails(os.Stderr, printer.BuildRefs(), term)
|
||||
}
|
||||
if len(in.metadataFile) > 0 {
|
||||
dt := make(map[string]interface{})
|
||||
for t, r := range resp {
|
||||
dt[t] = decodeExporterResponse(r.ExporterResponse)
|
||||
}
|
||||
if callFunc == nil {
|
||||
if warnings := printer.Warnings(); len(warnings) > 0 && confutil.MetadataWarningsEnabled() {
|
||||
dt["buildx.build.warnings"] = warnings
|
||||
}
|
||||
}
|
||||
if err := writeMetadataFile(in.metadataFile, dt); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
var callFormatJSON bool
|
||||
var jsonResults = map[string]map[string]any{}
|
||||
jsonResults := map[string]map[string]any{}
|
||||
if callFunc != nil {
|
||||
callFormatJSON = callFunc.Format == "json"
|
||||
}
|
||||
@@ -325,14 +312,14 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
|
||||
|
||||
for _, name := range names {
|
||||
req := bo[name]
|
||||
if req.PrintFunc == nil {
|
||||
if req.CallFunc == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
pf := &pb.PrintFunc{
|
||||
Name: req.PrintFunc.Name,
|
||||
Format: req.PrintFunc.Format,
|
||||
IgnoreStatus: req.PrintFunc.IgnoreStatus,
|
||||
pf := &pb.CallFunc{
|
||||
Name: req.CallFunc.Name,
|
||||
Format: req.CallFunc.Format,
|
||||
IgnoreStatus: req.CallFunc.IgnoreStatus,
|
||||
}
|
||||
|
||||
if callFunc != nil {
|
||||
@@ -396,7 +383,7 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
|
||||
"build": def,
|
||||
}
|
||||
if res, ok := jsonResults[name]; ok {
|
||||
printName := bo[name].PrintFunc.Name
|
||||
printName := bo[name].CallFunc.Name
|
||||
if printName == "lint" {
|
||||
printName = "check"
|
||||
}
|
||||
@@ -451,6 +438,7 @@ func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
||||
flags.StringVar(&options.provenance, "provenance", "", `Shorthand for "--set=*.attest=type=provenance"`)
|
||||
flags.StringArrayVar(&options.overrides, "set", nil, `Override target value (e.g., "targetpattern.key=value")`)
|
||||
flags.StringVar(&options.callFunc, "call", "build", `Set method for evaluating build ("check", "outline", "targets")`)
|
||||
flags.StringArrayVar(&options.allow, "allow", nil, "Allow build to access specified resources")
|
||||
|
||||
flags.VarPF(callAlias(&options.callFunc, "check"), "check", "", `Shorthand for "--call=check"`)
|
||||
flags.Lookup("check").NoOptDefVal = "true"
|
||||
@@ -468,12 +456,49 @@ func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
||||
return cmd
|
||||
}
|
||||
|
||||
func saveLocalStateGroup(dockerCli command.Cli, ref string, lsg localstate.StateGroup) error {
|
||||
func saveLocalStateGroup(dockerCli command.Cli, in bakeOptions, targets []string, bo map[string]build.Options, overrides []string, def any) error {
|
||||
prm := confutil.MetadataProvenance()
|
||||
if len(in.metadataFile) == 0 {
|
||||
prm = confutil.MetadataProvenanceModeDisabled
|
||||
}
|
||||
groupRef := identity.NewID()
|
||||
refs := make([]string, 0, len(bo))
|
||||
for k, b := range bo {
|
||||
b.Ref = identity.NewID()
|
||||
b.GroupRef = groupRef
|
||||
b.ProvenanceResponseMode = prm
|
||||
refs = append(refs, b.Ref)
|
||||
bo[k] = b
|
||||
}
|
||||
l, err := localstate.New(confutil.ConfigDir(dockerCli))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return l.SaveGroup(ref, lsg)
|
||||
dtdef, err := json.MarshalIndent(def, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return l.SaveGroup(groupRef, localstate.StateGroup{
|
||||
Definition: dtdef,
|
||||
Targets: targets,
|
||||
Inputs: overrides,
|
||||
Refs: refs,
|
||||
})
|
||||
}
|
||||
|
||||
// bakeArgs will retrieve the remote url, command context, and targets
// from the command line arguments.
func bakeArgs(args []string) (url, cmdContext string, targets []string) {
	cmdContext, targets = "cwd://", args
	if len(targets) == 0 || !build.IsRemoteURL(targets[0]) {
		return url, cmdContext, targets
	}
	url, targets = targets[0], targets[1:]
	if len(targets) == 0 || !build.IsRemoteURL(targets[0]) {
		return url, cmdContext, targets
	}
	cmdContext, targets = targets[0], targets[1:]
	return url, cmdContext, targets
}
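For reference, a hypothetical example (not part of the diff) of how this argument resolution behaves; it assumes build.IsRemoteURL recognizes git/https URLs, and the inputs are made up for illustration:

// hypothetical inputs to illustrate bakeArgs; would live in the same package
func exampleBakeArgs() {
	url, cmdContext, targets := bakeArgs([]string{
		"https://github.com/example/repo.git", // first remote URL -> bake definition location
		"https://github.com/example/ctx.git",  // second remote URL -> command context
		"app",
	})
	fmt.Println(url)        // https://github.com/example/repo.git
	fmt.Println(cmdContext) // https://github.com/example/ctx.git
	fmt.Println(targets)    // [app]

	_, cmdContext, targets = bakeArgs([]string{"app", "db"})
	fmt.Println(cmdContext) // cwd:// (default when no remote URL is given)
	fmt.Println(targets)    // [app db]
}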
|
||||
|
||||
func readBakeFiles(ctx context.Context, nodes []builder.Node, url string, names []string, stdin io.Reader, pw progress.Writer) (files []bake.File, inp *bake.Input, err error) {
|
||||
@@ -590,3 +615,85 @@ func printTargetList(w io.Writer, cfg *bake.Config) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func bakeMetricAttributes(dockerCli command.Cli, driverType, url, cmdContext string, targets []string, options *bakeOptions) attribute.Set {
	return attribute.NewSet(
		commandNameAttribute.String("bake"),
		attribute.Stringer(string(commandOptionsHash), &bakeOptionsHash{
			bakeOptions: options,
			configDir:   confutil.ConfigDir(dockerCli),
			url:         url,
			cmdContext:  cmdContext,
			targets:     targets,
		}),
		driverNameAttribute.String(options.builder),
		driverTypeAttribute.String(driverType),
	)
}

type bakeOptionsHash struct {
	*bakeOptions
	configDir  string
	url        string
	cmdContext string
	targets    []string
	result     string
	resultOnce sync.Once
}

func (o *bakeOptionsHash) String() string {
	o.resultOnce.Do(func() {
		url := o.url
		cmdContext := o.cmdContext
		if cmdContext == "cwd://" {
			// Resolve the directory if the cmdContext is the current working directory.
			cmdContext = osutil.GetWd()
		}

		// Sort the inputs for files and targets since the ordering
		// doesn't matter, but avoid modifying the original slice.
		files := immutableSort(o.files)
		targets := immutableSort(o.targets)

		joinedFiles := strings.Join(files, ",")
		joinedTargets := strings.Join(targets, ",")
		salt := confutil.TryNodeIdentifier(o.configDir)

		h := sha256.New()
		for _, s := range []string{url, cmdContext, joinedFiles, joinedTargets, salt} {
			_, _ = io.WriteString(h, s)
			h.Write([]byte{0})
		}
		o.result = hex.EncodeToString(h.Sum(nil))
	})
	return o.result
}

// immutableSort will sort the entries in s without modifying the original slice.
func immutableSort(s []string) []string {
	if !sort.StringsAreSorted(s) {
		cpy := make([]string, len(s))
		copy(cpy, s)
		sort.Strings(cpy)
		return cpy
	}
	return s
}

type syncWriter struct {
	w    io.Writer
	once sync.Once
	wait func() error
}

func (w *syncWriter) Write(p []byte) (n int, err error) {
	w.once.Do(func() {
		if w.wait != nil {
			err = w.wait()
		}
	})
	if err != nil {
		return 0, err
	}
	return w.w.Write(p)
}
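syncWriter defers the wrapped wait function until the first write, which is what lets the entitlements prompt above drain the progress printer before anything is written to stderr (see the exp.Prompt call with wait: printer.Wait). A minimal, self-contained sketch of that behavior; the wait func and the prompt text here are stand-ins, not part of the diff:

// sketch of syncWriter: wait() runs exactly once, before the first write goes through
func demoSyncWriter() {
	w := &syncWriter{
		w: os.Stderr,
		wait: func() error {
			// e.g. printer.Wait(): flush pending progress output first
			return nil
		},
	}
	fmt.Fprintln(w, "prompt text goes here")
}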
|
||||
|
@@ -79,7 +79,7 @@ type buildOptions struct {
|
||||
noCacheFilter []string
|
||||
outputs []string
|
||||
platforms []string
|
||||
printFunc string
|
||||
callFunc string
|
||||
secrets []string
|
||||
shmSize dockeropts.MemBytes
|
||||
ssh []string
|
||||
@@ -199,13 +199,13 @@ func (o *buildOptions) toControllerOptions() (*controllerapi.BuildOptions, error
|
||||
return nil, err
|
||||
}
|
||||
|
||||
opts.PrintFunc, err = buildflags.ParsePrintFunc(o.printFunc)
|
||||
opts.CallFunc, err = buildflags.ParseCallFunc(o.callFunc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
prm := confutil.MetadataProvenance()
|
||||
if opts.PrintFunc != nil || len(o.metadataFile) == 0 {
|
||||
if opts.CallFunc != nil || len(o.metadataFile) == 0 {
|
||||
prm = confutil.MetadataProvenanceModeDisabled
|
||||
}
|
||||
opts.ProvenanceResponseMode = string(prm)
|
||||
@@ -224,15 +224,22 @@ func (o *buildOptions) toDisplayMode() (progressui.DisplayMode, error) {
|
||||
return progress, nil
|
||||
}
|
||||
|
||||
func buildMetricAttributes(dockerCli command.Cli, b *builder.Builder, options *buildOptions) attribute.Set {
|
||||
const (
|
||||
commandNameAttribute = attribute.Key("command.name")
|
||||
commandOptionsHash = attribute.Key("command.options.hash")
|
||||
driverNameAttribute = attribute.Key("driver.name")
|
||||
driverTypeAttribute = attribute.Key("driver.type")
|
||||
)
|
||||
|
||||
func buildMetricAttributes(dockerCli command.Cli, driverType string, options *buildOptions) attribute.Set {
|
||||
return attribute.NewSet(
|
||||
attribute.String("command.name", "build"),
|
||||
attribute.Stringer("command.options.hash", &buildOptionsHash{
|
||||
commandNameAttribute.String("build"),
|
||||
attribute.Stringer(string(commandOptionsHash), &buildOptionsHash{
|
||||
buildOptions: options,
|
||||
configDir: confutil.ConfigDir(dockerCli),
|
||||
}),
|
||||
attribute.String("driver.name", options.builder),
|
||||
attribute.String("driver.type", b.Driver),
|
||||
driverNameAttribute.String(options.builder),
|
||||
driverTypeAttribute.String(driverType),
|
||||
)
|
||||
}
|
||||
|
||||
@@ -308,12 +315,13 @@ func runBuild(ctx context.Context, dockerCli command.Cli, options buildOptions)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
driverType := b.Driver
|
||||
|
||||
var term bool
|
||||
if _, err := console.ConsoleFromFile(os.Stderr); err == nil {
|
||||
term = true
|
||||
}
|
||||
attributes := buildMetricAttributes(dockerCli, b, &options)
|
||||
attributes := buildMetricAttributes(dockerCli, driverType, &options)
|
||||
|
||||
ctx2, cancel := context.WithCancel(context.TODO())
|
||||
defer cancel()
|
||||
@@ -367,21 +375,24 @@ func runBuild(ctx context.Context, dockerCli command.Cli, options buildOptions)
|
||||
return errors.Wrap(err, "writing image ID file")
|
||||
}
|
||||
}
|
||||
if opts.PrintFunc != nil {
|
||||
if exitcode, err := printResult(dockerCli.Out(), opts.PrintFunc, resp.ExporterResponse); err != nil {
|
||||
return err
|
||||
} else if exitcode != 0 {
|
||||
os.Exit(exitcode)
|
||||
}
|
||||
} else if options.metadataFile != "" {
|
||||
if options.metadataFile != "" {
|
||||
dt := decodeExporterResponse(resp.ExporterResponse)
|
||||
if warnings := printer.Warnings(); len(warnings) > 0 && confutil.MetadataWarningsEnabled() {
|
||||
dt["buildx.build.warnings"] = warnings
|
||||
if opts.CallFunc == nil {
|
||||
if warnings := printer.Warnings(); len(warnings) > 0 && confutil.MetadataWarningsEnabled() {
|
||||
dt["buildx.build.warnings"] = warnings
|
||||
}
|
||||
}
|
||||
if err := writeMetadataFile(options.metadataFile, dt); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if opts.CallFunc != nil {
|
||||
if exitcode, err := printResult(dockerCli.Out(), opts.CallFunc, resp.ExporterResponse); err != nil {
|
||||
return err
|
||||
} else if exitcode != 0 {
|
||||
os.Exit(exitcode)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -634,8 +645,8 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions, debugConfig *debug.D
|
||||
cobrautil.MarkFlagsExperimental(flags, "root", "detach", "server-config")
|
||||
}
|
||||
|
||||
flags.StringVar(&options.printFunc, "call", "build", `Set method for evaluating build ("check", "outline", "targets")`)
|
||||
flags.VarPF(callAlias(&options.printFunc, "check"), "check", "", `Shorthand for "--call=check"`)
|
||||
flags.StringVar(&options.callFunc, "call", "build", `Set method for evaluating build ("check", "outline", "targets")`)
|
||||
flags.VarPF(callAlias(&options.callFunc, "check"), "check", "", `Shorthand for "--call=check"`)
|
||||
flags.Lookup("check").NoOptDefVal = "true"
|
||||
|
||||
// hidden flags
|
||||
@@ -644,7 +655,7 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions, debugConfig *debug.D
|
||||
var ignoreBool bool
|
||||
var ignoreInt int64
|
||||
|
||||
flags.StringVar(&options.printFunc, "print", "", "Print result of information request (e.g., outline, targets)")
|
||||
flags.StringVar(&options.callFunc, "print", "", "Print result of information request (e.g., outline, targets)")
|
||||
cobrautil.MarkFlagsExperimental(flags, "print")
|
||||
flags.MarkHidden("print")
|
||||
|
||||
@@ -731,9 +742,17 @@ func writeMetadataFile(filename string, dt interface{}) error {
|
||||
}
|
||||
|
||||
func decodeExporterResponse(exporterResponse map[string]string) map[string]interface{} {
|
||||
decFunc := func(k, v string) ([]byte, error) {
|
||||
if k == "result.json" {
|
||||
// result.json is part of metadata response for subrequests which
|
||||
// is already a JSON object: https://github.com/moby/buildkit/blob/f6eb72f2f5db07ddab89ac5e2bd3939a6444f4be/frontend/dockerui/requests.go#L100-L102
|
||||
return []byte(v), nil
|
||||
}
|
||||
return base64.StdEncoding.DecodeString(v)
|
||||
}
|
||||
out := make(map[string]interface{})
|
||||
for k, v := range exporterResponse {
|
||||
dt, err := base64.StdEncoding.DecodeString(v)
|
||||
dt, err := decFunc(k, v)
|
||||
if err != nil {
|
||||
out[k] = v
|
||||
continue
|
||||
@@ -863,7 +882,7 @@ func printWarnings(w io.Writer, warnings []client.VertexWarning, mode progressui
|
||||
}
|
||||
}
|
||||
|
||||
func printResult(w io.Writer, f *controllerapi.PrintFunc, res map[string]string) (int, error) {
|
||||
func printResult(w io.Writer, f *controllerapi.CallFunc, res map[string]string) (int, error) {
|
||||
switch f.Name {
|
||||
case "outline":
|
||||
return 0, printValue(w, outline.PrintOutline, outline.SubrequestsOutlineDefinition.Version, f.Format, res)
|
||||
@@ -872,17 +891,29 @@ func printResult(w io.Writer, f *controllerapi.PrintFunc, res map[string]string)
|
||||
case "subrequests.describe":
|
||||
return 0, printValue(w, subrequests.PrintDescribe, subrequests.SubrequestsDescribeDefinition.Version, f.Format, res)
|
||||
case "lint":
|
||||
err := printValue(w, lint.PrintLintViolations, lint.SubrequestLintDefinition.Version, f.Format, res)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
lintResults := lint.LintResults{}
|
||||
if result, ok := res["result.json"]; ok {
|
||||
if err := json.Unmarshal([]byte(result), &lintResults); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
warningCount := len(lintResults.Warnings)
|
||||
if f.Format != "json" && warningCount > 0 {
|
||||
var warningCountMsg string
|
||||
if warningCount == 1 {
|
||||
warningCountMsg = "1 warning has been found!"
|
||||
} else if warningCount > 1 {
|
||||
warningCountMsg = fmt.Sprintf("%d warnings have been found!", warningCount)
|
||||
}
|
||||
fmt.Fprintf(w, "Check complete, %s\n", warningCountMsg)
|
||||
}
|
||||
|
||||
err := printValue(w, printLintViolationsWrapper, lint.SubrequestLintDefinition.Version, f.Format, res)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if lintResults.Error != nil {
|
||||
// Print the error message and the source
|
||||
// Normally, we would use `errdefs.WithSource` to attach the source to the
|
||||
@@ -921,9 +952,9 @@ func printResult(w io.Writer, f *controllerapi.PrintFunc, res map[string]string)
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
type printFunc func([]byte, io.Writer) error
|
||||
type callFunc func([]byte, io.Writer) error
|
||||
|
||||
func printValue(w io.Writer, printer printFunc, version string, format string, res map[string]string) error {
|
||||
func printValue(w io.Writer, printer callFunc, version string, format string, res map[string]string) error {
|
||||
if format == "json" {
|
||||
fmt.Fprintln(w, res["result.json"])
|
||||
return nil
|
||||
@@ -937,6 +968,11 @@ func printValue(w io.Writer, printer printFunc, version string, format string, r
|
||||
return printer([]byte(res["result.json"]), w)
|
||||
}
|
||||
|
||||
// FIXME: remove once https://github.com/docker/buildx/pull/2672 is sorted
|
||||
func printLintViolationsWrapper(dt []byte, w io.Writer) error {
|
||||
return lint.PrintLintViolations(dt, w, nil)
|
||||
}
|
||||
|
||||
type invokeConfig struct {
|
||||
controllerapi.InvokeConfig
|
||||
onFlag string
|
||||
|
@@ -21,6 +21,7 @@ import (
|
||||
)
|
||||
|
||||
func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Command {
|
||||
var opt rootOptions
|
||||
cmd := &cobra.Command{
|
||||
Short: "Docker Buildx",
|
||||
Long: `Extended build capabilities with BuildKit`,
|
||||
@@ -32,6 +33,10 @@ func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Comman
|
||||
HiddenDefaultCmd: true,
|
||||
},
|
||||
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
if opt.debug {
|
||||
debug.Enable()
|
||||
}
|
||||
|
||||
cmd.SetContext(appcontext.Context())
|
||||
if !isPlugin {
|
||||
return nil
|
||||
@@ -47,11 +52,6 @@ func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Comman
|
||||
cmd.TraverseChildren = true
|
||||
cmd.DisableFlagsInUseLine = true
|
||||
cli.DisableFlagsInUseLine(cmd)
|
||||
|
||||
// DEBUG=1 should perform the same as --debug at the docker root level
|
||||
if debug.IsEnabled() {
|
||||
debug.Enable()
|
||||
}
|
||||
}
|
||||
|
||||
logrus.SetFormatter(&logutil.Formatter{})
|
||||
@@ -68,16 +68,16 @@ func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Comman
|
||||
cmd.SetHelpTemplate(cmd.HelpTemplate() + "\nExperimental commands and flags are hidden. Set BUILDX_EXPERIMENTAL=1 to show them.\n")
|
||||
}
|
||||
|
||||
addCommands(cmd, dockerCli)
|
||||
addCommands(cmd, &opt, dockerCli)
|
||||
return cmd
|
||||
}
|
||||
|
||||
type rootOptions struct {
|
||||
builder string
|
||||
debug bool
|
||||
}
|
||||
|
||||
func addCommands(cmd *cobra.Command, dockerCli command.Cli) {
|
||||
opts := &rootOptions{}
|
||||
func addCommands(cmd *cobra.Command, opts *rootOptions, dockerCli command.Cli) {
|
||||
rootFlags(opts, cmd.PersistentFlags())
|
||||
|
||||
cmd.AddCommand(
|
||||
@@ -112,4 +112,5 @@ func addCommands(cmd *cobra.Command, dockerCli command.Cli) {
|
||||
|
||||
func rootFlags(options *rootOptions, flags *pflag.FlagSet) {
|
||||
flags.StringVar(&options.builder, "builder", os.Getenv("BUILDX_BUILDER"), "Override the configured builder instance")
|
||||
flags.BoolVarP(&options.debug, "debug", "D", debug.IsEnabled(), "Enable debug logging")
|
||||
}
|
||||
|
@@ -3,7 +3,6 @@ package build
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -19,7 +18,6 @@ import (
|
||||
"github.com/docker/buildx/util/platformutil"
|
||||
"github.com/docker/buildx/util/progress"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/cli/config"
|
||||
dockeropts "github.com/docker/cli/opts"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/moby/buildkit/client"
|
||||
@@ -50,7 +48,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
|
||||
Inputs: build.Inputs{
|
||||
ContextPath: in.ContextPath,
|
||||
DockerfilePath: in.DockerfileName,
|
||||
InStream: inStream,
|
||||
InStream: build.NewSyncMultiReader(inStream),
|
||||
NamedContexts: contexts,
|
||||
},
|
||||
Ref: in.Ref,
|
||||
@@ -76,7 +74,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
|
||||
}
|
||||
opts.Platforms = platforms
|
||||
|
||||
dockerConfig := config.LoadDefaultConfigFile(os.Stderr)
|
||||
dockerConfig := dockerCli.ConfigFile()
|
||||
opts.Session = append(opts.Session, authprovider.NewDockerAuthProvider(dockerConfig, nil))
|
||||
|
||||
secrets, err := controllerapi.CreateSecrets(in.Secrets)
|
||||
@@ -160,11 +158,11 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
|
||||
}
|
||||
opts.Allow = allow
|
||||
|
||||
if in.PrintFunc != nil {
|
||||
opts.PrintFunc = &build.PrintFunc{
|
||||
Name: in.PrintFunc.Name,
|
||||
Format: in.PrintFunc.Format,
|
||||
IgnoreStatus: in.PrintFunc.IgnoreStatus,
|
||||
if in.CallFunc != nil {
|
||||
opts.CallFunc = &build.CallFunc{
|
||||
Name: in.CallFunc.Name,
|
||||
Format: in.CallFunc.Format,
|
||||
IgnoreStatus: in.CallFunc.IgnoreStatus,
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -273,7 +273,7 @@ func (m *BuildRequest) GetOptions() *BuildOptions {
|
||||
type BuildOptions struct {
|
||||
ContextPath string `protobuf:"bytes,1,opt,name=ContextPath,proto3" json:"ContextPath,omitempty"`
|
||||
DockerfileName string `protobuf:"bytes,2,opt,name=DockerfileName,proto3" json:"DockerfileName,omitempty"`
|
||||
PrintFunc *PrintFunc `protobuf:"bytes,3,opt,name=PrintFunc,proto3" json:"PrintFunc,omitempty"`
|
||||
CallFunc *CallFunc `protobuf:"bytes,3,opt,name=CallFunc,proto3" json:"CallFunc,omitempty"`
|
||||
NamedContexts map[string]string `protobuf:"bytes,4,rep,name=NamedContexts,proto3" json:"NamedContexts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
Allow []string `protobuf:"bytes,5,rep,name=Allow,proto3" json:"Allow,omitempty"`
|
||||
Attests []*Attest `protobuf:"bytes,6,rep,name=Attests,proto3" json:"Attests,omitempty"`
|
||||
@@ -346,9 +346,9 @@ func (m *BuildOptions) GetDockerfileName() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *BuildOptions) GetPrintFunc() *PrintFunc {
|
||||
func (m *BuildOptions) GetCallFunc() *CallFunc {
|
||||
if m != nil {
|
||||
return m.PrintFunc
|
||||
return m.CallFunc
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -810,7 +810,7 @@ func (m *Secret) GetEnv() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
type PrintFunc struct {
|
||||
type CallFunc struct {
|
||||
Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"`
|
||||
Format string `protobuf:"bytes,2,opt,name=Format,proto3" json:"Format,omitempty"`
|
||||
IgnoreStatus bool `protobuf:"varint,3,opt,name=IgnoreStatus,proto3" json:"IgnoreStatus,omitempty"`
|
||||
@@ -819,45 +819,45 @@ type PrintFunc struct {
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *PrintFunc) Reset() { *m = PrintFunc{} }
|
||||
func (m *PrintFunc) String() string { return proto.CompactTextString(m) }
|
||||
func (*PrintFunc) ProtoMessage() {}
|
||||
func (*PrintFunc) Descriptor() ([]byte, []int) {
|
||||
func (m *CallFunc) Reset() { *m = CallFunc{} }
|
||||
func (m *CallFunc) String() string { return proto.CompactTextString(m) }
|
||||
func (*CallFunc) ProtoMessage() {}
|
||||
func (*CallFunc) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ed7f10298fa1d90f, []int{12}
|
||||
}
|
||||
func (m *PrintFunc) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_PrintFunc.Unmarshal(m, b)
|
||||
func (m *CallFunc) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_CallFunc.Unmarshal(m, b)
|
||||
}
|
||||
func (m *PrintFunc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_PrintFunc.Marshal(b, m, deterministic)
|
||||
func (m *CallFunc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_CallFunc.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *PrintFunc) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_PrintFunc.Merge(m, src)
|
||||
func (m *CallFunc) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_CallFunc.Merge(m, src)
|
||||
}
|
||||
func (m *PrintFunc) XXX_Size() int {
|
||||
return xxx_messageInfo_PrintFunc.Size(m)
|
||||
func (m *CallFunc) XXX_Size() int {
|
||||
return xxx_messageInfo_CallFunc.Size(m)
|
||||
}
|
||||
func (m *PrintFunc) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_PrintFunc.DiscardUnknown(m)
|
||||
func (m *CallFunc) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_CallFunc.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_PrintFunc proto.InternalMessageInfo
|
||||
var xxx_messageInfo_CallFunc proto.InternalMessageInfo
|
||||
|
||||
func (m *PrintFunc) GetName() string {
|
||||
func (m *CallFunc) GetName() string {
|
||||
if m != nil {
|
||||
return m.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *PrintFunc) GetFormat() string {
|
||||
func (m *CallFunc) GetFormat() string {
|
||||
if m != nil {
|
||||
return m.Format
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *PrintFunc) GetIgnoreStatus() bool {
|
||||
func (m *CallFunc) GetIgnoreStatus() bool {
|
||||
if m != nil {
|
||||
return m.IgnoreStatus
|
||||
}
|
||||
@@ -2062,7 +2062,7 @@ func init() {
|
||||
proto.RegisterType((*Attest)(nil), "buildx.controller.v1.Attest")
|
||||
proto.RegisterType((*SSH)(nil), "buildx.controller.v1.SSH")
|
||||
proto.RegisterType((*Secret)(nil), "buildx.controller.v1.Secret")
|
||||
proto.RegisterType((*PrintFunc)(nil), "buildx.controller.v1.PrintFunc")
|
||||
proto.RegisterType((*CallFunc)(nil), "buildx.controller.v1.CallFunc")
|
||||
proto.RegisterType((*InspectRequest)(nil), "buildx.controller.v1.InspectRequest")
|
||||
proto.RegisterType((*InspectResponse)(nil), "buildx.controller.v1.InspectResponse")
|
||||
proto.RegisterType((*UlimitOpt)(nil), "buildx.controller.v1.UlimitOpt")
|
||||
@@ -2094,130 +2094,130 @@ func init() {
|
||||
func init() { proto.RegisterFile("controller.proto", fileDescriptor_ed7f10298fa1d90f) }
|
||||
|
||||
var fileDescriptor_ed7f10298fa1d90f = []byte{
|
||||
// 1957 bytes of a gzipped FileDescriptorProto
|
||||
// 1961 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0x5f, 0x73, 0x1b, 0xb7,
|
||||
0x11, 0xef, 0x91, 0x14, 0xff, 0x2c, 0x45, 0xd9, 0x46, 0x6d, 0x17, 0x3e, 0x3b, 0xb6, 0x7c, 0xb6,
|
||||
0x53, 0x4e, 0xdd, 0xa1, 0x12, 0xa5, 0x8e, 0xe3, 0x38, 0x99, 0xa9, 0x44, 0x89, 0x95, 0x32, 0xb6,
|
||||
0xa4, 0x01, 0x65, 0x67, 0xda, 0xcc, 0x34, 0x73, 0x22, 0x21, 0xea, 0x46, 0xa7, 0x03, 0x7b, 0x00,
|
||||
0xf5, 0xa7, 0x4f, 0x7d, 0x68, 0xdf, 0x3a, 0xfd, 0x1e, 0x9d, 0x7e, 0x84, 0x3e, 0xf5, 0xad, 0x1f,
|
||||
0xa7, 0x1f, 0xa1, 0x83, 0x05, 0xee, 0x78, 0x14, 0x79, 0x94, 0xd4, 0x3e, 0x11, 0xbb, 0xf8, 0xed,
|
||||
0x53, 0x4e, 0xdd, 0xa1, 0x12, 0xa5, 0x8e, 0xe3, 0x38, 0x9d, 0xa9, 0x44, 0x89, 0x95, 0x32, 0xb6,
|
||||
0xa4, 0x01, 0x65, 0x67, 0x9a, 0xce, 0x34, 0x73, 0x22, 0x21, 0xea, 0x46, 0xa7, 0x03, 0x7b, 0x00,
|
||||
0xf5, 0xa7, 0x4f, 0x7d, 0x68, 0xdf, 0x3a, 0xfd, 0x1e, 0x9d, 0x7e, 0x84, 0x3e, 0xf5, 0xa1, 0xdf,
|
||||
0xa7, 0x1f, 0xa1, 0x83, 0x05, 0xee, 0x78, 0x14, 0x79, 0x94, 0xd4, 0x3c, 0x11, 0xbb, 0xf8, 0xed,
|
||||
0x2e, 0x76, 0x6f, 0xb1, 0xbb, 0x20, 0xdc, 0xee, 0x89, 0x48, 0xc5, 0x22, 0x0c, 0x79, 0xdc, 0x1a,
|
||||
0xc6, 0x42, 0x09, 0x72, 0xf7, 0x60, 0x14, 0x84, 0xfd, 0xf3, 0x56, 0x66, 0xe3, 0xf4, 0x73, 0xf7,
|
||||
0xed, 0x20, 0x50, 0x47, 0xa3, 0x83, 0x56, 0x4f, 0x9c, 0xac, 0x9c, 0x88, 0x83, 0x8b, 0x15, 0x44,
|
||||
0x1d, 0x07, 0x6a, 0xc5, 0x1f, 0x06, 0x2b, 0x92, 0xc7, 0xa7, 0x41, 0x8f, 0xcb, 0x15, 0x2b, 0x94,
|
||||
0xfc, 0x1a, 0x95, 0xee, 0xab, 0x5c, 0x61, 0x29, 0x46, 0x71, 0x8f, 0x0f, 0x45, 0x18, 0xf4, 0x2e,
|
||||
0x56, 0x86, 0x07, 0x2b, 0x66, 0x65, 0xc4, 0xbc, 0x26, 0xdc, 0x7d, 0x17, 0x48, 0xb5, 0x17, 0x8b,
|
||||
0x1e, 0x97, 0x92, 0x4b, 0xc6, 0xff, 0x30, 0xe2, 0x52, 0x91, 0xdb, 0x50, 0x64, 0xfc, 0x90, 0x3a,
|
||||
0x1e, 0x97, 0x92, 0x4b, 0xc6, 0xff, 0x38, 0xe2, 0x52, 0x91, 0xdb, 0x50, 0x64, 0xfc, 0x90, 0x3a,
|
||||
0xcb, 0x4e, 0xb3, 0xc6, 0xf4, 0xd2, 0xdb, 0x83, 0x7b, 0x97, 0x90, 0x72, 0x28, 0x22, 0xc9, 0xc9,
|
||||
0x6b, 0x58, 0xd8, 0x8e, 0x0e, 0x85, 0xa4, 0xce, 0x72, 0xb1, 0x59, 0x5f, 0x7d, 0xda, 0x9a, 0xe5,
|
||||
0x5c, 0xcb, 0xca, 0x69, 0x24, 0x33, 0x78, 0x4f, 0x42, 0x3d, 0xc3, 0x25, 0x8f, 0xa0, 0x96, 0x90,
|
||||
0x1b, 0xd6, 0xf0, 0x98, 0x41, 0x3a, 0xb0, 0xb8, 0x1d, 0x9d, 0x8a, 0x63, 0xde, 0x16, 0xd1, 0x61,
|
||||
0x30, 0xa0, 0x85, 0x65, 0xa7, 0x59, 0x5f, 0xf5, 0x66, 0x1b, 0xcb, 0x22, 0xd9, 0x84, 0x9c, 0xf7,
|
||||
0x1d, 0xd0, 0x8d, 0x40, 0xf6, 0x44, 0x14, 0xf1, 0x5e, 0xe2, 0x4c, 0xae, 0xd3, 0x93, 0x67, 0x2a,
|
||||
0x5c, 0x3a, 0x93, 0xf7, 0x10, 0x1e, 0xcc, 0xd0, 0x65, 0xc2, 0xe2, 0xfd, 0x1e, 0x16, 0xd7, 0xf5,
|
||||
0x2d, 0xd0, 0x8d, 0x40, 0xf6, 0x44, 0x14, 0xf1, 0x5e, 0xe2, 0x4c, 0xae, 0xd3, 0x93, 0x67, 0x2a,
|
||||
0x5c, 0x3a, 0x93, 0xf7, 0x10, 0x1e, 0xcc, 0xd0, 0x65, 0xc2, 0xe2, 0xfd, 0x01, 0x16, 0xd7, 0xf5,
|
||||
0xd9, 0xf2, 0x95, 0x7f, 0x03, 0x95, 0xdd, 0xa1, 0x0a, 0x44, 0x24, 0xe7, 0x7b, 0x83, 0x6a, 0x2c,
|
||||
0x92, 0x25, 0x22, 0xde, 0xbf, 0x17, 0xad, 0x01, 0xcb, 0x20, 0xcb, 0x50, 0x6f, 0x8b, 0x48, 0xf1,
|
||||
0x92, 0x25, 0x22, 0xde, 0x7f, 0x16, 0xad, 0x01, 0xcb, 0x20, 0xcb, 0x50, 0x6f, 0x8b, 0x48, 0xf1,
|
||||
0x73, 0xb5, 0xe7, 0xab, 0x23, 0x6b, 0x28, 0xcb, 0x22, 0x9f, 0xc2, 0xd2, 0x86, 0xe8, 0x1d, 0xf3,
|
||||
0xf8, 0x30, 0x08, 0xf9, 0x8e, 0x7f, 0xc2, 0xad, 0x4b, 0x97, 0xb8, 0xe4, 0x5b, 0xed, 0x75, 0x10,
|
||||
0xa9, 0xce, 0x28, 0xea, 0xd1, 0x22, 0x1e, 0xed, 0x49, 0xde, 0x57, 0xb5, 0x30, 0x36, 0x96, 0x20,
|
||||
0x3f, 0x40, 0x43, 0xab, 0xe9, 0x5b, 0xd3, 0x92, 0x96, 0x30, 0x31, 0x5e, 0x5d, 0xed, 0x5d, 0x6b,
|
||||
0x42, 0x6e, 0x33, 0x52, 0xf1, 0x05, 0x9b, 0xd4, 0x45, 0xee, 0xc2, 0xc2, 0x5a, 0x18, 0x8a, 0x33,
|
||||
0xba, 0xb0, 0x5c, 0x6c, 0xd6, 0x98, 0x21, 0xc8, 0x97, 0x50, 0x59, 0x53, 0x8a, 0x4b, 0x25, 0x69,
|
||||
0x19, 0x8d, 0x3d, 0x9a, 0x6d, 0xcc, 0x80, 0x58, 0x02, 0x26, 0xbb, 0x50, 0x43, 0xfb, 0x6b, 0xf1,
|
||||
0x40, 0xd2, 0x0a, 0x4a, 0x7e, 0x7e, 0x8d, 0x63, 0xa6, 0x32, 0xe6, 0x88, 0x63, 0x1d, 0x64, 0x13,
|
||||
0x6a, 0x6d, 0xbf, 0x77, 0xc4, 0x3b, 0xb1, 0x38, 0xa1, 0x55, 0x54, 0xf8, 0xf3, 0xd9, 0x0a, 0x11,
|
||||
0x66, 0x15, 0x5a, 0x35, 0xa9, 0x24, 0x59, 0x83, 0x0a, 0x12, 0xfb, 0x82, 0xd6, 0x6e, 0xa6, 0x24,
|
||||
0x91, 0x23, 0x1e, 0x2c, 0xb6, 0x07, 0xb1, 0x18, 0x0d, 0xf7, 0xfc, 0x98, 0x47, 0x8a, 0x02, 0x7e,
|
||||
0xea, 0x09, 0x1e, 0x79, 0x0b, 0x95, 0xcd, 0xf3, 0xa1, 0x88, 0x95, 0xa4, 0xf5, 0x79, 0x97, 0xd7,
|
||||
0x80, 0xac, 0x01, 0x2b, 0x41, 0x1e, 0x03, 0x6c, 0x9e, 0xab, 0xd8, 0xdf, 0x12, 0x3a, 0xec, 0x8b,
|
||||
0xf8, 0x39, 0x32, 0x1c, 0xd2, 0x81, 0xf2, 0x3b, 0xff, 0x80, 0x87, 0x92, 0x36, 0x50, 0x77, 0xeb,
|
||||
0x1a, 0x81, 0x35, 0x02, 0xc6, 0x90, 0x95, 0xd6, 0x79, 0xbd, 0xc3, 0xd5, 0x99, 0x88, 0x8f, 0xdf,
|
||||
0x8b, 0x3e, 0xa7, 0x4b, 0x26, 0xaf, 0x33, 0x2c, 0xf2, 0x1c, 0x1a, 0x3b, 0xc2, 0x04, 0x2f, 0x08,
|
||||
0x15, 0x8f, 0xe9, 0x2d, 0x3c, 0xcc, 0x24, 0x13, 0xef, 0x72, 0xe8, 0xab, 0x43, 0x11, 0x9f, 0x48,
|
||||
0x7a, 0x1b, 0x11, 0x63, 0x86, 0xce, 0xa0, 0x2e, 0xef, 0xc5, 0x5c, 0x49, 0x7a, 0x67, 0x5e, 0x06,
|
||||
0x19, 0x10, 0x4b, 0xc0, 0x84, 0x42, 0xa5, 0x7b, 0x74, 0xd2, 0x0d, 0xfe, 0xc8, 0x29, 0x59, 0x76,
|
||||
0x9a, 0x45, 0x96, 0x90, 0xe4, 0x25, 0x14, 0xbb, 0xdd, 0x2d, 0xfa, 0x53, 0xd4, 0xf6, 0x20, 0x47,
|
||||
0x5b, 0x77, 0x8b, 0x69, 0x14, 0x21, 0x50, 0xda, 0xf7, 0x07, 0x92, 0xde, 0xc5, 0x73, 0xe1, 0x9a,
|
||||
0xdc, 0x87, 0xf2, 0xbe, 0x1f, 0x0f, 0xb8, 0xa2, 0xf7, 0xd0, 0x67, 0x4b, 0x91, 0x37, 0x50, 0xf9,
|
||||
0x10, 0x06, 0x27, 0x81, 0x92, 0xf4, 0xfe, 0xbc, 0xcb, 0x69, 0x40, 0xbb, 0x43, 0xc5, 0x12, 0xbc,
|
||||
0x3e, 0x2d, 0xc6, 0x9b, 0xc7, 0xf4, 0x67, 0xa8, 0x33, 0x21, 0xf5, 0x8e, 0x0d, 0x17, 0xa5, 0xcb,
|
||||
0x4e, 0xb3, 0xca, 0x12, 0x52, 0x1f, 0x6d, 0x6f, 0x14, 0x86, 0xf4, 0x01, 0xb2, 0x71, 0x6d, 0xbe,
|
||||
0xbd, 0x4e, 0x83, 0xbd, 0x91, 0x3c, 0xa2, 0x2e, 0xee, 0x64, 0x38, 0xe3, 0xfd, 0x77, 0xc2, 0xef,
|
||||
0xd3, 0x87, 0xd9, 0x7d, 0xcd, 0x21, 0xdb, 0xb0, 0xd8, 0xc5, 0xb6, 0xb4, 0x87, 0xcd, 0x88, 0x3e,
|
||||
0x42, 0x3f, 0x5e, 0xb4, 0x74, 0xe7, 0x6a, 0x25, 0x9d, 0x4b, 0xfb, 0x90, 0x6d, 0x5e, 0x2d, 0x03,
|
||||
0x66, 0x13, 0xa2, 0x49, 0x5d, 0xfd, 0x64, 0x5c, 0x57, 0x5d, 0xa8, 0xfe, 0x46, 0x27, 0xb9, 0x66,
|
||||
0x3f, 0x46, 0x76, 0x4a, 0xeb, 0x64, 0x5a, 0x8b, 0x22, 0xa1, 0x7c, 0x53, 0x77, 0x9f, 0x60, 0xb8,
|
||||
0xb3, 0x2c, 0xf2, 0x25, 0xdc, 0xdf, 0x8b, 0xc5, 0x29, 0x8f, 0xfc, 0xa8, 0xc7, 0x93, 0x6a, 0x8e,
|
||||
0x99, 0xb7, 0x8c, 0xba, 0x72, 0x76, 0xdd, 0x5f, 0x03, 0x99, 0xae, 0x5e, 0xfa, 0x74, 0xc7, 0xfc,
|
||||
0x22, 0xa9, 0xfa, 0xc7, 0xfc, 0x42, 0x17, 0xb0, 0x53, 0x3f, 0x1c, 0x25, 0xb5, 0xd7, 0x10, 0x5f,
|
||||
0x17, 0xbe, 0x72, 0xdc, 0x6f, 0x60, 0x69, 0xb2, 0xb0, 0xdc, 0x48, 0xfa, 0x0d, 0xd4, 0x33, 0xb7,
|
||||
0xe7, 0x26, 0xa2, 0xde, 0xbf, 0x1c, 0xa8, 0x67, 0xae, 0x38, 0x26, 0xe3, 0xc5, 0x90, 0x5b, 0x61,
|
||||
0x5c, 0x93, 0x75, 0x58, 0x58, 0x53, 0x2a, 0xd6, 0xad, 0x4a, 0xe7, 0xf3, 0x2f, 0xaf, 0x2c, 0x14,
|
||||
0x2d, 0x84, 0x9b, 0xab, 0x6c, 0x44, 0x75, 0xf0, 0x37, 0xb8, 0x54, 0x41, 0x84, 0xa1, 0xc6, 0xce,
|
||||
0x52, 0x63, 0x59, 0x96, 0xfb, 0x15, 0xc0, 0x58, 0xec, 0x46, 0x3e, 0xfc, 0xc3, 0x81, 0x3b, 0x53,
|
||||
0xd5, 0x70, 0xa6, 0x27, 0x5b, 0x93, 0x9e, 0xac, 0x5e, 0xb3, 0xb2, 0x4e, 0xfb, 0xf3, 0x7f, 0x9c,
|
||||
0x76, 0x07, 0xca, 0xa6, 0x05, 0xcd, 0x3c, 0xa1, 0x0b, 0xd5, 0x8d, 0x40, 0xfa, 0x07, 0x21, 0xef,
|
||||
0xa3, 0x68, 0x95, 0xa5, 0x34, 0xf6, 0x3f, 0x3c, 0xbd, 0x89, 0x9e, 0x21, 0x3c, 0x53, 0x6b, 0xc8,
|
||||
0x12, 0x14, 0xd2, 0xd9, 0xa9, 0xb0, 0xbd, 0xa1, 0xc1, 0xba, 0xf1, 0x1b, 0x57, 0x6b, 0xcc, 0x10,
|
||||
0x5e, 0x07, 0xca, 0xa6, 0x7a, 0x4d, 0xe1, 0x5d, 0xa8, 0x76, 0x82, 0x90, 0xe3, 0xfc, 0x60, 0xce,
|
||||
0x9c, 0xd2, 0xda, 0xbd, 0xcd, 0xe8, 0xd4, 0x9a, 0xd5, 0x4b, 0xef, 0x87, 0xcc, 0x98, 0xa0, 0xfd,
|
||||
0xc0, 0x89, 0xc2, 0xfa, 0x81, 0x73, 0xc4, 0x7d, 0x28, 0x77, 0x44, 0x7c, 0xe2, 0x2b, 0xab, 0xcc,
|
||||
0x52, 0xba, 0x35, 0x6d, 0x0f, 0x22, 0x11, 0xf3, 0xae, 0xf2, 0xd5, 0xc8, 0xb8, 0x52, 0x65, 0x13,
|
||||
0x3c, 0xcf, 0x83, 0xa5, 0xed, 0x48, 0x0e, 0x79, 0x4f, 0xe5, 0x8f, 0xa4, 0xbb, 0x70, 0x2b, 0xc5,
|
||||
0xd8, 0x61, 0x34, 0x33, 0x53, 0x39, 0x37, 0x9f, 0xa9, 0xfe, 0xee, 0x40, 0x2d, 0xad, 0x9a, 0xa4,
|
||||
0x0d, 0x65, 0xfc, 0x62, 0xc9, 0x64, 0xfb, 0xf2, 0x8a, 0x32, 0xdb, 0xfa, 0x88, 0x68, 0xdb, 0xbd,
|
||||
0x8c, 0xa8, 0xfb, 0x3d, 0xd4, 0x33, 0xec, 0x19, 0x49, 0xb2, 0x9a, 0x4d, 0x92, 0xdc, 0xb6, 0x63,
|
||||
0x8c, 0x64, 0x53, 0x68, 0x03, 0xca, 0x86, 0x39, 0x33, 0xf4, 0x04, 0x4a, 0x5b, 0x7e, 0x6c, 0xd2,
|
||||
0xa7, 0xc8, 0x70, 0xad, 0x79, 0x5d, 0x71, 0xa8, 0x30, 0xdc, 0x45, 0x86, 0x6b, 0xef, 0x9f, 0x0e,
|
||||
0x34, 0xec, 0x98, 0x6a, 0x23, 0xc8, 0xe1, 0xb6, 0xb9, 0xc5, 0x3c, 0x4e, 0x78, 0xd6, 0xff, 0x37,
|
||||
0x73, 0x42, 0x99, 0x40, 0x5b, 0x97, 0x65, 0x4d, 0x34, 0xa6, 0x54, 0xba, 0x6d, 0xb8, 0x37, 0x13,
|
||||
0x7a, 0xa3, 0x6b, 0xf4, 0x02, 0xee, 0x8c, 0x07, 0xf0, 0xfc, 0x3c, 0xb9, 0x0b, 0x24, 0x0b, 0xb3,
|
||||
0x03, 0xfa, 0x13, 0xa8, 0xeb, 0x07, 0x4d, 0xbe, 0x98, 0x07, 0x8b, 0x06, 0x60, 0x23, 0x43, 0xa0,
|
||||
0x74, 0xcc, 0x2f, 0x4c, 0x36, 0xd4, 0x18, 0xae, 0xbd, 0xbf, 0x39, 0xfa, 0x5d, 0x32, 0x1c, 0xa9,
|
||||
0xf7, 0x5c, 0x4a, 0x7f, 0xa0, 0x13, 0xb0, 0xb4, 0x1d, 0x05, 0xca, 0x66, 0xdf, 0xa7, 0x79, 0xef,
|
||||
0x93, 0xe1, 0x48, 0x69, 0x98, 0x95, 0xda, 0xfa, 0x09, 0x43, 0x29, 0xf2, 0x1a, 0x4a, 0x1b, 0xbe,
|
||||
0xf2, 0x6d, 0x2e, 0xe4, 0x4c, 0x63, 0x1a, 0x91, 0x11, 0xd4, 0xe4, 0x7a, 0x45, 0x3f, 0xc2, 0x86,
|
||||
0x23, 0xe5, 0x3d, 0x87, 0xdb, 0x97, 0xb5, 0xcf, 0x70, 0xed, 0x0b, 0xa8, 0x67, 0xb4, 0xe0, 0xdd,
|
||||
0xde, 0xed, 0x20, 0xa0, 0xca, 0xf4, 0x52, 0xfb, 0x9a, 0x1e, 0x64, 0xd1, 0xd8, 0xf0, 0x6e, 0x41,
|
||||
0x03, 0x55, 0xa7, 0x11, 0xfc, 0x53, 0x01, 0x2a, 0x89, 0x8a, 0xd7, 0x13, 0x7e, 0x3f, 0xcd, 0xf3,
|
||||
0x7b, 0xda, 0xe5, 0x57, 0x50, 0xd2, 0x35, 0xc6, 0xba, 0x9c, 0x33, 0xca, 0x74, 0xfa, 0x19, 0x31,
|
||||
0x0d, 0x27, 0xdf, 0x42, 0x99, 0x71, 0xa9, 0xc7, 0x2e, 0xf3, 0x40, 0x79, 0x36, 0x5b, 0xd0, 0x60,
|
||||
0xc6, 0xc2, 0x56, 0x48, 0x8b, 0x77, 0x83, 0x41, 0xe4, 0x87, 0xb4, 0x34, 0x4f, 0xdc, 0x60, 0x32,
|
||||
0xe2, 0x86, 0x31, 0x0e, 0xf7, 0x5f, 0x1c, 0xa8, 0xcf, 0x0d, 0xf5, 0xfc, 0x27, 0xe4, 0xd4, 0xb3,
|
||||
0xb6, 0xf8, 0x3f, 0x3e, 0x6b, 0xff, 0x5c, 0x98, 0x54, 0x84, 0x13, 0x98, 0xbe, 0x4f, 0x43, 0x11,
|
||||
0x44, 0xca, 0xa6, 0x6c, 0x86, 0xa3, 0x0f, 0xda, 0x3e, 0xe9, 0xdb, 0xc6, 0xa0, 0x97, 0xfa, 0x9a,
|
||||
0xed, 0x08, 0xcd, 0xab, 0x63, 0x1a, 0x18, 0x62, 0x5c, 0xf6, 0x8b, 0xb6, 0xec, 0xeb, 0xd4, 0xf8,
|
||||
0x20, 0x79, 0x8c, 0x81, 0xab, 0x31, 0x5c, 0xeb, 0x4a, 0xbf, 0x23, 0x90, 0xbb, 0x80, 0xc2, 0x96,
|
||||
0x42, 0x2b, 0x67, 0x7d, 0x5a, 0x36, 0xe1, 0x68, 0x9f, 0x25, 0x56, 0xce, 0xfa, 0xb4, 0x92, 0x5a,
|
||||
0x39, 0x43, 0x2b, 0xfb, 0xea, 0x82, 0x56, 0x4d, 0x02, 0xee, 0xab, 0x0b, 0xdd, 0x8a, 0x98, 0x08,
|
||||
0xc3, 0x03, 0xbf, 0x77, 0x4c, 0x6b, 0xa6, 0x07, 0x26, 0xb4, 0x9e, 0x55, 0x75, 0xcc, 0x03, 0x3f,
|
||||
0xc4, 0x57, 0x4d, 0x95, 0x25, 0xa4, 0xb7, 0x06, 0xb5, 0x34, 0x55, 0x74, 0x77, 0xeb, 0xf4, 0xf1,
|
||||
0x53, 0x34, 0x58, 0xa1, 0xd3, 0x4f, 0xb2, 0xbc, 0x30, 0x9d, 0xe5, 0xc5, 0x4c, 0x96, 0xbf, 0x86,
|
||||
0xc6, 0x44, 0xd2, 0x68, 0x10, 0x13, 0x67, 0xd2, 0x2a, 0xc2, 0xb5, 0xe6, 0xb5, 0x45, 0x68, 0xde,
|
||||
0xed, 0x0d, 0x86, 0x6b, 0xef, 0x19, 0x34, 0x26, 0xd2, 0x65, 0x56, 0x5d, 0xf6, 0x9e, 0x42, 0xc3,
|
||||
0x34, 0xb8, 0xfc, 0xb2, 0xf3, 0x1f, 0x07, 0x96, 0x12, 0x8c, 0xad, 0x3c, 0xbf, 0x82, 0xea, 0x29,
|
||||
0x8f, 0x15, 0x3f, 0x4f, 0x7b, 0x11, 0x9d, 0x1e, 0x95, 0x3f, 0x22, 0x82, 0xa5, 0x48, 0xf2, 0x35,
|
||||
0x54, 0x25, 0xea, 0xe1, 0xc9, 0xac, 0xf3, 0x38, 0x4f, 0xca, 0xda, 0x4b, 0xf1, 0x64, 0x05, 0x4a,
|
||||
0xa1, 0x18, 0x48, 0xfc, 0xee, 0xf5, 0xd5, 0x87, 0x79, 0x72, 0xef, 0xc4, 0x80, 0x21, 0x90, 0xbc,
|
||||
0x85, 0xea, 0x99, 0x1f, 0x47, 0x41, 0x34, 0x48, 0xde, 0xfb, 0x4f, 0xf2, 0x84, 0xbe, 0x37, 0x38,
|
||||
0x96, 0x0a, 0x78, 0x0d, 0x7d, 0x89, 0x0e, 0x85, 0x8d, 0x89, 0xf7, 0x5b, 0x9d, 0xcb, 0x9a, 0xb4,
|
||||
0xee, 0x6f, 0x43, 0xc3, 0xdc, 0x87, 0x8f, 0x3c, 0x96, 0x7a, 0x72, 0x74, 0xe6, 0xdd, 0xd9, 0xf5,
|
||||
0x2c, 0x94, 0x4d, 0x4a, 0x7a, 0x3f, 0xda, 0x76, 0x97, 0x30, 0x74, 0x2e, 0x0d, 0xfd, 0xde, 0xb1,
|
||||
0x3f, 0x48, 0xbe, 0x53, 0x42, 0xea, 0x9d, 0x53, 0x6b, 0xcf, 0x5c, 0xdb, 0x84, 0xd4, 0xb9, 0x19,
|
||||
0xf3, 0xd3, 0x40, 0x8e, 0x87, 0xd8, 0x94, 0x5e, 0xfd, 0x6b, 0x05, 0xa0, 0x9d, 0x9e, 0x87, 0xec,
|
||||
0xc1, 0x02, 0xda, 0x23, 0xde, 0xdc, 0xe6, 0x89, 0x7e, 0xbb, 0xcf, 0xae, 0xd1, 0x60, 0xc9, 0x47,
|
||||
0x9d, 0xfc, 0x38, 0xf4, 0x90, 0xe7, 0x79, 0x65, 0x22, 0x3b, 0x37, 0xb9, 0x2f, 0xae, 0x40, 0x59,
|
||||
0xbd, 0x1f, 0xa0, 0x6c, 0xb2, 0x80, 0xe4, 0xd5, 0xc2, 0x6c, 0xde, 0xba, 0xcf, 0xe7, 0x83, 0x8c,
|
||||
0xd2, 0xcf, 0x1c, 0xc2, 0x6c, 0xa5, 0x24, 0xde, 0x9c, 0x56, 0x68, 0x6f, 0x4c, 0x5e, 0x00, 0x26,
|
||||
0xba, 0x4e, 0xd3, 0x21, 0xdf, 0x41, 0xd9, 0xd4, 0x3a, 0xf2, 0xc9, 0x6c, 0x81, 0x44, 0xdf, 0xfc,
|
||||
0xed, 0xa6, 0xf3, 0x99, 0x43, 0xde, 0x43, 0x49, 0x37, 0x79, 0x92, 0xd3, 0xb1, 0x32, 0x13, 0x82,
|
||||
0xeb, 0xcd, 0x83, 0xd8, 0x28, 0xfe, 0x08, 0x30, 0x1e, 0x35, 0x48, 0xce, 0xbf, 0x36, 0x53, 0x33,
|
||||
0x8b, 0xdb, 0xbc, 0x1a, 0x68, 0x0d, 0xbc, 0xd7, 0x7d, 0xf6, 0x50, 0x90, 0xdc, 0x0e, 0x9b, 0x5e,
|
||||
0x23, 0xd7, 0x9b, 0x07, 0xb1, 0xea, 0x8e, 0xa0, 0x31, 0xf1, 0xaf, 0x2e, 0xf9, 0x45, 0xbe, 0x93,
|
||||
0x97, 0xff, 0x24, 0x76, 0x5f, 0x5e, 0x0b, 0x6b, 0x2d, 0xa9, 0xec, 0xac, 0x66, 0xb7, 0x49, 0xeb,
|
||||
0x2a, 0xbf, 0x27, 0xff, 0xa1, 0x75, 0x57, 0xae, 0x8d, 0x37, 0x56, 0xd7, 0x4b, 0xbf, 0x2b, 0x0c,
|
||||
0x0f, 0x0e, 0xca, 0xf8, 0x67, 0xf7, 0x17, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xa2, 0x32, 0x20,
|
||||
0xaa, 0x8a, 0x17, 0x00, 0x00,
|
||||
0xf8, 0x30, 0x08, 0xf9, 0x8e, 0x7f, 0xc2, 0xad, 0x4b, 0x97, 0xb8, 0xe4, 0x6b, 0xa8, 0xb6, 0xfd,
|
||||
0x30, 0xec, 0x8c, 0xa2, 0x1e, 0x2d, 0xe2, 0xc9, 0x1e, 0xcf, 0x3e, 0x59, 0x82, 0x62, 0x29, 0x9e,
|
||||
0xfc, 0x1e, 0x1a, 0x5a, 0x47, 0xdf, 0xda, 0x95, 0xb4, 0x84, 0x59, 0xf1, 0xea, 0x6a, 0xd7, 0x5a,
|
||||
0x13, 0x72, 0x9b, 0x91, 0x8a, 0x2f, 0xd8, 0xa4, 0x2e, 0x72, 0x17, 0x16, 0xd6, 0xc2, 0x50, 0x9c,
|
||||
0xd1, 0x85, 0xe5, 0x62, 0xb3, 0xc6, 0x0c, 0x41, 0xbe, 0x84, 0xca, 0x9a, 0x52, 0x5c, 0x2a, 0x49,
|
||||
// (remaining generated fileDescriptor bytes omitted)
}
// Reference imports to suppress errors if they are not otherwise used.
|
||||
|
@@ -49,7 +49,7 @@ message BuildRequest {
|
||||
message BuildOptions {
|
||||
string ContextPath = 1;
|
||||
string DockerfileName = 2;
|
||||
PrintFunc PrintFunc = 3;
|
||||
CallFunc CallFunc = 3;
|
||||
map<string, string> NamedContexts = 4;
|
||||
|
||||
repeated string Allow = 5;
|
||||
@@ -111,7 +111,7 @@ message Secret {
|
||||
string Env = 3;
|
||||
}
|
||||
|
||||
message PrintFunc {
|
||||
message CallFunc {
|
||||
string Name = 1;
|
||||
string Format = 2;
|
||||
bool IgnoreStatus = 3;
|
||||
|
@@ -207,6 +207,7 @@ func attachIO(ctx context.Context, stream msgStream, initMessage *pb.InitMessage
|
||||
|
||||
if cfg.signal != nil {
|
||||
eg.Go(func() error {
|
||||
names := signalNames()
|
||||
for {
|
||||
var sig syscall.Signal
|
||||
select {
|
||||
@@ -216,7 +217,7 @@ func attachIO(ctx context.Context, stream msgStream, initMessage *pb.InitMessage
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
}
|
||||
name := sigToName[sig]
|
||||
name := names[sig]
|
||||
if name == "" {
|
||||
continue
|
||||
}
|
||||
@@ -380,12 +381,12 @@ func copyToStream(fd uint32, snd msgStream, r io.Reader) error {
|
||||
})
|
||||
}
|
||||
|
||||
var sigToName = map[syscall.Signal]string{}
|
||||
|
||||
func init() {
|
||||
func signalNames() map[syscall.Signal]string {
|
||||
m := make(map[syscall.Signal]string, len(signal.SignalMap))
|
||||
for name, value := range signal.SignalMap {
|
||||
sigToName[value] = name
|
||||
m[value] = name
|
||||
}
|
||||
return m
|
||||
}
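For context, a minimal sketch of how this refactor behaves: the lookup map is now built on demand from `signal.SignalMap` in `github.com/moby/sys/signal` instead of being filled by a package-level `init`, and the caller indexes it with the raw `syscall.Signal` it received. The `main` wrapper below is purely illustrative.

```go
package main

import (
	"fmt"
	"syscall"

	"github.com/moby/sys/signal"
)

// signalNames builds the signal->name lookup on demand, mirroring the helper
// introduced above, instead of relying on a map populated in init().
func signalNames() map[syscall.Signal]string {
	m := make(map[syscall.Signal]string, len(signal.SignalMap))
	for name, value := range signal.SignalMap {
		m[value] = name
	}
	return m
}

func main() {
	names := signalNames()
	// signal.SignalMap registers SIGINT under the name "INT".
	fmt.Println(names[syscall.SIGINT])
}
```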
|
||||
|
||||
type debugStream struct {
|
||||
|
@@ -31,7 +31,7 @@ group "default" {
|
||||
}
|
||||
|
||||
group "validate" {
|
||||
targets = ["lint", "lint-gopls", "validate-vendor", "validate-docs"]
|
||||
targets = ["lint", "lint-gopls", "validate-golangci", "validate-vendor", "validate-docs"]
|
||||
}
|
||||
|
||||
target "lint" {
|
||||
@@ -51,6 +51,14 @@ target "lint" {
|
||||
] : []
|
||||
}
|
||||
|
||||
target "validate-golangci" {
|
||||
description = "Validate .golangci.yml schema (does not run Go linter)"
|
||||
inherits = ["_common"]
|
||||
dockerfile = "./hack/dockerfiles/lint.Dockerfile"
|
||||
target = "validate-golangci"
|
||||
output = ["type=cacheonly"]
|
||||
}
|
||||
|
||||
target "lint-gopls" {
|
||||
inherits = ["lint"]
|
||||
target = "gopls-analyze"
|
||||
@@ -209,3 +217,18 @@ target "integration-test" {
|
||||
inherits = ["integration-test-base"]
|
||||
target = "integration-test"
|
||||
}
|
||||
|
||||
variable "GOVULNCHECK_FORMAT" {
|
||||
default = null
|
||||
}
|
||||
|
||||
target "govulncheck" {
|
||||
inherits = ["_common"]
|
||||
dockerfile = "./hack/dockerfiles/govulncheck.Dockerfile"
|
||||
target = "output"
|
||||
args = {
|
||||
FORMAT = GOVULNCHECK_FORMAT
|
||||
}
|
||||
no-cache-filter = ["run"]
|
||||
output = ["${DESTDIR}"]
|
||||
}
|
||||
|
@@ -443,8 +443,7 @@ COPY --from=src . .
|
||||
|
||||
#### Use another target as base
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> [!NOTE]
|
||||
> You should prefer to use regular multi-stage builds over this option. You can
|
||||
> use this feature when you have multiple Dockerfiles that can't be easily
|
||||
> merged into one.
|
||||
@@ -506,6 +505,25 @@ $ docker buildx bake --print -f - <<< 'target "default" {}'
|
||||
}
|
||||
```
|
||||
|
||||
### `target.entitlements`
|
||||
|
||||
Entitlements are permissions that the build process requires to run.
|
||||
|
||||
Currently supported entitlements are:
|
||||
|
||||
- `network.host`: Allows the build to use commands that access the host network. In Dockerfile, use [`RUN --network=host`](https://docs.docker.com/reference/dockerfile/#run---networkhost) to run a command with host network enabled.
|
||||
|
||||
- `security.insecure`: Allows the build to run commands in privileged containers that are not limited by the default security sandbox. Such containers may potentially access and modify system resources. In Dockerfile, use [`RUN --security=insecure`](https://docs.docker.com/reference/dockerfile/#run---security) to run a command in a privileged container.
|
||||
|
||||
```hcl
|
||||
target "integration-tests" {
|
||||
# this target requires privileged containers to run nested containers
|
||||
entitlements = ["security.insecure"]
|
||||
}
|
||||
```
|
||||
|
||||
Entitlements are enabled with a two-step process. First, a target must declare the entitlements it requires. Second, when invoking the `bake` command, the user must grant the entitlements by passing the `--allow` flag or confirming them when prompted in an interactive terminal. This ensures that the user is aware of the potentially insecure permissions they are granting to the build process.
|
||||
|
||||
### `target.inherits`
|
||||
|
||||
A target can inherit attributes from other targets.
|
||||
@@ -750,6 +768,27 @@ target "app" {
|
||||
}
|
||||
```
|
||||
|
||||
### `target.network`
|
||||
|
||||
Specify the network mode for the whole build request. This will override the default network mode
|
||||
for all the `RUN` instructions in the Dockerfile. Accepted values are `default`, `host`, and `none`.
|
||||
|
||||
Usually, a better approach is to set the network mode for individual build steps with `RUN --network=<value>`
in your Dockerfile. That way, everyone building the Dockerfile gets consistent behavior without needing to
pass additional flags to the build command.
|
||||
|
||||
If you set network mode to `host` in your Bake file, you must also grant `network.host` entitlement when
|
||||
invoking the `bake` command. This is because `host` network mode requires elevated privileges and can be a security risk.
|
||||
You can pass `--allow=network.host` to the `docker buildx bake` command to grant the entitlement, or you can
|
||||
confirm the entitlement when prompted if you are using an interactive terminal.
|
||||
|
||||
```hcl
|
||||
target "app" {
|
||||
# make sure this build does not access internet
|
||||
network = "none"
|
||||
}
|
||||
```
|
||||
|
||||
### `target.no-cache-filter`
|
||||
|
||||
Don't use build cache for the specified stages.
|
||||
@@ -832,8 +871,8 @@ This lets you [mount the secret][run_mount_secret] in your Dockerfile.
|
||||
```dockerfile
|
||||
RUN --mount=type=secret,id=aws,target=/root/.aws/credentials \
|
||||
aws cloudfront create-invalidation ...
|
||||
RUN --mount=type=secret,id=KUBECONFIG \
|
||||
KUBECONFIG=$(cat /run/secrets/KUBECONFIG) helm upgrade --install
|
||||
RUN --mount=type=secret,id=KUBECONFIG,env=KUBECONFIG \
|
||||
helm upgrade --install
|
||||
```
|
||||
|
||||
### `target.shm-size`
|
||||
@@ -853,8 +892,7 @@ target "default" {
|
||||
}
|
||||
```
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> [!NOTE]
|
||||
> In most cases, it is recommended to let the builder automatically determine
|
||||
> the appropriate configurations. Manual adjustments should only be considered
|
||||
> when specific performance tuning is required for complex build scenarios.
|
||||
@@ -919,14 +957,12 @@ target "app" {
|
||||
}
|
||||
```
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> [!NOTE]
|
||||
> If you do not provide a `hard limit`, the `soft limit` is used
|
||||
> for both values. If no `ulimits` are set, they are inherited from
|
||||
> the default `ulimits` set on the daemon.
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> [!NOTE]
|
||||
> In most cases, it is recommended to let the builder automatically determine
|
||||
> the appropriate configurations. Manual adjustments should only be considered
|
||||
> when specific performance tuning is required for complex build scenarios.
|
||||
@@ -1114,8 +1150,7 @@ target "webapp-dev" {
|
||||
}
|
||||
```
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> [!NOTE]
|
||||
> See [User defined HCL functions][hcl-funcs] page for more details.
|
||||
|
||||
<!-- external links -->
|
||||
|
@@ -4,8 +4,7 @@ To assist with creating and debugging complex builds, Buildx provides a
|
||||
debugger to help you step through the build process and easily inspect the
|
||||
state of the build environment at any point.
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> [!NOTE]
|
||||
> The debug monitor is a new experimental feature in recent versions of Buildx.
|
||||
> There are rough edges, known bugs, and missing features. Please try it out
|
||||
> and let us know what you think!
|
||||
|
@@ -32,6 +32,7 @@ Extended build capabilities with BuildKit
|
||||
| Name | Type | Default | Description |
|
||||
|:------------------------|:---------|:--------|:-----------------------------------------|
|
||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
@@ -15,9 +15,11 @@ Build from a file
|
||||
|
||||
| Name | Type | Default | Description |
|
||||
|:------------------------------------|:--------------|:--------|:----------------------------------------------------------------------------------------------------|
|
||||
| `--allow` | `stringArray` | | Allow build to access specified resources |
|
||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
||||
| [`--call`](#call) | `string` | `build` | Set method for evaluating build (`check`, `outline`, `targets`) |
|
||||
| [`--check`](#check) | `bool` | | Shorthand for `--call=check` |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
| [`-f`](#file), [`--file`](#file) | `stringArray` | | Build definition file |
|
||||
| `--load` | `bool` | | Shorthand for `--set=*.output=type=docker` |
|
||||
| [`--metadata-file`](#metadata-file) | `string` | | Write build result metadata to a file |
|
||||
@@ -41,8 +43,7 @@ as part of the build.
|
||||
Read [High-level build options with Bake](https://docs.docker.com/build/bake/)
|
||||
guide for introduction to writing bake files.
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> [!NOTE]
|
||||
> `buildx bake` command may receive backwards incompatible features in the future
|
||||
> if needed. We are looking for feedback on improving the command and extending
|
||||
> the functionality further.
|
||||
@@ -163,8 +164,7 @@ $ cat metadata.json
|
||||
}
|
||||
```
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> [!NOTE]
|
||||
> Build record [provenance](https://docs.docker.com/build/metadata/attestations/slsa-provenance/#provenance-attestation-example)
|
||||
> (`buildx.build.provenance`) includes minimal provenance by default. Set the
|
||||
> `BUILDX_METADATA_PROVENANCE` environment variable to customize this behavior:
|
||||
@@ -172,8 +172,7 @@ $ cat metadata.json
|
||||
> * `max` sets full provenance.
|
||||
> * `disabled`, `false` or `0` does not set any provenance.
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> [!NOTE]
|
||||
> Build warnings (`buildx.build.warnings`) are not included by default. Set the
|
||||
> `BUILDX_METADATA_WARNINGS` environment variable to `1` or `true` to
|
||||
> include them.
|
||||
|
@@ -27,6 +27,7 @@ Start a build
|
||||
| [`--call`](#call) | `string` | `build` | Set method for evaluating build (`check`, `outline`, `targets`) |
|
||||
| [`--cgroup-parent`](#cgroup-parent) | `string` | | Set the parent cgroup for the `RUN` instructions during build |
|
||||
| [`--check`](#check) | `bool` | | Shorthand for `--call=check` |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
| `--detach` | `bool` | | Detach buildx server (supported only on linux) (EXPERIMENTAL) |
|
||||
| [`-f`](#file), [`--file`](#file) | `string` | | Name of the Dockerfile (default: `PATH/Dockerfile`) |
|
||||
| `--iidfile` | `string` | | Write the image ID to a file |
|
||||
@@ -581,8 +582,7 @@ $ cat metadata.json
|
||||
}
|
||||
```
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> [!NOTE]
|
||||
> Build record [provenance](https://docs.docker.com/build/metadata/attestations/slsa-provenance/#provenance-attestation-example)
|
||||
> (`buildx.build.provenance`) includes minimal provenance by default. Set the
|
||||
> `BUILDX_METADATA_PROVENANCE` environment variable to customize this behavior:
|
||||
@@ -591,6 +591,11 @@ $ cat metadata.json
|
||||
> - `max` sets full provenance.
|
||||
> - `disabled`, `false` or `0` doesn't set any provenance.
|
||||
|
||||
> [!NOTE]
|
||||
> Build warnings (`buildx.build.warnings`) are not included by default. Set the
|
||||
> `BUILDX_METADATA_WARNINGS` environment variable to `1` or `true` to
|
||||
> include them.
|
||||
|
||||
### <a name="network"></a> Set the networking mode for the RUN instructions during build (--network)
|
||||
|
||||
Available options for the networking mode are:
|
||||
@@ -601,12 +606,6 @@ Available options for the networking mode are:
|
||||
|
||||
Find more details in the [Dockerfile reference](https://docs.docker.com/reference/dockerfile/#run---network).
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> Build warnings (`buildx.build.warnings`) are not included by default. Set the
|
||||
> `BUILDX_METADATA_WARNINGS` environment variable to `1` or `true` to
|
||||
> include them.
|
||||
|
||||
### <a name="no-cache-filter"></a> Ignore build cache for specific stages (--no-cache-filter)
|
||||
|
||||
The `--no-cache-filter` lets you specify one or more stages of a multi-stage
|
||||
@@ -832,8 +831,7 @@ $ docker buildx build --platform=darwin .
|
||||
Set type of progress output (`auto`, `plain`, `tty`, `rawjson`). Use `plain` to show container
|
||||
output (default `auto`).
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> [!NOTE]
|
||||
> You can also use the `BUILDKIT_PROGRESS` environment variable to set its value.
|
||||
|
||||
The following example uses `plain` output during the build:
|
||||
@@ -851,8 +849,7 @@ $ docker buildx build --load --progress=plain .
|
||||
...
|
||||
```
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> [!NOTE]
|
||||
> Check also the [`BUILDKIT_COLORS`](https://docs.docker.com/build/building/variables/#buildkit_colors)
|
||||
> environment variable for modifying the colors of the terminal output.
|
||||
|
||||
@@ -950,8 +947,8 @@ Attribute keys:
|
||||
# syntax=docker/dockerfile:1
|
||||
FROM node:alpine
|
||||
RUN --mount=type=bind,target=. \
|
||||
--mount=type=secret,id=SECRET_TOKEN \
|
||||
SECRET_TOKEN=$(cat /run/secrets/SECRET_TOKEN) yarn run test
|
||||
--mount=type=secret,id=SECRET_TOKEN,env=SECRET_TOKEN \
|
||||
yarn run test
|
||||
```
|
||||
|
||||
```console
|
||||
@@ -967,8 +964,7 @@ The format is `<number><unit>`. `number` must be greater than `0`. Unit is
|
||||
optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g`
|
||||
(gigabytes). If you omit the unit, the system uses bytes.
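As an illustrative sketch only (not a statement about buildx internals), the same `<number><unit>` format is what `github.com/docker/go-units` understands, which makes it easy to check how a given value will be interpreted:

```go
package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	// RAMInBytes parses the <number><unit> format, e.g. "128m" or "2g";
	// a bare number is taken as bytes.
	for _, v := range []string{"128m", "2g", "1024"} {
		n, err := units.RAMInBytes(v)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s = %d bytes\n", v, n)
	}
}
```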
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> [!NOTE]
|
||||
> In most cases, it is recommended to let the builder automatically determine
|
||||
> the appropriate configurations. Manual adjustments should only be considered
|
||||
> when specific performance tuning is required for complex build scenarios.
|
||||
@@ -1054,14 +1050,12 @@ instructions and are specified with a soft and hard limit as such:
|
||||
$ docker buildx build --ulimit nofile=1024:1024 .
|
||||
```
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> [!NOTE]
|
||||
> If you don't provide a `hard limit`, the `soft limit` is used
|
||||
> for both values. If no `ulimits` are set, they're inherited from
|
||||
> the default `ulimits` set on the daemon.
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> [!NOTE]
|
||||
> In most cases, it is recommended to let the builder automatically determine
|
||||
> the appropriate configurations. Manual adjustments should only be considered
|
||||
> when specific performance tuning is required for complex build scenarios.
|
||||
|
@@ -15,6 +15,7 @@ Create a new builder instance
|
||||
| `--bootstrap` | `bool` | | Boot builder after creation |
|
||||
| [`--buildkitd-config`](#buildkitd-config) | `string` | | BuildKit daemon config file |
|
||||
| [`--buildkitd-flags`](#buildkitd-flags) | `string` | | BuildKit daemon flags |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
| [`--driver`](#driver) | `string` | | Driver to use (available: `docker-container`, `kubernetes`, `remote`) |
|
||||
| [`--driver-opt`](#driver-opt) | `stringArray` | | Options for the driver |
|
||||
| [`--leave`](#leave) | `bool` | | Remove a node from builder instead of changing it |
|
||||
@@ -101,8 +102,7 @@ value is `auto` and can be one of `bridge`, `cni`, `host`:
|
||||
--buildkitd-flags '--oci-worker-net bridge'
|
||||
```
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> [!NOTE]
|
||||
> Network mode "bridge" is supported since BuildKit v0.13 and will become the
|
||||
> default in v0.14.
|
||||
|
||||
|
@@ -15,6 +15,7 @@ Start debugger (EXPERIMENTAL)
|
||||
| Name | Type | Default | Description |
|
||||
|:------------------|:---------|:--------|:--------------------------------------------------------------------------------------------------------------------|
|
||||
| `--builder` | `string` | | Override the configured builder instance |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
| `--detach` | `bool` | `true` | Detach buildx server for the monitor (supported only on linux) (EXPERIMENTAL) |
|
||||
| `--invoke` | `string` | | Launch a monitor with executing specified command (EXPERIMENTAL) |
|
||||
| `--on` | `string` | `error` | When to launch the monitor ([always, error]) (EXPERIMENTAL) |
|
||||
|
@@ -23,6 +23,7 @@ Start a build
|
||||
| `--call` | `string` | `build` | Set method for evaluating build (`check`, `outline`, `targets`) |
|
||||
| `--cgroup-parent` | `string` | | Set the parent cgroup for the `RUN` instructions during build |
|
||||
| `--check` | `bool` | | Shorthand for `--call=check` |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
| `--detach` | `bool` | | Detach buildx server (supported only on linux) (EXPERIMENTAL) |
|
||||
| `-f`, `--file` | `string` | | Name of the Dockerfile (default: `PATH/Dockerfile`) |
|
||||
| `--iidfile` | `string` | | Write the image ID to a file |
|
||||
|
@@ -5,11 +5,12 @@ Proxy current stdio streams to builder instance
|
||||
|
||||
### Options
|
||||
|
||||
| Name | Type | Default | Description |
|
||||
|:-------------|:---------|:--------|:----------------------------------------------------------------------------------------------------|
|
||||
| `--builder` | `string` | | Override the configured builder instance |
|
||||
| `--platform` | `string` | | Target platform: this is used for node selection |
|
||||
| `--progress` | `string` | `quiet` | Set type of progress output (`auto`, `plain`, `tty`, `rawjson`). Use plain to show container output |
|
||||
| Name | Type | Default | Description |
|
||||
|:----------------|:---------|:--------|:----------------------------------------------------------------------------------------------------|
|
||||
| `--builder` | `string` | | Override the configured builder instance |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
| `--platform` | `string` | | Target platform: this is used for node selection |
|
||||
| `--progress` | `string` | `quiet` | Set type of progress output (`auto`, `plain`, `tty`, `rawjson`). Use plain to show container output |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
@@ -12,6 +12,7 @@ Disk usage
|
||||
| Name | Type | Default | Description |
|
||||
|:------------------------|:---------|:--------|:-----------------------------------------|
|
||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
| `--filter` | `filter` | | Provide filter values |
|
||||
| [`--verbose`](#verbose) | `bool` | | Provide a more verbose output |
|
||||
|
||||
|
@@ -20,6 +20,7 @@ Commands to work on images in registry
|
||||
| Name | Type | Default | Description |
|
||||
|:------------------------|:---------|:--------|:-----------------------------------------|
|
||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
@@ -14,6 +14,7 @@ Create a new image based on source images
|
||||
| [`--annotation`](#annotation) | `stringArray` | | Add annotation to the image |
|
||||
| [`--append`](#append) | `bool` | | Append to existing manifest |
|
||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
| [`--dry-run`](#dry-run) | `bool` | | Show final image instead of pushing |
|
||||
| [`-f`](#file), [`--file`](#file) | `stringArray` | | Read source descriptor from file |
|
||||
| `--prefer-index` | `bool` | `true` | When only a single source is specified, prefer outputting an image index or manifest list instead of performing a carbon copy |
|
||||
@@ -52,8 +53,7 @@ $ docker buildx imagetools create \
|
||||
foo/bar:alpha foo/bar:beta foo/bar:gamma
|
||||
```
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> [!NOTE]
|
||||
> The `imagetools create` command supports adding annotations to the image
|
||||
> index and descriptor, using the following type prefixes:
|
||||
>
|
||||
|
@@ -12,6 +12,7 @@ Show details of an image in the registry
|
||||
| Name | Type | Default | Description |
|
||||
|:------------------------|:---------|:----------------|:----------------------------------------------|
|
||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
| [`--format`](#format) | `string` | `{{.Manifest}}` | Format the output using the given Go template |
|
||||
| [`--raw`](#raw) | `bool` | | Show original, unformatted JSON manifest |
|
||||
|
||||
|
@@ -13,6 +13,7 @@ Inspect current builder instance
|
||||
|:----------------------------|:---------|:--------|:--------------------------------------------|
|
||||
| [`--bootstrap`](#bootstrap) | `bool` | | Ensure builder has booted before inspecting |
|
||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
@@ -43,8 +44,7 @@ name of the builder to inspect to get information about that builder.
|
||||
The following example shows information about a builder instance named
|
||||
`elated_tesla`:
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> [!NOTE]
|
||||
> The asterisk (`*`) next to node build platform(s) indicates they have been
|
||||
> manually set during `buildx create`. Otherwise the platforms were
|
||||
> automatically detected.
|
||||
|
@@ -9,9 +9,10 @@ List builder instances
|
||||
|
||||
### Options
|
||||
|
||||
| Name | Type | Default | Description |
|
||||
|:----------------------|:---------|:--------|:------------------|
|
||||
| [`--format`](#format) | `string` | `table` | Format the output |
|
||||
| Name | Type | Default | Description |
|
||||
|:----------------------|:---------|:--------|:---------------------|
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
| [`--format`](#format) | `string` | `table` | Format the output |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
@@ -13,6 +13,7 @@ Remove build cache
|
||||
|:------------------------|:---------|:--------|:------------------------------------------|
|
||||
| `-a`, `--all` | `bool` | | Include internal/frontend images |
|
||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
| `--filter` | `filter` | | Provide filter values (e.g., `until=24h`) |
|
||||
| `-f`, `--force` | `bool` | | Do not prompt for confirmation |
|
||||
| `--keep-storage` | `bytes` | `0` | Amount of disk space to keep for cache |
|
||||
|
@@ -13,6 +13,7 @@ Remove one or more builder instances
|
||||
|:------------------------------------|:---------|:--------|:-----------------------------------------|
|
||||
| [`--all-inactive`](#all-inactive) | `bool` | | Remove all inactive builders |
|
||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
| [`-f`](#force), [`--force`](#force) | `bool` | | Do not prompt for confirmation |
|
||||
| [`--keep-daemon`](#keep-daemon) | `bool` | | Keep the BuildKit daemon running |
|
||||
| [`--keep-state`](#keep-state) | `bool` | | Keep BuildKit state |
|
||||
|
@@ -12,6 +12,7 @@ Stop builder instance
|
||||
| Name | Type | Default | Description |
|
||||
|:------------------------|:---------|:--------|:-----------------------------------------|
|
||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
@@ -12,6 +12,7 @@ Set the current builder instance
|
||||
| Name | Type | Default | Description |
|
||||
|:------------------------|:---------|:--------|:-------------------------------------------|
|
||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
| `--default` | `bool` | | Set builder as default for current context |
|
||||
| `--global` | `bool` | | Builder persists context changes |
|
||||
|
||||
|
@@ -7,6 +7,12 @@ docker buildx version
|
||||
<!---MARKER_GEN_START-->
|
||||
Show buildx version information
|
||||
|
||||
### Options
|
||||
|
||||
| Name | Type | Default | Description |
|
||||
|:----------------|:-------|:--------|:---------------------|
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
||||
|
@@ -29,7 +29,7 @@ func (d *Driver) Bootstrap(ctx context.Context, l progress.Logger) error {
|
||||
func (d *Driver) Info(ctx context.Context) (*driver.Info, error) {
|
||||
_, err := d.DockerAPI.ServerVersion(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(driver.ErrNotConnecting, err.Error())
|
||||
return nil, errors.Wrapf(driver.ErrNotConnecting{}, err.Error())
|
||||
}
|
||||
return &driver.Info{
|
||||
Status: driver.Running,
|
||||
@@ -39,7 +39,7 @@ func (d *Driver) Info(ctx context.Context) (*driver.Info, error) {
|
||||
func (d *Driver) Version(ctx context.Context) (string, error) {
|
||||
v, err := d.DockerAPI.ServerVersion(ctx)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(driver.ErrNotConnecting, err.Error())
|
||||
return "", errors.Wrapf(driver.ErrNotConnecting{}, err.Error())
|
||||
}
|
||||
if bkversion, _ := resolveBuildKitVersion(v.Version); bkversion != "" {
|
||||
return bkversion, nil
|
||||
|
@@ -14,8 +14,17 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var ErrNotRunning = errors.Errorf("driver not running")
|
||||
var ErrNotConnecting = errors.Errorf("driver not connecting")
|
||||
type ErrNotRunning struct{}
|
||||
|
||||
func (ErrNotRunning) Error() string {
|
||||
return "driver not running"
|
||||
}
|
||||
|
||||
type ErrNotConnecting struct{}
|
||||
|
||||
func (ErrNotConnecting) Error() string {
|
||||
return "driver not connecting"
|
||||
}
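The point of turning the sentinel errors into empty struct types is that two values of the same empty struct compare equal, so call sites can keep matching them with `errors.Is` even through wrapping. A self-contained sketch (using `github.com/pkg/errors` for wrapping, as the drivers do):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

type ErrNotRunning struct{}

func (ErrNotRunning) Error() string { return "driver not running" }

// boot simulates a driver call that wraps the typed sentinel with context.
func boot() error {
	return errors.Wrap(ErrNotRunning{}, "waiting for buildkitd")
}

func main() {
	// Empty struct values compare equal, so the match survives the wrap.
	fmt.Println(errors.Is(boot(), ErrNotRunning{})) // true
}
```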
|
||||
|
||||
type Status int
|
||||
|
||||
@@ -105,7 +114,7 @@ func Boot(ctx, clientContext context.Context, d *DriverHandle, pw progress.Write
|
||||
|
||||
c, err := d.Client(clientContext)
|
||||
if err != nil {
|
||||
if errors.Cause(err) == ErrNotRunning && try <= 2 {
|
||||
if errors.Is(err, ErrNotRunning{}) && try <= 2 {
|
||||
continue
|
||||
}
|
||||
return nil, err
|
||||
|
@@ -38,7 +38,8 @@ const (
|
||||
|
||||
type Driver struct {
|
||||
driver.InitConfig
|
||||
factory driver.Factory
|
||||
factory driver.Factory
|
||||
clientConfig ClientConfig
|
||||
|
||||
// if you add fields, remember to update docs:
|
||||
// https://github.com/docker/docs/blob/main/content/build/drivers/kubernetes.md
|
||||
@@ -198,7 +199,7 @@ func (d *Driver) Rm(ctx context.Context, force, rmVolume, rmDaemon bool) error {
|
||||
|
||||
func (d *Driver) Dial(ctx context.Context) (net.Conn, error) {
|
||||
restClient := d.clientset.CoreV1().RESTClient()
|
||||
restClientConfig, err := d.KubeClientConfig.ClientConfig()
|
||||
restClientConfig, err := d.clientConfig.ClientConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@@ -2,19 +2,22 @@ package kubernetes
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
|
||||
"github.com/docker/buildx/driver"
|
||||
"github.com/docker/buildx/driver/bkimage"
|
||||
ctxkube "github.com/docker/buildx/driver/kubernetes/context"
|
||||
"github.com/docker/buildx/driver/kubernetes/manifest"
|
||||
"github.com/docker/buildx/driver/kubernetes/podchooser"
|
||||
dockerclient "github.com/docker/docker/client"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -23,11 +26,31 @@ const (
|
||||
defaultTimeout = 120 * time.Second
|
||||
)
|
||||
|
||||
type ClientConfig interface {
|
||||
ClientConfig() (*rest.Config, error)
|
||||
Namespace() (string, bool, error)
|
||||
}
|
||||
|
||||
type ClientConfigInCluster struct{}
|
||||
|
||||
func (k ClientConfigInCluster) ClientConfig() (*rest.Config, error) {
|
||||
return rest.InClusterConfig()
|
||||
}
|
||||
|
||||
func (k ClientConfigInCluster) Namespace() (string, bool, error) {
|
||||
namespace, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
|
||||
if err != nil {
|
||||
return "", false, err
|
||||
}
|
||||
return strings.TrimSpace(string(namespace)), true, nil
|
||||
}
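The interface is deliberately a small subset of what `k8s.io/client-go/tools/clientcmd` already exposes, so an existing kubeconfig loader satisfies it without an adapter. A sketch under that assumption (in buildx itself the factory obtains the config from the Docker context store instead):

```go
package main

import (
	"fmt"

	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

// clientConfig mirrors the ClientConfig interface defined above.
type clientConfig interface {
	ClientConfig() (*rest.Config, error)
	Namespace() (string, bool, error)
}

func main() {
	// A deferred-loading kubeconfig config implements both methods, so it
	// can be assigned to the narrower interface directly.
	var cc clientConfig = clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		clientcmd.NewDefaultClientConfigLoadingRules(),
		&clientcmd.ConfigOverrides{},
	)
	ns, _, err := cc.Namespace()
	fmt.Println(ns, err)
}
```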
|
||||
|
||||
func init() {
|
||||
driver.Register(&factory{})
|
||||
}
|
||||
|
||||
type factory struct {
|
||||
cc ClientConfig // used for testing
|
||||
}
|
||||
|
||||
func (*factory) Name() string {
|
||||
@@ -46,18 +69,50 @@ func (*factory) Priority(ctx context.Context, endpoint string, api dockerclient.
|
||||
}
|
||||
|
||||
func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver, error) {
|
||||
if cfg.KubeClientConfig == nil {
|
||||
return nil, errors.Errorf("%s driver requires kubernetes API access", DriverName)
|
||||
var err error
|
||||
var cc ClientConfig
|
||||
if f.cc != nil {
|
||||
cc = f.cc
|
||||
} else {
|
||||
cc, err = ctxkube.ConfigFromEndpoint(cfg.EndpointAddr, cfg.ContextStore)
|
||||
if err != nil {
|
||||
// err is returned if cfg.EndpointAddr is a non-context name like "unix:///var/run/docker.sock".
|
||||
// try again with name="default".
|
||||
// FIXME(@AkihiroSuda): cfg should retain real context name.
|
||||
cc, err = ctxkube.ConfigFromEndpoint("default", cfg.ContextStore)
|
||||
if err != nil {
|
||||
logrus.Error(err)
|
||||
}
|
||||
}
|
||||
tryToUseConfigInCluster := false
|
||||
if cc == nil {
|
||||
tryToUseConfigInCluster = true
|
||||
} else {
|
||||
if _, err := cc.ClientConfig(); err != nil {
|
||||
tryToUseConfigInCluster = true
|
||||
}
|
||||
}
|
||||
if tryToUseConfigInCluster {
|
||||
ccInCluster := ClientConfigInCluster{}
|
||||
if _, err := ccInCluster.ClientConfig(); err == nil {
|
||||
logrus.Debug("using kube config in cluster")
|
||||
cc = ccInCluster
|
||||
}
|
||||
}
|
||||
if cc == nil {
|
||||
return nil, errors.Errorf("%s driver requires kubernetes API access", DriverName)
|
||||
}
|
||||
}
|
||||
|
||||
deploymentName, err := buildxNameToDeploymentName(cfg.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
namespace, _, err := cfg.KubeClientConfig.Namespace()
|
||||
namespace, _, err := cc.Namespace()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "cannot determine Kubernetes namespace, specify manually")
|
||||
}
|
||||
restClientConfig, err := cfg.KubeClientConfig.ClientConfig()
|
||||
restClientConfig, err := cc.ClientConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -67,9 +122,10 @@ func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver
|
||||
}
|
||||
|
||||
d := &Driver{
|
||||
factory: f,
|
||||
InitConfig: cfg,
|
||||
clientset: clientset,
|
||||
factory: f,
|
||||
clientConfig: cc,
|
||||
InitConfig: cfg,
|
||||
clientset: clientset,
|
||||
}
|
||||
|
||||
deploymentOpt, loadbalance, namespace, defaultLoad, timeout, err := f.processDriverOpts(deploymentName, namespace, cfg)
|
||||
|
@@ -11,29 +11,28 @@ import (
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
type mockKubeClientConfig struct {
|
||||
type mockClientConfig struct {
|
||||
clientConfig *rest.Config
|
||||
namespace string
|
||||
}
|
||||
|
||||
func (r *mockKubeClientConfig) ClientConfig() (*rest.Config, error) {
|
||||
func (r *mockClientConfig) ClientConfig() (*rest.Config, error) {
|
||||
return r.clientConfig, nil
|
||||
}
|
||||
|
||||
func (r *mockKubeClientConfig) Namespace() (string, bool, error) {
|
||||
func (r *mockClientConfig) Namespace() (string, bool, error) {
|
||||
return r.namespace, true, nil
|
||||
}
|
||||
|
||||
func TestFactory_processDriverOpts(t *testing.T) {
|
||||
kcc := mockKubeClientConfig{
|
||||
clientConfig: &rest.Config{},
|
||||
}
|
||||
|
||||
cfg := driver.InitConfig{
|
||||
Name: driver.BuilderName("test"),
|
||||
KubeClientConfig: &kcc,
|
||||
Name: driver.BuilderName("test"),
|
||||
}
|
||||
f := factory{
|
||||
cc: &mockClientConfig{
|
||||
clientConfig: &rest.Config{},
|
||||
},
|
||||
}
|
||||
f := factory{}
|
||||
|
||||
t.Run(
|
||||
"ValidOptions", func(t *testing.T) {
|
||||
|
@@ -7,7 +7,6 @@ import (
|
||||
|
||||
"github.com/docker/buildx/util/platformutil"
|
||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
@@ -53,10 +52,17 @@ const (
|
||||
LabelApp = "app"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrReservedAnnotationPlatform = errors.Errorf("the annotation \"%s\" is reserved and cannot be customized", AnnotationPlatform)
|
||||
ErrReservedLabelApp = errors.Errorf("the label \"%s\" is reserved and cannot be customized", LabelApp)
|
||||
)
|
||||
type ErrReservedAnnotationPlatform struct{}
|
||||
|
||||
func (ErrReservedAnnotationPlatform) Error() string {
|
||||
return fmt.Sprintf("the annotation %q is reserved and cannot be customized", AnnotationPlatform)
|
||||
}
|
||||
|
||||
type ErrReservedLabelApp struct{}
|
||||
|
||||
func (ErrReservedLabelApp) Error() string {
|
||||
return fmt.Sprintf("the label %q is reserved and cannot be customized", LabelApp)
|
||||
}
|
||||
|
||||
func NewDeployment(opt *DeploymentOpt) (d *appsv1.Deployment, c []*corev1.ConfigMap, err error) {
|
||||
labels := map[string]string{
|
||||
@@ -73,14 +79,14 @@ func NewDeployment(opt *DeploymentOpt) (d *appsv1.Deployment, c []*corev1.Config
|
||||
|
||||
for k, v := range opt.CustomAnnotations {
|
||||
if k == AnnotationPlatform {
|
||||
return nil, nil, ErrReservedAnnotationPlatform
|
||||
return nil, nil, ErrReservedAnnotationPlatform{}
|
||||
}
|
||||
annotations[k] = v
|
||||
}
|
||||
|
||||
for k, v := range opt.CustomLabels {
|
||||
if k == LabelApp {
|
||||
return nil, nil, ErrReservedLabelApp
|
||||
return nil, nil, ErrReservedLabelApp{}
|
||||
}
|
||||
labels[k] = v
|
||||
}
|
||||
|
@@ -2,17 +2,15 @@ package driver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/docker/cli/cli/context/store"
|
||||
dockerclient "github.com/docker/docker/client"
|
||||
"github.com/moby/buildkit/client"
|
||||
"github.com/moby/buildkit/util/tracing/delegated"
|
||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
type Factory interface {
|
||||
@@ -28,38 +26,18 @@ type BuildkitConfig struct {
|
||||
// Rootless bool
|
||||
}
|
||||
|
||||
type KubeClientConfig interface {
|
||||
ClientConfig() (*rest.Config, error)
|
||||
Namespace() (string, bool, error)
|
||||
}
|
||||
|
||||
type KubeClientConfigInCluster struct{}
|
||||
|
||||
func (k KubeClientConfigInCluster) ClientConfig() (*rest.Config, error) {
|
||||
return rest.InClusterConfig()
|
||||
}
|
||||
|
||||
func (k KubeClientConfigInCluster) Namespace() (string, bool, error) {
|
||||
namespace, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
|
||||
if err != nil {
|
||||
return "", false, err
|
||||
}
|
||||
return strings.TrimSpace(string(namespace)), true, nil
|
||||
}
|
||||
|
||||
type InitConfig struct {
|
||||
// This object needs updates to be generic for different drivers
|
||||
Name string
|
||||
EndpointAddr string
|
||||
DockerAPI dockerclient.APIClient
|
||||
KubeClientConfig KubeClientConfig
|
||||
BuildkitdFlags []string
|
||||
Files map[string][]byte
|
||||
DriverOpts map[string]string
|
||||
Auth Auth
|
||||
Platforms []specs.Platform
|
||||
ContextPathHash string // can be used for determining pods in the driver instance
|
||||
DialMeta map[string][]string
|
||||
Name string
|
||||
EndpointAddr string
|
||||
DockerAPI dockerclient.APIClient
|
||||
ContextStore store.Reader
|
||||
BuildkitdFlags []string
|
||||
Files map[string][]byte
|
||||
DriverOpts map[string]string
|
||||
Auth Auth
|
||||
Platforms []specs.Platform
|
||||
ContextPathHash string
|
||||
DialMeta map[string][]string
|
||||
}
|
||||
|
||||
var drivers map[string]Factory
|
||||
@@ -104,28 +82,15 @@ func GetFactory(name string, instanceRequired bool) (Factory, error) {
|
||||
return nil, errors.Errorf("failed to find driver %q", name)
|
||||
}
|
||||
|
||||
func GetDriver(ctx context.Context, name string, f Factory, endpointAddr string, api dockerclient.APIClient, auth Auth, kcc KubeClientConfig, buildkitdFlags []string, files map[string][]byte, do map[string]string, platforms []specs.Platform, contextPathHash string, dialMeta map[string][]string) (*DriverHandle, error) {
|
||||
ic := InitConfig{
|
||||
EndpointAddr: endpointAddr,
|
||||
DockerAPI: api,
|
||||
KubeClientConfig: kcc,
|
||||
Name: name,
|
||||
BuildkitdFlags: buildkitdFlags,
|
||||
DriverOpts: do,
|
||||
Auth: auth,
|
||||
Platforms: platforms,
|
||||
ContextPathHash: contextPathHash,
|
||||
DialMeta: dialMeta,
|
||||
Files: files,
|
||||
}
|
||||
func GetDriver(ctx context.Context, f Factory, cfg InitConfig) (*DriverHandle, error) {
|
||||
if f == nil {
|
||||
var err error
|
||||
f, err = GetDefaultFactory(ctx, endpointAddr, api, false, dialMeta)
|
||||
f, err = GetDefaultFactory(ctx, cfg.EndpointAddr, cfg.DockerAPI, false, cfg.DialMeta)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
d, err := f.New(ctx, ic)
|
||||
d, err := f.New(ctx, cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
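A hypothetical caller-side sketch of the new shape: everything that used to be a separate positional argument now travels in `InitConfig`, and passing a nil `Factory` asks `GetDriver` to auto-detect one from the endpoint. The helper name and the values below are placeholders, not buildx code:

```go
package example

import (
	"context"

	"github.com/docker/buildx/driver"
	dockerclient "github.com/docker/docker/client"
)

// newHandle is a hypothetical helper showing the new call shape.
func newHandle(ctx context.Context, api dockerclient.APIClient) (*driver.DriverHandle, error) {
	cfg := driver.InitConfig{
		Name:         driver.BuilderName("example"),
		EndpointAddr: "unix:///var/run/docker.sock",
		DockerAPI:    api,
		DriverOpts:   map[string]string{},
	}
	// A nil factory lets GetDriver resolve the default factory for the endpoint.
	return driver.GetDriver(ctx, nil, cfg)
}
```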
|
||||
|
@@ -7,15 +7,15 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
// import connhelpers for special url schemes
|
||||
_ "github.com/moby/buildkit/client/connhelper/dockercontainer"
|
||||
_ "github.com/moby/buildkit/client/connhelper/kubepod"
|
||||
_ "github.com/moby/buildkit/client/connhelper/ssh"
|
||||
|
||||
"github.com/docker/buildx/driver"
|
||||
util "github.com/docker/buildx/driver/remote/util"
|
||||
dockerclient "github.com/docker/docker/client"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
// import connhelpers for special url schemes
|
||||
_ "github.com/moby/buildkit/client/connhelper/dockercontainer"
|
||||
_ "github.com/moby/buildkit/client/connhelper/kubepod"
|
||||
_ "github.com/moby/buildkit/client/connhelper/ssh"
|
||||
)
|
||||
|
||||
const prioritySupported = 20
|
||||
|
@@ -1,7 +1,7 @@
|
||||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
package remote
|
||||
package remoteutil
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
@@ -1,4 +1,4 @@
|
||||
package remote
|
||||
package remoteutil
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
@@ -1,18 +1,19 @@
|
||||
package remote
|
||||
package remoteutil
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"slices"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var schemes = map[string]struct{}{
|
||||
"tcp": {},
|
||||
"unix": {},
|
||||
"ssh": {},
|
||||
"docker-container": {},
|
||||
"kube-pod": {},
|
||||
"npipe": {},
|
||||
var schemes = []string{
|
||||
"docker-container",
|
||||
"kube-pod",
|
||||
"npipe",
|
||||
"ssh",
|
||||
"tcp",
|
||||
"unix",
|
||||
}
|
||||
|
||||
func IsValidEndpoint(ep string) error {
|
||||
@@ -20,7 +21,7 @@ func IsValidEndpoint(ep string) error {
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to parse endpoint %s", ep)
|
||||
}
|
||||
if _, ok := schemes[endpoint.Scheme]; !ok {
|
||||
if _, ok := slices.BinarySearch(schemes, endpoint.Scheme); !ok {
|
||||
return errors.Errorf("unrecognized url scheme %s", endpoint.Scheme)
|
||||
}
|
||||
return nil
|
||||
|
12
driver/remote/util/endpoint_test.go
Normal file
@@ -0,0 +1,12 @@
|
||||
package remoteutil
|
||||
|
||||
import (
|
||||
"slices"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestSchemes(t *testing.T) {
|
||||
require.True(t, slices.IsSorted(schemes))
|
||||
}
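As a quick illustration of the lookup pattern the new code relies on: `slices.BinarySearch` is only valid on a sorted slice (which is exactly what the test above guards), and its second return value reports whether the scheme was found.

```go
package main

import (
	"fmt"
	"slices"
)

// schemes mirrors the list above and must stay sorted for BinarySearch.
var schemes = []string{
	"docker-container",
	"kube-pod",
	"npipe",
	"ssh",
	"tcp",
	"unix",
}

func main() {
	_, ok := slices.BinarySearch(schemes, "ssh")
	fmt.Println(ok) // true

	_, ok = slices.BinarySearch(schemes, "http")
	fmt.Println(ok) // false: unrecognized scheme
}
```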
|
40
go.mod
@@ -6,19 +6,19 @@ require (
|
||||
github.com/Masterminds/semver/v3 v3.2.1
|
||||
github.com/Microsoft/go-winio v0.6.2
|
||||
github.com/aws/aws-sdk-go-v2/config v1.26.6
|
||||
github.com/compose-spec/compose-go/v2 v2.1.3
|
||||
github.com/compose-spec/compose-go/v2 v2.1.6
|
||||
github.com/containerd/console v1.0.4
|
||||
github.com/containerd/containerd v1.7.19
|
||||
github.com/containerd/containerd v1.7.21
|
||||
github.com/containerd/continuity v0.4.3
|
||||
github.com/containerd/errdefs v0.1.0
|
||||
github.com/containerd/log v0.1.0
|
||||
github.com/containerd/platforms v0.2.1
|
||||
github.com/containerd/typeurl/v2 v2.1.1
|
||||
github.com/containerd/typeurl/v2 v2.2.0
|
||||
github.com/creack/pty v1.1.21
|
||||
github.com/distribution/reference v0.6.0
|
||||
github.com/docker/cli v27.0.3+incompatible
|
||||
github.com/docker/cli v27.2.0+incompatible
|
||||
github.com/docker/cli-docs-tool v0.8.0
|
||||
github.com/docker/docker v27.0.3+incompatible
|
||||
github.com/docker/docker v27.2.0+incompatible
|
||||
github.com/docker/go-units v0.5.0
|
||||
github.com/gofrs/flock v0.12.1
|
||||
github.com/gogo/protobuf v1.3.2
|
||||
@@ -29,9 +29,9 @@ require (
|
||||
github.com/hashicorp/hcl/v2 v2.20.1
|
||||
github.com/in-toto/in-toto-golang v0.5.0
|
||||
github.com/mitchellh/hashstructure/v2 v2.0.2
|
||||
github.com/moby/buildkit v0.15.1
|
||||
github.com/moby/sys/mountinfo v0.7.1
|
||||
github.com/moby/sys/signal v0.7.0
|
||||
github.com/moby/buildkit v0.16.0-rc2
|
||||
github.com/moby/sys/mountinfo v0.7.2
|
||||
github.com/moby/sys/signal v0.7.1
|
||||
github.com/morikuni/aec v1.0.0
|
||||
github.com/opencontainers/go-digest v1.0.0
|
||||
github.com/opencontainers/image-spec v1.1.0
|
||||
@@ -54,7 +54,7 @@ require (
|
||||
golang.org/x/sys v0.22.0
|
||||
golang.org/x/term v0.20.0
|
||||
golang.org/x/text v0.15.0
|
||||
google.golang.org/grpc v1.59.0
|
||||
google.golang.org/grpc v1.62.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
k8s.io/api v0.29.2
|
||||
k8s.io/apimachinery v0.29.2
|
||||
@@ -87,7 +87,7 @@ require (
|
||||
github.com/containerd/ttrpc v1.2.5 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/docker/distribution v2.8.2+incompatible // indirect
|
||||
github.com/docker/distribution v2.8.3+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.8.2 // indirect
|
||||
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
|
||||
github.com/docker/go-connections v0.5.0 // indirect
|
||||
@@ -105,7 +105,7 @@ require (
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/gorilla/mux v1.8.0 // indirect
|
||||
github.com/gorilla/mux v1.8.1 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
@@ -128,7 +128,8 @@ require (
|
||||
github.com/moby/patternmatcher v0.6.0 // indirect
|
||||
github.com/moby/spdystream v0.2.0 // indirect
|
||||
github.com/moby/sys/sequential v0.5.0 // indirect
|
||||
github.com/moby/sys/user v0.1.0 // indirect
|
||||
github.com/moby/sys/user v0.3.0 // indirect
|
||||
github.com/moby/sys/userns v0.1.0 // indirect
|
||||
github.com/moby/term v0.5.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
@@ -152,9 +153,8 @@ require (
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.46.1 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.42.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.42.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 // indirect
|
||||
@@ -163,13 +163,13 @@ require (
|
||||
golang.org/x/crypto v0.23.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 // indirect
|
||||
golang.org/x/net v0.25.0 // indirect
|
||||
golang.org/x/oauth2 v0.11.0 // indirect
|
||||
golang.org/x/oauth2 v0.16.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
golang.org/x/tools v0.17.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b // indirect
|
||||
google.golang.org/appengine v1.6.8 // indirect
|
||||
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect
|
||||
google.golang.org/protobuf v1.33.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
|
125
go.sum
@@ -1,6 +1,6 @@
|
||||
cloud.google.com/go v0.110.8 h1:tyNdfIxjzaWctIiLYOTalaLKZ17SI44SKFW26QbOhME=
|
||||
cloud.google.com/go/compute v1.23.1 h1:V97tBoDaZHb6leicZ1G6DLK2BAaZLJ/7+9BB/En3hR0=
|
||||
cloud.google.com/go/compute v1.23.1/go.mod h1:CqB3xpmPKKt3OJpW2ndFIXnA9A4xAy/F3Xp1ixncW78=
|
||||
cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=
|
||||
cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk=
|
||||
cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI=
|
||||
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
|
||||
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
|
||||
@@ -80,18 +80,18 @@ github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004 h1:lkAMpLVBDaj17e85keuznYcH5rqI438v41pKcBl4ZxQ=
|
||||
github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA=
|
||||
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=
|
||||
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ=
|
||||
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM=
|
||||
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE=
|
||||
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
|
||||
github.com/compose-spec/compose-go/v2 v2.1.3 h1:bD67uqLuL/XgkAK6ir3xZvNLFPxPScEi1KW7R5esrLE=
|
||||
github.com/compose-spec/compose-go/v2 v2.1.3/go.mod h1:lFN0DrMxIncJGYAXTfWuajfwj5haBJqrBkarHcnjJKc=
|
||||
github.com/compose-spec/compose-go/v2 v2.1.6 h1:d0Cs0DffmOwmSzs0YPHwKCskknGq2jfGg4uGowlEpps=
|
||||
github.com/compose-spec/compose-go/v2 v2.1.6/go.mod h1:lFN0DrMxIncJGYAXTfWuajfwj5haBJqrBkarHcnjJKc=
|
||||
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
|
||||
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
|
||||
github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro=
|
||||
github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
|
||||
github.com/containerd/containerd v1.7.19 h1:/xQ4XRJ0tamDkdzrrBAUy/LE5nCcxFKdBm4EcPrSMEE=
|
||||
github.com/containerd/containerd v1.7.19/go.mod h1:h4FtNYUUMB4Phr6v+xG89RYKj9XccvbNSCKjdufCrkc=
|
||||
github.com/containerd/containerd v1.7.21 h1:USGXRK1eOC/SX0L195YgxTHb0a00anxajOzgfN0qrCA=
|
||||
github.com/containerd/containerd v1.7.21/go.mod h1:e3Jz1rYRUZ2Lt51YrH9Rz0zPyJBOlSvB3ghr2jbVD8g=
|
||||
github.com/containerd/containerd/api v1.7.19 h1:VWbJL+8Ap4Ju2mx9c9qS1uFSB1OVYr5JJrW2yT5vFoA=
|
||||
github.com/containerd/containerd/api v1.7.19/go.mod h1:fwGavl3LNwAV5ilJ0sbrABL44AQxmNjDRcwheXDb6Ig=
|
||||
github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8=
|
||||
@@ -102,8 +102,8 @@ github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY
|
||||
github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o=
|
||||
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
|
||||
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
|
||||
github.com/containerd/nydus-snapshotter v0.13.7 h1:x7DHvGnzJOu1ZPwPYkeOPk5MjZZYbdddygEjaSDoFTk=
|
||||
github.com/containerd/nydus-snapshotter v0.13.7/go.mod h1:VPVKQ3jmHFIcUIV2yiQ1kImZuBFS3GXDohKs9mRABVE=
|
||||
github.com/containerd/nydus-snapshotter v0.14.0 h1:6/eAi6d7MjaeLLuMO8Udfe5GVsDudmrDNO4SGETMBco=
|
||||
github.com/containerd/nydus-snapshotter v0.14.0/go.mod h1:TT4jv2SnIDxEBu4H2YOvWQHPOap031ydTaHTuvc5VQk=
|
||||
github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
|
||||
github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
|
||||
github.com/containerd/stargz-snapshotter v0.15.1 h1:fpsP4kf/Z4n2EYnU0WT8ZCE3eiKDwikDhL6VwxIlgeA=
|
||||
@@ -111,8 +111,8 @@ github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
|
||||
github.com/containerd/ttrpc v1.2.5 h1:IFckT1EFQoFBMG4c3sMdT8EP3/aKfumK1msY+Ze4oLU=
|
||||
github.com/containerd/ttrpc v1.2.5/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o=
|
||||
github.com/containerd/typeurl/v2 v2.1.1 h1:3Q4Pt7i8nYwy2KmQWIw2+1hTvwTE/6w9FqcttATPO/4=
|
||||
github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0=
|
||||
github.com/containerd/typeurl/v2 v2.2.0 h1:6NBDbQzr7I5LHgp34xAXYF5DOTQDn05X58lsPEmzLso=
|
||||
github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
@@ -124,15 +124,15 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
|
||||
github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
|
||||
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
|
||||
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/docker/cli v27.0.3+incompatible h1:usGs0/BoBW8MWxGeEtqPMkzOY56jZ6kYlSN5BLDioCQ=
|
||||
github.com/docker/cli v27.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/cli v27.2.0+incompatible h1:yHD1QEB1/0vr5eBNpu8tncu8gWxg8EydFPOSKHzXSMM=
|
||||
github.com/docker/cli v27.2.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/cli-docs-tool v0.8.0 h1:YcDWl7rQJC3lJ7WVZRwSs3bc9nka97QLWfyJQli8yJU=
|
||||
github.com/docker/cli-docs-tool v0.8.0/go.mod h1:8TQQ3E7mOXoYUs811LiPdUnAhXrcVsBIrW21a5pUbdk=
|
||||
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
|
||||
github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v27.0.3+incompatible h1:aBGI9TeQ4MPlhquTQKq9XbK79rKFVwXNUAYz9aXyEBE=
|
||||
github.com/docker/docker v27.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
|
||||
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v27.2.0+incompatible h1:Rk9nIVdfH3+Vz4cyI/uhbINhEZ/oLmc+CBXmH6fbNk4=
|
||||
github.com/docker/docker v27.2.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
|
||||
github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
|
||||
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0=
|
||||
@@ -152,8 +152,8 @@ github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNE
|
||||
github.com/dvsekhvalnov/jose2go v0.0.0-20170216131308-f21a8cedbbae/go.mod h1:7BvyPhdbLxMXIYTFPLsyJRFMsKmOZnQmzh6Gb+uquuM=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
|
||||
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
@@ -193,8 +193,8 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
|
||||
github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
|
||||
github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
|
||||
github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68=
|
||||
github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
@@ -203,6 +203,7 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93 h1:jc2UWq7CbdszqeH6qu1ougXMIUBfSy8Pbh/anURYbGI=
|
||||
@@ -226,8 +227,8 @@ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
|
||||
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
|
||||
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
@@ -306,8 +307,8 @@ github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/z
|
||||
github.com/mitchellh/mapstructure v0.0.0-20150613213606-2caf8efc9366/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/moby/buildkit v0.15.1 h1:J6wrew7hphKqlq1wuu6yaUb/1Ra7gEzDAovylGztAKM=
|
||||
github.com/moby/buildkit v0.15.1/go.mod h1:Yis8ZMUJTHX9XhH9zVyK2igqSHV3sxi3UN0uztZocZk=
|
||||
github.com/moby/buildkit v0.16.0-rc2 h1:5uFWrGujJOWHu0duIcz0tBagS97xddogZ3OxNEfezrU=
|
||||
github.com/moby/buildkit v0.16.0-rc2/go.mod h1:9STuyLZDJNGenp/smTiR01mnvqlO5u5ZW/0/aWHQcio=
|
||||
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
|
||||
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
|
||||
github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
|
||||
@@ -316,14 +317,16 @@ github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkV
|
||||
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
|
||||
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
|
||||
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
|
||||
github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g=
|
||||
github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
|
||||
github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
|
||||
github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
|
||||
github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
|
||||
github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
|
||||
github.com/moby/sys/signal v0.7.0 h1:25RW3d5TnQEoKvRbEKUGay6DCQ46IxAVTT9CUMgmsSI=
|
||||
github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
|
||||
github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg=
|
||||
github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU=
|
||||
github.com/moby/sys/signal v0.7.1 h1:PrQxdvxcGijdo6UXXo/lU/TvHUWyPhj7UOpSo8tuvk0=
|
||||
github.com/moby/sys/signal v0.7.1/go.mod h1:Se1VGehYokAkrSQwL4tDzHvETwUZlnY7S5XtQ50mQp8=
|
||||
github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo=
|
||||
github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
|
||||
github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
|
||||
github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
|
||||
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
|
||||
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
@@ -394,8 +397,8 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
|
||||
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE=
|
||||
@@ -460,6 +463,7 @@ github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17
|
||||
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
github.com/zclconf/go-cty v1.4.0/go.mod h1:nHzOclRkoj++EU9ZjSrZvRG0BXIWt8c7loYc0qXAFGQ=
|
||||
github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8=
|
||||
github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
|
||||
@@ -475,12 +479,10 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJ
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo=
|
||||
go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
|
||||
go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0 h1:ZtfnDL+tUrs1F0Pzfwbg2d59Gru9NCH3bgSHBM6LDwU=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0/go.mod h1:hG4Fj/y8TR/tlEDREo8tWstl9fO9gcFkn4xrx0Io8xU=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.42.0 h1:NmnYCiR0qNufkldjVvyQfZTHSdzeHoZ41zggMsdMcLM=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.42.0/go.mod h1:UVAO61+umUsHLtYb8KXXRoHtxUkdOPkYidzW3gipRLQ=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.42.0 h1:wNMDy/LVGLj2h3p6zg4d0gypKfWKSWI14E1C4smOgl8=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.42.0/go.mod h1:YfbDdXAAkemWJK3H/DshvlrxqFB2rtW4rY6ky/3x/H0=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0 h1:jd0+5t/YynESZqsSyPz+7PAFdEop0dlN0+PkyHYo8oI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0/go.mod h1:U707O40ee1FpQGyhvqnzmCJm1Wh6OX6GGBVn0E6Uyyk=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0 h1:bflGWrfYyuulcdxf14V6n9+CoQcu5SAAdHmDPAJnlps=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0/go.mod h1:qcTO4xHAxZLaLxPd60TdE88rxtItPHgHWqOhOGRr0as=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk=
|
||||
@@ -507,32 +509,36 @@ golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.0.0-20200422194213-44a606286825/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
|
||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o=
|
||||
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
|
||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
|
||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU=
|
||||
golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk=
|
||||
golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
|
||||
golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@@ -544,19 +550,25 @@ golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
|
||||
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw=
|
||||
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
|
||||
golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
|
||||
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
@@ -565,6 +577,7 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
|
||||
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@@ -572,19 +585,19 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b h1:+YaDE2r2OG8t/z5qmsh7Y+XXwCbvadxxZ0YY6mTdrVA=
|
||||
google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:CgAqfJo+Xmu0GwA0411Ht3OU3OntXwsGmrmjI8ioGXI=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b h1:CIC2YMXmIhYw6evmhPxBKJ4fmLbOFtXQN/GV3XOZR8k=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:IBQ646DjkDkvUIsVq/cc03FUFQ9wbZu7yE396YcL870=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b h1:ZlWIi1wSK56/8hn4QcBp/j9M7Gt3U/3hZw3mC7vDICo=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:swOH3j0KzcDDgGUWr+SNpyTen5YrXjS3eyPzFYKc6lc=
|
||||
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
|
||||
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
|
||||
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
|
||||
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s=
|
||||
google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
|
||||
google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
|
||||
google.golang.org/grpc v1.62.0 h1:HQKZ/fa1bXkX1oFOvSjmZEUL8wLSaZTjCcLAlmZRtdk=
|
||||
google.golang.org/grpc v1.62.0/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
|
||||
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
|
||||
|
hack/dockerfiles/authors.Dockerfile

@@ -1,20 +1,21 @@
# syntax=docker/dockerfile:1

FROM alpine:3.14 AS gen
ARG ALPINE_VERSION=3.20

FROM alpine:${ALPINE_VERSION} AS gen
RUN apk add --no-cache git
WORKDIR /src
RUN --mount=type=bind,target=. <<EOT
#!/usr/bin/env bash
set -e
mkdir /out
# see also ".mailmap" for how email addresses and names are deduplicated
{
echo "# This file lists all individuals having contributed content to the repository."
echo "# For how it is generated, see hack/dockerfiles/authors.Dockerfile."
echo
git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf
} > /out/AUTHORS
cat /out/AUTHORS
set -e
mkdir /out
# see also ".mailmap" for how email addresses and names are deduplicated
{
echo "# This file lists all individuals having contributed content to the repository."
echo "# For how it is generated, see hack/dockerfiles/authors.Dockerfile."
echo
git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf
} > /out/AUTHORS
cat /out/AUTHORS
EOT

FROM scratch AS update
@@ -22,12 +23,12 @@ COPY --from=gen /out /

FROM gen AS validate
RUN --mount=type=bind,target=.,rw <<EOT
set -e
git add -A
cp -rf /out/* .
if [ -n "$(git status --porcelain -- AUTHORS)" ]; then
echo >&2 'ERROR: Authors result differs. Please update with "make authors"'
git status --porcelain -- AUTHORS
exit 1
fi
set -e
git add -A
cp -rf /out/* .
if [ -n "$(git status --porcelain -- AUTHORS)" ]; then
echo >&2 'ERROR: Authors result differs. Please update with "make authors"'
git status --porcelain -- AUTHORS
exit 1
fi
EOT
|
||||
|
30 hack/dockerfiles/govulncheck.Dockerfile (new file)
@@ -0,0 +1,30 @@
# syntax=docker/dockerfile:1

ARG GO_VERSION="1.22"
ARG GOVULNCHECK_VERSION="v1.1.3"
ARG FORMAT="text"

FROM golang:${GO_VERSION}-alpine AS base
WORKDIR /go/src/github.com/docker/buildx
RUN apk add --no-cache jq moreutils
ARG GOVULNCHECK_VERSION
RUN --mount=type=cache,target=/root/.cache \
    --mount=type=cache,target=/go/pkg/mod \
    go install golang.org/x/vuln/cmd/govulncheck@$GOVULNCHECK_VERSION

FROM base AS run
ARG FORMAT
RUN --mount=type=bind,target=. <<EOT
  set -ex
  mkdir /out
  govulncheck -format ${FORMAT} ./... | tee /out/govulncheck.out
  if [ "${FORMAT}" = "sarif" ]; then
    # Make sure "results" field is defined in SARIF output otherwise GitHub Code Scanning
    # will fail when uploading report with "Invalid SARIF. Missing 'results' array in run."
    # Relates to https://github.com/golang/vuln/blob/ffdef74cc44d7eb71931d8d414c478b966812488/internal/sarif/sarif.go#L69
    jq '(.runs[] | select(.results == null) | .results) |= []' /out/govulncheck.out | tee >(sponge /out/govulncheck.out)
  fi
EOT

FROM scratch AS output
COPY --from=run /out /
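The new govulncheck Dockerfile above only defines the build stages; how it is wired into the repo's hack targets is not part of this diff. Assuming the usual buildx pattern of exporting the scratch "output" stage to a local directory, it could be run by hand roughly like this (the invocation is a sketch, not taken from this changeset):

# plain-text report (FORMAT defaults to "text")
$ docker buildx build -f hack/dockerfiles/govulncheck.Dockerfile --target output -o ./bin .

# SARIF report, e.g. for GitHub Code Scanning
$ docker buildx build -f hack/dockerfiles/govulncheck.Dockerfile --build-arg FORMAT=sarif --target output -o ./bin .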
@@ -13,18 +13,24 @@ FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS golang-base
RUN apk add --no-cache git gcc musl-dev

FROM golang-base AS lint
FROM golang-base AS lint-base
ENV GOFLAGS="-buildvcs=false"
ARG GOLANGCI_LINT_VERSION
RUN wget -O- -nv https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v${GOLANGCI_LINT_VERSION}
COPY --link --from=xx / /
WORKDIR /go/src/github.com/docker/buildx
ARG TARGETPLATFORM

FROM lint-base AS lint
RUN --mount=target=/go/src/github.com/docker/buildx \
    --mount=target=/root/.cache,type=cache,id=lint-cache-$TARGETPLATFORM \
    xx-go --wrap && \
    golangci-lint run

FROM lint-base AS validate-golangci
RUN --mount=target=/go/src/github.com/docker/buildx \
    golangci-lint config verify

FROM golang-base AS gopls
RUN apk add --no-cache git
ARG GOPLS_VERSION
|
||||
|
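The lint Dockerfile hunk above splits the former lint stage into a shared lint-base plus separate lint and validate-golangci stages, the latter running golangci-lint config verify. A rough way to exercise the two stages directly (the Dockerfile path is an assumption, since the hunk does not name the file):

$ docker buildx build -f hack/dockerfiles/lint.Dockerfile --target lint .
$ docker buildx build -f hack/dockerfiles/lint.Dockerfile --target validate-golangci .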
236 tests/bake.go
@@ -13,6 +13,7 @@ import (
    "github.com/docker/buildx/bake"
    "github.com/docker/buildx/util/gitutil"
    "github.com/moby/buildkit/client"
    "github.com/moby/buildkit/frontend/subrequests/lint"
    "github.com/moby/buildkit/identity"
    provenancetypes "github.com/moby/buildkit/solver/llbsolver/provenance/types"
    "github.com/moby/buildkit/util/contentutil"
@@ -56,6 +57,9 @@ var bakeTests = []func(t *testing.T, sb integration.Sandbox){
    testListVariables,
    testBakeCallCheck,
    testBakeCallCheckFlag,
    testBakeCallMetadata,
    testBakeMultiPlatform,
    testBakeCheckCallOutput,
}

func testBakePrint(t *testing.T, sb integration.Sandbox) {
|
||||
@@ -887,6 +891,56 @@ target "def" {
|
||||
require.Len(t, md.BuildWarnings, 3, string(dt))
|
||||
}
|
||||
|
||||
func testBakeMultiPlatform(t *testing.T, sb integration.Sandbox) {
|
||||
registry, err := sb.NewRegistry()
|
||||
if errors.Is(err, integration.ErrRequirements) {
|
||||
t.Skip(err.Error())
|
||||
}
|
||||
require.NoError(t, err)
|
||||
target := registry + "/buildx/registry:latest"
|
||||
|
||||
dockerfile := []byte(`
|
||||
FROM --platform=$BUILDPLATFORM busybox:latest AS base
|
||||
COPY foo /etc/foo
|
||||
RUN cp /etc/foo /etc/bar
|
||||
|
||||
FROM scratch
|
||||
COPY --from=base /etc/bar /bar
|
||||
`)
|
||||
bakefile := []byte(`
|
||||
target "default" {
|
||||
platforms = ["linux/amd64", "linux/arm64"]
|
||||
}
|
||||
`)
|
||||
dir := tmpdir(
|
||||
t,
|
||||
fstest.CreateFile("docker-bake.hcl", bakefile, 0600),
|
||||
fstest.CreateFile("Dockerfile", dockerfile, 0600),
|
||||
fstest.CreateFile("foo", []byte("foo"), 0600),
|
||||
)
|
||||
|
||||
cmd := buildxCmd(sb, withDir(dir), withArgs("bake"), withArgs("--set", fmt.Sprintf("*.output=type=image,name=%s,push=true", target)))
|
||||
out, err := cmd.CombinedOutput()
|
||||
|
||||
if !isMobyWorker(sb) {
|
||||
require.NoError(t, err, string(out))
|
||||
|
||||
desc, provider, err := contentutil.ProviderFromRef(target)
|
||||
require.NoError(t, err)
|
||||
imgs, err := testutil.ReadImages(sb.Context(), provider, desc)
|
||||
require.NoError(t, err)
|
||||
|
||||
img := imgs.Find("linux/amd64")
|
||||
require.NotNil(t, img)
|
||||
img = imgs.Find("linux/arm64")
|
||||
require.NotNil(t, img)
|
||||
|
||||
} else {
|
||||
require.Error(t, err, string(out))
|
||||
require.Contains(t, string(out), "Multi-platform build is not supported")
|
||||
}
|
||||
}
|
||||
|
||||
func testBakeMultiExporters(t *testing.T, sb integration.Sandbox) {
|
||||
if !isDockerContainerWorker(sb) {
|
||||
t.Skip("only testing with docker-container worker")
|
||||
@@ -1163,3 +1217,185 @@ target "another" {
|
||||
|
||||
require.Len(t, warnings, 1)
|
||||
}
|
||||
|
||||
func testBakeCallMetadata(t *testing.T, sb integration.Sandbox) {
|
||||
dockerfile := []byte(`
|
||||
frOM busybox as base
|
||||
cOpy Dockerfile .
|
||||
from scratch
|
||||
COPy --from=base \
|
||||
/Dockerfile \
|
||||
/
|
||||
`)
|
||||
bakefile := []byte(`
|
||||
target "default" {}
|
||||
`)
|
||||
dir := tmpdir(
|
||||
t,
|
||||
fstest.CreateFile("docker-bake.hcl", bakefile, 0600),
|
||||
fstest.CreateFile("Dockerfile", dockerfile, 0600),
|
||||
)
|
||||
|
||||
cmd := buildxCmd(
|
||||
sb,
|
||||
withDir(dir),
|
||||
withArgs("bake", "--call", "check,format=json", "--metadata-file", filepath.Join(dir, "md.json")),
|
||||
)
|
||||
stdout := bytes.Buffer{}
|
||||
stderr := bytes.Buffer{}
|
||||
cmd.Stdout = &stdout
|
||||
cmd.Stderr = &stderr
|
||||
require.Error(t, cmd.Run(), stdout.String(), stderr.String())
|
||||
|
||||
var res map[string]any
|
||||
require.NoError(t, json.Unmarshal(stdout.Bytes(), &res), stdout.String())
|
||||
targets, ok := res["target"].(map[string]any)
|
||||
require.True(t, ok)
|
||||
def, ok := targets["default"].(map[string]any)
|
||||
require.True(t, ok)
|
||||
_, ok = def["build"]
|
||||
require.True(t, ok)
|
||||
check, ok := def["check"].(map[string]any)
|
||||
require.True(t, ok)
|
||||
warnings, ok := check["warnings"].([]any)
|
||||
require.True(t, ok)
|
||||
require.Len(t, warnings, 3)
|
||||
|
||||
dt, err := os.ReadFile(filepath.Join(dir, "md.json"))
|
||||
require.NoError(t, err)
|
||||
|
||||
type mdT struct {
|
||||
Default struct {
|
||||
BuildRef string `json:"buildx.build.ref"`
|
||||
ResultJSON lint.LintResults `json:"result.json"`
|
||||
} `json:"default"`
|
||||
}
|
||||
var md mdT
|
||||
require.NoError(t, json.Unmarshal(dt, &md), dt)
|
||||
require.Empty(t, md.Default.BuildRef)
|
||||
require.Len(t, md.Default.ResultJSON.Warnings, 3)
|
||||
}
|
||||
|
||||
func testBakeCheckCallOutput(t *testing.T, sb integration.Sandbox) {
|
||||
t.Run("check for warning count msg in check without warnings", func(t *testing.T) {
|
||||
dockerfile := []byte(`
|
||||
FROM busybox
|
||||
COPY Dockerfile .
|
||||
`)
|
||||
bakefile := []byte(`
|
||||
target "default" {}
|
||||
`)
|
||||
dir := tmpdir(
|
||||
t,
|
||||
fstest.CreateFile("docker-bake.hcl", bakefile, 0600),
|
||||
fstest.CreateFile("Dockerfile", dockerfile, 0600),
|
||||
)
|
||||
|
||||
cmd := buildxCmd(
|
||||
sb,
|
||||
withDir(dir),
|
||||
withArgs("bake", "--call", "check"),
|
||||
)
|
||||
stdout := bytes.Buffer{}
|
||||
stderr := bytes.Buffer{}
|
||||
cmd.Stdout = &stdout
|
||||
cmd.Stderr = &stderr
|
||||
require.NoError(t, cmd.Run(), stdout.String(), stderr.String())
|
||||
require.Contains(t, stdout.String(), "Check complete, no warnings found.")
|
||||
})
|
||||
t.Run("check for warning count msg in check with single warning", func(t *testing.T) {
|
||||
dockerfile := []byte(`
|
||||
FROM busybox
|
||||
copy Dockerfile .
|
||||
`)
|
||||
bakefile := []byte(`
|
||||
target "default" {}
|
||||
`)
|
||||
dir := tmpdir(
|
||||
t,
|
||||
fstest.CreateFile("docker-bake.hcl", bakefile, 0600),
|
||||
fstest.CreateFile("Dockerfile", dockerfile, 0600),
|
||||
)
|
||||
|
||||
cmd := buildxCmd(
|
||||
sb,
|
||||
withDir(dir),
|
||||
withArgs("bake", "--call", "check"),
|
||||
)
|
||||
stdout := bytes.Buffer{}
|
||||
stderr := bytes.Buffer{}
|
||||
cmd.Stdout = &stdout
|
||||
cmd.Stderr = &stderr
|
||||
require.Error(t, cmd.Run(), stdout.String(), stderr.String())
|
||||
require.Contains(t, stdout.String(), "Check complete, 1 warning has been found!")
|
||||
})
|
||||
t.Run("check for warning count msg in check with multiple warnings", func(t *testing.T) {
|
||||
dockerfile := []byte(`
|
||||
FROM busybox
|
||||
copy Dockerfile .
|
||||
|
||||
FROM busybox as base
|
||||
COPY Dockerfile .
|
||||
`)
|
||||
bakefile := []byte(`
|
||||
target "default" {}
|
||||
`)
|
||||
dir := tmpdir(
|
||||
t,
|
||||
fstest.CreateFile("docker-bake.hcl", bakefile, 0600),
|
||||
fstest.CreateFile("Dockerfile", dockerfile, 0600),
|
||||
)
|
||||
|
||||
cmd := buildxCmd(
|
||||
sb,
|
||||
withDir(dir),
|
||||
withArgs("bake", "--call", "check"),
|
||||
)
|
||||
stdout := bytes.Buffer{}
|
||||
stderr := bytes.Buffer{}
|
||||
cmd.Stdout = &stdout
|
||||
cmd.Stderr = &stderr
|
||||
require.Error(t, cmd.Run(), stdout.String(), stderr.String())
|
||||
require.Contains(t, stdout.String(), "Check complete, 2 warnings have been found!")
|
||||
})
|
||||
t.Run("check for warnings with multiple build targets", func(t *testing.T) {
|
||||
dockerfile1 := []byte(`
|
||||
FROM busybox
|
||||
copy Dockerfile .
|
||||
`)
|
||||
dockerfile2 := []byte(`
|
||||
FROM busybox
|
||||
copy Dockerfile .
|
||||
|
||||
FROM busybox as base
|
||||
COPY Dockerfile .
|
||||
`)
|
||||
bakefile := []byte(`
|
||||
target "first" {
|
||||
dockerfile = "Dockerfile.first"
|
||||
}
|
||||
target "second" {
|
||||
dockerfile = "Dockerfile.second"
|
||||
}
|
||||
`)
|
||||
dir := tmpdir(
|
||||
t,
|
||||
fstest.CreateFile("docker-bake.hcl", bakefile, 0600),
|
||||
fstest.CreateFile("Dockerfile.first", dockerfile1, 0600),
|
||||
fstest.CreateFile("Dockerfile.second", dockerfile2, 0600),
|
||||
)
|
||||
|
||||
cmd := buildxCmd(
|
||||
sb,
|
||||
withDir(dir),
|
||||
withArgs("bake", "--call", "check", "first", "second"),
|
||||
)
|
||||
stdout := bytes.Buffer{}
|
||||
stderr := bytes.Buffer{}
|
||||
cmd.Stdout = &stdout
|
||||
cmd.Stderr = &stderr
|
||||
require.Error(t, cmd.Run(), stdout.String(), stderr.String())
|
||||
require.Contains(t, stdout.String(), "Check complete, 1 warning has been found!")
|
||||
require.Contains(t, stdout.String(), "Check complete, 2 warnings have been found!")
|
||||
})
|
||||
}
|
||||
|
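The new bake tests above (testBakeMultiPlatform, testBakeCallMetadata, testBakeCheckCallOutput) drive the CLI roughly as follows; this is a sketch of the invocations the tests cover, with the registry reference as a placeholder:

# run build checks for every bake target; exits non-zero when warnings are found
$ docker buildx bake --call check

# same checks as JSON, plus a metadata file (testBakeCallMetadata)
$ docker buildx bake --call check,format=json --metadata-file md.json

# multi-platform bake pushed to a registry (testBakeMultiPlatform)
$ docker buildx bake --set "*.output=type=image,name=registry.example.com/buildx/registry:latest,push=true"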
147 tests/build.go
@@ -62,7 +62,7 @@ var buildTests = []func(t *testing.T, sb integration.Sandbox){
    testBuildLabelNoKey,
    testBuildCacheExportNotSupported,
    testBuildOCIExportNotSupported,
    testBuildMultiPlatformNotSupported,
    testBuildMultiPlatform,
    testDockerHostGateway,
    testBuildNetworkModeBridge,
    testBuildShmSize,
@@ -74,6 +74,7 @@ var buildTests = []func(t *testing.T, sb integration.Sandbox){
    testBuildSecret,
    testBuildDefaultLoad,
    testBuildCall,
    testCheckCallOutput,
}

func testBuild(t *testing.T, sb integration.Sandbox) {
|
||||
@@ -616,16 +617,47 @@ func testBuildOCIExportNotSupported(t *testing.T, sb integration.Sandbox) {
|
||||
require.Contains(t, string(out), "OCI exporter is not supported")
|
||||
}
|
||||
|
||||
func testBuildMultiPlatformNotSupported(t *testing.T, sb integration.Sandbox) {
|
||||
if !isMobyWorker(sb) {
|
||||
t.Skip("only testing with docker worker")
|
||||
}
|
||||
func testBuildMultiPlatform(t *testing.T, sb integration.Sandbox) {
|
||||
dockerfile := []byte(`
|
||||
FROM --platform=$BUILDPLATFORM busybox:latest AS base
|
||||
COPY foo /etc/foo
|
||||
RUN cp /etc/foo /etc/bar
|
||||
|
||||
dir := createTestProject(t)
|
||||
cmd := buildxCmd(sb, withArgs("build", "--platform=linux/amd64,linux/arm64", dir))
|
||||
FROM scratch
|
||||
COPY --from=base /etc/bar /bar
|
||||
`)
|
||||
dir := tmpdir(
|
||||
t,
|
||||
fstest.CreateFile("Dockerfile", dockerfile, 0600),
|
||||
fstest.CreateFile("foo", []byte("foo"), 0600),
|
||||
)
|
||||
registry, err := sb.NewRegistry()
|
||||
if errors.Is(err, integration.ErrRequirements) {
|
||||
t.Skip(err.Error())
|
||||
}
|
||||
require.NoError(t, err)
|
||||
target := registry + "/buildx/registry:latest"
|
||||
|
||||
cmd := buildxCmd(sb, withArgs("build", "--platform=linux/amd64,linux/arm64", fmt.Sprintf("--output=type=image,name=%s,push=true", target), dir))
|
||||
out, err := cmd.CombinedOutput()
|
||||
require.Error(t, err, string(out))
|
||||
require.Contains(t, string(out), "Multi-platform build is not supported")
|
||||
|
||||
if !isMobyWorker(sb) {
|
||||
require.NoError(t, err, string(out))
|
||||
|
||||
desc, provider, err := contentutil.ProviderFromRef(target)
|
||||
require.NoError(t, err)
|
||||
imgs, err := testutil.ReadImages(sb.Context(), provider, desc)
|
||||
require.NoError(t, err)
|
||||
|
||||
img := imgs.Find("linux/amd64")
|
||||
require.NotNil(t, img)
|
||||
img = imgs.Find("linux/arm64")
|
||||
require.NotNil(t, img)
|
||||
|
||||
} else {
|
||||
require.Error(t, err, string(out))
|
||||
require.Contains(t, string(out), "Multi-platform build is not supported")
|
||||
}
|
||||
}
|
||||
|
||||
func testDockerHostGateway(t *testing.T, sb integration.Sandbox) {
|
||||
@@ -1162,6 +1194,103 @@ FROM second AS binary
|
||||
|
||||
require.Equal(t, 1, len(res.Sources))
|
||||
})
|
||||
|
||||
t.Run("check metadata", func(t *testing.T) {
|
||||
dockerfile := []byte(`
|
||||
frOM busybox as base
|
||||
cOpy Dockerfile .
|
||||
from scratch
|
||||
COPy --from=base \
|
||||
/Dockerfile \
|
||||
/
|
||||
`)
|
||||
dir := tmpdir(
|
||||
t,
|
||||
fstest.CreateFile("Dockerfile", dockerfile, 0600),
|
||||
)
|
||||
|
||||
cmd := buildxCmd(sb, withArgs("build", "--call=check,format=json", "--metadata-file", filepath.Join(dir, "md.json"), dir))
|
||||
stdout := bytes.Buffer{}
|
||||
stderr := bytes.Buffer{}
|
||||
cmd.Stdout = &stdout
|
||||
cmd.Stderr = &stderr
|
||||
require.Error(t, cmd.Run(), stdout.String(), stderr.String())
|
||||
|
||||
var res lint.LintResults
|
||||
require.NoError(t, json.Unmarshal(stdout.Bytes(), &res), stdout.String())
|
||||
require.Len(t, res.Warnings, 3)
|
||||
|
||||
dt, err := os.ReadFile(filepath.Join(dir, "md.json"))
|
||||
require.NoError(t, err)
|
||||
|
||||
type mdT struct {
|
||||
BuildRef string `json:"buildx.build.ref"`
|
||||
ResultJSON lint.LintResults `json:"result.json"`
|
||||
}
|
||||
var md mdT
|
||||
require.NoError(t, json.Unmarshal(dt, &md), dt)
|
||||
require.Empty(t, md.BuildRef)
|
||||
require.Len(t, md.ResultJSON.Warnings, 3)
|
||||
})
|
||||
}
|
||||
|
||||
func testCheckCallOutput(t *testing.T, sb integration.Sandbox) {
|
||||
t.Run("check for warning count msg in check without warnings", func(t *testing.T) {
|
||||
dockerfile := []byte(`
|
||||
FROM busybox AS base
|
||||
COPY Dockerfile .
|
||||
`)
|
||||
dir := tmpdir(
|
||||
t,
|
||||
fstest.CreateFile("Dockerfile", dockerfile, 0600),
|
||||
)
|
||||
|
||||
cmd := buildxCmd(sb, withArgs("build", "--call=check", dir))
|
||||
stdout := bytes.Buffer{}
|
||||
stderr := bytes.Buffer{}
|
||||
cmd.Stdout = &stdout
|
||||
cmd.Stderr = &stderr
|
||||
require.NoError(t, cmd.Run(), stdout.String(), stderr.String())
|
||||
require.Contains(t, stdout.String(), "Check complete, no warnings found.")
|
||||
})
|
||||
|
||||
t.Run("check for warning count msg in check with single warning", func(t *testing.T) {
|
||||
dockerfile := []byte(`
|
||||
FROM busybox as base
|
||||
COPY Dockerfile .
|
||||
`)
|
||||
dir := tmpdir(
|
||||
t,
|
||||
fstest.CreateFile("Dockerfile", dockerfile, 0600),
|
||||
)
|
||||
|
||||
cmd := buildxCmd(sb, withArgs("build", "--call=check", dir))
|
||||
stdout := bytes.Buffer{}
|
||||
stderr := bytes.Buffer{}
|
||||
cmd.Stdout = &stdout
|
||||
cmd.Stderr = &stderr
|
||||
require.Error(t, cmd.Run(), stdout.String(), stderr.String())
|
||||
require.Contains(t, stdout.String(), "Check complete, 1 warning has been found!")
|
||||
})
|
||||
|
||||
t.Run("check for warning count msg in check with multiple warnings", func(t *testing.T) {
|
||||
dockerfile := []byte(`
|
||||
frOM busybox as base
|
||||
cOpy Dockerfile .
|
||||
`)
|
||||
dir := tmpdir(
|
||||
t,
|
||||
fstest.CreateFile("Dockerfile", dockerfile, 0600),
|
||||
)
|
||||
|
||||
cmd := buildxCmd(sb, withArgs("build", "--call=check", dir))
|
||||
stdout := bytes.Buffer{}
|
||||
stderr := bytes.Buffer{}
|
||||
cmd.Stdout = &stdout
|
||||
cmd.Stderr = &stderr
|
||||
require.Error(t, cmd.Run(), stdout.String(), stderr.String())
|
||||
require.Contains(t, stdout.String(), "Check complete, 2 warnings have been found!")
|
||||
})
|
||||
}
|
||||
|
||||
func createTestProject(t *testing.T) string {
|
||||
|
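The build tests above exercise the same call functions through docker buildx build; a sketch of the invocations they cover, with the registry reference as a placeholder:

# lint a Dockerfile without building; prints "Check complete, N warnings have been found!" and exits non-zero on warnings
$ docker buildx build --call=check .

# JSON result plus a metadata file, as in the "check metadata" subtest
$ docker buildx build --call=check,format=json --metadata-file md.json .

# multi-platform build pushed to a registry (testBuildMultiPlatform)
$ docker buildx build --platform=linux/amd64,linux/arm64 \
    --output=type=image,name=registry.example.com/buildx/registry:latest,push=true .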
@@ -9,9 +9,9 @@ import (
    "github.com/tonistiigi/go-csvvalue"
)

const defaultPrintFunc = "build"
const defaultCallFunc = "build"

func ParsePrintFunc(str string) (*controllerapi.PrintFunc, error) {
func ParseCallFunc(str string) (*controllerapi.CallFunc, error) {
    if str == "" {
        return nil, nil
    }
@@ -20,7 +20,7 @@ func ParsePrintFunc(str string) (*controllerapi.PrintFunc, error) {
    if err != nil {
        return nil, err
    }
    f := &controllerapi.PrintFunc{}
    f := &controllerapi.CallFunc{}
    for _, field := range fields {
        parts := strings.SplitN(field, "=", 2)
        if len(parts) == 2 {
@@ -51,7 +51,7 @@ func ParsePrintFunc(str string) (*controllerapi.PrintFunc, error) {
    f.Name = "lint"
    }

    if f.Name == defaultPrintFunc {
    if f.Name == defaultCallFunc {
        return nil, nil
    }
|
||||
|
@@ -17,9 +17,35 @@ import (
|
||||
"golang.org/x/text/language"
|
||||
)
|
||||
|
||||
type rePatterns struct {
|
||||
LocalSourceType *regexp.Regexp
|
||||
ImageSourceType *regexp.Regexp
|
||||
ExecType *regexp.Regexp
|
||||
ExportImageType *regexp.Regexp
|
||||
LintMessage *regexp.Regexp
|
||||
}
|
||||
|
||||
var re = sync.OnceValue(func() *rePatterns {
|
||||
return &rePatterns{
|
||||
LocalSourceType: regexp.MustCompile(
|
||||
strings.Join([]string{
|
||||
`(?P<context>\[internal] load build context)`,
|
||||
`(?P<dockerfile>load build definition)`,
|
||||
`(?P<dockerignore>load \.dockerignore)`,
|
||||
`(?P<namedcontext>\[context .+] load from client)`,
|
||||
}, "|"),
|
||||
),
|
||||
ImageSourceType: regexp.MustCompile(`^\[.*] FROM `),
|
||||
ExecType: regexp.MustCompile(`^\[.*] RUN `),
|
||||
ExportImageType: regexp.MustCompile(`^exporting to (image|(?P<format>\w+) image format)$`),
|
||||
LintMessage: regexp.MustCompile(`^https://docs\.docker\.com/go/dockerfile/rule/([\w|-]+)/`),
|
||||
}
|
||||
})
|
||||
|
||||
type metricWriter struct {
|
||||
recorders []metricRecorder
|
||||
attrs attribute.Set
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
func newMetrics(mp metric.MeterProvider, attrs attribute.Set) *metricWriter {
|
||||
@@ -38,6 +64,9 @@ func newMetrics(mp metric.MeterProvider, attrs attribute.Set) *metricWriter {
|
||||
}
|
||||
|
||||
func (mw *metricWriter) Write(ss *client.SolveStatus) {
|
||||
mw.mu.Lock()
|
||||
defer mw.mu.Unlock()
|
||||
|
||||
for _, recorder := range mw.recorders {
|
||||
recorder.Record(ss)
|
||||
}
|
||||
@@ -128,22 +157,13 @@ func (mr *localSourceTransferMetricRecorder) Record(ss *client.SolveStatus) {
|
||||
}
|
||||
}
|
||||
|
||||
var reLocalSourceType = regexp.MustCompile(
|
||||
strings.Join([]string{
|
||||
`(?P<context>\[internal] load build context)`,
|
||||
`(?P<dockerfile>load build definition)`,
|
||||
`(?P<dockerignore>load \.dockerignore)`,
|
||||
`(?P<namedcontext>\[context .+] load from client)`,
|
||||
}, "|"),
|
||||
)
|
||||
|
||||
func detectLocalSourceType(vertexName string) attribute.KeyValue {
|
||||
match := reLocalSourceType.FindStringSubmatch(vertexName)
|
||||
match := re().LocalSourceType.FindStringSubmatch(vertexName)
|
||||
if match == nil {
|
||||
return attribute.KeyValue{}
|
||||
}
|
||||
|
||||
for i, source := range reLocalSourceType.SubexpNames() {
|
||||
for i, source := range re().LocalSourceType.SubexpNames() {
|
||||
if len(source) == 0 {
|
||||
// Not a subexpression.
|
||||
continue
|
||||
@@ -241,10 +261,8 @@ func (mr *imageSourceMetricRecorder) Record(ss *client.SolveStatus) {
|
||||
}
|
||||
}
|
||||
|
||||
var reImageSourceType = regexp.MustCompile(`^\[.*] FROM `)
|
||||
|
||||
func detectImageSourceType(vertexName string) bool {
|
||||
return reImageSourceType.MatchString(vertexName)
|
||||
return re().ImageSourceType.MatchString(vertexName)
|
||||
}
|
||||
|
||||
type (
|
||||
@@ -278,10 +296,8 @@ func (mr *execMetricRecorder) Record(ss *client.SolveStatus) {
|
||||
}
|
||||
}
|
||||
|
||||
var reExecType = regexp.MustCompile(`^\[.*] RUN `)
|
||||
|
||||
func detectExecType(vertexName string) bool {
|
||||
return reExecType.MatchString(vertexName)
|
||||
return re().ExecType.MatchString(vertexName)
|
||||
}
|
||||
|
||||
type (
|
||||
@@ -325,10 +341,8 @@ func (mr *exportImageMetricRecorder) Record(ss *client.SolveStatus) {
|
||||
}
|
||||
}
|
||||
|
||||
var reExportImageType = regexp.MustCompile(`^exporting to (image|(?P<format>\w+) image format)$`)
|
||||
|
||||
func detectExportImageType(vertexName string) string {
|
||||
m := reExportImageType.FindStringSubmatch(vertexName)
|
||||
m := re().ExportImageType.FindStringSubmatch(vertexName)
|
||||
if m == nil {
|
||||
return ""
|
||||
}
|
||||
@@ -456,7 +470,10 @@ func kebabToCamel(s string) string {
|
||||
return strings.Join(words, "")
|
||||
}
|
||||
|
||||
var lintRuleNameProperty = attribute.Key("lint.rule.name")
|
||||
|
||||
func (mr *lintMetricRecorder) Record(ss *client.SolveStatus) {
|
||||
reLintMessage := re().LintMessage
|
||||
for _, warning := range ss.Warnings {
|
||||
m := reLintMessage.FindSubmatch([]byte(warning.URL))
|
||||
if len(m) < 2 {
|
||||
@@ -472,8 +489,3 @@ func (mr *lintMetricRecorder) Record(ss *client.SolveStatus) {
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
reLintMessage = regexp.MustCompile(`^https://docs\.docker\.com/go/dockerfile/rule/([\w|-]+)/`)
|
||||
lintRuleNameProperty = attribute.Key("lint.rule.name")
|
||||
)
|
||||
|
@@ -44,6 +44,15 @@ func (p *Printer) Wait() error {
    return p.err
}

func (p *Printer) IsDone() bool {
    select {
    case <-p.done:
        return true
    default:
        return false
    }
}

func (p *Printer) Pause() error {
    p.paused = make(chan struct{})
    return p.Wait()
|
||||
|
59 vendor/github.com/compose-spec/compose-go/v2/loader/environment.go (generated, vendored)
@@ -22,9 +22,14 @@ import (
|
||||
"github.com/compose-spec/compose-go/v2/types"
|
||||
)
|
||||
|
||||
// Will update the environment variables for the format {- VAR} (without interpolation)
|
||||
// This function should resolve context environment vars for include (passed in env_file)
|
||||
func resolveServicesEnvironment(dict map[string]any, config types.ConfigDetails) {
|
||||
// ResolveEnvironment update the environment variables for the format {- VAR} (without interpolation)
|
||||
func ResolveEnvironment(dict map[string]any, environment types.Mapping) {
|
||||
resolveServicesEnvironment(dict, environment)
|
||||
resolveSecretsEnvironment(dict, environment)
|
||||
resolveConfigsEnvironment(dict, environment)
|
||||
}
|
||||
|
||||
func resolveServicesEnvironment(dict map[string]any, environment types.Mapping) {
|
||||
services, ok := dict["services"].(map[string]any)
|
||||
if !ok {
|
||||
return
|
||||
@@ -45,7 +50,7 @@ func resolveServicesEnvironment(dict map[string]any, config types.ConfigDetails)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if found, ok := config.Environment[varEnv]; ok {
|
||||
if found, ok := environment[varEnv]; ok {
|
||||
envs = append(envs, fmt.Sprintf("%s=%s", varEnv, found))
|
||||
} else {
|
||||
// either does not exist or it was already resolved in interpolation
|
||||
@@ -57,3 +62,49 @@ func resolveServicesEnvironment(dict map[string]any, config types.ConfigDetails)
|
||||
}
|
||||
dict["services"] = services
|
||||
}
|
||||
|
||||
func resolveSecretsEnvironment(dict map[string]any, environment types.Mapping) {
|
||||
secrets, ok := dict["secrets"].(map[string]any)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
for name, cfg := range secrets {
|
||||
secret, ok := cfg.(map[string]any)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
env, ok := secret["environment"].(string)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if found, ok := environment[env]; ok {
|
||||
secret["content"] = found
|
||||
}
|
||||
secrets[name] = secret
|
||||
}
|
||||
dict["secrets"] = secrets
|
||||
}
|
||||
|
||||
func resolveConfigsEnvironment(dict map[string]any, environment types.Mapping) {
|
||||
configs, ok := dict["configs"].(map[string]any)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
for name, cfg := range configs {
|
||||
config, ok := cfg.(map[string]any)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
env, ok := config["environment"].(string)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if found, ok := environment[env]; ok {
|
||||
config["content"] = found
|
||||
}
|
||||
configs[name] = config
|
||||
}
|
||||
dict["configs"] = configs
|
||||
}
|
||||
|
67
vendor/github.com/compose-spec/compose-go/v2/loader/extends.go
generated
vendored
67
vendor/github.com/compose-spec/compose-go/v2/loader/extends.go
generated
vendored
@@ -23,6 +23,7 @@ import (
|
||||
|
||||
"github.com/compose-spec/compose-go/v2/consts"
|
||||
"github.com/compose-spec/compose-go/v2/override"
|
||||
"github.com/compose-spec/compose-go/v2/paths"
|
||||
"github.com/compose-spec/compose-go/v2/types"
|
||||
)
|
||||
|
||||
@@ -75,17 +76,23 @@ func applyServiceExtends(ctx context.Context, name string, services map[string]a
|
||||
opts.ProcessEvent("extends", map[string]any{"service": ref})
|
||||
}
|
||||
|
||||
var base any
|
||||
var (
|
||||
base any
|
||||
processor PostProcessor
|
||||
)
|
||||
|
||||
if file != nil {
|
||||
filename = file.(string)
|
||||
services, err = getExtendsBaseFromFile(ctx, ref, filename, opts, tracker)
|
||||
refFilename := file.(string)
|
||||
services, processor, err = getExtendsBaseFromFile(ctx, name, ref, filename, refFilename, opts, tracker)
|
||||
post = append(post, processor)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
filename = refFilename
|
||||
} else {
|
||||
_, ok := services[ref]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("cannot extend service %q in %s: service not found", name, filename)
|
||||
return nil, fmt.Errorf("cannot extend service %q in %s: service %q not found", name, filename, ref)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -121,47 +128,65 @@ func applyServiceExtends(ctx context.Context, name string, services map[string]a
|
||||
return merged, nil
|
||||
}
|
||||
|
||||
func getExtendsBaseFromFile(ctx context.Context, name string, path string, opts *Options, ct *cycleTracker) (map[string]any, error) {
|
||||
func getExtendsBaseFromFile(
|
||||
ctx context.Context,
|
||||
name, ref string,
|
||||
path, refPath string,
|
||||
opts *Options,
|
||||
ct *cycleTracker,
|
||||
) (map[string]any, PostProcessor, error) {
|
||||
for _, loader := range opts.ResourceLoaders {
|
||||
if !loader.Accept(path) {
|
||||
if !loader.Accept(refPath) {
|
||||
continue
|
||||
}
|
||||
local, err := loader.Load(ctx, path)
|
||||
local, err := loader.Load(ctx, refPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
localdir := filepath.Dir(local)
|
||||
relworkingdir := loader.Dir(path)
|
||||
relworkingdir := loader.Dir(refPath)
|
||||
|
||||
extendsOpts := opts.clone()
|
||||
// replace localResourceLoader with a new flavour, using extended file base path
|
||||
extendsOpts.ResourceLoaders = append(opts.RemoteResourceLoaders(), localResourceLoader{
|
||||
WorkingDir: localdir,
|
||||
})
|
||||
extendsOpts.ResolvePaths = true
|
||||
extendsOpts.ResolvePaths = false // we do relative path resolution after file has been loaded
|
||||
extendsOpts.SkipNormalization = true
|
||||
extendsOpts.SkipConsistencyCheck = true
|
||||
extendsOpts.SkipInclude = true
|
||||
extendsOpts.SkipExtends = true // we manage extends recursively based on raw service definition
|
||||
extendsOpts.SkipValidation = true // we validate the merge result
|
||||
extendsOpts.SkipDefaultValues = true
|
||||
source, err := loadYamlModel(ctx, types.ConfigDetails{
|
||||
WorkingDir: relworkingdir,
|
||||
ConfigFiles: []types.ConfigFile{
|
||||
{Filename: local},
|
||||
},
|
||||
}, extendsOpts, ct, nil)
|
||||
source, processor, err := loadYamlFile(ctx, types.ConfigFile{Filename: local},
|
||||
extendsOpts, relworkingdir, nil, ct, map[string]any{}, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
services := source["services"].(map[string]any)
|
||||
_, ok := services[name]
|
||||
_, ok := services[ref]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("cannot extend service %q in %s: service not found", name, path)
|
||||
return nil, nil, fmt.Errorf(
|
||||
"cannot extend service %q in %s: service %q not found in %s",
|
||||
name,
|
||||
path,
|
||||
ref,
|
||||
refPath,
|
||||
)
|
||||
}
|
||||
return services, nil
|
||||
|
||||
var remotes []paths.RemoteResource
|
||||
for _, loader := range opts.RemoteResourceLoaders() {
|
||||
remotes = append(remotes, loader.Accept)
|
||||
}
|
||||
err = paths.ResolveRelativePaths(source, relworkingdir, remotes)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return services, processor, nil
|
||||
}
|
||||
return nil, fmt.Errorf("cannot read %s", path)
|
||||
return nil, nil, fmt.Errorf("cannot read %s", refPath)
|
||||
}
|
||||
|
||||
func deepClone(value any) any {
|
||||
|
13
vendor/github.com/compose-spec/compose-go/v2/loader/include.go
generated
vendored
@@ -50,16 +50,17 @@ func loadIncludeConfig(source any) ([]types.IncludeConfig, error) {
|
||||
return requires, err
|
||||
}
|
||||
|
||||
func ApplyInclude(ctx context.Context, configDetails types.ConfigDetails, model map[string]any, options *Options, included []string) error {
|
||||
func ApplyInclude(ctx context.Context, workingDir string, environment types.Mapping, model map[string]any, options *Options, included []string) error {
|
||||
includeConfig, err := loadIncludeConfig(model["include"])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, r := range includeConfig {
|
||||
for _, listener := range options.Listeners {
|
||||
listener("include", map[string]any{
|
||||
"path": r.Path,
|
||||
"workingdir": configDetails.WorkingDir,
|
||||
"workingdir": workingDir,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -83,7 +84,7 @@ func ApplyInclude(ctx context.Context, configDetails types.ConfigDetails, model
|
||||
r.ProjectDirectory = filepath.Dir(path)
|
||||
case !filepath.IsAbs(r.ProjectDirectory):
|
||||
relworkingdir = loader.Dir(r.ProjectDirectory)
|
||||
r.ProjectDirectory = filepath.Join(configDetails.WorkingDir, r.ProjectDirectory)
|
||||
r.ProjectDirectory = filepath.Join(workingDir, r.ProjectDirectory)
|
||||
|
||||
default:
|
||||
relworkingdir = r.ProjectDirectory
|
||||
@@ -117,7 +118,7 @@ func ApplyInclude(ctx context.Context, configDetails types.ConfigDetails, model
|
||||
envFile := []string{}
|
||||
for _, f := range r.EnvFile {
|
||||
if !filepath.IsAbs(f) {
|
||||
f = filepath.Join(configDetails.WorkingDir, f)
|
||||
f = filepath.Join(workingDir, f)
|
||||
s, err := os.Stat(f)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -131,7 +132,7 @@ func ApplyInclude(ctx context.Context, configDetails types.ConfigDetails, model
|
||||
r.EnvFile = envFile
|
||||
}
|
||||
|
||||
envFromFile, err := dotenv.GetEnvFromFile(configDetails.Environment, r.EnvFile)
|
||||
envFromFile, err := dotenv.GetEnvFromFile(environment, r.EnvFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -139,7 +140,7 @@ func ApplyInclude(ctx context.Context, configDetails types.ConfigDetails, model
|
||||
config := types.ConfigDetails{
|
||||
WorkingDir: relworkingdir,
|
||||
ConfigFiles: types.ToConfigFiles(r.Path),
|
||||
Environment: configDetails.Environment.Clone().Merge(envFromFile),
|
||||
Environment: environment.Clone().Merge(envFromFile),
|
||||
}
|
||||
loadOptions.Interpolate = &interp.Options{
|
||||
Substitute: options.Interpolate.Substitute,
|
||||
|
220
vendor/github.com/compose-spec/compose-go/v2/loader/loader.go
generated
vendored
@@ -89,7 +89,7 @@ var versionWarning []string
|
||||
|
||||
func (o *Options) warnObsoleteVersion(file string) {
|
||||
if !slices.Contains(versionWarning, file) {
|
||||
logrus.Warning(fmt.Sprintf("%s: `version` is obsolete", file))
|
||||
logrus.Warning(fmt.Sprintf("%s: the attribute `version` is obsolete, it will be ignored, please remove it to avoid potential confusion", file))
|
||||
}
|
||||
versionWarning = append(versionWarning, file)
|
||||
}
|
||||
@@ -358,112 +358,13 @@ func loadYamlModel(ctx context.Context, config types.ConfigDetails, opts *Option
|
||||
dict = map[string]interface{}{}
|
||||
err error
|
||||
)
|
||||
workingDir, environment := config.WorkingDir, config.Environment
|
||||
|
||||
for _, file := range config.ConfigFiles {
|
||||
fctx := context.WithValue(ctx, consts.ComposeFileKey{}, file.Filename)
|
||||
if file.Content == nil && file.Config == nil {
|
||||
content, err := os.ReadFile(file.Filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
file.Content = content
|
||||
dict, _, err = loadYamlFile(ctx, file, opts, workingDir, environment, ct, dict, included)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
processRawYaml := func(raw interface{}, processors ...PostProcessor) error {
|
||||
converted, err := convertToStringKeysRecursive(raw, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cfg, ok := converted.(map[string]interface{})
|
||||
if !ok {
|
||||
return errors.New("Top-level object must be a mapping")
|
||||
}
|
||||
|
||||
if opts.Interpolate != nil && !opts.SkipInterpolation {
|
||||
cfg, err = interp.Interpolate(cfg, *opts.Interpolate)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
fixEmptyNotNull(cfg)
|
||||
|
||||
if !opts.SkipExtends {
|
||||
err = ApplyExtends(fctx, cfg, opts, ct, processors...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for _, processor := range processors {
|
||||
if err := processor.Apply(dict); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if !opts.SkipInclude {
|
||||
included = append(included, config.ConfigFiles[0].Filename)
|
||||
err = ApplyInclude(ctx, config, cfg, opts, included)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
dict, err = override.Merge(dict, cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dict, err = override.EnforceUnicity(dict)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !opts.SkipValidation {
|
||||
if err := schema.Validate(dict); err != nil {
|
||||
return fmt.Errorf("validating %s: %w", file.Filename, err)
|
||||
}
|
||||
if _, ok := dict["version"]; ok {
|
||||
opts.warnObsoleteVersion(file.Filename)
|
||||
delete(dict, "version")
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if file.Config == nil {
|
||||
r := bytes.NewReader(file.Content)
|
||||
decoder := yaml.NewDecoder(r)
|
||||
for {
|
||||
var raw interface{}
|
||||
processor := &ResetProcessor{target: &raw}
|
||||
err := decoder.Decode(processor)
|
||||
if err != nil && errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := processRawYaml(raw, processor); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if err := processRawYaml(file.Config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dict, err = transform.Canonical(dict, opts.SkipInterpolation)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Canonical transformation can reveal duplicates, typically as ports can be a range and conflict with an override
|
||||
dict, err = override.EnforceUnicity(dict)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !opts.SkipDefaultValues {
|
||||
@@ -489,11 +390,118 @@ func loadYamlModel(ctx context.Context, config types.ConfigDetails, opts *Option
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
resolveServicesEnvironment(dict, config)
|
||||
ResolveEnvironment(dict, config.Environment)
|
||||
|
||||
return dict, nil
|
||||
}
|
||||
|
||||
func loadYamlFile(ctx context.Context, file types.ConfigFile, opts *Options, workingDir string, environment types.Mapping, ct *cycleTracker, dict map[string]interface{}, included []string) (map[string]interface{}, PostProcessor, error) {
|
||||
ctx = context.WithValue(ctx, consts.ComposeFileKey{}, file.Filename)
|
||||
if file.Content == nil && file.Config == nil {
|
||||
content, err := os.ReadFile(file.Filename)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
file.Content = content
|
||||
}
|
||||
|
||||
processRawYaml := func(raw interface{}, processors ...PostProcessor) error {
|
||||
converted, err := convertToStringKeysRecursive(raw, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cfg, ok := converted.(map[string]interface{})
|
||||
if !ok {
|
||||
return errors.New("Top-level object must be a mapping")
|
||||
}
|
||||
|
||||
if opts.Interpolate != nil && !opts.SkipInterpolation {
|
||||
cfg, err = interp.Interpolate(cfg, *opts.Interpolate)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
fixEmptyNotNull(cfg)
|
||||
|
||||
if !opts.SkipExtends {
|
||||
err = ApplyExtends(ctx, cfg, opts, ct, processors...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for _, processor := range processors {
|
||||
if err := processor.Apply(dict); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if !opts.SkipInclude {
|
||||
included = append(included, file.Filename)
|
||||
err = ApplyInclude(ctx, workingDir, environment, cfg, opts, included)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
dict, err = override.Merge(dict, cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dict, err = override.EnforceUnicity(dict)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !opts.SkipValidation {
|
||||
if err := schema.Validate(dict); err != nil {
|
||||
return fmt.Errorf("validating %s: %w", file.Filename, err)
|
||||
}
|
||||
if _, ok := dict["version"]; ok {
|
||||
opts.warnObsoleteVersion(file.Filename)
|
||||
delete(dict, "version")
|
||||
}
|
||||
}
|
||||
|
||||
dict, err = transform.Canonical(dict, opts.SkipInterpolation)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Canonical transformation can reveal duplicates, typically as ports can be a range and conflict with an override
|
||||
dict, err = override.EnforceUnicity(dict)
|
||||
return err
|
||||
}
|
||||
|
||||
var processor PostProcessor
|
||||
if file.Config == nil {
|
||||
r := bytes.NewReader(file.Content)
|
||||
decoder := yaml.NewDecoder(r)
|
||||
for {
|
||||
var raw interface{}
|
||||
reset := &ResetProcessor{target: &raw}
|
||||
err := decoder.Decode(reset)
|
||||
if err != nil && errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
processor = reset
|
||||
if err := processRawYaml(raw, processor); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if err := processRawYaml(file.Config); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
return dict, processor, nil
|
||||
}
|
||||
|
||||
func load(ctx context.Context, configDetails types.ConfigDetails, opts *Options, loaded []string) (map[string]interface{}, error) {
|
||||
mainFile := configDetails.ConfigFiles[0].Filename
|
||||
for _, f := range loaded {
|
||||
|
13
vendor/github.com/compose-spec/compose-go/v2/loader/normalize.go
generated
vendored
@@ -52,14 +52,14 @@ func Normalize(dict map[string]any, env types.Mapping) (map[string]any, error) {
|
||||
}
|
||||
|
||||
if a, ok := build["args"]; ok {
|
||||
build["args"], _ = resolve(a, fn)
|
||||
build["args"], _ = resolve(a, fn, false)
|
||||
}
|
||||
|
||||
service["build"] = build
|
||||
}
|
||||
|
||||
if e, ok := service["environment"]; ok {
|
||||
service["environment"], _ = resolve(e, fn)
|
||||
service["environment"], _ = resolve(e, fn, true)
|
||||
}
|
||||
|
||||
var dependsOn map[string]any
|
||||
@@ -178,12 +178,12 @@ func normalizeNetworks(dict map[string]any) {
|
||||
}
|
||||
}
|
||||
|
||||
func resolve(a any, fn func(s string) (string, bool)) (any, bool) {
|
||||
func resolve(a any, fn func(s string) (string, bool), keepEmpty bool) (any, bool) {
|
||||
switch v := a.(type) {
|
||||
case []any:
|
||||
var resolved []any
|
||||
for _, val := range v {
|
||||
if r, ok := resolve(val, fn); ok {
|
||||
if r, ok := resolve(val, fn, keepEmpty); ok {
|
||||
resolved = append(resolved, r)
|
||||
}
|
||||
}
|
||||
@@ -197,6 +197,8 @@ func resolve(a any, fn func(s string) (string, bool)) (any, bool) {
|
||||
}
|
||||
if s, ok := fn(key); ok {
|
||||
resolved[key] = s
|
||||
} else if keepEmpty {
|
||||
resolved[key] = nil
|
||||
}
|
||||
}
|
||||
return resolved, true
|
||||
@@ -205,6 +207,9 @@ func resolve(a any, fn func(s string) (string, bool)) (any, bool) {
|
||||
if val, ok := fn(v); ok {
|
||||
return fmt.Sprintf("%s=%s", v, val), true
|
||||
}
|
||||
if keepEmpty {
|
||||
return v, true
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
return v, true
|
||||
|
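The new `keepEmpty` flag changes how unset variables are handled during normalization: build args drop them (`resolve(a, fn, false)`), while the service environment keeps them with a `nil` value (`resolve(e, fn, true)`). A simplified sketch of that behaviour, reduced to a string-list form — the vendored `resolve` also handles nested lists and maps:

```go
package main

import (
	"fmt"
	"os"
)

// resolve is a cut-down stand-in for the vendored function: unset keys are
// either dropped or kept with a nil value, depending on keepEmpty.
func resolve(keys []string, lookup func(string) (string, bool), keepEmpty bool) map[string]any {
	resolved := map[string]any{}
	for _, key := range keys {
		if v, ok := lookup(key); ok {
			resolved[key] = v
		} else if keepEmpty {
			resolved[key] = nil
		}
	}
	return resolved
}

func main() {
	os.Setenv("FOO", "bar")
	keys := []string{"FOO", "MISSING"}
	fmt.Println(resolve(keys, os.LookupEnv, true))  // environment: map[FOO:bar MISSING:<nil>]
	fmt.Println(resolve(keys, os.LookupEnv, false)) // build args:  map[FOO:bar]
}
```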
17
vendor/github.com/compose-spec/compose-go/v2/override/merge.go
generated
vendored
@@ -237,20 +237,33 @@ func mergeIPAMConfig(c any, o any, path tree.Path) (any, error) {
|
||||
return ipamConfigs, nil
|
||||
}
|
||||
|
||||
func convertIntoMapping(a any, defaultValue any) map[string]any {
|
||||
func convertIntoMapping(a any, defaultValue map[string]any) map[string]any {
|
||||
switch v := a.(type) {
|
||||
case map[string]any:
|
||||
return v
|
||||
case []any:
|
||||
converted := map[string]any{}
|
||||
for _, s := range v {
|
||||
converted[s.(string)] = defaultValue
|
||||
if defaultValue == nil {
|
||||
converted[s.(string)] = nil
|
||||
} else {
|
||||
// Create a new map for each key
|
||||
converted[s.(string)] = copyMap(defaultValue)
|
||||
}
|
||||
}
|
||||
return converted
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyMap(m map[string]any) map[string]any {
|
||||
c := make(map[string]any)
|
||||
for k, v := range m {
|
||||
c[k] = v
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
func override(_ any, other any, _ tree.Path) (any, error) {
|
||||
return other, nil
|
||||
}
|
||||
|
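The reason `convertIntoMapping` now takes a `map[string]any` default and copies it per key: with a single shared map, mutating the expanded entry for one key (for example a short-form `depends_on` list) would silently change every other key. A hedged sketch with a simplified signature — the vendored function accepts `any` and passes map input through unchanged:

```go
package main

import "fmt"

// copyMap gives each key its own shallow copy of the default value.
func copyMap(m map[string]any) map[string]any {
	c := make(map[string]any, len(m))
	for k, v := range m {
		c[k] = v
	}
	return c
}

// convertIntoMapping expands a list of keys into a mapping, one fresh
// default map per key so later edits cannot alias.
func convertIntoMapping(keys []string, defaultValue map[string]any) map[string]any {
	converted := map[string]any{}
	for _, k := range keys {
		if defaultValue == nil {
			converted[k] = nil
			continue
		}
		converted[k] = copyMap(defaultValue)
	}
	return converted
}

func main() {
	m := convertIntoMapping([]string{"db", "cache"}, map[string]any{"condition": "service_started"})
	m["db"].(map[string]any)["condition"] = "service_healthy"
	fmt.Println(m["cache"]) // still map[condition:service_started]
}
```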
32
vendor/github.com/compose-spec/compose-go/v2/paths/resolve.go
generated
vendored
@@ -38,7 +38,7 @@ func ResolveRelativePaths(project map[string]any, base string, remotes []RemoteR
|
||||
"services.*.build.additional_contexts.*": r.absContextPath,
|
||||
"services.*.env_file.*.path": r.absPath,
|
||||
"services.*.extends.file": r.absExtendsPath,
|
||||
"services.*.develop.watch.*.path": r.absPath,
|
||||
"services.*.develop.watch.*.path": r.absSymbolicLink,
|
||||
"services.*.volumes.*": r.absVolumeMount,
|
||||
"configs.*.file": r.maybeUnixPath,
|
||||
"secrets.*.file": r.maybeUnixPath,
|
||||
@@ -116,24 +116,30 @@ func (r *relativePathsResolver) absPath(value any) (any, error) {
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unexpected type %T", value)
|
||||
}
|
||||
|
||||
func (r *relativePathsResolver) absVolumeMount(a any) (any, error) {
|
||||
vol := a.(map[string]any)
|
||||
if vol["type"] != types.VolumeTypeBind {
|
||||
switch vol := a.(type) {
|
||||
case map[string]any:
|
||||
if vol["type"] != types.VolumeTypeBind {
|
||||
return vol, nil
|
||||
}
|
||||
src, ok := vol["source"]
|
||||
if !ok {
|
||||
return nil, errors.New(`invalid mount config for type "bind": field Source must not be empty`)
|
||||
}
|
||||
abs, err := r.maybeUnixPath(src.(string))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vol["source"] = abs
|
||||
return vol, nil
|
||||
default:
|
||||
// not using canonical format, skip
|
||||
return a, nil
|
||||
}
|
||||
src, ok := vol["source"]
|
||||
if !ok {
|
||||
return nil, errors.New(`invalid mount config for type "bind": field Source must not be empty`)
|
||||
}
|
||||
abs, err := r.maybeUnixPath(src.(string))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vol["source"] = abs
|
||||
return vol, nil
|
||||
}
|
||||
|
||||
func (r *relativePathsResolver) volumeDriverOpts(a any) (any, error) {
|
||||
|
14
vendor/github.com/compose-spec/compose-go/v2/paths/unix.go
generated
vendored
@@ -19,6 +19,8 @@ package paths
|
||||
import (
|
||||
"path"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/compose-spec/compose-go/v2/utils"
|
||||
)
|
||||
|
||||
func (r *relativePathsResolver) maybeUnixPath(a any) (any, error) {
|
||||
@@ -38,3 +40,15 @@ func (r *relativePathsResolver) maybeUnixPath(a any) (any, error) {
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func (r *relativePathsResolver) absSymbolicLink(value any) (any, error) {
|
||||
abs, err := r.absPath(value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
str, ok := abs.(string)
|
||||
if !ok {
|
||||
return abs, nil
|
||||
}
|
||||
return utils.ResolveSymbolicLink(str)
|
||||
}
|
||||
|
135
vendor/github.com/compose-spec/compose-go/v2/schema/compose-spec.json
generated
vendored
@@ -13,7 +13,6 @@
|
||||
|
||||
"name": {
|
||||
"type": "string",
|
||||
"pattern": "^[a-z0-9][a-z0-9_-]*$",
|
||||
"description": "define the Compose project name, until user defines one explicitly."
|
||||
},
|
||||
|
||||
@@ -94,7 +93,7 @@
|
||||
"develop": {"$ref": "#/definitions/development"},
|
||||
"deploy": {"$ref": "#/definitions/deployment"},
|
||||
"annotations": {"$ref": "#/definitions/list_or_dict"},
|
||||
"attach": {"type": "boolean"},
|
||||
"attach": {"type": ["boolean", "string"]},
|
||||
"build": {
|
||||
"oneOf": [
|
||||
{"type": "string"},
|
||||
@@ -110,15 +109,15 @@
|
||||
"labels": {"$ref": "#/definitions/list_or_dict"},
|
||||
"cache_from": {"type": "array", "items": {"type": "string"}},
|
||||
"cache_to": {"type": "array", "items": {"type": "string"}},
|
||||
"no_cache": {"type": "boolean"},
|
||||
"no_cache": {"type": ["boolean", "string"]},
|
||||
"additional_contexts": {"$ref": "#/definitions/list_or_dict"},
|
||||
"network": {"type": "string"},
|
||||
"pull": {"type": "boolean"},
|
||||
"pull": {"type": ["boolean", "string"]},
|
||||
"target": {"type": "string"},
|
||||
"shm_size": {"type": ["integer", "string"]},
|
||||
"extra_hosts": {"$ref": "#/definitions/list_or_dict"},
|
||||
"isolation": {"type": "string"},
|
||||
"privileged": {"type": "boolean"},
|
||||
"privileged": {"type": ["boolean", "string"]},
|
||||
"secrets": {"$ref": "#/definitions/service_config_or_secret"},
|
||||
"tags": {"type": "array", "items": {"type": "string"}},
|
||||
"ulimits": {"$ref": "#/definitions/ulimits"},
|
||||
@@ -148,7 +147,7 @@
|
||||
"type": "array",
|
||||
"items": {"$ref": "#/definitions/blkio_limit"}
|
||||
},
|
||||
"weight": {"type": "integer"},
|
||||
"weight": {"type": ["integer", "string"]},
|
||||
"weight_device": {
|
||||
"type": "array",
|
||||
"items": {"$ref": "#/definitions/blkio_weight"}
|
||||
@@ -163,8 +162,14 @@
|
||||
"command": {"$ref": "#/definitions/command"},
|
||||
"configs": {"$ref": "#/definitions/service_config_or_secret"},
|
||||
"container_name": {"type": "string"},
|
||||
"cpu_count": {"type": "integer", "minimum": 0},
|
||||
"cpu_percent": {"type": "integer", "minimum": 0, "maximum": 100},
|
||||
"cpu_count": {"oneOf": [
|
||||
{"type": "string"},
|
||||
{"type": "integer", "minimum": 0}
|
||||
]},
|
||||
"cpu_percent": {"oneOf": [
|
||||
{"type": "string"},
|
||||
{"type": "integer", "minimum": 0, "maximum": 100}
|
||||
]},
|
||||
"cpu_shares": {"type": ["number", "string"]},
|
||||
"cpu_quota": {"type": ["number", "string"]},
|
||||
"cpu_period": {"type": ["number", "string"]},
|
||||
@@ -192,8 +197,9 @@
|
||||
"^[a-zA-Z0-9._-]+$": {
|
||||
"type": "object",
|
||||
"additionalProperties": false,
|
||||
"patternProperties": {"^x-": {}},
|
||||
"properties": {
|
||||
"restart": {"type": "boolean"},
|
||||
"restart": {"type": ["boolean", "string"]},
|
||||
"required": {
|
||||
"type": "boolean",
|
||||
"default": true
|
||||
@@ -254,7 +260,7 @@
|
||||
"healthcheck": {"$ref": "#/definitions/healthcheck"},
|
||||
"hostname": {"type": "string"},
|
||||
"image": {"type": "string"},
|
||||
"init": {"type": "boolean"},
|
||||
"init": {"type": ["boolean", "string"]},
|
||||
"ipc": {"type": "string"},
|
||||
"isolation": {"type": "string"},
|
||||
"labels": {"$ref": "#/definitions/list_or_dict"},
|
||||
@@ -277,7 +283,7 @@
|
||||
"mac_address": {"type": "string"},
|
||||
"mem_limit": {"type": ["number", "string"]},
|
||||
"mem_reservation": {"type": ["string", "integer"]},
|
||||
"mem_swappiness": {"type": "integer"},
|
||||
"mem_swappiness": {"type": ["integer", "string"]},
|
||||
"memswap_limit": {"type": ["number", "string"]},
|
||||
"network_mode": {"type": "string"},
|
||||
"networks": {
|
||||
@@ -315,8 +321,11 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"oom_kill_disable": {"type": "boolean"},
|
||||
"oom_score_adj": {"type": "integer", "minimum": -1000, "maximum": 1000},
|
||||
"oom_kill_disable": {"type": ["boolean", "string"]},
|
||||
"oom_score_adj": {"oneOf": [
|
||||
{"type": "string"},
|
||||
{"type": "integer", "minimum": -1000, "maximum": 1000}
|
||||
]},
|
||||
"pid": {"type": ["string", "null"]},
|
||||
"pids_limit": {"type": ["number", "string"]},
|
||||
"platform": {"type": "string"},
|
||||
@@ -324,15 +333,15 @@
|
||||
"type": "array",
|
||||
"items": {
|
||||
"oneOf": [
|
||||
{"type": "number", "format": "ports"},
|
||||
{"type": "string", "format": "ports"},
|
||||
{"type": "number"},
|
||||
{"type": "string"},
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": {"type": "string"},
|
||||
"mode": {"type": "string"},
|
||||
"host_ip": {"type": "string"},
|
||||
"target": {"type": "integer"},
|
||||
"target": {"type": ["integer", "string"]},
|
||||
"published": {"type": ["string", "integer"]},
|
||||
"protocol": {"type": "string"},
|
||||
"app_protocol": {"type": "string"}
|
||||
@@ -344,29 +353,29 @@
|
||||
},
|
||||
"uniqueItems": true
|
||||
},
|
||||
"privileged": {"type": "boolean"},
|
||||
"privileged": {"type": ["boolean", "string"]},
|
||||
"profiles": {"$ref": "#/definitions/list_of_strings"},
|
||||
"pull_policy": {"type": "string", "enum": [
|
||||
"always", "never", "if_not_present", "build", "missing"
|
||||
]},
|
||||
"read_only": {"type": "boolean"},
|
||||
"read_only": {"type": ["boolean", "string"]},
|
||||
"restart": {"type": "string"},
|
||||
"runtime": {
|
||||
"type": "string"
|
||||
},
|
||||
"scale": {
|
||||
"type": "integer"
|
||||
"type": ["integer", "string"]
|
||||
},
|
||||
"security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
|
||||
"shm_size": {"type": ["number", "string"]},
|
||||
"secrets": {"$ref": "#/definitions/service_config_or_secret"},
|
||||
"sysctls": {"$ref": "#/definitions/list_or_dict"},
|
||||
"stdin_open": {"type": "boolean"},
|
||||
"stop_grace_period": {"type": "string", "format": "duration"},
|
||||
"stdin_open": {"type": ["boolean", "string"]},
|
||||
"stop_grace_period": {"type": "string"},
|
||||
"stop_signal": {"type": "string"},
|
||||
"storage_opt": {"type": "object"},
|
||||
"tmpfs": {"$ref": "#/definitions/string_or_list"},
|
||||
"tty": {"type": "boolean"},
|
||||
"tty": {"type": ["boolean", "string"]},
|
||||
"ulimits": {"$ref": "#/definitions/ulimits"},
|
||||
"user": {"type": "string"},
|
||||
"uts": {"type": "string"},
|
||||
@@ -383,13 +392,13 @@
|
||||
"type": {"type": "string"},
|
||||
"source": {"type": "string"},
|
||||
"target": {"type": "string"},
|
||||
"read_only": {"type": "boolean"},
|
||||
"read_only": {"type": ["boolean", "string"]},
|
||||
"consistency": {"type": "string"},
|
||||
"bind": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"propagation": {"type": "string"},
|
||||
"create_host_path": {"type": "boolean"},
|
||||
"create_host_path": {"type": ["boolean", "string"]},
|
||||
"selinux": {"type": "string", "enum": ["z", "Z"]}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
@@ -398,7 +407,7 @@
|
||||
"volume": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"nocopy": {"type": "boolean"},
|
||||
"nocopy": {"type": ["boolean", "string"]},
|
||||
"subpath": {"type": "string"}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
@@ -413,7 +422,7 @@
|
||||
{"type": "string"}
|
||||
]
|
||||
},
|
||||
"mode": {"type": "number"}
|
||||
"mode": {"type": ["number", "string"]}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"patternProperties": {"^x-": {}}
|
||||
@@ -441,18 +450,18 @@
|
||||
"id": "#/definitions/healthcheck",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"disable": {"type": "boolean"},
|
||||
"interval": {"type": "string", "format": "duration"},
|
||||
"retries": {"type": "number"},
|
||||
"disable": {"type": ["boolean", "string"]},
|
||||
"interval": {"type": "string"},
|
||||
"retries": {"type": ["number", "string"]},
|
||||
"test": {
|
||||
"oneOf": [
|
||||
{"type": "string"},
|
||||
{"type": "array", "items": {"type": "string"}}
|
||||
]
|
||||
},
|
||||
"timeout": {"type": "string", "format": "duration"},
|
||||
"start_period": {"type": "string", "format": "duration"},
|
||||
"start_interval": {"type": "string", "format": "duration"}
|
||||
"timeout": {"type": "string"},
|
||||
"start_period": {"type": "string"},
|
||||
"start_interval": {"type": "string"}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"patternProperties": {"^x-": {}}
|
||||
@@ -471,7 +480,9 @@
|
||||
"path": {"type": "string"},
|
||||
"action": {"type": "string", "enum": ["rebuild", "sync", "sync+restart"]},
|
||||
"target": {"type": "string"}
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"patternProperties": {"^x-": {}}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"patternProperties": {"^x-": {}}
|
||||
@@ -484,16 +495,16 @@
|
||||
"properties": {
|
||||
"mode": {"type": "string"},
|
||||
"endpoint_mode": {"type": "string"},
|
||||
"replicas": {"type": "integer"},
|
||||
"replicas": {"type": ["integer", "string"]},
|
||||
"labels": {"$ref": "#/definitions/list_or_dict"},
|
||||
"rollback_config": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"parallelism": {"type": "integer"},
|
||||
"delay": {"type": "string", "format": "duration"},
|
||||
"parallelism": {"type": ["integer", "string"]},
|
||||
"delay": {"type": "string"},
|
||||
"failure_action": {"type": "string"},
|
||||
"monitor": {"type": "string", "format": "duration"},
|
||||
"max_failure_ratio": {"type": "number"},
|
||||
"monitor": {"type": "string"},
|
||||
"max_failure_ratio": {"type": ["number", "string"]},
|
||||
"order": {"type": "string", "enum": [
|
||||
"start-first", "stop-first"
|
||||
]}
|
||||
@@ -504,11 +515,11 @@
|
||||
"update_config": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"parallelism": {"type": "integer"},
|
||||
"delay": {"type": "string", "format": "duration"},
|
||||
"parallelism": {"type": ["integer", "string"]},
|
||||
"delay": {"type": "string"},
|
||||
"failure_action": {"type": "string"},
|
||||
"monitor": {"type": "string", "format": "duration"},
|
||||
"max_failure_ratio": {"type": "number"},
|
||||
"monitor": {"type": "string"},
|
||||
"max_failure_ratio": {"type": ["number", "string"]},
|
||||
"order": {"type": "string", "enum": [
|
||||
"start-first", "stop-first"
|
||||
]}
|
||||
@@ -524,7 +535,7 @@
|
||||
"properties": {
|
||||
"cpus": {"type": ["number", "string"]},
|
||||
"memory": {"type": "string"},
|
||||
"pids": {"type": "integer"}
|
||||
"pids": {"type": ["integer", "string"]}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"patternProperties": {"^x-": {}}
|
||||
@@ -548,9 +559,9 @@
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"condition": {"type": "string"},
|
||||
"delay": {"type": "string", "format": "duration"},
|
||||
"max_attempts": {"type": "integer"},
|
||||
"window": {"type": "string", "format": "duration"}
|
||||
"delay": {"type": "string"},
|
||||
"max_attempts": {"type": ["integer", "string"]},
|
||||
"window": {"type": "string"}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"patternProperties": {"^x-": {}}
|
||||
@@ -570,7 +581,7 @@
|
||||
"patternProperties": {"^x-": {}}
|
||||
}
|
||||
},
|
||||
"max_replicas_per_node": {"type": "integer"}
|
||||
"max_replicas_per_node": {"type": ["integer", "string"]}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"patternProperties": {"^x-": {}}
|
||||
@@ -590,7 +601,7 @@
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"kind": {"type": "string"},
|
||||
"value": {"type": "number"}
|
||||
"value": {"type": ["number", "string"]}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"patternProperties": {"^x-": {}}
|
||||
@@ -655,7 +666,7 @@
|
||||
"items": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"subnet": {"type": "string", "format": "subnet_ip_address"},
|
||||
"subnet": {"type": "string"},
|
||||
"ip_range": {"type": "string"},
|
||||
"gateway": {"type": "string"},
|
||||
"aux_addresses": {
|
||||
@@ -678,7 +689,7 @@
|
||||
"patternProperties": {"^x-": {}}
|
||||
},
|
||||
"external": {
|
||||
"type": ["boolean", "object"],
|
||||
"type": ["boolean", "string", "object"],
|
||||
"properties": {
|
||||
"name": {
|
||||
"deprecated": true,
|
||||
@@ -688,9 +699,9 @@
|
||||
"additionalProperties": false,
|
||||
"patternProperties": {"^x-": {}}
|
||||
},
|
||||
"internal": {"type": "boolean"},
|
||||
"enable_ipv6": {"type": "boolean"},
|
||||
"attachable": {"type": "boolean"},
|
||||
"internal": {"type": ["boolean", "string"]},
|
||||
"enable_ipv6": {"type": ["boolean", "string"]},
|
||||
"attachable": {"type": ["boolean", "string"]},
|
||||
"labels": {"$ref": "#/definitions/list_or_dict"}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
@@ -710,7 +721,7 @@
|
||||
}
|
||||
},
|
||||
"external": {
|
||||
"type": ["boolean", "object"],
|
||||
"type": ["boolean", "string", "object"],
|
||||
"properties": {
|
||||
"name": {
|
||||
"deprecated": true,
|
||||
@@ -734,7 +745,7 @@
|
||||
"environment": {"type": "string"},
|
||||
"file": {"type": "string"},
|
||||
"external": {
|
||||
"type": ["boolean", "object"],
|
||||
"type": ["boolean", "string", "object"],
|
||||
"properties": {
|
||||
"name": {"type": "string"}
|
||||
}
|
||||
@@ -762,7 +773,7 @@
|
||||
"environment": {"type": "string"},
|
||||
"file": {"type": "string"},
|
||||
"external": {
|
||||
"type": ["boolean", "object"],
|
||||
"type": ["boolean", "string", "object"],
|
||||
"properties": {
|
||||
"name": {
|
||||
"deprecated": true,
|
||||
@@ -801,7 +812,7 @@
|
||||
"type": "string"
|
||||
},
|
||||
"required": {
|
||||
"type": "boolean",
|
||||
"type": ["boolean", "string"],
|
||||
"default": true
|
||||
}
|
||||
},
|
||||
@@ -855,7 +866,7 @@
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"path": {"type": "string"},
|
||||
"weight": {"type": "integer"}
|
||||
"weight": {"type": ["integer", "string"]}
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
@@ -871,7 +882,7 @@
|
||||
"target": {"type": "string"},
|
||||
"uid": {"type": "string"},
|
||||
"gid": {"type": "string"},
|
||||
"mode": {"type": "number"}
|
||||
"mode": {"type": ["number", "string"]}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"patternProperties": {"^x-": {}}
|
||||
@@ -884,12 +895,12 @@
|
||||
"patternProperties": {
|
||||
"^[a-z]+$": {
|
||||
"oneOf": [
|
||||
{"type": "integer"},
|
||||
{"type": ["integer", "string"]},
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"hard": {"type": "integer"},
|
||||
"soft": {"type": "integer"}
|
||||
"hard": {"type": ["integer", "string"]},
|
||||
"soft": {"type": ["integer", "string"]}
|
||||
},
|
||||
"required": ["soft", "hard"],
|
||||
"additionalProperties": false,
|
||||
|
123
vendor/github.com/compose-spec/compose-go/v2/schema/using-variables.yaml
generated
vendored
Normal file
@@ -0,0 +1,123 @@
|
||||
name: ${VARIABLE}
|
||||
services:
|
||||
foo:
|
||||
deploy:
|
||||
mode: ${VARIABLE}
|
||||
replicas: ${VARIABLE}
|
||||
rollback_config:
|
||||
parallelism: ${VARIABLE}
|
||||
delay: ${VARIABLE}
|
||||
failure_action: ${VARIABLE}
|
||||
monitor: ${VARIABLE}
|
||||
max_failure_ratio: ${VARIABLE}
|
||||
update_config:
|
||||
parallelism: ${VARIABLE}
|
||||
delay: ${VARIABLE}
|
||||
failure_action: ${VARIABLE}
|
||||
monitor: ${VARIABLE}
|
||||
max_failure_ratio: ${VARIABLE}
|
||||
resources:
|
||||
limits:
|
||||
memory: ${VARIABLE}
|
||||
reservations:
|
||||
memory: ${VARIABLE}
|
||||
generic_resources:
|
||||
- discrete_resource_spec:
|
||||
kind: ${VARIABLE}
|
||||
value: ${VARIABLE}
|
||||
- discrete_resource_spec:
|
||||
kind: ${VARIABLE}
|
||||
value: ${VARIABLE}
|
||||
restart_policy:
|
||||
condition: ${VARIABLE}
|
||||
delay: ${VARIABLE}
|
||||
max_attempts: ${VARIABLE}
|
||||
window: ${VARIABLE}
|
||||
placement:
|
||||
max_replicas_per_node: ${VARIABLE}
|
||||
preferences:
|
||||
- spread: ${VARIABLE}
|
||||
endpoint_mode: ${VARIABLE}
|
||||
expose:
|
||||
- ${VARIABLE}
|
||||
external_links:
|
||||
- ${VARIABLE}
|
||||
extra_hosts:
|
||||
- ${VARIABLE}
|
||||
hostname: ${VARIABLE}
|
||||
|
||||
healthcheck:
|
||||
test: ${VARIABLE}
|
||||
interval: ${VARIABLE}
|
||||
timeout: ${VARIABLE}
|
||||
retries: ${VARIABLE}
|
||||
start_period: ${VARIABLE}
|
||||
start_interval: ${VARIABLE}
|
||||
image: ${VARIABLE}
|
||||
mac_address: ${VARIABLE}
|
||||
networks:
|
||||
some-network:
|
||||
aliases:
|
||||
- ${VARIABLE}
|
||||
other-network:
|
||||
ipv4_address: ${VARIABLE}
|
||||
ipv6_address: ${VARIABLE}
|
||||
mac_address: ${VARIABLE}
|
||||
ports:
|
||||
- ${VARIABLE}
|
||||
privileged: ${VARIABLE}
|
||||
read_only: ${VARIABLE}
|
||||
restart: ${VARIABLE}
|
||||
secrets:
|
||||
- source: ${VARIABLE}
|
||||
target: ${VARIABLE}
|
||||
uid: ${VARIABLE}
|
||||
gid: ${VARIABLE}
|
||||
mode: ${VARIABLE}
|
||||
stdin_open: ${VARIABLE}
|
||||
stop_grace_period: ${VARIABLE}
|
||||
stop_signal: ${VARIABLE}
|
||||
storage_opt:
|
||||
size: ${VARIABLE}
|
||||
sysctls:
|
||||
net.core.somaxconn: ${VARIABLE}
|
||||
tmpfs:
|
||||
- ${VARIABLE}
|
||||
tty: ${VARIABLE}
|
||||
ulimits:
|
||||
nproc: ${VARIABLE}
|
||||
nofile:
|
||||
soft: ${VARIABLE}
|
||||
hard: ${VARIABLE}
|
||||
user: ${VARIABLE}
|
||||
volumes:
|
||||
- ${VARIABLE}:${VARIABLE}
|
||||
- type: tmpfs
|
||||
target: ${VARIABLE}
|
||||
tmpfs:
|
||||
size: ${VARIABLE}
|
||||
|
||||
networks:
|
||||
network:
|
||||
ipam:
|
||||
driver: ${VARIABLE}
|
||||
config:
|
||||
- subnet: ${VARIABLE}
|
||||
ip_range: ${VARIABLE}
|
||||
gateway: ${VARIABLE}
|
||||
aux_addresses:
|
||||
host1: ${VARIABLE}
|
||||
external-network:
|
||||
external: ${VARIABLE}
|
||||
|
||||
volumes:
|
||||
external-volume:
|
||||
external: ${VARIABLE}
|
||||
|
||||
configs:
|
||||
config1:
|
||||
external: ${VARIABLE}
|
||||
|
||||
secrets:
|
||||
secret1:
|
||||
external: ${VARIABLE}
|
1
vendor/github.com/compose-spec/compose-go/v2/transform/defaults.go
generated
vendored
@@ -25,6 +25,7 @@ var defaultValues = map[tree.Path]transformFunc{}
func init() {
	defaultValues["services.*.build"] = defaultBuildContext
	defaultValues["services.*.secrets.*"] = defaultSecretMount
	defaultValues["services.*.ports.*"] = portDefaults
}

// SetDefaultValues transforms a compose model to set default values to missing attributes
15
vendor/github.com/compose-spec/compose-go/v2/transform/ports.go
generated
vendored
@@ -87,3 +87,18 @@ func encode(v any) (map[string]any, error) {
	err = decoder.Decode(v)
	return m, err
}

func portDefaults(data any, _ tree.Path, _ bool) (any, error) {
	switch v := data.(type) {
	case map[string]any:
		if _, ok := v["protocol"]; !ok {
			v["protocol"] = "tcp"
		}
		if _, ok := v["mode"]; !ok {
			v["mode"] = "ingress"
		}
		return v, nil
	default:
		return data, nil
	}
}
|
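The `portDefaults` transform above (registered for `services.*.ports.*` in the defaults.go hunk just above) only touches long-form port maps, filling in `protocol: tcp` and `mode: ingress` when absent. A tiny standalone illustration of the effect:

```go
package main

import "fmt"

// portDefaults fills in the defaults exactly as the vendored transform does
// for long-form port entries; anything that is not a map is passed through.
func portDefaults(v map[string]any) map[string]any {
	if _, ok := v["protocol"]; !ok {
		v["protocol"] = "tcp"
	}
	if _, ok := v["mode"]; !ok {
		v["mode"] = "ingress"
	}
	return v
}

func main() {
	port := map[string]any{"target": 80, "published": "8080"}
	fmt.Println(portDefaults(port)) // map[mode:ingress protocol:tcp published:8080 target:80]
}
```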
13
vendor/github.com/compose-spec/compose-go/v2/types/derived.gen.go
generated
vendored
@@ -1341,7 +1341,12 @@ func deriveDeepCopy_21(dst, src *NetworkConfig) {
|
||||
} else {
|
||||
dst.Labels = nil
|
||||
}
|
||||
dst.EnableIPv6 = src.EnableIPv6
|
||||
if src.EnableIPv6 == nil {
|
||||
dst.EnableIPv6 = nil
|
||||
} else {
|
||||
dst.EnableIPv6 = new(bool)
|
||||
*dst.EnableIPv6 = *src.EnableIPv6
|
||||
}
|
||||
if src.Extensions != nil {
|
||||
dst.Extensions = make(map[string]any, len(src.Extensions))
|
||||
src.Extensions.DeepCopy(dst.Extensions)
|
||||
@@ -1809,6 +1814,12 @@ func deriveDeepCopy_40(dst, src *Trigger) {
|
||||
}
|
||||
copy(dst.Ignore, src.Ignore)
|
||||
}
|
||||
if src.Extensions != nil {
|
||||
dst.Extensions = make(map[string]any, len(src.Extensions))
|
||||
src.Extensions.DeepCopy(dst.Extensions)
|
||||
} else {
|
||||
dst.Extensions = nil
|
||||
}
|
||||
}
|
||||
|
||||
// deriveDeepCopy_41 recursively copies the contents of src into dst.
|
||||
|
9
vendor/github.com/compose-spec/compose-go/v2/types/develop.go
generated
vendored
@@ -31,8 +31,9 @@ const (
)

type Trigger struct {
	Path string `yaml:"path" json:"path"`
	Action WatchAction `yaml:"action" json:"action"`
	Target string `yaml:"target,omitempty" json:"target,omitempty"`
	Ignore []string `yaml:"ignore,omitempty" json:"ignore,omitempty"`
	Path string `yaml:"path" json:"path"`
	Action WatchAction `yaml:"action" json:"action"`
	Target string `yaml:"target,omitempty" json:"target,omitempty"`
	Ignore []string `yaml:"ignore,omitempty" json:"ignore,omitempty"`
	Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
}
32
vendor/github.com/compose-spec/compose-go/v2/types/types.go
generated
vendored
@@ -782,9 +782,41 @@ type ExtendsConfig struct {
|
||||
// SecretConfig for a secret
|
||||
type SecretConfig FileObjectConfig
|
||||
|
||||
// MarshalYAML makes SecretConfig implement yaml.Marshaller
|
||||
func (s SecretConfig) MarshalYAML() (interface{}, error) {
|
||||
// secret content is set while loading model. Never marshall it
|
||||
s.Content = ""
|
||||
return FileObjectConfig(s), nil
|
||||
}
|
||||
|
||||
// MarshalJSON makes SecretConfig implement json.Marshaller
|
||||
func (s SecretConfig) MarshalJSON() ([]byte, error) {
|
||||
// secret content is set while loading model. Never marshall it
|
||||
s.Content = ""
|
||||
return json.Marshal(FileObjectConfig(s))
|
||||
}
|
||||
|
||||
// ConfigObjConfig is the config for the swarm "Config" object
|
||||
type ConfigObjConfig FileObjectConfig
|
||||
|
||||
// MarshalYAML makes ConfigObjConfig implement yaml.Marshaller
|
||||
func (s ConfigObjConfig) MarshalYAML() (interface{}, error) {
|
||||
// config content may have been set from environment while loading model. Marshall actual source
|
||||
if s.Environment != "" {
|
||||
s.Content = ""
|
||||
}
|
||||
return FileObjectConfig(s), nil
|
||||
}
|
||||
|
||||
// MarshalJSON makes ConfigObjConfig implement json.Marshaller
|
||||
func (s ConfigObjConfig) MarshalJSON() ([]byte, error) {
|
||||
// config content may have been set from environment while loading model. Marshall actual source
|
||||
if s.Environment != "" {
|
||||
s.Content = ""
|
||||
}
|
||||
return json.Marshal(FileObjectConfig(s))
|
||||
}
|
||||
|
||||
type IncludeConfig struct {
|
||||
Path StringList `yaml:"path,omitempty" json:"path,omitempty"`
|
||||
ProjectDirectory string `yaml:"project_directory,omitempty" json:"project_directory,omitempty"`
|
||||
|
92
vendor/github.com/compose-spec/compose-go/v2/utils/pathutils.go
generated
vendored
Normal file
@@ -0,0 +1,92 @@
|
||||
/*
|
||||
Copyright 2020 The Compose Specification Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ResolveSymbolicLink converts the section of an absolute path if it is a
|
||||
// symbolic link
|
||||
//
|
||||
// Parameters:
|
||||
// - path: an absolute path
|
||||
//
|
||||
// Returns:
|
||||
// - converted path if it has a symbolic link or the same path if there is
|
||||
// no symbolic link
|
||||
func ResolveSymbolicLink(path string) (string, error) {
|
||||
sym, part, err := getSymbolinkLink(path)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if sym == "" && part == "" {
|
||||
// no symbolic link detected
|
||||
return path, nil
|
||||
}
|
||||
return strings.Replace(path, part, sym, 1), nil
|
||||
|
||||
}
|
||||
|
||||
// getSymbolinkLink parses all parts of the path and returns the
|
||||
// the symbolic link part as well as the correspondent original part
|
||||
// Parameters:
|
||||
// - path: an absolute path
|
||||
//
|
||||
// Returns:
|
||||
// - string section of the path that is a symbolic link
|
||||
// - string correspondent path section of the symbolic link
|
||||
// - An error
|
||||
func getSymbolinkLink(path string) (string, string, error) {
|
||||
parts := strings.Split(path, string(os.PathSeparator))
|
||||
|
||||
// Reconstruct the path step by step, checking each component
|
||||
var currentPath string
|
||||
if filepath.IsAbs(path) {
|
||||
currentPath = string(os.PathSeparator)
|
||||
}
|
||||
|
||||
for _, part := range parts {
|
||||
if part == "" {
|
||||
continue
|
||||
}
|
||||
currentPath = filepath.Join(currentPath, part)
|
||||
|
||||
if isSymLink := isSymbolicLink(currentPath); isSymLink {
|
||||
// return symbolic link, and correspondent part
|
||||
target, err := filepath.EvalSymlinks(currentPath)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
return target, currentPath, nil
|
||||
}
|
||||
}
|
||||
return "", "", nil // no symbolic link
|
||||
}
|
||||
|
||||
// isSymbolicLink validates if the path is a symbolic link
|
||||
func isSymbolicLink(path string) bool {
|
||||
info, err := os.Lstat(path)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Check if the file mode indicates a symbolic link
|
||||
return info.Mode()&os.ModeSymlink != 0
|
||||
}
|
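A short usage sketch for the new `ResolveSymbolicLink` helper: it resolves the first symlinked component of an absolute path and leaves everything else alone. The temporary directory and link names are invented for the example, and the printed path can differ on systems where the temp directory itself sits behind a symlink:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/compose-spec/compose-go/v2/utils"
)

func main() {
	dir, err := os.MkdirTemp("", "symlink-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	realDir := filepath.Join(dir, "real")
	linkPath := filepath.Join(dir, "link")
	if err := os.Mkdir(realDir, 0o755); err != nil {
		panic(err)
	}
	if err := os.Symlink(realDir, linkPath); err != nil {
		panic(err)
	}

	// The "link" component is rewritten to "real"; paths without symlinks
	// come back unchanged.
	resolved, err := utils.ResolveSymbolicLink(filepath.Join(linkPath, "compose.yaml"))
	if err != nil {
		panic(err)
	}
	fmt.Println(resolved)
}
```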
6
vendor/github.com/compose-spec/compose-go/v2/utils/stringutils.go
generated
vendored
@@ -32,8 +32,10 @@ func StringToBool(s string) bool {
func GetAsEqualsMap(em []string) map[string]string {
	m := make(map[string]string)
	for _, v := range em {
		kv := strings.SplitN(v, "=", 2)
		m[kv[0]] = kv[1]
		key, val, found := strings.Cut(v, "=")
		if found {
			m[key] = val
		}
	}
	return m
}
62
vendor/github.com/containerd/containerd/pkg/userns/userns_linux.go
generated
vendored
@@ -1,62 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package userns
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
inUserNS bool
|
||||
nsOnce sync.Once
|
||||
)
|
||||
|
||||
// RunningInUserNS detects whether we are currently running in a user namespace.
|
||||
// Originally copied from github.com/lxc/lxd/shared/util.go
|
||||
func RunningInUserNS() bool {
|
||||
nsOnce.Do(func() {
|
||||
file, err := os.Open("/proc/self/uid_map")
|
||||
if err != nil {
|
||||
// This kernel-provided file only exists if user namespaces are supported
|
||||
return
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
buf := bufio.NewReader(file)
|
||||
l, _, err := buf.ReadLine()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
line := string(l)
|
||||
var a, b, c int64
|
||||
fmt.Sscanf(line, "%d %d %d", &a, &b, &c)
|
||||
|
||||
/*
|
||||
* We assume we are in the initial user namespace if we have a full
|
||||
* range - 4294967295 uids starting at uid 0.
|
||||
*/
|
||||
if a == 0 && b == 0 && c == 4294967295 {
|
||||
return
|
||||
}
|
||||
inUserNS = true
|
||||
})
|
||||
return inUserNS
|
||||
}
|
25
vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go
generated
vendored
@@ -1,25 +0,0 @@
|
||||
//go:build !linux
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package userns
|
||||
|
||||
// RunningInUserNS is a stub for non-Linux systems
|
||||
// Always returns false
|
||||
func RunningInUserNS() bool {
|
||||
return false
|
||||
}
|
2
vendor/github.com/containerd/containerd/version/version.go
generated
vendored
@@ -23,7 +23,7 @@ var (
	Package = "github.com/containerd/containerd"

	// Version holds the complete version number. Filled in at linking time.
	Version = "1.7.19+unknown"
	Version = "1.7.21+unknown"

	// Revision is filled with the VCS (e.g. git) revision being used to build
	// the program at linking time.
8
vendor/github.com/containerd/typeurl/v2/README.md
generated
vendored
@@ -2,7 +2,7 @@
|
||||
|
||||
[](https://pkg.go.dev/github.com/containerd/typeurl)
|
||||
[](https://github.com/containerd/typeurl/actions?query=workflow%3ACI)
|
||||
[](https://codecov.io/gh/containerd/typeurl)
|
||||
[](https://codecov.io/gh/containerd/typeurl)
|
||||
[](https://goreportcard.com/report/github.com/containerd/typeurl)
|
||||
|
||||
A Go package for managing the registration, marshaling, and unmarshaling of encoded types.
|
||||
@@ -13,8 +13,8 @@ This package helps when types are sent over a ttrpc/GRPC API and marshaled as a
|
||||
|
||||
**typeurl** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
|
||||
As a containerd sub-project, you will find the:
|
||||
* [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md),
|
||||
* [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS),
|
||||
* and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md)
|
||||
* [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md),
|
||||
* [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS),
|
||||
* and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md)
|
||||
|
||||
information in our [`containerd/project`](https://github.com/containerd/project) repository.
|
||||
|
29
vendor/github.com/containerd/typeurl/v2/types.go
generated
vendored
@@ -27,6 +27,7 @@ import (
|
||||
gogoproto "github.com/gogo/protobuf/proto"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoregistry"
|
||||
"google.golang.org/protobuf/types/known/anypb"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -122,6 +123,9 @@ func TypeURL(v interface{}) (string, error) {
|
||||
|
||||
// Is returns true if the type of the Any is the same as v.
|
||||
func Is(any Any, v interface{}) bool {
|
||||
if any == nil {
|
||||
return false
|
||||
}
|
||||
// call to check that v is a pointer
|
||||
tryDereference(v)
|
||||
url, err := TypeURL(v)
|
||||
@@ -193,6 +197,31 @@ func UnmarshalToByTypeURL(typeURL string, value []byte, out interface{}) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// MarshalProto converts typeurl.Any to google.golang.org/protobuf/types/known/anypb.Any.
|
||||
func MarshalProto(from Any) *anypb.Any {
|
||||
if from == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if pbany, ok := from.(*anypb.Any); ok {
|
||||
return pbany
|
||||
}
|
||||
|
||||
return &anypb.Any{
|
||||
TypeUrl: from.GetTypeUrl(),
|
||||
Value: from.GetValue(),
|
||||
}
|
||||
}
|
||||
|
||||
// MarshalAnyToProto converts an arbitrary interface to google.golang.org/protobuf/types/known/anypb.Any.
|
||||
func MarshalAnyToProto(from interface{}) (*anypb.Any, error) {
|
||||
anyType, err := MarshalAny(from)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return MarshalProto(anyType), nil
|
||||
}
|
||||
|
||||
func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error) {
|
||||
t, err := getTypeByUrl(typeURL)
|
||||
if err != nil {
|
||||
|
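The new `MarshalProto`/`MarshalAnyToProto` helpers bridge typeurl's `Any` interface and `anypb.Any`. A small usage sketch; the `Point` type and its registration URL are invented for illustration:

```go
package main

import (
	"fmt"

	typeurl "github.com/containerd/typeurl/v2"
)

// Point is a throwaway example type; real callers register their own types.
type Point struct {
	X, Y int
}

func main() {
	typeurl.Register(&Point{}, "example.com/demo.Point")

	// MarshalAnyToProto marshals the value and hands back a *anypb.Any that
	// can be embedded directly in a protobuf message.
	pb, err := typeurl.MarshalAnyToProto(&Point{X: 1, Y: 2})
	if err != nil {
		panic(err)
	}
	fmt.Println(pb.GetTypeUrl()) // the URL registered above
}
```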
3
vendor/github.com/docker/cli/cli-plugins/socket/socket.go
generated
vendored
@@ -95,6 +95,9 @@ func (pl *PluginServer) Addr() net.Addr {
//
// The error value is that of the underlying [net.Listner.Close] call.
func (pl *PluginServer) Close() error {
	if pl == nil {
		return nil
	}
	logrus.Trace("Closing plugin server")
	// Close connections first to ensure the connections get io.EOF instead
	// of a connection reset.
2
vendor/github.com/docker/cli/cli/command/cli.go
generated
vendored
@@ -324,7 +324,7 @@ func newAPIClientFromEndpoint(ep docker.Endpoint, configFile *configfile.ConfigF
	if len(configFile.HTTPHeaders) > 0 {
		opts = append(opts, client.WithHTTPHeaders(configFile.HTTPHeaders))
	}
	opts = append(opts, client.WithUserAgent(UserAgent()))
	opts = append(opts, withCustomHeadersFromEnv(), client.WithUserAgent(UserAgent()))
	return client.NewClientWithOpts(opts...)
}
109
vendor/github.com/docker/cli/cli/command/cli_options.go
generated
vendored
@@ -2,13 +2,18 @@ package command
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/csv"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/cli/cli/streams"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/moby/term"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// CLIOption is a functional argument to apply options to a [DockerCli]. These
|
||||
@@ -108,3 +113,107 @@ func WithAPIClient(c client.APIClient) CLIOption {
         return nil
     }
 }
+
+// envOverrideHTTPHeaders is the name of the environment-variable that can be
+// used to set custom HTTP headers to be sent by the client. This environment
+// variable is the equivalent to the HttpHeaders field in the configuration
+// file.
+//
+// WARNING: If both config and environment-variable are set, the environment
+// variable currently overrides all headers set in the configuration file.
+// This behavior may change in a future update, as we are considering the
+// environment variable to be appending to existing headers (and to only
+// override headers with the same name).
+//
+// While this env-var allows for custom headers to be set, it does not allow
+// for built-in headers (such as "User-Agent", if set) to be overridden.
+// Also see [client.WithHTTPHeaders] and [client.WithUserAgent].
+//
+// This environment variable can be used in situations where headers must be
+// set for a specific invocation of the CLI, but should not be set by default,
+// and therefore cannot be set in the config-file.
+//
+// envOverrideHTTPHeaders accepts a comma-separated (CSV) list of key=value pairs,
+// where key must be a non-empty, valid MIME header format. Whitespaces surrounding
+// the key are trimmed, and the key is normalised. Whitespaces in values are
+// preserved, but "key=value" pairs with an empty value (e.g. "key=") are ignored.
+// Tuples without a "=" produce an error.
+//
+// It follows CSV rules for escaping, allowing "key=value" pairs to be quoted
+// if they must contain commas, which allows for multiple values for a single
+// header to be set. If a key is repeated in the list, later values override
+// prior values.
+//
+// For example, the following value:
+//
+//    one=one-value,"two=two,value","three= a value with whitespace ",four=,five=five=one,five=five-two
+//
+// Produces four headers (four is omitted as it has an empty value set):
+//
+// - one (value is "one-value")
+// - two (value is "two,value")
+// - three (value is " a value with whitespace ")
+// - five (value is "five-two", the later value has overridden the prior value)
+const envOverrideHTTPHeaders = "DOCKER_CUSTOM_HEADERS"
+
+// withCustomHeadersFromEnv overriding custom HTTP headers to be sent by the
+// client through the [envOverrideHTTPHeaders] environment-variable. This
+// environment variable is the equivalent to the HttpHeaders field in the
+// configuration file.
+//
+// WARNING: If both config and environment-variable are set, the environment-
+// variable currently overrides all headers set in the configuration file.
+// This behavior may change in a future update, as we are considering the
+// environment-variable to be appending to existing headers (and to only
+// override headers with the same name).
+//
+// TODO(thaJeztah): this is a client Option, and should be moved to the client. It is non-exported for that reason.
+func withCustomHeadersFromEnv() client.Opt {
+    return func(apiClient *client.Client) error {
+        value := os.Getenv(envOverrideHTTPHeaders)
+        if value == "" {
+            return nil
+        }
+        csvReader := csv.NewReader(strings.NewReader(value))
+        fields, err := csvReader.Read()
+        if err != nil {
+            return errdefs.InvalidParameter(errors.Errorf("failed to parse custom headers from %s environment variable: value must be formatted as comma-separated key=value pairs", envOverrideHTTPHeaders))
+        }
+        if len(fields) == 0 {
+            return nil
+        }
+
+        env := map[string]string{}
+        for _, kv := range fields {
+            k, v, hasValue := strings.Cut(kv, "=")
+
+            // Only strip whitespace in keys; preserve whitespace in values.
+            k = strings.TrimSpace(k)
+
+            if k == "" {
+                return errdefs.InvalidParameter(errors.Errorf(`failed to set custom headers from %s environment variable: value contains a key=value pair with an empty key: '%s'`, envOverrideHTTPHeaders, kv))
+            }
+
+            // We don't currently allow empty key=value pairs, and produce an error.
+            // This is something we could allow in future (e.g. to read value
+            // from an environment variable with the same name). In the meantime,
+            // produce an error to prevent users from depending on this.
+            if !hasValue {
+                return errdefs.InvalidParameter(errors.Errorf(`failed to set custom headers from %s environment variable: missing "=" in key=value pair: '%s'`, envOverrideHTTPHeaders, kv))
+            }
+
+            env[http.CanonicalHeaderKey(k)] = v
+        }
+
+        if len(env) == 0 {
+            // We should probably not hit this case, as we don't skip values
+            // (only return errors), but we don't want to discard existing
+            // headers with an empty set.
+            return nil
+        }
+
+        // TODO(thaJeztah): add a client.WithExtraHTTPHeaders() function to allow these headers to be _added_ to existing ones, instead of _replacing_
+        // see https://github.com/docker/cli/pull/5098#issuecomment-2147403871 (when updating, also update the WARNING in the function and env-var GoDoc)
+        return client.WithHTTPHeaders(env)(apiClient)
+    }
+}
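The GoDoc above pins down the DOCKER_CUSTOM_HEADERS format precisely. As a quick illustration (not part of the vendored code; parseCustomHeaders is an invented name), the following standalone sketch applies the same rules to the example value from the comment: CSV splitting, mandatory key=value tuples, trimmed and canonicalised keys, values kept verbatim, later duplicates winning.

package main

import (
	"encoding/csv"
	"fmt"
	"net/http"
	"strings"
)

func parseCustomHeaders(value string) (map[string]string, error) {
	// The env-var value is one CSV record, so quoted fields may contain commas.
	fields, err := csv.NewReader(strings.NewReader(value)).Read()
	if err != nil {
		return nil, fmt.Errorf("value must be comma-separated key=value pairs: %w", err)
	}
	headers := map[string]string{}
	for _, kv := range fields {
		k, v, ok := strings.Cut(kv, "=")
		k = strings.TrimSpace(k) // trim keys only; values stay verbatim
		if k == "" || !ok {
			return nil, fmt.Errorf("invalid key=value pair: %q", kv)
		}
		headers[http.CanonicalHeaderKey(k)] = v // later duplicates override earlier ones
	}
	return headers, nil
}

func main() {
	// Same example value as in the GoDoc comment above.
	h, err := parseCustomHeaders(`one=one-value,"two=two,value","three= a value with whitespace ",four=,five=five=one,five=five-two`)
	fmt.Println(h, err)
	// A typical invocation-level use would look something like
	//   DOCKER_CUSTOM_HEADERS="X-Trace-Id=abc123" docker ps
	// (header name invented here).
}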
4
vendor/github.com/docker/cli/cli/command/formatter/container.go
generated
vendored
@@ -5,6 +5,7 @@ package formatter
 
 import (
     "fmt"
+    "net"
     "sort"
     "strconv"
     "strings"
@@ -331,7 +332,8 @@ func DisplayablePorts(ports []types.Port) string {
         portKey := port.Type
         if port.IP != "" {
             if port.PublicPort != current {
-                hostMappings = append(hostMappings, fmt.Sprintf("%s:%d->%d/%s", port.IP, port.PublicPort, port.PrivatePort, port.Type))
+                hAddrPort := net.JoinHostPort(port.IP, strconv.Itoa(int(port.PublicPort)))
+                hostMappings = append(hostMappings, fmt.Sprintf("%s->%d/%s", hAddrPort, port.PrivatePort, port.Type))
                 continue
             }
             portKey = port.IP + "/" + port.Type
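The DisplayablePorts change above replaces a hand-rolled "%s:%d" host:port format with net.JoinHostPort. The practical difference shows up with IPv6 host addresses, which need brackets to stay unambiguous. A tiny standalone comparison, illustrative only (the addresses and ports are arbitrary):

package main

import (
	"fmt"
	"net"
	"strconv"
)

func main() {
	for _, ip := range []string{"0.0.0.0", "::1"} {
		// Old style: fine for IPv4, ambiguous for IPv6 ("::1:8080->80/tcp").
		naive := fmt.Sprintf("%s:%d->%d/tcp", ip, 8080, 80)
		// New style: JoinHostPort brackets IPv6, giving "[::1]:8080->80/tcp".
		joined := fmt.Sprintf("%s->%d/tcp", net.JoinHostPort(ip, strconv.Itoa(8080)), 80)
		fmt.Println(naive, "vs", joined)
	}
}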
113
vendor/github.com/docker/cli/cli/command/registry.go
generated
vendored
@@ -1,10 +1,8 @@
 package command
 
 import (
-    "bufio"
     "context"
     "fmt"
-    "io"
     "os"
     "runtime"
     "strings"
@@ -18,7 +16,6 @@ import (
     "github.com/docker/docker/api/types"
     registrytypes "github.com/docker/docker/api/types/registry"
     "github.com/docker/docker/registry"
-    "github.com/moby/term"
     "github.com/pkg/errors"
 )
 
@@ -44,7 +41,7 @@ func RegistryAuthenticationPrivilegedFunc(cli Cli, index *registrytypes.IndexInf
         default:
         }
 
-        err = ConfigureAuth(cli, "", "", &authConfig, isDefaultRegistry)
+        authConfig, err = PromptUserForCredentials(ctx, cli, "", "", authConfig.Username, indexServer)
         if err != nil {
             return "", err
         }
@@ -89,8 +86,32 @@ func GetDefaultAuthConfig(cfg *configfile.ConfigFile, checkCredStore bool, serve
     return registrytypes.AuthConfig(authconfig), nil
 }
 
-// ConfigureAuth handles prompting of user's username and password if needed
-func ConfigureAuth(cli Cli, flUser, flPassword string, authconfig *registrytypes.AuthConfig, isDefaultRegistry bool) error {
+// ConfigureAuth handles prompting of user's username and password if needed.
+// Deprecated: use PromptUserForCredentials instead.
+func ConfigureAuth(ctx context.Context, cli Cli, flUser, flPassword string, authConfig *registrytypes.AuthConfig, _ bool) error {
+    defaultUsername := authConfig.Username
+    serverAddress := authConfig.ServerAddress
+
+    newAuthConfig, err := PromptUserForCredentials(ctx, cli, flUser, flPassword, defaultUsername, serverAddress)
+    if err != nil {
+        return err
+    }
+
+    authConfig.Username = newAuthConfig.Username
+    authConfig.Password = newAuthConfig.Password
+    return nil
+}
+
+// PromptUserForCredentials handles the CLI prompt for the user to input
+// credentials.
+// If argUser is not empty, then the user is only prompted for their password.
+// If argPassword is not empty, then the user is only prompted for their username
+// If neither argUser nor argPassword are empty, then the user is not prompted and
+// an AuthConfig is returned with those values.
+// If defaultUsername is not empty, the username prompt includes that username
+// and the user can hit enter without inputting a username to use that default
+// username.
+func PromptUserForCredentials(ctx context.Context, cli Cli, argUser, argPassword, defaultUsername, serverAddress string) (authConfig registrytypes.AuthConfig, err error) {
     // On Windows, force the use of the regular OS stdin stream.
     //
     // See:
@@ -110,13 +131,14 @@ func ConfigureAuth(cli Cli, flUser, flPassword string, authconfig *registrytypes
     // Linux will hit this if you attempt `cat | docker login`, and Windows
     // will hit this if you attempt docker login from mintty where stdin
     // is a pipe, not a character based console.
-    if flPassword == "" && !cli.In().IsTerminal() {
-        return errors.Errorf("Error: Cannot perform an interactive login from a non TTY device")
+    if argPassword == "" && !cli.In().IsTerminal() {
+        return authConfig, errors.Errorf("Error: Cannot perform an interactive login from a non TTY device")
     }
 
-    authconfig.Username = strings.TrimSpace(authconfig.Username)
+    isDefaultRegistry := serverAddress == registry.IndexServer
+    defaultUsername = strings.TrimSpace(defaultUsername)
 
-    if flUser = strings.TrimSpace(flUser); flUser == "" {
+    if argUser = strings.TrimSpace(argUser); argUser == "" {
         if isDefaultRegistry {
             // if this is a default registry (docker hub), then display the following message.
             fmt.Fprintln(cli.Out(), "Log in with your Docker ID or email address to push and pull images from Docker Hub. If you don't have a Docker ID, head over to https://hub.docker.com/ to create one.")
@@ -125,62 +147,45 @@ func ConfigureAuth(cli Cli, flUser, flPassword string, authconfig *registrytypes
                 fmt.Fprintln(cli.Out())
             }
         }
-        promptWithDefault(cli.Out(), "Username", authconfig.Username)
-        var err error
-        flUser, err = readInput(cli.In())
-        if err != nil {
-            return err
+
+        var prompt string
+        if defaultUsername == "" {
+            prompt = "Username: "
+        } else {
+            prompt = fmt.Sprintf("Username (%s): ", defaultUsername)
         }
-        if flUser == "" {
-            flUser = authconfig.Username
+        argUser, err = PromptForInput(ctx, cli.In(), cli.Out(), prompt)
+        if err != nil {
+            return authConfig, err
         }
+        if argUser == "" {
+            argUser = defaultUsername
+        }
     }
-    if flUser == "" {
-        return errors.Errorf("Error: Non-null Username Required")
+    if argUser == "" {
+        return authConfig, errors.Errorf("Error: Non-null Username Required")
     }
-    if flPassword == "" {
-        oldState, err := term.SaveState(cli.In().FD())
+    if argPassword == "" {
+        restoreInput, err := DisableInputEcho(cli.In())
         if err != nil {
-            return err
+            return authConfig, err
         }
-        fmt.Fprintf(cli.Out(), "Password: ")
-        _ = term.DisableEcho(cli.In().FD(), oldState)
-        defer func() {
-            _ = term.RestoreTerminal(cli.In().FD(), oldState)
-        }()
-        flPassword, err = readInput(cli.In())
+        defer restoreInput()
+
+        argPassword, err = PromptForInput(ctx, cli.In(), cli.Out(), "Password: ")
         if err != nil {
-            return err
+            return authConfig, err
         }
         fmt.Fprint(cli.Out(), "\n")
-        if flPassword == "" {
-            return errors.Errorf("Error: Password Required")
+        if argPassword == "" {
+            return authConfig, errors.Errorf("Error: Password Required")
         }
     }
 
-    authconfig.Username = flUser
-    authconfig.Password = flPassword
-
-    return nil
-}
-
-// readInput reads, and returns user input from in. It tries to return a
-// single line, not including the end-of-line bytes, and trims leading
-// and trailing whitespace.
-func readInput(in io.Reader) (string, error) {
-    line, _, err := bufio.NewReader(in).ReadLine()
-    if err != nil {
-        return "", errors.Wrap(err, "error while reading input")
-    }
-    return strings.TrimSpace(string(line)), nil
-}
-
-func promptWithDefault(out io.Writer, prompt string, configDefault string) {
-    if configDefault == "" {
-        fmt.Fprintf(out, "%s: ", prompt)
-    } else {
-        fmt.Fprintf(out, "%s (%s): ", prompt, configDefault)
-    }
+    authConfig.Username = argUser
+    authConfig.Password = argPassword
+    authConfig.ServerAddress = serverAddress
+    return authConfig, nil
 }
 
 // RetrieveAuthTokenFromImage retrieves an encoded auth token given a complete
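For downstream callers such as buildx, the visible effect of the registry.go changes is the move from the now-deprecated ConfigureAuth to PromptUserForCredentials, which takes a context plus an explicit default username and server address and returns the filled-in AuthConfig instead of mutating one through a pointer. A rough caller-side sketch under those assumptions (this is not buildx code; it only strings together the vendored docker/cli helpers shown above, with error handling kept minimal):

package main

import (
	"context"
	"fmt"

	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/flags"
	"github.com/docker/docker/registry"
)

func main() {
	dockerCli, err := command.NewDockerCli()
	if err != nil {
		panic(err)
	}
	if err := dockerCli.Initialize(flags.NewClientOptions()); err != nil {
		panic(err)
	}

	// Old (still compiles, but deprecated):
	//   err = command.ConfigureAuth(ctx, dockerCli, "", "", &authConfig, true)
	// New: the prompt helper returns the completed AuthConfig.
	authConfig, err := command.PromptUserForCredentials(context.Background(), dockerCli, "", "", "", registry.IndexServer)
	if err != nil {
		panic(err)
	}
	fmt.Println("logging in as:", authConfig.Username)
}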
Some files were not shown because too many files have changed in this diff.