Mirror of https://gitea.com/Lydanne/buildx.git (synced 2025-09-02 17:09:08 +08:00)

Compare commits: v0.9.0-rc1...v0.9 (63 commits)
63 commits (SHAs only; the author and date columns were empty in the source table):

2af40b75b7  83f3691c15  4e93e87991  3f1516d3fe  09d1e1ee99  2e9906ba20  ed00243a0c
1223e759a4  4fd3ec1a50  7f9cad1e4e  437b8b140f  8f0d9bd71f  9c22be5d9c  42dea89247
982a332679  441853f189  611329fc7f  f3c135e583  7f84582b37  297526c49d  d01d394a2b
17d4369866  fb5e1393a4  18dbde9ed6  da1f4b8496  5b2e1d3ce4  7d8a6bc1d7  a378f8095e
005bc009e8  3bc7d4bec6  96c1b05238  98f9f806f3  c834ba1389  cab437adef  eefa8188e1
1d8db8a738  75ddc5b811  17dc0e1108  64ac6c9621  a7753ea781  12a6eb5b22  74b21258b6
2f9d46ce27  7b660c4e30  406799eb1c  ef0cbf20f4  7f572eb044  18023d7f32  4983b98005
8675e02cea  45fc3bf842  cf809aec47  cceb1acca8  e620c40a14  e1590bf68b  bad07943b5
603595559f  febcc25d1a  e3c0e34b33  3f5974b7f9  7ab3dc080b  0883beac30  f9102a3295
.github/workflows/build.yml (vendored): 127 changed lines
@@ -22,9 +22,63 @@ env:
   RELEASE_OUT: "./release-out"
 
 jobs:
-  build:
+  test:
     runs-on: ubuntu-latest
     steps:
+      -
+        name: Checkout
+        uses: actions/checkout@v3
+      -
+        name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+        with:
+          version: latest
+      -
+        name: Test
+        uses: docker/bake-action@v2
+        with:
+          targets: test
+          set: |
+            *.cache-from=type=gha,scope=test
+            *.cache-to=type=gha,scope=test
+      -
+        name: Upload coverage
+        uses: codecov/codecov-action@v3
+        with:
+          file: ./coverage/coverage.txt
+
+  prepare:
+    runs-on: ubuntu-latest
+    outputs:
+      matrix: ${{ steps.platforms.outputs.matrix }}
+    steps:
+      -
+        name: Checkout
+        uses: actions/checkout@v3
+      -
+        name: Create matrix
+        id: platforms
+        run: |
+          echo ::set-output name=matrix::$(docker buildx bake binaries-cross --print | jq -cr '.target."binaries-cross".platforms')
+      -
+        name: Show matrix
+        run: |
+          echo ${{ steps.platforms.outputs.matrix }}
+
+  binaries:
+    runs-on: ubuntu-latest
+    needs:
+      - prepare
+    strategy:
+      fail-fast: false
+      matrix:
+        platform: ${{ fromJson(needs.prepare.outputs.matrix) }}
+    steps:
+      -
+        name: Prepare
+        run: |
+          platform=${{ matrix.platform }}
+          echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
       -
         name: Checkout
         uses: actions/checkout@v3
@@ -37,24 +91,14 @@ jobs:
         with:
           version: latest
       -
-        name: Test
-        run: |
-          make test
-      -
-        name: Send to Codecov
-        uses: codecov/codecov-action@v3
-        with:
-          file: ./coverage/coverage.txt
-      -
-        name: Expose GitHub Runtime
-        uses: crazy-max/ghaction-github-runtime@906832f62b7baa936e3fbef72b029308af505ee7
-      -
-        name: Build binaries
-        run: |
-          make release
-        env:
-          CACHE_FROM: type=gha,scope=release
-          CACHE_TO: type=gha,scope=release
+        name: Build
+        uses: docker/bake-action@v2
+        with:
+          targets: release
+          set: |
+            *.platform=${{ matrix.platform }}
+            *.cache-from=type=gha,scope=binaries-${{ env.PLATFORM_PAIR }}
+            *.cache-to=type=gha,scope=binaries-${{ env.PLATFORM_PAIR }},mode=max
       -
         name: Upload artifacts
        uses: actions/upload-artifact@v3
@@ -62,6 +106,22 @@ jobs:
           name: buildx
           path: ${{ env.RELEASE_OUT }}/*
           if-no-files-found: error
+
+  bin-image:
+    runs-on: ubuntu-latest
+    if: ${{ github.event_name != 'pull_request' }}
+    steps:
+      -
+        name: Checkout
+        uses: actions/checkout@v3
+      -
+        name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+      -
+        name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+        with:
+          version: latest
       -
         name: Docker meta
         id: meta
@@ -90,6 +150,35 @@ jobs:
             ${{ steps.meta.outputs.bake-file }}
           targets: image-cross
           push: ${{ github.event_name != 'pull_request' }}
+          set: |
+            *.cache-from=type=gha,scope=bin-image
+            *.cache-to=type=gha,scope=bin-image,mode=max
+
+  release:
+    runs-on: ubuntu-latest
+    needs:
+      - binaries
+    steps:
+      -
+        name: Checkout
+        uses: actions/checkout@v3
+      -
+        name: Download binaries
+        uses: actions/download-artifact@v3
+        with:
+          name: buildx
+          path: ${{ env.RELEASE_OUT }}
+      -
+        name: Create checksums
+        run: ./hack/hash-files
+      -
+        name: List artifacts
+        run: |
+          tree -nh ${{ env.RELEASE_OUT }}
+      -
+        name: Check artifacts
+        run: |
+          find ${{ env.RELEASE_OUT }} -type f -exec file -e ascii -- {} +
       -
         name: GitHub Release
         if: startsWith(github.ref, 'refs/tags/v')
@@ -122,4 +211,4 @@ jobs:
         name: Build
         uses: docker/bake-action@v2
         with:
-          targets: binaries-cross
+          targets: binaries
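The `prepare` job's `Create matrix` step is what fans the `binaries` job out per platform: `docker buildx bake binaries-cross --print` dumps the resolved bake definition as JSON, and `jq` extracts the platform list, which `fromJson()` then turns into the job matrix. A rough sketch of what that pipeline yields (the platform values are illustrative, not taken from this diff):

```bash
# Print the resolved bake definition and pull out the platforms of the
# "binaries-cross" target as a compact JSON array, ready for fromJson().
docker buildx bake binaries-cross --print | jq -cr '.target."binaries-cross".platforms'
# => ["darwin/amd64","darwin/arm64","linux/amd64","windows/amd64"]
```

Note that the `::set-output` workflow command used here was later deprecated by GitHub in favor of appending to the `$GITHUB_OUTPUT` file.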
.github/workflows/docs-release.yml (filename inferred from the new workflow name):

@@ -1,8 +1,8 @@
-name: docs
+name: docs-release
 
 on:
   release:
-    types: [ published ]
+    types: [ released ]
 
 jobs:
   open-pr:
.github/workflows/docs-upstream.yml (vendored, new file): 118 lines
@@ -0,0 +1,118 @@
+# this workflow runs the remote validate bake target from docker/docker.github.io
+# to check if yaml reference docs and markdown files used in this repo are still valid
+# https://github.com/docker/docker.github.io/blob/98c7c9535063ae4cd2cd0a31478a21d16d2f07a3/docker-bake.hcl#L34-L36
+name: docs-upstream
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
+on:
+  push:
+    branches:
+      - 'master'
+      - 'v[0-9]*'
+    paths:
+      - '.github/workflows/docs-upstream.yml'
+      - 'docs/**'
+  pull_request:
+    paths:
+      - '.github/workflows/docs-upstream.yml'
+      - 'docs/**'
+
+jobs:
+  docs-yaml:
+    runs-on: ubuntu-latest
+    steps:
+      -
+        name: Checkout
+        uses: actions/checkout@v3
+      -
+        name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+        with:
+          version: latest
+      -
+        name: Build reference YAML docs
+        uses: docker/bake-action@v2
+        with:
+          targets: update-docs
+          set: |
+            *.output=/tmp/buildx-docs
+            *.cache-from=type=gha,scope=docs-yaml
+            *.cache-to=type=gha,scope=docs-yaml,mode=max
+        env:
+          DOCS_FORMATS: yaml
+      -
+        name: Upload reference YAML docs
+        uses: actions/upload-artifact@v3
+        with:
+          name: docs-yaml
+          path: /tmp/buildx-docs/out/reference
+          retention-days: 1
+
+  validate:
+    runs-on: ubuntu-latest
+    needs:
+      - docs-yaml
+    steps:
+      -
+        name: Checkout
+        uses: actions/checkout@v3
+        with:
+          repository: docker/docker.github.io
+      -
+        name: Install js-yaml
+        run: npm install js-yaml
+      -
+        # use the actual buildx ref that triggers this workflow, so we make
+        # sure pages fetched by docs repo are still valid
+        # https://github.com/docker/docker.github.io/blob/98c7c9535063ae4cd2cd0a31478a21d16d2f07a3/_config.yml#L164-L173
+        name: Set correct ref to fetch remote resources
+        uses: actions/github-script@v6
+        with:
+          script: |
+            const fs = require('fs');
+            const yaml = require('js-yaml');
+
+            const configFile = '_config.yml'
+            const config = yaml.load(fs.readFileSync(configFile, 'utf8'));
+            for (const remote of config['fetch-remote']) {
+              if (remote['repo'] != 'https://github.com/docker/buildx') {
+                continue;
+              }
+              remote['ref'] = "${{ github.ref }}";
+            }
+
+            try {
+              fs.writeFileSync(configFile, yaml.dump(config), 'utf8')
+            } catch (err) {
+              console.error(err.message)
+              process.exit(1)
+            }
+      -
+        name: Prepare
+        run: |
+          # print docs jekyll config updated in previous step
+          yq _config.yml
+          # cleanup reference yaml docs and js-yaml module
+          rm -rf ./_data/buildx/* ./node_modules
+      -
+        name: Download built reference YAML docs
+        uses: actions/download-artifact@v3
+        with:
+          name: docs-yaml
+          path: ./_data/buildx/
+      -
+        name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+        with:
+          version: latest
+      -
+        name: Validate
+        uses: docker/bake-action@v2
+        with:
+          targets: validate
+          set: |
+            *.cache-from=type=gha,scope=docs-upstream
+            *.cache-to=type=gha,scope=docs-upstream,mode=max
.github/workflows/validate.yml (vendored): 20 changed lines
@@ -40,23 +40,3 @@ jobs:
         name: Run
         run: |
           make ${{ matrix.target }}
-
-  validate-docs-yaml:
-    runs-on: ubuntu-latest
-    needs:
-      - validate
-    steps:
-      -
-        name: Checkout
-        uses: actions/checkout@v3
-      -
-        name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
-        with:
-          version: latest
-      -
-        name: Run
-        run: |
-          make docs
-        env:
-          FORMATS: yaml
MAINTAINERS (filename inferred from content):

@@ -152,6 +152,7 @@ made through a pull request.
 	people = [
 		"akihirosuda",
 		"crazy-max",
+		"jedevc",
 		"tiborvass",
 		"tonistiigi",
 	]
@@ -188,6 +189,11 @@ made through a pull request.
 	Email = "contact@crazymax.dev"
 	GitHub = "crazy-max"
 
+	[people.jedevc]
+	Name = "Justin Chadwell"
+	Email = "me@jedevc.com"
+	GitHub = "jedevc"
+
 	[people.thajeztah]
 	Name = "Sebastiaan van Stijn"
 	Email = "github@gone.nl"
bake/bake.go: 88 changed lines
@@ -134,7 +134,7 @@ func ReadTargets(ctx context.Context, files []File, targets, overrides []string,
 				gt = append(gt, target)
 			}
 		}
-		g = []*Group{{Targets: dedupString(gt)}}
+		g = []*Group{{Targets: dedupSlice(gt)}}
 	}
 
 	for name, t := range m {
@@ -146,7 +146,7 @@ func ReadTargets(ctx context.Context, files []File, targets, overrides []string,
 	return m, g, nil
 }
 
-func dedupString(s []string) []string {
+func dedupSlice(s []string) []string {
 	if len(s) == 0 {
 		return s
 	}
@@ -161,21 +161,54 @@ func dedupString(s []string) []string {
 	return res
 }
 
+func dedupMap(ms ...map[string]string) map[string]string {
+	if len(ms) == 0 {
+		return nil
+	}
+	res := map[string]string{}
+	for _, m := range ms {
+		if len(m) == 0 {
+			continue
+		}
+		for k, v := range m {
+			if _, ok := res[k]; !ok {
+				res[k] = v
+			}
+		}
+	}
+	return res
+}
+
+func sliceToMap(env []string) (res map[string]string) {
+	res = make(map[string]string)
+	for _, s := range env {
+		kv := strings.SplitN(s, "=", 2)
+		key := kv[0]
+		switch {
+		case len(kv) == 1:
+			res[key] = ""
+		default:
+			res[key] = kv[1]
+		}
+	}
+	return
+}
+
 func ParseFiles(files []File, defaults map[string]string) (_ *Config, err error) {
 	defer func() {
 		err = formatHCLError(err, files)
 	}()
 
 	var c Config
-	var fs []*hcl.File
+	var composeFiles []File
+	var hclFiles []*hcl.File
 	for _, f := range files {
-		cfg, isCompose, composeErr := ParseComposeFile(f.Data, f.Name)
+		isCompose, composeErr := validateComposeFile(f.Data, f.Name)
 		if isCompose {
 			if composeErr != nil {
 				return nil, composeErr
 			}
-			c = mergeConfig(c, *cfg)
-			c = dedupeConfig(c)
+			composeFiles = append(composeFiles, f)
 		}
 		if !isCompose {
 			hf, isHCL, err := ParseHCLFile(f.Data, f.Name)
@@ -183,7 +216,7 @@ func ParseFiles(files []File, defaults map[string]string) (_ *Config, err error)
 			if err != nil {
 				return nil, err
 			}
-			fs = append(fs, hf)
+			hclFiles = append(hclFiles, hf)
 		} else if composeErr != nil {
 			return nil, fmt.Errorf("failed to parse %s: parsing yaml: %v, parsing hcl: %w", f.Name, composeErr, err)
 		} else {
@@ -192,8 +225,17 @@ func ParseFiles(files []File, defaults map[string]string) (_ *Config, err error)
 		}
 	}
 
-	if len(fs) > 0 {
-		if err := hclparser.Parse(hcl.MergeFiles(fs), hclparser.Opt{
+	if len(composeFiles) > 0 {
+		cfg, cmperr := ParseComposeFiles(composeFiles)
+		if cmperr != nil {
+			return nil, errors.Wrap(cmperr, "failed to parse compose file")
+		}
+		c = mergeConfig(c, *cfg)
+		c = dedupeConfig(c)
+	}
+
+	if len(hclFiles) > 0 {
+		if err := hclparser.Parse(hcl.MergeFiles(hclFiles), hclparser.Opt{
 			LookupVar:     os.LookupEnv,
 			Vars:          defaults,
 			ValidateLabel: validateTargetName,
@@ -201,18 +243,25 @@ func ParseFiles(files []File, defaults map[string]string) (_ *Config, err error)
 			return nil, err
 		}
 	}
 
 	return &c, nil
 }
 
 func dedupeConfig(c Config) Config {
 	c2 := c
+	c2.Groups = make([]*Group, 0, len(c2.Groups))
+	for _, g := range c.Groups {
+		g1 := *g
+		g1.Targets = dedupSlice(g1.Targets)
+		c2.Groups = append(c2.Groups, &g1)
+	}
 	c2.Targets = make([]*Target, 0, len(c2.Targets))
-	m := map[string]*Target{}
+	mt := map[string]*Target{}
 	for _, t := range c.Targets {
-		if t2, ok := m[t.Name]; ok {
+		if t2, ok := mt[t.Name]; ok {
 			t2.Merge(t)
 		} else {
-			m[t.Name] = t
+			mt[t.Name] = t
 			c2.Targets = append(c2.Targets, t)
 		}
 	}
@@ -223,19 +272,6 @@ func ParseFile(dt []byte, fn string) (*Config, error) {
 	return ParseFiles([]File{{Data: dt, Name: fn}}, nil)
 }
 
-func ParseComposeFile(dt []byte, fn string) (*Config, bool, error) {
-	fnl := strings.ToLower(fn)
-	if strings.HasSuffix(fnl, ".yml") || strings.HasSuffix(fnl, ".yaml") {
-		cfg, err := ParseCompose(dt)
-		return cfg, true, err
-	}
-	if strings.HasSuffix(fnl, ".json") || strings.HasSuffix(fnl, ".hcl") {
-		return nil, false, nil
-	}
-	cfg, err := ParseCompose(dt)
-	return cfg, err == nil, err
-}
-
 type Config struct {
 	Groups  []*Group  `json:"group" hcl:"group,block"`
 	Targets []*Target `json:"target" hcl:"target,block"`
@@ -418,7 +454,7 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
 }
 
 func (c Config) ResolveGroup(name string) []string {
-	return dedupString(c.group(name, map[string][]string{}))
+	return dedupSlice(c.group(name, map[string][]string{}))
 }
 
 func (c Config) group(name string, visited map[string][]string) []string {
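The two helpers added above carry the merge semantics used elsewhere in this changeset: `dedupMap` merges maps with first-writer-wins precedence (used for the new x-bake `contexts` field), and `sliceToMap` converts an environment slice while keeping keys that have no `=value` part as empty strings, unlike the `envMap` it replaces in bake/compose.go, which silently dropped them. A minimal standalone sketch of that behavior (the `main` wrapper is illustrative, not part of the diff):

```go
package main

import (
	"fmt"
	"strings"
)

// dedupMap: the first map to set a key wins; later maps cannot override it.
func dedupMap(ms ...map[string]string) map[string]string {
	res := map[string]string{}
	for _, m := range ms {
		for k, v := range m {
			if _, ok := res[k]; !ok {
				res[k] = v
			}
		}
	}
	return res
}

// sliceToMap: "KEY=value" splits on the first '='; a bare "KEY" is kept
// with an empty value instead of being dropped.
func sliceToMap(env []string) map[string]string {
	res := make(map[string]string)
	for _, s := range env {
		kv := strings.SplitN(s, "=", 2)
		if len(kv) == 1 {
			res[kv[0]] = ""
		} else {
			res[kv[0]] = kv[1]
		}
	}
	return res
}

func main() {
	fmt.Println(dedupMap(
		map[string]string{"alpine": "docker-image://alpine:3.13"},
		map[string]string{"alpine": "docker-image://alpine:3.16", "app": "./src"},
	)) // map[alpine:docker-image://alpine:3.13 app:./src]
	fmt.Println(sliceToMap([]string{"FOO=bar", "EMPTY"}))
	// map[EMPTY: FOO:bar]
}
```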
bake/bake_test.go (filename inferred from content):

@@ -530,7 +530,8 @@ func TestReadEmptyTargets(t *testing.T) {
 		Name: "docker-compose.yml",
 		Data: []byte(`
 services:
-  app2: {}
+  app2:
+    build: {}
 `),
 	}
 
@@ -1226,3 +1227,35 @@ target "f" {
 		})
 	}
 }
+
+func TestUnknownExt(t *testing.T) {
+	dt := []byte(`
+target "app" {
+	context = "dir"
+	args = {
+		v1 = "foo"
+	}
+}
+`)
+	dt2 := []byte(`
+services:
+  app:
+    build:
+      dockerfile: Dockerfile-alternate
+      args:
+        v2: "bar"
+`)
+
+	c, err := ParseFiles([]File{
+		{Data: dt, Name: "c1.foo"},
+		{Data: dt2, Name: "c2.bar"},
+	}, nil)
+	require.NoError(t, err)
+
+	require.Equal(t, 1, len(c.Targets))
+	require.Equal(t, "app", c.Targets[0].Name)
+	require.Equal(t, "foo", c.Targets[0].Args["v1"])
+	require.Equal(t, "bar", c.Targets[0].Args["v2"])
+	require.Equal(t, "dir", *c.Targets[0].Context)
+	require.Equal(t, "Dockerfile-alternate", *c.Targets[0].Dockerfile)
+}
bake/compose.go: 180 changed lines
@@ -1,51 +1,40 @@
 package bake
 
 import (
-	"fmt"
 	"os"
+	"path/filepath"
 	"strings"
 
+	"github.com/compose-spec/compose-go/dotenv"
 	"github.com/compose-spec/compose-go/loader"
 	compose "github.com/compose-spec/compose-go/types"
 	"github.com/pkg/errors"
 	"gopkg.in/yaml.v3"
 )
 
-// errComposeInvalid is returned when a compose file is invalid
-var errComposeInvalid = errors.New("invalid compose file")
-
-func parseCompose(dt []byte) (*compose.Project, error) {
-	return loader.Load(compose.ConfigDetails{
-		ConfigFiles: []compose.ConfigFile{
-			{
-				Content: dt,
-			},
-		},
-		Environment: envMap(os.Environ()),
-	}, func(options *loader.Options) {
-		options.SkipNormalization = true
-		options.SkipConsistencyCheck = true
-	})
-}
-
-func envMap(env []string) map[string]string {
-	result := make(map[string]string, len(env))
-	for _, s := range env {
-		kv := strings.SplitN(s, "=", 2)
-		if len(kv) != 2 {
-			continue
-		}
-		result[kv[0]] = kv[1]
-	}
-	return result
-}
-
-func ParseCompose(dt []byte) (*Config, error) {
-	cfg, err := parseCompose(dt)
+func ParseComposeFiles(fs []File) (*Config, error) {
+	envs, err := composeEnv()
 	if err != nil {
 		return nil, err
 	}
-	if err = composeValidate(cfg); err != nil {
+	var cfgs []compose.ConfigFile
+	for _, f := range fs {
+		cfgs = append(cfgs, compose.ConfigFile{
+			Filename: f.Name,
+			Content:  f.Data,
+		})
+	}
+	return ParseCompose(cfgs, envs)
+}
+
+func ParseCompose(cfgs []compose.ConfigFile, envs map[string]string) (*Config, error) {
+	cfg, err := loader.Load(compose.ConfigDetails{
+		ConfigFiles: cfgs,
+		Environment: envs,
+	}, func(options *loader.Options) {
+		options.SkipNormalization = true
+	})
+	if err != nil {
 		return nil, err
 	}
@@ -58,7 +47,7 @@ func ParseCompose(dt []byte) (*Config, error) {
 
 	for _, s := range cfg.Services {
 		if s.Build == nil {
-			s.Build = &compose.BuildConfig{}
+			continue
 		}
 
 		targetName := sanitizeTargetName(s.Name)
@@ -124,6 +113,86 @@ func ParseCompose(dt []byte) (*Config, error) {
 	return &c, nil
 }
 
+func validateComposeFile(dt []byte, fn string) (bool, error) {
+	envs, err := composeEnv()
+	if err != nil {
+		return true, err
+	}
+	fnl := strings.ToLower(fn)
+	if strings.HasSuffix(fnl, ".yml") || strings.HasSuffix(fnl, ".yaml") {
+		return true, validateCompose(dt, envs)
+	}
+	if strings.HasSuffix(fnl, ".json") || strings.HasSuffix(fnl, ".hcl") {
+		return false, nil
+	}
+	err = validateCompose(dt, envs)
+	return err == nil, err
+}
+
+func validateCompose(dt []byte, envs map[string]string) error {
+	_, err := loader.Load(compose.ConfigDetails{
+		ConfigFiles: []compose.ConfigFile{
+			{
+				Content: dt,
+			},
+		},
+		Environment: envs,
+	}, func(options *loader.Options) {
+		options.SkipNormalization = true
+		// consistency is checked later in ParseCompose to ensure multiple
+		// compose files can be merged together
+		options.SkipConsistencyCheck = true
+	})
+	return err
+}
+
+func composeEnv() (map[string]string, error) {
+	envs := sliceToMap(os.Environ())
+	if wd, err := os.Getwd(); err == nil {
+		envs, err = loadDotEnv(envs, wd)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return envs, nil
+}
+
+func loadDotEnv(curenv map[string]string, workingDir string) (map[string]string, error) {
+	if curenv == nil {
+		curenv = make(map[string]string)
+	}
+
+	ef, err := filepath.Abs(filepath.Join(workingDir, ".env"))
+	if err != nil {
+		return nil, err
+	}
+
+	if _, err = os.Stat(ef); os.IsNotExist(err) {
+		return curenv, nil
+	} else if err != nil {
+		return nil, err
+	}
+
+	dt, err := os.ReadFile(ef)
+	if err != nil {
+		return nil, err
+	}
+
+	envs, err := dotenv.UnmarshalBytes(dt)
+	if err != nil {
+		return nil, err
+	}
+
+	for k, v := range envs {
+		if _, set := curenv[k]; set {
+			continue
+		}
+		curenv[k] = v
+	}
+
+	return curenv, nil
+}
+
 func flatten(in compose.MappingWithEquals) compose.Mapping {
 	if len(in) == 0 {
 		return nil
@@ -151,10 +220,12 @@ type xbake struct {
 	Pull          *bool       `yaml:"pull,omitempty"`
 	NoCache       *bool       `yaml:"no-cache,omitempty"`
 	NoCacheFilter stringArray `yaml:"no-cache-filter,omitempty"`
+	Contexts      stringMap   `yaml:"contexts,omitempty"`
 	// don't forget to update documentation if you add a new field:
 	// docs/guides/bake/compose-file.md#extension-field-with-x-bake
 }
 
+type stringMap map[string]string
 type stringArray []string
 
 func (sa *stringArray) UnmarshalYAML(unmarshal func(interface{}) error) error {
@@ -188,25 +259,25 @@ func (t *Target) composeExtTarget(exts map[string]interface{}) error {
 	}
 
 	if len(xb.Tags) > 0 {
-		t.Tags = dedupString(append(t.Tags, xb.Tags...))
+		t.Tags = dedupSlice(append(t.Tags, xb.Tags...))
 	}
 	if len(xb.CacheFrom) > 0 {
-		t.CacheFrom = dedupString(append(t.CacheFrom, xb.CacheFrom...))
+		t.CacheFrom = dedupSlice(append(t.CacheFrom, xb.CacheFrom...))
 	}
 	if len(xb.CacheTo) > 0 {
-		t.CacheTo = dedupString(append(t.CacheTo, xb.CacheTo...))
+		t.CacheTo = dedupSlice(append(t.CacheTo, xb.CacheTo...))
 	}
 	if len(xb.Secrets) > 0 {
-		t.Secrets = dedupString(append(t.Secrets, xb.Secrets...))
+		t.Secrets = dedupSlice(append(t.Secrets, xb.Secrets...))
 	}
 	if len(xb.SSH) > 0 {
-		t.SSH = dedupString(append(t.SSH, xb.SSH...))
+		t.SSH = dedupSlice(append(t.SSH, xb.SSH...))
 	}
 	if len(xb.Platforms) > 0 {
-		t.Platforms = dedupString(append(t.Platforms, xb.Platforms...))
+		t.Platforms = dedupSlice(append(t.Platforms, xb.Platforms...))
 	}
 	if len(xb.Outputs) > 0 {
-		t.Outputs = dedupString(append(t.Outputs, xb.Outputs...))
+		t.Outputs = dedupSlice(append(t.Outputs, xb.Outputs...))
 	}
 	if xb.Pull != nil {
 		t.Pull = xb.Pull
@@ -215,34 +286,15 @@ func (t *Target) composeExtTarget(exts map[string]interface{}) error {
 		t.NoCache = xb.NoCache
 	}
 	if len(xb.NoCacheFilter) > 0 {
-		t.NoCacheFilter = dedupString(append(t.NoCacheFilter, xb.NoCacheFilter...))
+		t.NoCacheFilter = dedupSlice(append(t.NoCacheFilter, xb.NoCacheFilter...))
+	}
+	if len(xb.Contexts) > 0 {
+		t.Contexts = dedupMap(t.Contexts, xb.Contexts)
 	}
 
 	return nil
 }
 
-// composeValidate validates a compose file
-func composeValidate(project *compose.Project) error {
-	for _, s := range project.Services {
-		if s.Build != nil {
-			for _, secret := range s.Build.Secrets {
-				if _, ok := project.Secrets[secret.Source]; !ok {
-					return errors.Wrap(errComposeInvalid, fmt.Sprintf("service %q refers to undefined build secret %s", sanitizeTargetName(s.Name), secret.Source))
-				}
-			}
-		}
-	}
-	for name, secret := range project.Secrets {
-		if secret.External.External {
-			continue
-		}
-		if secret.File == "" && secret.Environment == "" {
-			return errors.Wrap(errComposeInvalid, fmt.Sprintf("secret %q must declare either `file` or `environment`", name))
-		}
-	}
-	return nil
-}
-
 // composeToBuildkitSecret converts secret from compose format to buildkit's
 // csv format.
 func composeToBuildkitSecret(inp compose.ServiceSecretConfig, psecret compose.SecretConfig) (string, error) {
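`composeEnv` gives the process environment precedence over the working directory's `.env` file: `loadDotEnv` copies a `.env` entry into the map only when the key is not already set. A small sketch of that precedence rule (values are made up for illustration):

```go
package main

import "fmt"

// mergeDotEnv mirrors the final loop of loadDotEnv above: keys already
// present in curenv (the process environment) are never overwritten.
func mergeDotEnv(curenv, dotenv map[string]string) map[string]string {
	for k, v := range dotenv {
		if _, set := curenv[k]; set {
			continue
		}
		curenv[k] = v
	}
	return curenv
}

func main() {
	procEnv := map[string]string{"FOO": "from-shell"}
	dotEnv := map[string]string{"FOO": "from-dotenv", "BAR": "only-in-dotenv"}
	fmt.Println(mergeDotEnv(procEnv, dotEnv))
	// map[BAR:only-in-dotenv FOO:from-shell]
}
```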
bake/compose_test.go (filename inferred from content):

@@ -2,9 +2,12 @@ package bake
 
 import (
 	"os"
+	"path/filepath"
 	"sort"
 	"testing"
 
+	compose "github.com/compose-spec/compose-go/types"
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
@@ -37,11 +40,11 @@ secrets:
     file: /root/.aws/credentials
 `)
 
-	c, err := ParseCompose(dt)
+	c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
 	require.NoError(t, err)
 
 	require.Equal(t, 1, len(c.Groups))
-	require.Equal(t, c.Groups[0].Name, "default")
+	require.Equal(t, "default", c.Groups[0].Name)
 	sort.Strings(c.Groups[0].Targets)
 	require.Equal(t, []string{"db", "webapp"}, c.Groups[0].Targets)
 
@@ -58,8 +61,8 @@ secrets:
 	require.Equal(t, "Dockerfile-alternate", *c.Targets[1].Dockerfile)
 	require.Equal(t, 1, len(c.Targets[1].Args))
 	require.Equal(t, "123", c.Targets[1].Args["buildno"])
-	require.Equal(t, c.Targets[1].CacheFrom, []string{"type=local,src=path/to/cache"})
-	require.Equal(t, c.Targets[1].CacheTo, []string{"type=local,dest=path/to/cache"})
+	require.Equal(t, []string{"type=local,src=path/to/cache"}, c.Targets[1].CacheFrom)
+	require.Equal(t, []string{"type=local,dest=path/to/cache"}, c.Targets[1].CacheTo)
 	require.Equal(t, "none", *c.Targets[1].NetworkMode)
 	require.Equal(t, []string{
 		"id=token,env=ENV_TOKEN",
@@ -75,9 +78,10 @@ services:
   webapp:
     build: ./db
 `)
-	c, err := ParseCompose(dt)
+	c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
 	require.NoError(t, err)
 	require.Equal(t, 1, len(c.Groups))
+	require.Equal(t, 1, len(c.Targets))
 }
 
 func TestParseComposeTarget(t *testing.T) {
@@ -93,7 +97,7 @@ services:
       target: webapp
 `)
 
-	c, err := ParseCompose(dt)
+	c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
 	require.NoError(t, err)
 
 	require.Equal(t, 2, len(c.Targets))
@@ -118,15 +122,15 @@ services:
       target: webapp
 `)
 
-	c, err := ParseCompose(dt)
+	c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
 	require.NoError(t, err)
 	require.Equal(t, 2, len(c.Targets))
 	sort.Slice(c.Targets, func(i, j int) bool {
 		return c.Targets[i].Name < c.Targets[j].Name
 	})
-	require.Equal(t, c.Targets[0].Name, "db")
+	require.Equal(t, "db", c.Targets[0].Name)
 	require.Equal(t, "db", *c.Targets[0].Target)
-	require.Equal(t, c.Targets[1].Name, "webapp")
+	require.Equal(t, "webapp", c.Targets[1].Name)
 	require.Equal(t, "webapp", *c.Targets[1].Target)
 }
 
@@ -152,11 +156,11 @@ services:
 	os.Setenv("ZZZ_BAR", "zzz_foo")
 	defer os.Unsetenv("ZZZ_BAR")
 
-	c, err := ParseCompose(dt)
+	c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, sliceToMap(os.Environ()))
 	require.NoError(t, err)
-	require.Equal(t, c.Targets[0].Args["FOO"], "bar")
-	require.Equal(t, c.Targets[0].Args["BAR"], "zzz_foo")
-	require.Equal(t, c.Targets[0].Args["BRB"], "FOO")
+	require.Equal(t, "bar", c.Targets[0].Args["FOO"])
+	require.Equal(t, "zzz_foo", c.Targets[0].Args["BAR"])
+	require.Equal(t, "FOO", c.Targets[0].Args["BRB"])
 }
 
 func TestInconsistentComposeFile(t *testing.T) {
@@ -166,8 +170,8 @@ services:
     entrypoint: echo 1
 `)
 
-	_, err := ParseCompose(dt)
-	require.NoError(t, err)
+	_, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
+	require.Error(t, err)
 }
 
 func TestAdvancedNetwork(t *testing.T) {
@@ -191,7 +195,7 @@ networks:
         gateway: 10.5.0.254
 `)
 
-	_, err := ParseCompose(dt)
+	_, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
 	require.NoError(t, err)
 }
 
@@ -208,9 +212,9 @@ services:
       - bar
 `)
 
-	c, err := ParseCompose(dt)
+	c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
 	require.NoError(t, err)
-	require.Equal(t, c.Targets[0].Tags, []string{"foo", "bar"})
+	require.Equal(t, []string{"foo", "bar"}, c.Targets[0].Tags)
 }
 
 func TestDependsOnList(t *testing.T) {
@@ -245,7 +249,7 @@ networks:
     name: test-net
 `)
 
-	_, err := ParseCompose(dt)
+	_, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
 	require.NoError(t, err)
 }
 
@@ -267,6 +271,8 @@ services:
         CT_ECR: foo
         CT_TAG: bar
       x-bake:
+        contexts:
+          alpine: docker-image://alpine:3.13
         tags:
           - ct-addon:foo
           - ct-addon:alp
@@ -296,24 +302,25 @@ services:
         no-cache: true
 `)
 
-	c, err := ParseCompose(dt)
+	c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
 	require.NoError(t, err)
 	require.Equal(t, 2, len(c.Targets))
 	sort.Slice(c.Targets, func(i, j int) bool {
 		return c.Targets[i].Name < c.Targets[j].Name
 	})
-	require.Equal(t, c.Targets[0].Args, map[string]string{"CT_ECR": "foo", "CT_TAG": "bar"})
-	require.Equal(t, c.Targets[0].Tags, []string{"ct-addon:baz", "ct-addon:foo", "ct-addon:alp"})
-	require.Equal(t, c.Targets[0].Platforms, []string{"linux/amd64", "linux/arm64"})
-	require.Equal(t, c.Targets[0].CacheFrom, []string{"user/app:cache", "type=local,src=path/to/cache"})
-	require.Equal(t, c.Targets[0].CacheTo, []string{"user/app:cache", "type=local,dest=path/to/cache"})
-	require.Equal(t, c.Targets[0].Pull, newBool(true))
-	require.Equal(t, c.Targets[1].Tags, []string{"ct-fake-aws:bar"})
-	require.Equal(t, c.Targets[1].Secrets, []string{"id=mysecret,src=/local/secret", "id=mysecret2,src=/local/secret2"})
-	require.Equal(t, c.Targets[1].SSH, []string{"default"})
-	require.Equal(t, c.Targets[1].Platforms, []string{"linux/arm64"})
-	require.Equal(t, c.Targets[1].Outputs, []string{"type=docker"})
-	require.Equal(t, c.Targets[1].NoCache, newBool(true))
+	require.Equal(t, map[string]string{"CT_ECR": "foo", "CT_TAG": "bar"}, c.Targets[0].Args)
+	require.Equal(t, []string{"ct-addon:baz", "ct-addon:foo", "ct-addon:alp"}, c.Targets[0].Tags)
+	require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[0].Platforms)
+	require.Equal(t, []string{"user/app:cache", "type=local,src=path/to/cache"}, c.Targets[0].CacheFrom)
+	require.Equal(t, []string{"user/app:cache", "type=local,dest=path/to/cache"}, c.Targets[0].CacheTo)
+	require.Equal(t, newBool(true), c.Targets[0].Pull)
+	require.Equal(t, map[string]string{"alpine": "docker-image://alpine:3.13"}, c.Targets[0].Contexts)
+	require.Equal(t, []string{"ct-fake-aws:bar"}, c.Targets[1].Tags)
+	require.Equal(t, []string{"id=mysecret,src=/local/secret", "id=mysecret2,src=/local/secret2"}, c.Targets[1].Secrets)
+	require.Equal(t, []string{"default"}, c.Targets[1].SSH)
+	require.Equal(t, []string{"linux/arm64"}, c.Targets[1].Platforms)
+	require.Equal(t, []string{"type=docker"}, c.Targets[1].Outputs)
+	require.Equal(t, newBool(true), c.Targets[1].NoCache)
 }
 
 func TestComposeExtDedup(t *testing.T) {
@@ -339,12 +346,12 @@ services:
           - type=local,dest=path/to/cache
 `)
 
-	c, err := ParseCompose(dt)
+	c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
 	require.NoError(t, err)
 	require.Equal(t, 1, len(c.Targets))
-	require.Equal(t, c.Targets[0].Tags, []string{"ct-addon:foo", "ct-addon:baz"})
-	require.Equal(t, c.Targets[0].CacheFrom, []string{"user/app:cache", "type=local,src=path/to/cache"})
-	require.Equal(t, c.Targets[0].CacheTo, []string{"user/app:cache", "type=local,dest=path/to/cache"})
+	require.Equal(t, []string{"ct-addon:foo", "ct-addon:baz"}, c.Targets[0].Tags)
+	require.Equal(t, []string{"user/app:cache", "type=local,src=path/to/cache"}, c.Targets[0].CacheFrom)
+	require.Equal(t, []string{"user/app:cache", "type=local,dest=path/to/cache"}, c.Targets[0].CacheTo)
 }
 
 func TestEnv(t *testing.T) {
@@ -372,9 +379,33 @@ services:
         - ` + envf.Name() + `
 `)
 
-	c, err := ParseCompose(dt)
+	c, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
 	require.NoError(t, err)
-	require.Equal(t, c.Targets[0].Args, map[string]string{"CT_ECR": "foo", "FOO": "bsdf -csdf", "NODE_ENV": "test"})
+	require.Equal(t, map[string]string{"CT_ECR": "foo", "FOO": "bsdf -csdf", "NODE_ENV": "test"}, c.Targets[0].Args)
+}
+
+func TestDotEnv(t *testing.T) {
+	tmpdir := t.TempDir()
+
+	err := os.WriteFile(filepath.Join(tmpdir, ".env"), []byte("FOO=bar"), 0644)
+	require.NoError(t, err)
+
+	var dt = []byte(`
+services:
+  scratch:
+    build:
+      context: .
+      args:
+        FOO:
+`)
+
+	chdir(t, tmpdir)
+	c, err := ParseComposeFiles([]File{{
+		Name: "docker-compose.yml",
+		Data: dt,
+	}})
+	require.NoError(t, err)
+	require.Equal(t, map[string]string{"FOO": "bar"}, c.Targets[0].Args)
 }
 
 func TestPorts(t *testing.T) {
@@ -394,7 +425,7 @@ services:
         published: "3306"
         protocol: tcp
 `)
-	_, err := ParseCompose(dt)
+	_, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
 	require.NoError(t, err)
 }
 
@@ -440,12 +471,12 @@ func TestServiceName(t *testing.T) {
 	for _, tt := range cases {
 		tt := tt
 		t.Run(tt.svc, func(t *testing.T) {
-			_, err := ParseCompose([]byte(`
+			_, err := ParseCompose([]compose.ConfigFile{{Content: []byte(`
 services:
   ` + tt.svc + `:
     build:
       context: .
-`))
+`)}}, nil)
 			if tt.wantErr {
 				require.Error(t, err)
 			} else {
@@ -511,7 +542,7 @@ services:
 	for _, tt := range cases {
 		tt := tt
 		t.Run(tt.name, func(t *testing.T) {
-			_, err := ParseCompose(tt.dt)
+			_, err := ParseCompose([]compose.ConfigFile{{Content: tt.dt}}, nil)
 			if tt.wantErr {
 				require.Error(t, err)
 			} else {
@@ -520,3 +551,98 @@ services:
 		})
 	}
 }
+
+func TestValidateComposeFile(t *testing.T) {
+	cases := []struct {
+		name      string
+		fn        string
+		dt        []byte
+		isCompose bool
+		wantErr   bool
+	}{
+		{
+			name: "empty service",
+			fn:   "docker-compose.yml",
+			dt: []byte(`
+services:
+  foo:
+`),
+			isCompose: true,
+			wantErr:   true,
+		},
+		{
+			name: "build",
+			fn:   "docker-compose.yml",
+			dt: []byte(`
+services:
+  foo:
+    build: .
+`),
+			isCompose: true,
+			wantErr:   false,
+		},
+		{
+			name: "image",
+			fn:   "docker-compose.yml",
+			dt: []byte(`
+services:
+  simple:
+    image: nginx
+`),
+			isCompose: true,
+			wantErr:   false,
+		},
+		{
+			name: "unknown ext",
+			fn:   "docker-compose.foo",
+			dt: []byte(`
+services:
+  simple:
+    image: nginx
+`),
+			isCompose: true,
+			wantErr:   false,
+		},
+		{
+			name: "hcl",
+			fn:   "docker-bake.hcl",
+			dt: []byte(`
+target "default" {
+  dockerfile = "test"
+}
+`),
+			isCompose: false,
+			wantErr:   false,
+		},
+	}
+	for _, tt := range cases {
+		tt := tt
+		t.Run(tt.name, func(t *testing.T) {
+			isCompose, err := validateComposeFile(tt.dt, tt.fn)
+			assert.Equal(t, tt.isCompose, isCompose)
+			if tt.wantErr {
+				require.Error(t, err)
+			} else {
+				require.NoError(t, err)
+			}
+		})
+	}
+}
+
+// chdir changes the current working directory to the named directory,
+// and then restore the original working directory at the end of the test.
+func chdir(t *testing.T, dir string) {
+	olddir, err := os.Getwd()
+	if err != nil {
+		t.Fatalf("chdir: %v", err)
+	}
+	if err := os.Chdir(dir); err != nil {
+		t.Fatalf("chdir %s: %v", dir, err)
+	}
+	t.Cleanup(func() {
+		if err := os.Chdir(olddir); err != nil {
+			t.Errorf("chdir to original working directory %s: %v", olddir, err)
			os.Exit(1)
+		}
+	})
+}
build/build.go: 107 changed lines
@@ -39,6 +39,7 @@ import (
 	gateway "github.com/moby/buildkit/frontend/gateway/client"
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/session/upload/uploadprovider"
+	"github.com/moby/buildkit/solver/errdefs"
 	"github.com/moby/buildkit/solver/pb"
 	"github.com/moby/buildkit/util/apicaps"
 	"github.com/moby/buildkit/util/entitlements"
@@ -57,6 +58,10 @@ var (
 	errDockerfileConflict = errors.New("ambiguous Dockerfile source: both stdin and flag correspond to Dockerfiles")
 )
 
+const (
+	printFallbackImage = "docker/dockerfile-upstream:1.4-outline@sha256:627443ff4e2d0f635d429cfc1da5388bcd5a70949c38adcd3cd7c4e5df67c73c"
+)
+
 type Options struct {
 	Inputs Inputs
 
@@ -81,7 +86,13 @@ type Options struct {
 	Ulimits *opts.UlimitOpt
 
 	// Linked marks this target as exclusively linked (not requested by the user).
 	Linked bool
+	PrintFunc *PrintFunc
+}
+
+type PrintFunc struct {
+	Name   string
+	Format string
 }
 
 type Inputs struct {
@@ -643,7 +654,16 @@ func Invoke(ctx context.Context, cfg ContainerConfig) error {
 		return errors.Errorf("result must be provided")
 	}
 	c, res := cfg.ResultCtx.Client, cfg.ResultCtx.Res
-	_, err := c.Build(ctx, client.SolveOpt{}, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+
+	mainCtx := ctx
+
+	_, err := c.Build(context.TODO(), client.SolveOpt{}, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+		ctx, cancel := context.WithCancel(ctx)
+		go func() {
+			<-mainCtx.Done()
+			cancel()
+		}()
+
 		if res.Ref == nil {
 			return nil, errors.Errorf("no reference is registered")
 		}
@@ -673,7 +693,8 @@ func Invoke(ctx context.Context, cfg ContainerConfig) error {
 		if err != nil {
 			return nil, err
 		}
-		defer ctr.Release(ctx)
+		defer ctr.Release(context.TODO())
+
 		proc, err := ctr.Start(ctx, gateway.StartRequest{
 			Args: cfg.Args,
 			Env:  cfg.Env,
@@ -709,10 +730,10 @@ func Invoke(ctx context.Context, cfg ContainerConfig) error {
 }
 
 func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, docker DockerAPI, configDir string, w progress.Writer) (resp map[string]*client.SolveResponse, err error) {
-	return BuildWithResultHandler(ctx, drivers, opt, docker, configDir, w, nil)
+	return BuildWithResultHandler(ctx, drivers, opt, docker, configDir, w, nil, false)
 }
 
-func BuildWithResultHandler(ctx context.Context, drivers []DriverInfo, opt map[string]Options, docker DockerAPI, configDir string, w progress.Writer, resultHandleFunc func(driverIndex int, rCtx *ResultContext)) (resp map[string]*client.SolveResponse, err error) {
+func BuildWithResultHandler(ctx context.Context, drivers []DriverInfo, opt map[string]Options, docker DockerAPI, configDir string, w progress.Writer, resultHandleFunc func(driverIndex int, rCtx *ResultContext), allowNoOutput bool) (resp map[string]*client.SolveResponse, err error) {
 	if len(drivers) == 0 {
 		return nil, errors.Errorf("driver required for build")
 	}
@@ -730,14 +751,14 @@ func BuildWithResultHandler(ctx context.Context, drivers []DriverInfo, opt map[s
 		}
 	}
 
-	if noMobyDriver != nil && !noDefaultLoad() {
+	if noMobyDriver != nil && !noDefaultLoad() && noPrintFunc(opt) {
 		var noOutputTargets []string
 		for name, opt := range opt {
 			if !opt.Linked && len(opt.Exports) == 0 {
 				noOutputTargets = append(noOutputTargets, name)
 			}
 		}
-		if len(noOutputTargets) > 0 {
+		if len(noOutputTargets) > 0 && !allowNoOutput {
 			var warnNoOutputBuf bytes.Buffer
 			warnNoOutputBuf.WriteString("No output specified ")
 			if len(noOutputTargets) == 1 && noOutputTargets[0] == "default" {
@@ -1039,22 +1060,69 @@ func BuildWithResultHandler(ctx context.Context, drivers []DriverInfo, opt map[s
 			defer func() { <-done }()
 
 			cc := c
+			var printRes map[string][]byte
 			rr, err := c.Build(ctx, so, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
-				res, err := c.Solve(ctx, req)
-				if err != nil {
-					return nil, err
+				var isFallback bool
+				var origErr error
+				for {
+					if opt.PrintFunc != nil {
+						if _, ok := req.FrontendOpt["frontend.caps"]; !ok {
+							req.FrontendOpt["frontend.caps"] = "moby.buildkit.frontend.subrequests+forward"
+						} else {
+							req.FrontendOpt["frontend.caps"] += ",moby.buildkit.frontend.subrequests+forward"
+						}
+						req.FrontendOpt["requestid"] = "frontend." + opt.PrintFunc.Name
+						if isFallback {
+							req.FrontendOpt["build-arg:BUILDKIT_SYNTAX"] = printFallbackImage
+						}
+					}
+					res, err := c.Solve(ctx, req)
+					if err != nil {
+						if origErr != nil {
+							return nil, err
+						}
+						var reqErr *errdefs.UnsupportedSubrequestError
+						if !isFallback {
+							if errors.As(err, &reqErr) {
+								switch reqErr.Name {
+								case "frontend.outline", "frontend.targets":
+									isFallback = true
+									origErr = err
+									continue
+								}
+								return nil, err
+							}
+							// buildkit v0.8 vendored in Docker 20.10 does not support typed errors
+							if strings.Contains(err.Error(), "unsupported request frontend.outline") || strings.Contains(err.Error(), "unsupported request frontend.targets") {
+								isFallback = true
+								origErr = err
+								continue
+							}
+						}
+						return nil, err
+					}
+					if opt.PrintFunc != nil {
+						printRes = res.Metadata
+					}
+					results.Set(resultKey(dp.driverIndex, k), res)
+					if resultHandleFunc != nil {
+						resultHandleFunc(dp.driverIndex, &ResultContext{cc, res})
+					}
+					return res, nil
 				}
-				results.Set(resultKey(dp.driverIndex, k), res)
-				if resultHandleFunc != nil {
-					resultHandleFunc(dp.driverIndex, &ResultContext{cc, res})
-				}
-				return res, nil
 			}, ch)
 			if err != nil {
 				return err
 			}
 			res[i] = rr
 
+			if rr.ExporterResponse == nil {
+				rr.ExporterResponse = map[string]string{}
+			}
+			for k, v := range printRes {
+				rr.ExporterResponse[k] = string(v)
+			}
+
 			d := drivers[dp.driverIndex].Driver
 			if d.IsMobyDriver() {
 				for _, e := range so.Exports {
@@ -1630,3 +1698,12 @@ func tryNodeIdentifier(configDir string) (out string) {
 	}
 	return
 }
+
+func noPrintFunc(opt map[string]Options) bool {
+	for _, v := range opt {
+		if v.PrintFunc != nil {
+			return false
+		}
+	}
+	return true
+}
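The new `PrintFunc` field turns a build into a frontend subrequest: the request id sent to the frontend becomes `frontend.<Name>`, and if the frontend rejects it (typed `UnsupportedSubrequestError`, or the string match for the BuildKit v0.8 vendored in Docker 20.10), the loop retries once with `BUILDKIT_SYNTAX` pinned to `printFallbackImage`. A hedged sketch of a call site follows; the package paths and the surrounding function are assumptions based on the signatures in this diff, not code from it:

```go
package main

import (
	"context"

	"github.com/docker/buildx/build"
	"github.com/docker/buildx/util/progress"
)

// printOutline asks the frontend for the "outline" subrequest instead of
// running a full build. allowNoOutput is passed as true because a
// print-only run legitimately has no exports.
func printOutline(ctx context.Context, drivers []build.DriverInfo, docker build.DockerAPI, configDir string, w progress.Writer) error {
	opts := map[string]build.Options{
		"default": {
			Inputs:    build.Inputs{ContextPath: "."},
			PrintFunc: &build.PrintFunc{Name: "outline", Format: "json"},
		},
	}
	_, err := build.BuildWithResultHandler(ctx, drivers, opts, docker, configDir, w, nil, true)
	return err
}
```

The subrequest's result comes back through `res.Metadata` and is copied into `rr.ExporterResponse`, which is how the printed output reaches the caller.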
@@ -48,6 +48,7 @@ const defaultTargetName = "default"
 type buildOptions struct {
 	contextPath    string
 	dockerfileName string
+	printFunc      string
 
 	allow     []string
 	buildArgs []string
@@ -122,6 +123,11 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
 		return err
 	}
 
+	printFunc, err := parsePrintFunc(in.printFunc)
+	if err != nil {
+		return err
+	}
+
 	opts := build.Options{
 		Inputs: build.Inputs{
 			ContextPath: in.contextPath,
@@ -141,6 +147,7 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
 		Tags:      in.tags,
 		Target:    in.target,
 		Ulimits:   in.ulimits,
+		PrintFunc: printFunc,
 	}
 
 	platforms, err := platformutil.Parse(in.platforms)
@@ -233,7 +240,7 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
 		contextPathHash = in.contextPath
 	}
 
-	imageID, res, err := buildTargets(ctx, dockerCli, map[string]build.Options{defaultTargetName: opts}, in.progress, contextPathHash, in.builder, in.metadataFile)
+	imageID, res, err := buildTargets(ctx, dockerCli, map[string]build.Options{defaultTargetName: opts}, in.progress, contextPathHash, in.builder, in.metadataFile, in.invoke != "")
 	err = wrapBuildError(err, false)
 	if err != nil {
 		return err
@@ -250,7 +257,7 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
 			return errors.Errorf("failed to configure terminal: %v", err)
 		}
 		err = monitor.RunMonitor(ctx, cfg, func(ctx context.Context) (*build.ResultContext, error) {
-			_, rr, err := buildTargets(ctx, dockerCli, map[string]build.Options{defaultTargetName: opts}, in.progress, contextPathHash, in.builder, in.metadataFile)
+			_, rr, err := buildTargets(ctx, dockerCli, map[string]build.Options{defaultTargetName: opts}, in.progress, contextPathHash, in.builder, in.metadataFile, true)
 			return rr, err
 		}, io.NopCloser(os.Stdin), nopCloser{os.Stdout}, nopCloser{os.Stderr})
 		if err != nil {
@@ -271,7 +278,7 @@ type nopCloser struct {
 
 func (c nopCloser) Close() error { return nil }
 
-func buildTargets(ctx context.Context, dockerCli command.Cli, opts map[string]build.Options, progressMode, contextPathHash, instance string, metadataFile string) (imageID string, res *build.ResultContext, err error) {
+func buildTargets(ctx context.Context, dockerCli command.Cli, opts map[string]build.Options, progressMode, contextPathHash, instance string, metadataFile string, allowNoOutput bool) (imageID string, res *build.ResultContext, err error) {
 	dis, err := getInstanceOrDefault(ctx, dockerCli, instance, contextPathHash)
 	if err != nil {
 		return "", nil, err
@@ -290,7 +297,7 @@ func buildTargets(ctx context.Context, dockerCli command.Cli, opts map[string]bu
 		if res == nil || driverIndex < idx {
 			idx, res = driverIndex, gotRes
 		}
-	})
+	}, allowNoOutput)
 	err1 := printer.Wait()
 	if err == nil {
 		err = err1
@@ -307,6 +314,14 @@ func buildTargets(ctx context.Context, dockerCli command.Cli, opts map[string]bu
 
 	printWarnings(os.Stderr, printer.Warnings(), progressMode)
 
+	for k := range resp {
+		if opts[k].PrintFunc != nil {
+			if err := printResult(opts[k].PrintFunc, resp[k].ExporterResponse); err != nil {
+				return "", nil, err
+			}
+		}
+	}
+
 	return resp[defaultTargetName].ExporterResponse["containerimage.digest"], res, err
 }
 
@@ -463,6 +478,10 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
 
 	flags.StringArrayVar(&options.platforms, "platform", platformsDefault, "Set target platform for build")
 
+	if isExperimental() {
+		flags.StringVar(&options.printFunc, "print", "", "Print result of information request (e.g., outline, targets) [experimental]")
+	}
+
 	flags.BoolVar(&options.exportPush, "push", false, `Shorthand for "--output=type=registry"`)
 
 	flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success")
@@ -481,8 +500,8 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
 
 	flags.Var(options.ulimits, "ulimit", "Ulimit options")
 
-	if os.Getenv("BUILDX_EXPERIMENTAL") == "1" {
-		flags.StringVar(&options.invoke, "invoke", "", "Invoke a command after the build. BUILDX_EXPERIMENTAL=1 is required.")
+	if isExperimental() {
+		flags.StringVar(&options.invoke, "invoke", "", "Invoke a command after the build [experimental]")
 	}
 
 	// hidden flags
@@ -596,6 +615,34 @@ func parseContextNames(values []string) (map[string]build.NamedContext, error) {
 	return result, nil
 }
 
+func parsePrintFunc(str string) (*build.PrintFunc, error) {
+	if str == "" {
+		return nil, nil
+	}
+	csvReader := csv.NewReader(strings.NewReader(str))
+	fields, err := csvReader.Read()
+	if err != nil {
+		return nil, err
+	}
+	f := &build.PrintFunc{}
+	for _, field := range fields {
+		parts := strings.SplitN(field, "=", 2)
+		if len(parts) == 2 {
+			if parts[0] == "format" {
+				f.Format = parts[1]
+			} else {
+				return nil, errors.Errorf("invalid print field: %s", field)
+			}
+		} else {
+			if f.Name != "" {
+				return nil, errors.Errorf("invalid print value: %s", str)
+			}
+			f.Name = field
+		}
+	}
+	return f, nil
+}
+
 func writeMetadataFile(filename string, dt interface{}) error {
 	b, err := json.MarshalIndent(dt, "", "  ")
 	if err != nil {
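As a usage sketch (hypothetical invocations, but consistent with the parser above: a bare field sets the request name, a `format=` field sets the output format, and the flag is only registered when `isExperimental()` is true):

```console
$ BUILDX_EXPERIMENTAL=1 docker buildx build --print=outline .
$ BUILDX_EXPERIMENTAL=1 docker buildx build --print=targets,format=json .
```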
@@ -652,3 +699,11 @@ func (w *wrapped) Error() string {
 func (w *wrapped) Unwrap() error {
 	return w.err
 }
+
+func isExperimental() bool {
+	if v, ok := os.LookupEnv("BUILDX_EXPERIMENTAL"); ok {
+		vv, _ := strconv.ParseBool(v)
+		return vv
+	}
+	return false
+}
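Note that replacing the literal `== "1"` check with `strconv.ParseBool` widens the set of accepted values. A minimal standalone sketch of that behavior (not part of the change itself):

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// strconv.ParseBool accepts "1", "t", "T", "TRUE", "true", "True"
	// (and the corresponding false spellings); anything else returns an
	// error, which isExperimental treats as the feature being disabled.
	for _, v := range []string{"1", "true", "T", "yes", ""} {
		b, err := strconv.ParseBool(v)
		fmt.Printf("%q -> %v (err: %v)\n", v, b, err)
	}
}
```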
@@ -61,32 +61,6 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
 		}
 	}
 
-	buildkitHost := os.Getenv("BUILDKIT_HOST")
-
-	driverName := in.driver
-	if driverName == "" {
-		if len(args) == 0 && buildkitHost != "" {
-			driverName = "remote"
-		} else {
-			var arg string
-			if len(args) > 0 {
-				arg = args[0]
-			}
-			f, err := driver.GetDefaultFactory(ctx, arg, dockerCli.Client(), true)
-			if err != nil {
-				return err
-			}
-			if f == nil {
-				return errors.Errorf("no valid drivers found")
-			}
-			driverName = f.Name()
-		}
-	}
-
-	if driver.GetFactory(driverName, true) == nil {
-		return errors.Errorf("failed to find driver %q", in.driver)
-	}
-
 	txn, release, err := storeutil.GetStore(dockerCli)
 	if err != nil {
 		return err
@@ -121,17 +95,48 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
 				logrus.Warnf("failed to find %q for append, creating a new instance instead", in.name)
 			}
 			if in.actionLeave {
-				return errors.Errorf("failed to find instance %q for leave", name)
+				return errors.Errorf("failed to find instance %q for leave", in.name)
 			}
 		} else {
 			return err
 		}
 	}
 
+	buildkitHost := os.Getenv("BUILDKIT_HOST")
+
+	driverName := in.driver
+	if driverName == "" {
+		if ng != nil {
+			driverName = ng.Driver
+		} else if len(args) == 0 && buildkitHost != "" {
+			driverName = "remote"
+		} else {
+			var arg string
+			if len(args) > 0 {
+				arg = args[0]
+			}
+			f, err := driver.GetDefaultFactory(ctx, arg, dockerCli.Client(), true)
+			if err != nil {
+				return err
+			}
+			if f == nil {
+				return errors.Errorf("no valid drivers found")
+			}
+			driverName = f.Name()
+		}
+	}
+
 	if ng != nil {
 		if in.nodeName == "" && !in.actionAppend {
-			return errors.Errorf("existing instance for %s but no append mode, specify --node to make changes for existing instances", name)
+			return errors.Errorf("existing instance for %q but no append mode, specify --node to make changes for existing instances", name)
 		}
+		if driverName != ng.Driver {
+			return errors.Errorf("existing instance for %q but has mismatched driver %q", name, ng.Driver)
+		}
+	}
+
+	if _, err := driver.GetFactory(driverName, true); err != nil {
+		return err
 	}
 
 	ngOriginal := ng
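To illustrate the new resolution order (builder name and endpoint below are hypothetical): with no `--driver`, no existing instance, and no endpoint argument, a set `BUILDKIT_HOST` now selects the `remote` driver:

```console
$ export BUILDKIT_HOST=tcp://localhost:1234
$ docker buildx create --name remote-builder
```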
@@ -141,14 +146,11 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
 
 	if ng == nil {
 		ng = &store.NodeGroup{
 			Name: name,
+			Driver: driverName,
 		}
 	}
 
-	if ng.Driver == "" || in.driver != "" {
-		ng.Driver = driverName
-	}
-
 	var flags []string
 	if in.flags != "" {
 		flags, err = shlex.Split(in.flags)
@@ -166,6 +168,9 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
 	} else {
 		switch {
 		case driverName == "kubernetes":
+			if len(args) > 0 {
+				logrus.Warnf("kubernetes driver does not support endpoint args %q", args[0])
+			}
 			// naming endpoint to make --append work
 			ep = (&url.URL{
 				Scheme: driverName,
@@ -277,7 +282,7 @@ func createCmd(dockerCli command.Cli) *cobra.Command {
 	var options createOptions
 
 	var drivers bytes.Buffer
-	for _, d := range driver.GetFactories() {
+	for _, d := range driver.GetFactories(true) {
 		if len(drivers.String()) > 0 {
 			drivers.WriteString(", ")
 		}
@@ -315,6 +320,9 @@ func createCmd(dockerCli command.Cli) *cobra.Command {
 }
 
 func csvToMap(in []string) (map[string]string, error) {
+	if len(in) == 0 {
+		return nil, nil
+	}
 	m := make(map[string]string, len(in))
 	for _, s := range in {
 		csvReader := csv.NewReader(strings.NewReader(s))
@@ -115,6 +115,9 @@ func runInspect(dockerCli command.Cli, in inspectOptions) error {
 			if len(n.Flags) > 0 {
 				fmt.Fprintf(w, "Flags:\t%s\n", strings.Join(n.Flags, " "))
 			}
+			if ngi.drivers[i].version != "" {
+				fmt.Fprintf(w, "Buildkit:\t%s\n", ngi.drivers[i].version)
+			}
 			fmt.Fprintf(w, "Platforms:\t%s\n", strings.Join(platformutil.FormatInGroups(n.Platforms, ngi.drivers[i].platforms), ", "))
 		}
 	}
commands/print.go (new file, 48 lines)

@@ -0,0 +1,48 @@
+package commands
+
+import (
+	"fmt"
+	"io"
+	"log"
+	"os"
+
+	"github.com/docker/buildx/build"
+	"github.com/docker/docker/api/types/versions"
+	"github.com/moby/buildkit/frontend/subrequests"
+	"github.com/moby/buildkit/frontend/subrequests/outline"
+	"github.com/moby/buildkit/frontend/subrequests/targets"
+)
+
+func printResult(f *build.PrintFunc, res map[string]string) error {
+	switch f.Name {
+	case "outline":
+		return printValue(outline.PrintOutline, outline.SubrequestsOutlineDefinition.Version, f.Format, res)
+	case "targets":
+		return printValue(targets.PrintTargets, targets.SubrequestsTargetsDefinition.Version, f.Format, res)
+	case "subrequests.describe":
+		return printValue(subrequests.PrintDescribe, subrequests.SubrequestsDescribeDefinition.Version, f.Format, res)
+	default:
+		if dt, ok := res["result.txt"]; ok {
+			fmt.Print(dt)
+		} else {
+			log.Printf("%s %+v", f, res)
+		}
+	}
+	return nil
+}
+
+type printFunc func([]byte, io.Writer) error
+
+func printValue(printer printFunc, version string, format string, res map[string]string) error {
+	if format == "json" {
+		fmt.Fprintln(os.Stdout, res["result.json"])
+		return nil
+	}
+
+	if res["version"] != "" && versions.LessThan(version, res["version"]) && res["result.txt"] != "" {
+		// structure is too new and we don't know how to print it
+		fmt.Fprint(os.Stdout, res["result.txt"])
+		return nil
+	}
+	return printer([]byte(res["result.json"]), os.Stdout)
+}
@@ -138,7 +138,7 @@ func pruneCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
 	}
 
 	flags := cmd.Flags()
-	flags.BoolVarP(&options.all, "all", "a", false, "Remove all unused images, not just dangling ones")
+	flags.BoolVarP(&options.all, "all", "a", false, "Include internal/frontend images")
 	flags.Var(&options.filter, "filter", `Provide filter values (e.g., "until=24h")`)
 	flags.Var(&options.keepStorage, "keep-storage", "Amount of disk space to keep for cache")
 	flags.BoolVar(&options.verbose, "verbose", false, "Provide a more verbose output")
@@ -155,9 +155,9 @@ func toBuildkitPruneInfo(f filters.Args) (*client.PruneInfo, error) {
 	if len(untilValues) > 0 && len(unusedForValues) > 0 {
 		return nil, errors.Errorf("conflicting filters %q and %q", "until", "unused-for")
 	}
-	filterKey := "until"
+	untilKey := "until"
 	if len(unusedForValues) > 0 {
-		filterKey = "unused-for"
+		untilKey = "unused-for"
 	}
 	untilValues = append(untilValues, unusedForValues...)
 
@@ -168,23 +168,27 @@ func toBuildkitPruneInfo(f filters.Args) (*client.PruneInfo, error) {
 		var err error
 		until, err = time.ParseDuration(untilValues[0])
 		if err != nil {
-			return nil, errors.Wrapf(err, "%q filter expects a duration (e.g., '24h')", filterKey)
+			return nil, errors.Wrapf(err, "%q filter expects a duration (e.g., '24h')", untilKey)
 		}
 	default:
 		return nil, errors.Errorf("filters expect only one value")
 	}
 
-	bkFilter := make([]string, 0, f.Len())
-	for _, field := range f.Keys() {
-		values := f.Get(field)
+	filters := make([]string, 0, f.Len())
+	for _, filterKey := range f.Keys() {
+		if filterKey == untilKey {
+			continue
+		}
+
+		values := f.Get(filterKey)
 		switch len(values) {
 		case 0:
-			bkFilter = append(bkFilter, field)
+			filters = append(filters, filterKey)
 		case 1:
-			if field == "id" {
-				bkFilter = append(bkFilter, field+"~="+values[0])
+			if filterKey == "id" {
+				filters = append(filters, filterKey+"~="+values[0])
 			} else {
-				bkFilter = append(bkFilter, field+"=="+values[0])
+				filters = append(filters, filterKey+"=="+values[0])
 			}
 		default:
 			return nil, errors.Errorf("filters expect only one value")
@@ -192,6 +196,6 @@ func toBuildkitPruneInfo(f filters.Args) (*client.PruneInfo, error) {
 	}
 	return &client.PruneInfo{
 		KeepDuration: until,
-		Filter:       []string{strings.Join(bkFilter, ",")},
+		Filter:       []string{strings.Join(filters, ",")},
 	}, nil
 }
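For illustration (filter values are hypothetical): the rewritten loop now skips the `until`/`unused-for` key when assembling the BuildKit filter string, so an invocation like the one below would set `KeepDuration` to 24h and pass only `id~=1234abcd` as a filter:

```console
$ docker buildx prune --filter until=24h --filter id=1234abcd
```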
@@ -60,9 +60,10 @@ func driversForNodeGroup(ctx context.Context, dockerCli command.Cli, ng *store.N
 
 	var f driver.Factory
 	if ng.Driver != "" {
-		f = driver.GetFactory(ng.Driver, true)
-		if f == nil {
-			return nil, errors.Errorf("failed to find driver %q", f)
+		var err error
+		f, err = driver.GetFactory(ng.Driver, true)
+		if err != nil {
+			return nil, err
 		}
 	} else {
 		// empty driver means nodegroup was implicitly created as a default
@@ -20,6 +20,7 @@ target "_common" {
   args = {
     GO_VERSION = GO_VERSION
     BUILDKIT_CONTEXT_KEEP_GIT_DIR = 1
+    BUILDX_EXPERIMENTAL = 1
   }
 }
 
@@ -94,6 +94,56 @@ limitations with the compose format:
 * Specifying variables or global scope attributes is not yet supported
 * `inherits` service field is not supported, but you can use [YAML anchors](https://docs.docker.com/compose/compose-file/#fragments) to reference other services like the example above
 
+## `.env` file
+
+You can declare default environment variables in an environment file named
+`.env`. This file is loaded from the current working directory where the
+command is executed, and is applied to compose definitions passed
+with `-f`.
+
+```yaml
+# docker-compose.yml
+services:
+  webapp:
+    image: docker.io/username/webapp:${TAG:-v1.0.0}
+    build:
+      dockerfile: Dockerfile
+```
+
+```
+# .env
+TAG=v1.1.0
+```
+
+```console
+$ docker buildx bake --print
+```
+```json
+{
+  "group": {
+    "default": {
+      "targets": [
+        "webapp"
+      ]
+    }
+  },
+  "target": {
+    "webapp": {
+      "context": ".",
+      "dockerfile": "Dockerfile",
+      "tags": [
+        "docker.io/username/webapp:v1.1.0"
+      ]
+    }
+  }
+}
+```
+
+> **Note**
+>
+> System environment variables take precedence over environment variables
+> in the `.env` file.
+
 ## Extension field with `x-bake`
 
 Even if some fields are not (yet) available in the compose specification, you
@@ -209,6 +259,7 @@ Complete list of valid fields for `x-bake`:
 
 * `cache-from`
 * `cache-to`
+* `contexts`
 * `no-cache`
 * `no-cache-filter`
 * `output`
@@ -25,6 +25,7 @@ Start a build
 | [`--cgroup-parent`](https://docs.docker.com/engine/reference/commandline/build/#use-a-custom-parent-cgroup---cgroup-parent) | `string` | | Optional parent cgroup for the container |
 | [`-f`](https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f), [`--file`](https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f) | `string` | | Name of the Dockerfile (default: `PATH/Dockerfile`) |
 | `--iidfile` | `string` | | Write the image ID to the file |
+| `--invoke` | `string` | | Invoke a command after the build [experimental] |
 | `--label` | `stringArray` | | Set metadata for an image |
 | [`--load`](#load) | | | Shorthand for `--output=type=docker` |
 | [`--metadata-file`](#metadata-file) | `string` | | Write build result metadata to the file |
@@ -33,6 +34,7 @@ Start a build
 | `--no-cache-filter` | `stringArray` | | Do not cache specified stages |
 | [`-o`](#output), [`--output`](#output) | `stringArray` | | Output destination (format: `type=local,dest=path`) |
 | [`--platform`](#platform) | `stringArray` | | Set target platform for build |
+| `--print` | `string` | | Print result of information request (e.g., outline, targets) [experimental] |
 | [`--progress`](#progress) | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output |
 | `--pull` | | | Always attempt to pull all referenced images |
 | [`--push`](#push) | | | Shorthand for `--output=type=registry` |
@@ -47,6 +49,9 @@ Start a build
 
 <!---MARKER_GEN_END-->
 
+Flags marked with `[experimental]` need to be explicitly enabled by setting the
+`BUILDX_EXPERIMENTAL=1` environment variable.
+
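For instance, a hypothetical invocation enabling one of the experimental flags documented above (the command given to `--invoke` is illustrative):

```console
$ BUILDX_EXPERIMENTAL=1 docker buildx build --invoke sh .
```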
 ## Description
 
 The `buildx build` command starts a build using BuildKit. This command is similar
@@ -436,7 +441,7 @@ $ docker buildx build --load --progress=plain .
 
 > **Note**
 >
-> Check also our [Color output controls guide](https://docs.docker.com/build/guides/color-output/)
+> Check also our [Color output controls guide](https://github.com/docker/buildx/blob/master/docs/guides/color-output.md)
 > for modifying the colors that are used to output information to the terminal.
 
 ### <a name="push"></a> Push the build result to a registry (--push)
@@ -15,7 +15,7 @@ Create a new builder instance
 | `--bootstrap` | | | Boot builder after creation |
 | [`--buildkitd-flags`](#buildkitd-flags) | `string` | | Flags for buildkitd daemon |
 | [`--config`](#config) | `string` | | BuildKit config file |
-| [`--driver`](#driver) | `string` | | Driver to use (available: `docker`, `docker-container`, `kubernetes`, `remote`) |
+| [`--driver`](#driver) | `string` | | Driver to use (available: `docker-container`, `kubernetes`, `remote`) |
 | [`--driver-opt`](#driver-opt) | `stringArray` | | Options for the driver |
 | [`--leave`](#leave) | | | Remove a node from builder instead of changing it |
 | [`--name`](#name) | `string` | | Builder instance name |
@@ -44,7 +44,7 @@ The following example shows information about a builder instance named
 `elated_tesla`:
 
 > **Note**
 >
 > Asterisk `*` next to node build platform(s) indicate they had been set manually during `buildx create`. Otherwise, it had been autodetected.
 
 ```console
@@ -57,10 +57,12 @@ Nodes:
 Name:      elated_tesla0
 Endpoint:  unix:///var/run/docker.sock
 Status:    running
+Buildkit:  v0.10.3
 Platforms: linux/amd64
 
 Name:      elated_tesla1
 Endpoint:  ssh://ubuntu@1.2.3.4
 Status:    running
+Buildkit:  v0.10.3
 Platforms: linux/arm64*, linux/arm/v7, linux/arm/v6
 ```
@@ -11,7 +11,7 @@ Remove build cache
 
 | Name | Type | Default | Description |
 | --- | --- | --- | --- |
-| `-a`, `--all` | | | Remove all unused images, not just dangling ones |
+| `-a`, `--all` | | | Include internal/frontend images |
 | [`--builder`](#builder) | `string` | | Override the configured builder instance |
 | `--filter` | `filter` | | Provide filter values (e.g., `until=24h`) |
 | `-f`, `--force` | | | Do not prompt for confirmation |
@@ -21,6 +21,26 @@ Remove build cache
 
 <!---MARKER_GEN_END-->
 
+## Description
+
+Clears the build cache of the selected builder.
+
+You can finely control what cache data is kept using:
+
+- The `--filter=until=<duration>` flag to keep images that have been used in
+  the last `<duration>` time.
+
+  `<duration>` is a duration string, e.g. `24h` or `2h30m`, with allowable
+  units of `(h)ours`, `(m)inutes` and `(s)econds`.
+
+- The `--keep-storage=<size>` flag to keep `<size>` bytes of data in the cache.
+
+  `<size>` is a human-readable memory string, e.g. `128mb`, `2gb`, etc. Units
+  are case-insensitive.
+
+- The `--all` flag to allow clearing internal helper images and frontend images
+  set using the `#syntax=` directive or the `BUILDKIT_SYNTAX` build argument.
+
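Taken together, a cache-trimming invocation combining the documented flags might look like this (duration and size are illustrative):

```console
$ docker buildx prune --filter=until=48h --keep-storage=2gb --force
```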
 ## Examples
 
 ### <a name="builder"></a> Override the configured builder instance (--builder)
@@ -357,11 +357,14 @@ func (d *Driver) Client(ctx context.Context) (*client.Client, error) {
 		return nil, err
 	}
 
-	td, _ := exp.(client.TracerDelegate)
-
-	return client.New(ctx, "", client.WithContextDialer(func(context.Context, string) (net.Conn, error) {
+	var opts []client.ClientOpt
+	opts = append(opts, client.WithContextDialer(func(context.Context, string) (net.Conn, error) {
 		return conn, nil
-	}), client.WithTracerDelegate(td))
+	}))
+	if td, ok := exp.(client.TracerDelegate); ok {
+		opts = append(opts, client.WithTracerDelegate(td))
+	}
+	return client.New(ctx, "", opts...)
 }
 
 func (d *Driver) Factory() driver.Factory {
@@ -54,11 +54,22 @@ func (d *Driver) Client(ctx context.Context) (*client.Client, error) {
 }
 
 func (d *Driver) Features() map[driver.Feature]bool {
+	var useContainerdSnapshotter bool
+	ctx := context.Background()
+	c, err := d.Client(ctx)
+	if err == nil {
+		workers, _ := c.ListWorkers(ctx)
+		for _, w := range workers {
+			if _, ok := w.Labels["org.mobyproject.buildkit.worker.snapshotter"]; ok {
+				useContainerdSnapshotter = true
+			}
+		}
+	}
 	return map[driver.Feature]bool{
-		driver.OCIExporter:    false,
-		driver.DockerExporter: false,
-		driver.CacheExport:    false,
-		driver.MultiPlatform:  false,
+		driver.OCIExporter:    useContainerdSnapshotter,
+		driver.DockerExporter: useContainerdSnapshotter,
+		driver.CacheExport:    useContainerdSnapshotter,
+		driver.MultiPlatform:  useContainerdSnapshotter,
 	}
 }
 
@@ -5,18 +5,15 @@ import (
 	"testing"
 
 	"github.com/docker/cli/cli/command"
-	"github.com/docker/cli/cli/config/configfile"
 	cliflags "github.com/docker/cli/cli/flags"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
 func TestDefaultContextInitializer(t *testing.T) {
-	cli, err := command.NewDockerCli()
-	require.NoError(t, err)
 	os.Setenv("KUBECONFIG", "./fixtures/test-kubeconfig")
 	defer os.Unsetenv("KUBECONFIG")
-	ctx, err := command.ResolveDefaultContext(&cliflags.CommonOptions{}, &configfile.ConfigFile{}, command.DefaultContextStoreConfig(), cli.Err())
+	ctx, err := command.ResolveDefaultContext(&cliflags.CommonOptions{}, command.DefaultContextStoreConfig())
 	require.NoError(t, err)
 	assert.Equal(t, "default", ctx.Meta.Name)
 	assert.Equal(t, "zoinx", ctx.Meta.Endpoints[KubernetesEndpoint].(EndpointMeta).DefaultNamespace)
@@ -215,11 +215,14 @@ func (d *Driver) Client(ctx context.Context) (*client.Client, error) {
 		return nil, err
 	}
 
-	td, _ := exp.(client.TracerDelegate)
-
-	return client.New(ctx, "", client.WithContextDialer(func(context.Context, string) (net.Conn, error) {
+	var opts []client.ClientOpt
+	opts = append(opts, client.WithContextDialer(func(context.Context, string) (net.Conn, error) {
 		return conn, nil
-	}), client.WithTracerDelegate(td))
+	}))
+	if td, ok := exp.(client.TracerDelegate); ok {
+		opts = append(opts, client.WithTracerDelegate(td))
+	}
+	return client.New(ctx, "", opts...)
 }
 
 func (d *Driver) Factory() driver.Factory {
@@ -213,6 +213,24 @@ func toRootless(d *appsv1.Deployment) error {
 		d.Spec.Template.ObjectMeta.Annotations = make(map[string]string, 1)
 	}
 	d.Spec.Template.ObjectMeta.Annotations["container.apparmor.security.beta.kubernetes.io/"+containerName] = "unconfined"
+
+	// Dockerfile has `VOLUME /home/user/.local/share/buildkit` by default too,
+	// but the default VOLUME does not work with rootless on Google's Container-Optimized OS
+	// as it is mounted with `nosuid,nodev`.
+	// https://github.com/moby/buildkit/issues/879#issuecomment-1240347038
+	// https://github.com/moby/buildkit/pull/3097
+	const emptyDirVolName = "buildkitd"
+	d.Spec.Template.Spec.Containers[0].VolumeMounts = append(d.Spec.Template.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{
+		Name:      emptyDirVolName,
+		MountPath: "/home/user/.local/share/buildkit",
+	})
+	d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, corev1.Volume{
+		Name: emptyDirVolName,
+		VolumeSource: corev1.VolumeSource{
+			EmptyDir: &corev1.EmptyDirVolumeSource{},
+		},
+	})
+
 	return nil
 }
 
@@ -92,16 +92,16 @@ func GetDefaultFactory(ctx context.Context, ep string, c dockerclient.APIClient,
 	return dd[0].f, nil
 }
 
-func GetFactory(name string, instanceRequired bool) Factory {
+func GetFactory(name string, instanceRequired bool) (Factory, error) {
 	for _, f := range drivers {
-		if instanceRequired && !f.AllowsInstances() {
-			continue
-		}
 		if f.Name() == name {
-			return f
+			if instanceRequired && !f.AllowsInstances() {
+				return nil, errors.Errorf("additional instances of driver %q cannot be created", name)
+			}
+			return f, nil
 		}
 	}
-	return nil
+	return nil, errors.Errorf("failed to find driver %q", name)
 }
 
 func GetDriver(ctx context.Context, name string, f Factory, endpointAddr string, api dockerclient.APIClient, auth Auth, kcc KubeClientConfig, flags []string, files map[string][]byte, do map[string]string, platforms []specs.Platform, contextPathHash string) (Driver, error) {
@@ -131,9 +131,12 @@ func GetDriver(ctx context.Context, name string, f Factory, endpointAddr string,
 	return &cachedDriver{Driver: d}, nil
 }
 
-func GetFactories() []Factory {
+func GetFactories(instanceRequired bool) []Factory {
 	ds := make([]Factory, 0, len(drivers))
 	for _, d := range drivers {
+		if instanceRequired && !d.AllowsInstances() {
+			continue
+		}
 		ds = append(ds, d)
 	}
 	sort.Slice(ds, func(i, j int) bool {
go.mod (16 changed lines)

@@ -3,7 +3,7 @@ module github.com/docker/buildx
 go 1.17
 
 require (
-	github.com/compose-spec/compose-go v1.3.0
+	github.com/compose-spec/compose-go v1.4.0
 	github.com/containerd/console v1.0.3
 	github.com/containerd/containerd v1.6.6
 	github.com/docker/cli v20.10.17+incompatible // v22.06.x - see "replace" for the actual version
@@ -15,7 +15,7 @@ require (
 	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
 	github.com/hashicorp/go-cty-funcs v0.0.0-20200930094925-2721b1e36840
 	github.com/hashicorp/hcl/v2 v2.8.2
-	github.com/moby/buildkit v0.10.1-0.20220721175135-c75998aec3d4
+	github.com/moby/buildkit v0.10.1-0.20220816171719-55ba9d14360a
 	github.com/morikuni/aec v1.0.0
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799
@@ -31,7 +31,7 @@ require (
 	go.opentelemetry.io/otel/trace v1.4.1
 	golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4
 	golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
-	google.golang.org/grpc v1.45.0
+	google.golang.org/grpc v1.47.0
 	gopkg.in/yaml.v3 v3.0.1
 	k8s.io/api v0.23.5
 	k8s.io/apimachinery v0.23.5
@@ -95,7 +95,7 @@ require (
 	github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
-	github.com/klauspost/compress v1.15.1 // indirect
+	github.com/klauspost/compress v1.15.7 // indirect
 	github.com/kr/pretty v0.3.0 // indirect
 	github.com/mattn/go-shellwords v1.0.12 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
@@ -110,7 +110,7 @@ require (
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/opencontainers/runc v1.1.3 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/prometheus/client_golang v1.12.1 // indirect
+	github.com/prometheus/client_golang v1.12.2 // indirect
 	github.com/prometheus/client_model v0.2.0 // indirect
 	github.com/prometheus/common v0.32.1 // indirect
 	github.com/prometheus/procfs v0.7.3 // indirect
@@ -138,9 +138,9 @@ require (
 	golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
 	golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect
 	golang.org/x/text v0.3.7 // indirect
-	golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
+	golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 // indirect
+	google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6 // indirect
 	google.golang.org/protobuf v1.27.1 // indirect
 	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
 	gopkg.in/dancannon/gorethink.v3 v3.0.5 // indirect
@@ -155,7 +155,7 @@ require (
 )
 
 replace (
-	github.com/docker/cli => github.com/docker/cli v20.10.3-0.20220721163225-f1615facb1ca+incompatible // master (v22.06-dev)
+	github.com/docker/cli => github.com/docker/cli v20.10.3-0.20220803220330-418ca3b4d46f+incompatible // master (v22.06-dev)
 	github.com/docker/docker => github.com/docker/docker v20.10.3-0.20220720171342-a60b458179aa+incompatible // 22.06 branch (v22.06-dev)
 	k8s.io/api => k8s.io/api v0.22.4
 	k8s.io/apimachinery => k8s.io/apimachinery v0.22.4
go.sum (39 changed lines)

@@ -117,12 +117,12 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
 github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/compose-spec/compose-go v1.3.0 h1:n5fSOUXQsfnCpn/lZBgNM3qEu1PDhvcbWrWXVBlUNmA=
-github.com/compose-spec/compose-go v1.3.0/go.mod h1:l7RUULbFFLzlQHuxtJr7SVLyWdqEpbJEGTWCgcu6Eqw=
+github.com/compose-spec/compose-go v1.4.0 h1:zaYVAZ6lIByr7Jffi20AabfeUwcTrdXfH3X1R5HEm+g=
+github.com/compose-spec/compose-go v1.4.0/go.mod h1:l7RUULbFFLzlQHuxtJr7SVLyWdqEpbJEGTWCgcu6Eqw=
 github.com/containerd/cgroups v1.0.3 h1:ADZftAkglvCiD44c77s5YmMqaP2pzVCFZvBmAlBdAP4=
 github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw=
 github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
@@ -157,8 +157,8 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
 github.com/distribution/distribution/v3 v3.0.0-20220725133111-4bf3547399eb h1:oCCuuU3kMO3sjZH/p7LamvQNW9SWoT4yQuMGcdSxGAE=
 github.com/distribution/distribution/v3 v3.0.0-20220725133111-4bf3547399eb/go.mod h1:28YO/VJk9/64+sTGNuYaBjWxrXTPrj0C0XmgTIOjxX4=
-github.com/docker/cli v20.10.3-0.20220721163225-f1615facb1ca+incompatible h1:Dd/CSOpM6U0thw3xNPlw6m+5/4VOexEcgKlL38haGgk=
-github.com/docker/cli v20.10.3-0.20220721163225-f1615facb1ca+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v20.10.3-0.20220803220330-418ca3b4d46f+incompatible h1:iKanFYBu6Cum7d9j8JGTw2s/d7hUAcXRkEcp2m8b6Qc=
+github.com/docker/cli v20.10.3-0.20220803220330-418ca3b4d46f+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/cli-docs-tool v0.5.0 h1:EjGwI6EyB7YemHCC7R8mwXszJTbuq0T0pFuDC5bMhcE=
 github.com/docker/cli-docs-tool v0.5.0/go.mod h1:zMjqTFCU361PRh8apiXzeAZ1Q/xupbIwTusYpzCXS/o=
 github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
@@ -190,8 +190,8 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
 github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
 github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
 github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW4o=
@@ -293,6 +293,7 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
 github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
 github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@@ -377,8 +378,8 @@ github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALr
 github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
-github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.7 h1:7cgTQxJCU/vy+oP/E3B9RGbQTgbiVzIJWIKOLoAsPok=
+github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
@@ -410,14 +411,14 @@ github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZX
 github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
 github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/moby/buildkit v0.10.1-0.20220721175135-c75998aec3d4 h1:+AwP6ma57EBQ5+eOSsg29MAylks33kt2MverACSqJv0=
-github.com/moby/buildkit v0.10.1-0.20220721175135-c75998aec3d4/go.mod h1:yle9eiU1fiJ/WhC4VTLOaQ6rxFou1mc4AhwScHwysi0=
+github.com/moby/buildkit v0.10.1-0.20220816171719-55ba9d14360a h1:NI01Z14Hbwo1MHq8ylu4HNkmKGnhk8UZsD6c6FVMcA8=
+github.com/moby/buildkit v0.10.1-0.20220816171719-55ba9d14360a/go.mod h1:Wa+LkeUQ9NJTVXTAY38rhkfKVQcuCIo2fbavRSuGsbI=
 github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
 github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
 github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
 github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
 github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
-github.com/moby/sys/mountinfo v0.6.0 h1:gUDhXQx58YNrpHlK4nSL+7y2pxFZkUcXqzFDKWdC0Oo=
+github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
 github.com/moby/sys/signal v0.6.0 h1:aDpY94H8VlhTGa9sNYUFCFsMZIUh5wm0B6XkIoJj/iY=
 github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
 github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
@@ -474,8 +475,8 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn
 github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk=
-github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
+github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34=
+github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||||
@@ -837,8 +838,9 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
|||||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
|
|
||||||
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
|
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44=
|
||||||
|
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
@@ -965,8 +967,8 @@ google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6D
|
|||||||
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
|
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
|
||||||
google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 h1:Et6SkiuvnBn+SgrSYXs/BrUpGB4mbdwt4R3vaPIlicA=
|
google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6 h1:FglFEfyj61zP3c6LgjmVHxYxZWXYul9oiS1EZqD5gLc=
|
||||||
google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
|
||||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||||
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||||
@@ -987,11 +989,10 @@ google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA5
|
|||||||
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||||
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||||
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||||
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
|
||||||
google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||||
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||||
google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M=
|
google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8=
|
||||||
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
|
google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||||
|
@@ -14,6 +14,7 @@ RUN apk add --no-cache rsync git
 WORKDIR /src
 COPY --from=docsgen /out/docsgen /usr/bin
 ARG FORMATS
+ARG BUILDX_EXPERIMENTAL
 RUN --mount=target=/context \
   --mount=target=.,type=tmpfs <<EOT
 set -e
18  hack/hash-files  Executable file
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+set -eu -o pipefail
+
+: ${RELEASE_OUT=./release-out}
+
+# checksums
+if ! type shasum > /dev/null 2>&1; then
+  echo >&2 "ERROR: shasum is required"
+  exit 1
+fi
+find ./${RELEASE_OUT}/ -type f \( -iname "buildx-*" ! -iname "*darwin*" \) -print0 | sort -z | xargs -r0 shasum -a 256 -b | sed 's# .*/# #' > ./${RELEASE_OUT}/checksums.txt
+
+# verify
+(
+  cd ./${RELEASE_OUT}
+  shasum -a 256 -U -c checksums.txt
+)
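The new `hack/hash-files` script centralizes checksum handling: it SHA-256-hashes every non-darwin `buildx-*` binary under `RELEASE_OUT`, strips the directory prefix with `sed` so `checksums.txt` stays location-independent, and immediately re-verifies the file from inside the directory. For illustration, a rough Go sketch of the generation step under the same assumptions (the `release-out` path and the `buildx-*`/darwin filter come from the script; everything else is hypothetical, not buildx code):

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"sort"
	"strings"
)

func main() {
	dir := "./release-out" // the script's RELEASE_OUT default
	var lines []string
	err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err != nil || info.IsDir() {
			return err
		}
		name := info.Name()
		// Mirror the script's filter: buildx-* binaries, excluding darwin.
		if !strings.HasPrefix(name, "buildx-") || strings.Contains(name, "darwin") {
			return nil
		}
		f, err := os.Open(path)
		if err != nil {
			return err
		}
		defer f.Close()
		h := sha256.New()
		if _, err := io.Copy(h, f); err != nil {
			return err
		}
		// shasum -b prints "<hash> *<name>", hence the " *" join.
		lines = append(lines, fmt.Sprintf("%x *%s", h.Sum(nil), name))
		return nil
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, "ERROR:", err)
		os.Exit(1)
	}
	sort.Strings(lines) // stable ordering, like the script's sort -z
	out := strings.Join(lines, "\n") + "\n"
	if err := os.WriteFile(filepath.Join(dir, "checksums.txt"), []byte(out), 0o644); err != nil {
		fmt.Fprintln(os.Stderr, "ERROR:", err)
		os.Exit(1)
	}
}
```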
13  hack/release
@@ -25,15 +25,4 @@ fi
 mv -f ./${RELEASE_OUT}/**/* ./${RELEASE_OUT}/
 find ./${RELEASE_OUT} -type d -empty -delete
 
-# checksums
-if ! type shasum > /dev/null 2>&1; then
-  echo >&2 "ERROR: shasum is required"
-  exit 1
-fi
-find ./${RELEASE_OUT}/ -type f \( -iname "buildx-*" ! -iname "*darwin*" \) -print0 | sort -z | xargs -r0 shasum -a 256 -b | sed 's# .*/# #' > ./${RELEASE_OUT}/checksums.txt
-
-# verify
-(
-  cd ./${RELEASE_OUT}
-  shasum -a 256 -U -c checksums.txt
-)
+source ./hack/hash-files
@@ -13,6 +13,14 @@ import (
 	"golang.org/x/term"
 )
 
+const helpMessage = `
+Available commads are:
+  reload   reloads the context and build it.
+  rollback re-runs the interactive container with initial rootfs contents.
+  exit     exits monitor.
+  help     shows this message.
+`
+
 // RunMonitor provides an interactive session for running and managing containers via specified IO.
 func RunMonitor(ctx context.Context, containerConfig build.ContainerConfig, reloadFunc func(context.Context) (*build.ResultContext, error), stdin io.ReadCloser, stdout, stderr io.WriteCloser) error {
 	monitorIn, monitorOut := ioSetPipe()
@@ -34,13 +42,18 @@ func RunMonitor(ctx context.Context, containerConfig build.ContainerConfig, relo
 
 	m := &monitor{
 		invokeIO: newIOForwarder(containerIn),
-		muxIO:    newMuxIO(ioSetIn{stdin, stdout, stderr}, []ioSetOutContext{monitorOutCtx, containerOutCtx}, 1, "Switched IO\n"),
+		muxIO: newMuxIO(ioSetIn{stdin, stdout, stderr}, []ioSetOutContext{monitorOutCtx, containerOutCtx}, 1, func(prev int, res int) string {
+			if prev == 0 && res == 0 {
+				// No toggle happened because container I/O isn't enabled.
+				return "No running interactive containers. You can start one by issuing rollback command\n"
+			}
+			return "Switched IO\n"
+		}),
 	}
 
 	// Start container automatically
-	go func() {
+	fmt.Fprintf(stdout, "Launching interactive container. Press Ctrl-a-c to switch to monitor console\n")
 	m.rollback(ctx, containerConfig)
-	}()
 
 	// Serve monitor commands
 	monitorForwarder := newIOForwarder(monitorIn)
@@ -52,6 +65,10 @@ func RunMonitor(ctx context.Context, containerConfig build.ContainerConfig, relo
 	go func() {
 		defer close(doneCh)
 		defer in.Close()
+		go func() {
+			<-ctx.Done()
+			in.Close()
+		}()
 		t := term.NewTerminal(readWriter{in.stdin, in.stdout}, "(buildx) ")
 		for {
 			l, err := t.ReadLine()
@@ -73,20 +90,31 @@ func RunMonitor(ctx context.Context, containerConfig build.ContainerConfig, relo
 				// rollback the running container with the new result
 				containerConfig.ResultCtx = res
 				m.rollback(ctx, containerConfig)
+				fmt.Fprint(stdout, "Interactive container was restarted. Press Ctrl-a-c to switch to the new container\n")
 			}
 		case "rollback":
 			m.rollback(ctx, containerConfig)
+			fmt.Fprint(stdout, "Interactive container was restarted. Press Ctrl-a-c to switch to the new container\n")
 		case "exit":
 			return
+		case "help":
+			fmt.Fprint(stdout, helpMessage)
 		default:
 			fmt.Printf("unknown command: %q\n", l)
+			fmt.Fprint(stdout, helpMessage)
 		}
 		}
 	}()
 	select {
 	case <-doneCh:
+		if m.curInvokeCancel != nil {
+			m.curInvokeCancel()
+		}
 		return nil
 	case err := <-errCh:
+		if m.curInvokeCancel != nil {
+			m.curInvokeCancel()
+		}
 		return err
 	case <-monitorDisableCh:
 	}
@@ -227,7 +255,7 @@ type ioSetOutContext struct {
 // newMuxIO forwards IO stream to/from "in" and "outs".
 // "outs" are closed automatically when "in" reaches EOF.
 // "in" doesn't closed automatically so the caller needs to explicitly close it.
-func newMuxIO(in ioSetIn, out []ioSetOutContext, initIdx int, toggleMessage string) *muxIO {
+func newMuxIO(in ioSetIn, out []ioSetOutContext, initIdx int, toggleMessage func(prev int, res int) string) *muxIO {
 	m := &muxIO{
 		enabled: make(map[int]struct{}),
 		in:      in,
@@ -327,7 +355,7 @@ type muxIO struct {
 	in            ioSetIn
 	out           []ioSetOutContext
 	closedCh      chan struct{}
-	toggleMessage string
+	toggleMessage func(prev int, res int) string
 }
 
 func (m *muxIO) waitClosed() {
@@ -357,6 +385,7 @@ func (m *muxIO) toggleIO() {
 	if m.out[m.cur].disableHook != nil {
 		m.out[m.cur].disableHook()
 	}
+	prev := m.cur
 	for {
 		if m.cur+1 >= m.maxCur {
 			m.cur = 0
@@ -368,10 +397,11 @@ func (m *muxIO) toggleIO() {
 		}
 		break
 	}
+	res := m.cur
 	if m.out[m.cur].enableHook != nil {
 		m.out[m.cur].enableHook()
 	}
-	fmt.Fprintf(m.in.stdout, m.toggleMessage)
+	fmt.Fprint(m.in.stdout, m.toggleMessage(prev, res))
 }
 
 func traceReader(r io.ReadCloser, f func(rune) (bool, error)) io.ReadCloser {

@@ -131,7 +131,7 @@ func TestMuxIO(t *testing.T) {
 		outBufs = append(outBufs, outBuf)
 		outs = append(outs, ioSetOutContext{out, nil, nil})
 	}
-	mio := newMuxIO(in, outs, tt.initIdx, "")
+	mio := newMuxIO(in, outs, tt.initIdx, func(prev int, res int) string { return "" })
 	for _, i := range tt.inputs {
 		// Add input to muxIO
 		istr, writeback := i(mio)
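The monitor diff above replaces the fixed "Switched IO" string with a `toggleMessage` callback that receives the output index before (`prev`) and after (`res`) a toggle attempt, which lets the monitor distinguish a real switch from a no-op when the container output (index 0) is disabled. A self-contained sketch of that callback pattern, with hypothetical names rather than buildx's actual types:

```go
package main

import "fmt"

// toggleFunc decides what to print after a toggle attempt; prev and res are
// the selected output indexes before and after the attempt.
type toggleFunc func(prev, res int) string

type mux struct {
	cur      int
	max      int
	enabled  map[int]bool
	onToggle toggleFunc
}

// toggle advances cur to the next enabled output, wrapping around, then
// reports the transition through the callback.
func (m *mux) toggle() {
	prev := m.cur
	for i := 0; i < m.max; i++ {
		m.cur = (m.cur + 1) % m.max
		if m.enabled[m.cur] {
			break
		}
	}
	fmt.Print(m.onToggle(prev, m.cur))
}

func main() {
	m := &mux{
		max:     2,
		enabled: map[int]bool{0: true}, // output 1 (the container) is not running
		onToggle: func(prev, res int) string {
			if prev == 0 && res == 0 {
				return "No running interactive containers. You can start one by issuing rollback command\n"
			}
			return "Switched IO\n"
		},
	}
	m.toggle() // lands back on 0, so the no-op message is printed
}
```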
@@ -8,6 +8,7 @@ import (
 	"github.com/docker/buildx/util/platformutil"
 	specs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 )
 
 type NodeGroup struct {
@@ -59,17 +60,42 @@ func (ng *NodeGroup) Update(name, endpoint string, platforms []string, endpoints
 		return err
 	}
 
+	var files map[string][]byte
+	if configFile != "" {
+		files, err = confutil.LoadConfigFiles(configFile)
+		if err != nil {
+			return err
+		}
+	}
+
 	if i != -1 {
 		n := ng.Nodes[i]
+		needsRestart := false
 		if endpointsSet {
 			n.Endpoint = endpoint
+			needsRestart = true
 		}
 		if len(platforms) > 0 {
 			n.Platforms = pp
 		}
 		if flags != nil {
 			n.Flags = flags
+			needsRestart = true
 		}
+		if do != nil {
+			n.DriverOpts = do
+			needsRestart = true
+		}
+		if configFile != "" {
+			for k, v := range files {
+				n.Files[k] = v
+			}
+			needsRestart = true
+		}
+		if needsRestart {
+			logrus.Warn("new settings may not be used until builder is restarted")
+		}
+
 		ng.Nodes[i] = n
 		if err := ng.validateDuplicates(endpoint, i); err != nil {
 			return err
@@ -92,14 +118,7 @@ func (ng *NodeGroup) Update(name, endpoint string, platforms []string, endpoints
 		Platforms:  pp,
 		Flags:      flags,
 		DriverOpts: do,
-	}
-
-	if configFile != "" {
-		files, err := confutil.LoadConfigFiles(configFile)
-		if err != nil {
-			return err
-		}
-		n.Files = files
+		Files:      files,
 	}
 
 	ng.Nodes = append(ng.Nodes, n)
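In effect, `Update` now loads the `--config` file contents once up front, merges them into an existing node's file map rather than replacing it wholesale, applies driver options, and warns whenever a change (endpoint, flags, driver opts, config files) only takes effect after the builder restarts. A reduced sketch of that merge-and-warn flow, with hypothetical types standing in for buildx's store types:

```go
package main

import "log"

type node struct {
	Endpoint string
	Flags    []string
	Files    map[string][]byte
}

// updateNode applies new settings and warns if a restart is needed.
func updateNode(n *node, endpoint string, flags []string, files map[string][]byte) {
	needsRestart := false
	if endpoint != "" {
		n.Endpoint = endpoint
		needsRestart = true
	}
	if flags != nil {
		n.Flags = flags
		needsRestart = true
	}
	if len(files) > 0 {
		if n.Files == nil {
			n.Files = map[string][]byte{}
		}
		for k, v := range files { // merge: unrelated files stay in place
			n.Files[k] = v
		}
		needsRestart = true
	}
	if needsRestart {
		log.Print("new settings may not be used until builder is restarted")
	}
}

func main() {
	n := &node{Files: map[string][]byte{"buildkitd.toml": []byte("debug = false")}}
	updateNode(n, "", nil, map[string][]byte{"buildkitd.toml": []byte("debug = true")})
}
```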
@@ -8,6 +8,7 @@ import (
 	"os"
 	"sort"
 	"strings"
+	"sync"
 	"text/tabwriter"
 	"text/template"
 
@@ -112,7 +113,9 @@ func (p *Printer) Print(raw bool, out io.Writer) error {
 	}
 
 	imageconfigs := make(map[string]*ocispecs.Image)
+	imageconfigsMutex := sync.Mutex{}
 	buildinfos := make(map[string]*binfotypes.BuildInfo)
+	buildinfosMutex := sync.Mutex{}
 
 	eg, _ := errgroup.WithContext(p.ctx)
 	for _, platform := range p.platforms {
@@ -122,12 +125,16 @@ func (p *Printer) Print(raw bool, out io.Writer) error {
 			if err != nil {
 				return err
 			} else if img != nil {
+				imageconfigsMutex.Lock()
 				imageconfigs[platforms.Format(platform)] = img
+				imageconfigsMutex.Unlock()
 			}
 			if bi, err := imageutil.BuildInfo(dtic); err != nil {
 				return err
 			} else if bi != nil {
+				buildinfosMutex.Lock()
 				buildinfos[platforms.Format(platform)] = bi
+				buildinfosMutex.Unlock()
 			}
 			return nil
 		})
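The two mutexes fix a data race: `Print` resolves each platform in its own `errgroup` goroutine, and every goroutine writes into the shared `imageconfigs` and `buildinfos` maps; Go's runtime aborts the process on unsynchronized concurrent map writes. A minimal reproduction of the corrected pattern:

```go
package main

import (
	"fmt"
	"sync"

	"golang.org/x/sync/errgroup"
)

func main() {
	platforms := []string{"linux/amd64", "linux/arm64", "linux/s390x"}

	results := make(map[string]string)
	var mu sync.Mutex // guards results across the errgroup goroutines

	var eg errgroup.Group
	for _, p := range platforms {
		p := p // capture the loop variable (pre-Go 1.22 idiom)
		eg.Go(func() error {
			v := "config-for-" + p // stand-in for the fetched image config
			mu.Lock()
			results[p] = v
			mu.Unlock()
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		panic(err)
	}
	fmt.Println(len(results), "platforms resolved")
}
```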
10  vendor/github.com/compose-spec/compose-go/loader/loader.go  generated vendored
@@ -43,11 +43,6 @@ import (
 	"gopkg.in/yaml.v2"
 )
 
-const (
-	DefaultSeparator       = "-"
-	CompatibilitySeparator = "_"
-)
-
 // Options supported by Load
 type Options struct {
 	// Skip schema validation
@@ -72,8 +67,6 @@ type Options struct {
 	projectName string
 	// Indicates when the projectName was imperatively set or guessed from path
 	projectNameImperativelySet bool
-	// Set separator used for naming resources
-	Separator string
 }
 
 func (o *Options) SetProjectName(name string, imperativelySet bool) {
@@ -162,7 +155,6 @@ func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.
 			LookupValue:     configDetails.LookupEnv,
 			TypeCastMapping: interpolateTypeCastMapping,
 		},
-		Separator: DefaultSeparator,
 	}
 
 	for _, op := range options {
@@ -231,7 +223,7 @@ func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.
 	}
 
 	if !opts.SkipNormalization {
-		err = normalize(project, opts.ResolvePaths, opts.Separator)
+		err = normalize(project, opts.ResolvePaths)
 		if err != nil {
 			return nil, err
 		}
14  vendor/github.com/compose-spec/compose-go/loader/normalize.go  generated vendored
@@ -28,7 +28,7 @@ import (
 )
 
 // normalize compose project by moving deprecated attributes to their canonical position and injecting implicit defaults
-func normalize(project *types.Project, resolvePaths bool, separator string) error {
+func normalize(project *types.Project, resolvePaths bool) error {
 	absWorkingDir, err := filepath.Abs(project.WorkingDir)
 	if err != nil {
 		return err
@@ -110,7 +110,7 @@ func normalize(project *types.Project, resolvePaths bool, separator string) erro
 		project.Services[i] = s
 	}
 
-	setNameFromKey(project, separator)
+	setNameFromKey(project)
 
 	return nil
 }
@@ -143,31 +143,31 @@ func absComposeFiles(composeFiles []string) ([]string, error) {
 }
 
 // Resources with no explicit name are actually named by their key in map
-func setNameFromKey(project *types.Project, separator string) {
+func setNameFromKey(project *types.Project) {
 	for i, n := range project.Networks {
 		if n.Name == "" {
-			n.Name = fmt.Sprintf("%s%s%s", project.Name, separator, i)
+			n.Name = fmt.Sprintf("%s_%s", project.Name, i)
 			project.Networks[i] = n
 		}
 	}
 
	for i, v := range project.Volumes {
 		if v.Name == "" {
-			v.Name = fmt.Sprintf("%s%s%s", project.Name, separator, i)
+			v.Name = fmt.Sprintf("%s_%s", project.Name, i)
 			project.Volumes[i] = v
 		}
 	}
 
	for i, c := range project.Configs {
 		if c.Name == "" {
-			c.Name = fmt.Sprintf("%s%s%s", project.Name, separator, i)
+			c.Name = fmt.Sprintf("%s_%s", project.Name, i)
 			project.Configs[i] = c
 		}
 	}
 
	for i, s := range project.Secrets {
 		if s.Name == "" {
-			s.Name = fmt.Sprintf("%s%s%s", project.Name, separator, i)
+			s.Name = fmt.Sprintf("%s_%s", project.Name, i)
 			project.Secrets[i] = s
 		}
 	}
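With the configurable separator reverted, compose-go again derives implicit resource names by joining the project name and the map key with a fixed underscore, e.g.:

```go
package main

import "fmt"

func main() {
	projectName, volumeKey := "myapp", "data"
	// compose-go now hardcodes "_" where the removed Separator option used to apply.
	fmt.Printf("%s_%s\n", projectName, volumeKey) // myapp_data
}
```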
38  vendor/github.com/compose-spec/compose-go/template/template.go  generated vendored
@@ -61,13 +61,14 @@ type SubstituteFunc func(string, Mapping) (string, bool, error)
 // SubstituteWith substitute variables in the string with their values.
 // It accepts additional substitute function.
 func SubstituteWith(template string, mapping Mapping, pattern *regexp.Regexp, subsFuncs ...SubstituteFunc) (string, error) {
-	var err error
+	var outerErr error
 
-	if len(subsFuncs) == 0 {
-		_, subsFunc := getSubstitutionFunctionForTemplate(template)
-		subsFuncs = []SubstituteFunc{subsFunc}
-	}
 	result := pattern.ReplaceAllStringFunc(template, func(substring string) string {
+		_, subsFunc := getSubstitutionFunctionForTemplate(substring)
+		if len(subsFuncs) > 0 {
+			subsFunc = subsFuncs[0]
+		}
+
 		closingBraceIndex := getFirstBraceClosingIndex(substring)
 		rest := ""
 		if closingBraceIndex > -1 {
@@ -89,24 +90,21 @@ func SubstituteWith(template string, mapping Mapping, pattern *regexp.Regexp, su
 		}
 
 		if substitution == "" {
-			err = &InvalidTemplateError{Template: template}
+			outerErr = &InvalidTemplateError{Template: template}
 			return ""
 		}
 
 		if braced {
-			for _, f := range subsFuncs {
-				var (
-					value   string
-					applied bool
-				)
-				value, applied, err = f(substitution, mapping)
-				if err != nil {
-					return ""
-				}
-				if !applied {
-					continue
-				}
-				interpolatedNested, err := SubstituteWith(rest, mapping, pattern, subsFuncs...)
+			var (
+				value   string
+				applied bool
+			)
+			value, applied, outerErr = subsFunc(substitution, mapping)
+			if outerErr != nil {
+				return ""
+			}
+			if applied {
+				interpolatedNested, err := SubstituteWith(rest, mapping, pattern)
 				if err != nil {
 					return ""
 				}
@@ -121,7 +119,7 @@ func SubstituteWith(template string, mapping Mapping, pattern *regexp.Regexp, su
 		return value
 	})
 
-	return result, err
+	return result, outerErr
 }
 
 func getSubstitutionFunctionForTemplate(template string) (string, SubstituteFunc) {
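Two things changed in `SubstituteWith`: the substitution function is now chosen per matched substring instead of once for the whole template, and errors escape the `ReplaceAllStringFunc` closure through an outer variable, since the callback can only return a string. The error-capture idiom in isolation (a simplified pattern under assumed names, not compose-go's actual regexp or API):

```go
package main

import (
	"errors"
	"fmt"
	"regexp"
)

func substitute(template string, lookup func(string) (string, bool)) (string, error) {
	pattern := regexp.MustCompile(`\$\{([^}]+)\}`) // simplified; compose-go's pattern is richer
	var outerErr error                             // carries errors out of the closure
	result := pattern.ReplaceAllStringFunc(template, func(m string) string {
		name := m[2 : len(m)-1] // strip "${" and "}"
		v, ok := lookup(name)
		if !ok {
			outerErr = errors.New("invalid template variable: " + name)
			return ""
		}
		return v
	})
	return result, outerErr
}

func main() {
	out, err := substitute("host=${HOST}", func(k string) (string, bool) {
		v, ok := map[string]string{"HOST": "localhost"}[k]
		return v, ok
	})
	fmt.Println(out, err) // host=localhost <nil>
}
```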
22  vendor/github.com/docker/cli/cli/command/cli.go  generated vendored
@@ -35,6 +35,8 @@ import (
 	notaryclient "github.com/theupdateframework/notary/client"
 )
 
+const defaultInitTimeout = 2 * time.Second
+
 // Streams is an interface which exposes the standard input and output streams
 type Streams interface {
 	In() *streams.In
@@ -77,6 +79,7 @@ type DockerCli struct {
 	currentContext     string
 	dockerEndpoint     docker.Endpoint
 	contextStoreConfig store.Config
+	initTimeout        time.Duration
 }
 
 // DefaultVersion returns api.defaultVersion.
@@ -216,7 +219,7 @@ func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions, ops ...Initialize
 	cli.contextStore = &ContextStoreWithDefault{
 		Store: baseContextStore,
 		Resolver: func() (*DefaultContext, error) {
-			return ResolveDefaultContext(opts.Common, cli.ConfigFile(), cli.contextStoreConfig, cli.Err())
+			return ResolveDefaultContext(opts.Common, cli.contextStoreConfig)
 		},
 	}
 	cli.currentContext, err = resolveContextName(opts.Common, cli.configFile, cli.contextStore)
@@ -244,7 +247,7 @@ func NewAPIClientFromFlags(opts *cliflags.CommonOptions, configFile *configfile.
 	contextStore := &ContextStoreWithDefault{
 		Store: store.New(config.ContextStoreDir(), storeConfig),
 		Resolver: func() (*DefaultContext, error) {
-			return ResolveDefaultContext(opts, configFile, storeConfig, io.Discard)
+			return ResolveDefaultContext(opts, storeConfig)
 		},
 	}
 	contextName, err := resolveContextName(opts, configFile, contextStore)
@@ -313,13 +316,20 @@ func resolveDefaultDockerEndpoint(opts *cliflags.CommonOptions) (docker.Endpoint
 	}, nil
 }
 
+func (cli *DockerCli) getInitTimeout() time.Duration {
+	if cli.initTimeout != 0 {
+		return cli.initTimeout
+	}
+	return defaultInitTimeout
+}
+
 func (cli *DockerCli) initializeFromClient() {
 	ctx := context.Background()
-	if strings.HasPrefix(cli.DockerEndpoint().Host, "tcp://") {
+	if !strings.HasPrefix(cli.DockerEndpoint().Host, "ssh://") {
 		// @FIXME context.WithTimeout doesn't work with connhelper / ssh connections
 		// time="2020-04-10T10:16:26Z" level=warning msg="commandConn.CloseWrite: commandconn: failed to wait: signal: killed"
 		var cancel func()
-		ctx, cancel = context.WithTimeout(ctx, 2*time.Second)
+		ctx, cancel = context.WithTimeout(ctx, cli.getInitTimeout())
 		defer cancel()
 	}
 
@@ -456,10 +466,10 @@ func resolveContextName(opts *cliflags.CommonOptions, config *configfile.ConfigF
 	if len(opts.Hosts) > 0 {
 		return DefaultContextName, nil
 	}
-	if _, present := os.LookupEnv(client.EnvOverrideHost); present {
+	if os.Getenv(client.EnvOverrideHost) != "" {
 		return DefaultContextName, nil
 	}
-	if ctxName, ok := os.LookupEnv("DOCKER_CONTEXT"); ok {
+	if ctxName := os.Getenv("DOCKER_CONTEXT"); ctxName != "" {
 		return ctxName, nil
 	}
 	if config != nil && config.CurrentContext != "" {
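Besides making the 2-second init timeout overridable through `initTimeout`, the `resolveContextName` change means an environment variable that is set but empty (e.g. `DOCKER_HOST=`) no longer short-circuits context resolution: `os.Getenv` cannot tell set-but-empty apart from unset, whereas `os.LookupEnv` can. A small demonstration of the difference:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	os.Setenv("DOCKER_HOST", "") // set, but empty

	_, present := os.LookupEnv("DOCKER_HOST")
	fmt.Println("LookupEnv says present:", present) // true: the old code took this branch

	nonEmpty := os.Getenv("DOCKER_HOST") != ""
	fmt.Println("Getenv says non-empty:", nonEmpty) // false: the new code skips it
}
```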
16  vendor/github.com/docker/cli/cli/command/cli_options.go  generated vendored
@@ -1,13 +1,10 @@
 package command
 
 import (
-	"fmt"
 	"io"
 	"os"
 	"strconv"
 
-	"github.com/docker/cli/cli/context/docker"
-	"github.com/docker/cli/cli/context/store"
 	"github.com/docker/cli/cli/streams"
 	"github.com/moby/term"
 )
@@ -82,19 +79,6 @@ func WithContentTrust(enabled bool) DockerCliOption {
 	}
 }
 
-// WithContextEndpointType add support for an additional typed endpoint in the context store
-// Plugins should use this to store additional endpoints configuration in the context store
-func WithContextEndpointType(endpointName string, endpointType store.TypeGetter) DockerCliOption {
-	return func(cli *DockerCli) error {
-		switch endpointName {
-		case docker.DockerEndpoint:
-			return fmt.Errorf("cannot change %q endpoint type", endpointName)
-		}
-		cli.contextStoreConfig.SetEndpoint(endpointName, endpointType)
-		return nil
-	}
-}
-
 // WithDefaultContextStoreConfig configures the cli to use the default context store configuration.
 func WithDefaultContextStoreConfig() DockerCliOption {
 	return func(cli *DockerCli) error {
6  vendor/github.com/docker/cli/cli/command/defaultcontextstore.go  generated vendored
@@ -2,9 +2,7 @@ package command
 
 import (
 	"fmt"
-	"io"
 
-	"github.com/docker/cli/cli/config/configfile"
 	"github.com/docker/cli/cli/context/docker"
 	"github.com/docker/cli/cli/context/store"
 	cliflags "github.com/docker/cli/cli/flags"
@@ -45,7 +43,7 @@ type EndpointDefaultResolver interface {
 }
 
 // ResolveDefaultContext creates a Metadata for the current CLI invocation parameters
-func ResolveDefaultContext(opts *cliflags.CommonOptions, config *configfile.ConfigFile, storeconfig store.Config, stderr io.Writer) (*DefaultContext, error) {
+func ResolveDefaultContext(opts *cliflags.CommonOptions, config store.Config) (*DefaultContext, error) {
 	contextTLSData := store.ContextTLSData{
 		Endpoints: make(map[string]store.EndpointTLSData),
 	}
@@ -66,7 +64,7 @@ func ResolveDefaultContext(opts *cliflags.CommonOptions, config *configfile.Conf
 		contextTLSData.Endpoints[docker.DockerEndpoint] = *dockerEP.TLSData.ToStoreTLSData()
 	}
 
-	if err := storeconfig.ForeachEndpointType(func(n string, get store.TypeGetter) error {
+	if err := config.ForeachEndpointType(func(n string, get store.TypeGetter) error {
 		if n == docker.DockerEndpoint { // handled above
 			return nil
 		}
7  vendor/github.com/klauspost/compress/.gitignore  generated vendored
@@ -23,3 +23,10 @@ _testmain.go
 *.test
 *.prof
 /s2/cmd/_s2sx/sfx-exe
+
+# Linux perf files
+perf.data
+perf.data.old
+
+# gdb history
+.gdb_history
52  vendor/github.com/klauspost/compress/README.md  generated vendored
@@ -17,6 +17,53 @@ This package provides various compression algorithms.
 
 # changelog
 
+* June 3, 2022 (v1.15.6)
+	* s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613
+	* s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611
+	* zstd: Always use configured block size https://github.com/klauspost/compress/pull/605
+	* zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606
+	* zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608
+	* gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612
+	* s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609
+	* s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607
+	* snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614
+	* s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610
+
+* May 25, 2022 (v1.15.5)
+	* s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602
+	* s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601
+	* huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596
+	* zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588
+	* zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592
+	* zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599
+	* zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593
+	* huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586
+	* flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590
+
+
+* May 11, 2022 (v1.15.4)
+	* huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577)
+	* inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581)
+	* zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583)
+	* zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580)
+
+* May 5, 2022 (v1.15.3)
+	* zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572)
+	* s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575)
+
+* Apr 26, 2022 (v1.15.2)
+	* zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537)
+	* zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539)
+	* s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555)
+	* Minimum version is Go 1.16, added CI test on 1.18.
+
+* Mar 11, 2022 (v1.15.1)
+	* huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512)
+	* zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514)
+	* zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520)
+	* zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521)
+	* zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)
+
 * Mar 3, 2022 (v1.15.0)
 	* zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
 	* zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
@@ -60,6 +107,9 @@ While the release has been extensively tested, it is recommended to testing when
 * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464)
 * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445)
 
+<details>
+	<summary>See changes to v1.13.x</summary>
+
 * Aug 30, 2021 (v1.13.5)
 	* gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425)
 	* s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413)
@@ -88,6 +138,8 @@ While the release has been extensively tested, it is recommended to testing when
 * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors.
 * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382)
 * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380)
+
+</details>
 
 <details>
 	<summary>See changes to v1.12.x</summary>
5  vendor/github.com/klauspost/compress/huff0/autogen.go  generated vendored
@@ -1,5 +0,0 @@
-package huff0
-
-//go:generate go run generate.go
-//go:generate asmfmt -w decompress_amd64.s
-//go:generate asmfmt -w decompress_8b_amd64.s
10  vendor/github.com/klauspost/compress/huff0/bitreader.go  generated vendored
@@ -165,11 +165,6 @@ func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 {
 	return uint16(b.value >> ((64 - n) & 63))
 }
 
-// peekTopBits(n) is equvialent to peekBitFast(64 - n)
-func (b *bitReaderShifted) peekTopBits(n uint8) uint16 {
-	return uint16(b.value >> n)
-}
-
 func (b *bitReaderShifted) advance(n uint8) {
 	b.bitsRead += n
 	b.value <<= n & 63
@@ -220,11 +215,6 @@ func (b *bitReaderShifted) fill() {
 	}
 }
 
-// finished returns true if all bits have been read from the bit stream.
-func (b *bitReaderShifted) finished() bool {
-	return b.off == 0 && b.bitsRead >= 64
-}
-
 func (b *bitReaderShifted) remaining() uint {
 	return b.off*8 + uint(64-b.bitsRead)
 }
115  vendor/github.com/klauspost/compress/huff0/bitwriter.go  generated vendored
@@ -5,8 +5,6 @@
 
 package huff0
 
-import "fmt"
-
 // bitWriter will write bits.
 // First bit will be LSB of the first byte of output.
 type bitWriter struct {
@@ -23,14 +21,6 @@ var bitMask16 = [32]uint16{
 	0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
 	0xFFFF, 0xFFFF} /* up to 16 bits */
 
-// addBits16NC will add up to 16 bits.
-// It will not check if there is space for them,
-// so the caller must ensure that it has flushed recently.
-func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
-	b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63)
-	b.nBits += bits
-}
-
 // addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
 // It will not check if there is space for them, so the caller must ensure that it has flushed recently.
 func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
@@ -70,104 +60,6 @@ func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) {
 	b.nBits += encA.nBits + encB.nBits
 }
 
-// addBits16ZeroNC will add up to 16 bits.
-// It will not check if there is space for them,
-// so the caller must ensure that it has flushed recently.
-// This is fastest if bits can be zero.
-func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) {
-	if bits == 0 {
-		return
-	}
-	value <<= (16 - bits) & 15
-	value >>= (16 - bits) & 15
-	b.bitContainer |= uint64(value) << (b.nBits & 63)
-	b.nBits += bits
-}
-
-// flush will flush all pending full bytes.
-// There will be at least 56 bits available for writing when this has been called.
-// Using flush32 is faster, but leaves less space for writing.
-func (b *bitWriter) flush() {
-	v := b.nBits >> 3
-	switch v {
-	case 0:
-		return
-	case 1:
-		b.out = append(b.out,
-			byte(b.bitContainer),
-		)
-		b.bitContainer >>= 1 << 3
-	case 2:
-		b.out = append(b.out,
-			byte(b.bitContainer),
-			byte(b.bitContainer>>8),
-		)
-		b.bitContainer >>= 2 << 3
-	case 3:
-		b.out = append(b.out,
-			byte(b.bitContainer),
-			byte(b.bitContainer>>8),
-			byte(b.bitContainer>>16),
-		)
-		b.bitContainer >>= 3 << 3
-	case 4:
-		b.out = append(b.out,
-			byte(b.bitContainer),
-			byte(b.bitContainer>>8),
-			byte(b.bitContainer>>16),
-			byte(b.bitContainer>>24),
-		)
-		b.bitContainer >>= 4 << 3
-	case 5:
-		b.out = append(b.out,
-			byte(b.bitContainer),
-			byte(b.bitContainer>>8),
-			byte(b.bitContainer>>16),
-			byte(b.bitContainer>>24),
-			byte(b.bitContainer>>32),
-		)
-		b.bitContainer >>= 5 << 3
-	case 6:
-		b.out = append(b.out,
-			byte(b.bitContainer),
-			byte(b.bitContainer>>8),
-			byte(b.bitContainer>>16),
-			byte(b.bitContainer>>24),
-			byte(b.bitContainer>>32),
-			byte(b.bitContainer>>40),
-		)
-		b.bitContainer >>= 6 << 3
-	case 7:
-		b.out = append(b.out,
-			byte(b.bitContainer),
-			byte(b.bitContainer>>8),
-			byte(b.bitContainer>>16),
-			byte(b.bitContainer>>24),
-			byte(b.bitContainer>>32),
-			byte(b.bitContainer>>40),
-			byte(b.bitContainer>>48),
-		)
-		b.bitContainer >>= 7 << 3
-	case 8:
-		b.out = append(b.out,
-			byte(b.bitContainer),
-			byte(b.bitContainer>>8),
-			byte(b.bitContainer>>16),
-			byte(b.bitContainer>>24),
-			byte(b.bitContainer>>32),
-			byte(b.bitContainer>>40),
-			byte(b.bitContainer>>48),
-			byte(b.bitContainer>>56),
-		)
-		b.bitContainer = 0
-		b.nBits = 0
-		return
-	default:
-		panic(fmt.Errorf("bits (%d) > 64", b.nBits))
-	}
-	b.nBits &= 7
-}
-
 // flush32 will flush out, so there are at least 32 bits available for writing.
 func (b *bitWriter) flush32() {
 	if b.nBits < 32 {
@@ -201,10 +93,3 @@ func (b *bitWriter) close() error {
 	b.flushAlign()
 	return nil
 }
-
-// reset and continue writing by appending to out.
-func (b *bitWriter) reset(out []byte) {
-	b.bitContainer = 0
-	b.nBits = 0
-	b.out = out
-}
10  vendor/github.com/klauspost/compress/huff0/bytereader.go  generated vendored
@@ -20,11 +20,6 @@ func (b *byteReader) init(in []byte) {
 	b.off = 0
 }
 
-// advance the stream b n bytes.
-func (b *byteReader) advance(n uint) {
-	b.off += int(n)
-}
-
 // Int32 returns a little endian int32 starting at current offset.
 func (b byteReader) Int32() int32 {
 	v3 := int32(b.b[b.off+3])
@@ -43,11 +38,6 @@ func (b byteReader) Uint32() uint32 {
 	return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
 }
 
-// unread returns the unread portion of the input.
-func (b byteReader) unread() []byte {
-	return b.b[b.off:]
-}
-
 // remain will return the number of bytes remaining.
 func (b byteReader) remain() int {
 	return len(b.b) - b.off
1  vendor/github.com/klauspost/compress/huff0/compress.go  generated vendored
@@ -404,6 +404,7 @@ func (s *Scratch) canUseTable(c cTable) bool {
 	return true
 }
 
+//lint:ignore U1000 used for debugging
 func (s *Scratch) validateTable(c cTable) bool {
 	if len(c) < int(s.symbolLen) {
 		return false
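The added `//lint:ignore U1000 used for debugging` line is a staticcheck directive, not a plain comment: it must sit immediately above the declaration it excuses and suppresses only that single finding (U1000, "unused"). A minimal illustration of the placement rule, assuming staticcheck is the linter in use:

```go
package main

//lint:ignore U1000 kept for debugging; staticcheck would otherwise flag this as unused
func debugOnlyHelper() bool {
	return true
}

func main() {}
```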
113 vendor/github.com/klauspost/compress/huff0/decompress.go (generated, vendored)

@@ -11,7 +11,6 @@ import (

 type dTable struct {
 	single []dEntrySingle
-	double []dEntryDouble
 }

 // single-symbols decoding
@@ -19,13 +18,6 @@ type dEntrySingle struct {
 	entry uint16
 }

-// double-symbols decoding
-type dEntryDouble struct {
-	seq   [4]byte
-	nBits uint8
-	len   uint8
-}
-
 // Uses special code for all tables that are < 8 bits.
 const use8BitTables = true

@@ -35,7 +27,7 @@ const use8BitTables = true
 // If no Scratch is provided a new one is allocated.
 // The returned Scratch can be used for encoding or decoding input using this table.
 func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
-	s, err = s.prepare(in)
+	s, err = s.prepare(nil)
 	if err != nil {
 		return s, nil, err
 	}
@@ -236,108 +228,6 @@ func (d *Decoder) buffer() *[4][256]byte {
 	return &[4][256]byte{}
 }

-// Decompress1X will decompress a 1X encoded stream.
-// The cap of the output buffer will be the maximum decompressed size.
-// The length of the supplied input must match the end of a block exactly.
-func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
-	if len(d.dt.single) == 0 {
-		return nil, errors.New("no table loaded")
-	}
-	if use8BitTables && d.actualTableLog <= 8 {
-		return d.decompress1X8Bit(dst, src)
-	}
-	var br bitReaderShifted
-	err := br.init(src)
-	if err != nil {
-		return dst, err
-	}
-	maxDecodedSize := cap(dst)
-	dst = dst[:0]
-
-	// Avoid bounds check by always having full sized table.
-	const tlSize = 1 << tableLogMax
-	const tlMask = tlSize - 1
-	dt := d.dt.single[:tlSize]
-
-	// Use temp table to avoid bound checks/append penalty.
-	bufs := d.buffer()
-	buf := &bufs[0]
-	var off uint8
-
-	for br.off >= 8 {
-		br.fillFast()
-		v := dt[br.peekBitsFast(d.actualTableLog)&tlMask]
-		br.advance(uint8(v.entry))
-		buf[off+0] = uint8(v.entry >> 8)
-
-		v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
-		br.advance(uint8(v.entry))
-		buf[off+1] = uint8(v.entry >> 8)
-
-		// Refill
-		br.fillFast()
-
-		v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
-		br.advance(uint8(v.entry))
-		buf[off+2] = uint8(v.entry >> 8)
-
-		v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
-		br.advance(uint8(v.entry))
-		buf[off+3] = uint8(v.entry >> 8)
-
-		off += 4
-		if off == 0 {
-			if len(dst)+256 > maxDecodedSize {
-				br.close()
-				d.bufs.Put(bufs)
-				return nil, ErrMaxDecodedSizeExceeded
-			}
-			dst = append(dst, buf[:]...)
-		}
-	}
-
-	if len(dst)+int(off) > maxDecodedSize {
-		d.bufs.Put(bufs)
-		br.close()
-		return nil, ErrMaxDecodedSizeExceeded
-	}
-	dst = append(dst, buf[:off]...)
-
-	// br < 8, so uint8 is fine
-	bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
-	for bitsLeft > 0 {
-		br.fill()
-		if false && br.bitsRead >= 32 {
-			if br.off >= 4 {
-				v := br.in[br.off-4:]
-				v = v[:4]
-				low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
-				br.value = (br.value << 32) | uint64(low)
-				br.bitsRead -= 32
-				br.off -= 4
-			} else {
-				for br.off > 0 {
-					br.value = (br.value << 8) | uint64(br.in[br.off-1])
-					br.bitsRead -= 8
-					br.off--
-				}
-			}
-		}
-		if len(dst) >= maxDecodedSize {
-			d.bufs.Put(bufs)
-			br.close()
-			return nil, ErrMaxDecodedSizeExceeded
-		}
-		v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
-		nBits := uint8(v.entry)
-		br.advance(nBits)
-		bitsLeft -= nBits
-		dst = append(dst, uint8(v.entry>>8))
-	}
-	d.bufs.Put(bufs)
-	return dst, br.close()
-}
-
 // decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8.
 // The cap of the output buffer will be the maximum decompressed size.
 // The length of the supplied input must match the end of a block exactly.
@@ -995,7 +885,6 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {

 	const shift = 56
 	const tlSize = 1 << 8
-	const tlMask = tlSize - 1
 	single := d.dt.single[:tlSize]

 	// Use temp table to avoid bound checks/append penalty.
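The hunk above moves the pure-Go Decompress1X out of this file (an asm version takes over on amd64; see decompress_amd64.go below). Here is a small round-trip sketch of how that entry point is driven through the public huff0 API; it assumes only the exported functions visible in this diff and in the vendored package (Compress1X, ReadTable, Scratch.Decoder), and the sample input is arbitrary:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/huff0"
)

func main() {
	in := bytes.Repeat([]byte("abacabadabacabae"), 32) // repetitive, so it huffman-compresses
	var s huff0.Scratch

	// Compress1X emits the table followed by a single compressed stream.
	comp, _, err := huff0.Compress1X(in, &s)
	if err != nil {
		panic(err)
	}

	// ReadTable parses the table header and returns the remaining payload.
	dec, payload, err := huff0.ReadTable(comp, nil)
	if err != nil {
		panic(err)
	}

	// The cap of dst is the decode limit, as the Decompress1X comment states.
	out, err := dec.Decoder().Decompress1X(make([]byte, 0, len(in)), payload)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(out, in)) // true
}
```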
488 vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s (generated, vendored)

@@ -1,488 +0,0 @@
-// +build !appengine
-// +build gc
-// +build !noasm
-
-#include "textflag.h"
-#include "funcdata.h"
-#include "go_asm.h"
-
-#define bufoff 256 // see decompress.go, we're using [4][256]byte table
-
-// func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
-//	peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
-TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
-#define off R8
-#define buffer DI
-#define table SI
-
-#define br_bits_read R9
-#define br_value R10
-#define br_offset R11
-#define peek_bits R12
-#define exhausted DX
-
-#define br0 R13
-#define br1 R14
-#define br2 R15
-#define br3 BP
-
-	MOVQ BP, 0(SP)
-
-	XORQ exhausted, exhausted // exhausted = false
-	XORQ off, off             // off = 0
-
-	MOVBQZX peekBits+32(FP), peek_bits
-	MOVQ    buf+40(FP), buffer
-	MOVQ    tbl+48(FP), table
-
-	MOVQ pbr0+0(FP), br0
-	MOVQ pbr1+8(FP), br1
-	MOVQ pbr2+16(FP), br2
-	MOVQ pbr3+24(FP), br3
-
-main_loop:
-
-	// const stream = 0
-	// br0.fillFast()
-	MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read
-	MOVQ    bitReaderShifted_value(br0), br_value
-	MOVQ    bitReaderShifted_off(br0), br_offset
-
-	// if b.bitsRead >= 32 {
-	CMPQ br_bits_read, $32
-	JB   skip_fill0
-
-	SUBQ $32, br_bits_read // b.bitsRead -= 32
-	SUBQ $4, br_offset     // b.off -= 4
-
-	// v := b.in[b.off-4 : b.off]
-	// v = v[:4]
-	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
-	MOVQ bitReaderShifted_in(br0), AX
-	MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
-
-	// b.value |= uint64(low) << (b.bitsRead & 63)
-	MOVQ br_bits_read, CX
-	SHLQ CL, AX
-	ORQ  AX, br_value
-
-	// exhausted = exhausted || (br0.off < 4)
-	CMPQ  br_offset, $4
-	SETLT DL
-	ORB   DL, DH
-
-	// }
-skip_fill0:
-
-	// val0 := br0.peekTopBits(peekBits)
-	MOVQ br_value, AX
-	MOVQ peek_bits, CX
-	SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
-	// v0 := table[val0&mask]
-	MOVW 0(table)(AX*2), AX // AX - v0
-
-	// br0.advance(uint8(v0.entry))
-	MOVB    AH, BL // BL = uint8(v0.entry >> 8)
-	MOVBQZX AL, CX
-	SHLQ    CL, br_value     // value <<= n
-	ADDQ    CX, br_bits_read // bits_read += n
-
-	// val1 := br0.peekTopBits(peekBits)
-	MOVQ peek_bits, CX
-	MOVQ br_value, AX
-	SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
-	// v1 := table[val1&mask]
-	MOVW 0(table)(AX*2), AX // AX - v1
-
-	// br0.advance(uint8(v1.entry))
-	MOVB    AH, BH // BH = uint8(v1.entry >> 8)
-	MOVBQZX AL, CX
-	SHLQ    CX, br_value     // value <<= n
-	ADDQ    CX, br_bits_read // bits_read += n
-
-	// these two writes get coalesced
-	// buf[stream][off] = uint8(v0.entry >> 8)
-	// buf[stream][off+1] = uint8(v1.entry >> 8)
-	MOVW BX, 0(buffer)(off*1)
-
-	// SECOND PART:
-	// val2 := br0.peekTopBits(peekBits)
-	MOVQ br_value, AX
-	MOVQ peek_bits, CX
-	SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
-	// v2 := table[val0&mask]
-	MOVW 0(table)(AX*2), AX // AX - v0
-
-	// br0.advance(uint8(v0.entry))
-	MOVB    AH, BL // BL = uint8(v0.entry >> 8)
-	MOVBQZX AL, CX
-	SHLQ    CL, br_value     // value <<= n
-	ADDQ    CX, br_bits_read // bits_read += n
-
-	// val3 := br0.peekTopBits(peekBits)
-	MOVQ peek_bits, CX
-	MOVQ br_value, AX
-	SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
-	// v3 := table[val1&mask]
-	MOVW 0(table)(AX*2), AX // AX - v1
-
-	// br0.advance(uint8(v1.entry))
-	MOVB    AH, BH // BH = uint8(v1.entry >> 8)
-	MOVBQZX AL, CX
-	SHLQ    CX, br_value     // value <<= n
-	ADDQ    CX, br_bits_read // bits_read += n
-
-	// these two writes get coalesced
-	// buf[stream][off+2] = uint8(v2.entry >> 8)
-	// buf[stream][off+3] = uint8(v3.entry >> 8)
-	MOVW BX, 0+2(buffer)(off*1)
-
-	// update the bitrader reader structure
-	MOVB br_bits_read, bitReaderShifted_bitsRead(br0)
-	MOVQ br_value, bitReaderShifted_value(br0)
-	MOVQ br_offset, bitReaderShifted_off(br0)
-
-	// const stream = 1
-	// br1.fillFast()
-	MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read
-	MOVQ    bitReaderShifted_value(br1), br_value
-	MOVQ    bitReaderShifted_off(br1), br_offset
-
-	// if b.bitsRead >= 32 {
-	CMPQ br_bits_read, $32
-	JB   skip_fill1
-
-	SUBQ $32, br_bits_read // b.bitsRead -= 32
-	SUBQ $4, br_offset     // b.off -= 4
-
-	// v := b.in[b.off-4 : b.off]
-	// v = v[:4]
-	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
-	MOVQ bitReaderShifted_in(br1), AX
-	MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
-
-	// b.value |= uint64(low) << (b.bitsRead & 63)
-	MOVQ br_bits_read, CX
-	SHLQ CL, AX
-	ORQ  AX, br_value
-
-	// exhausted = exhausted || (br1.off < 4)
-	CMPQ  br_offset, $4
-	SETLT DL
-	ORB   DL, DH
-
-	// }
-skip_fill1:
-
-	// val0 := br1.peekTopBits(peekBits)
-	MOVQ br_value, AX
-	MOVQ peek_bits, CX
-	SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
-	// v0 := table[val0&mask]
-	MOVW 0(table)(AX*2), AX // AX - v0
-
-	// br1.advance(uint8(v0.entry))
-	MOVB    AH, BL // BL = uint8(v0.entry >> 8)
-	MOVBQZX AL, CX
-	SHLQ    CL, br_value     // value <<= n
-	ADDQ    CX, br_bits_read // bits_read += n
-
-	// val1 := br1.peekTopBits(peekBits)
-	MOVQ peek_bits, CX
-	MOVQ br_value, AX
-	SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
-	// v1 := table[val1&mask]
-	MOVW 0(table)(AX*2), AX // AX - v1
-
-	// br1.advance(uint8(v1.entry))
-	MOVB    AH, BH // BH = uint8(v1.entry >> 8)
-	MOVBQZX AL, CX
-	SHLQ    CX, br_value     // value <<= n
-	ADDQ    CX, br_bits_read // bits_read += n
-
-	// these two writes get coalesced
-	// buf[stream][off] = uint8(v0.entry >> 8)
-	// buf[stream][off+1] = uint8(v1.entry >> 8)
-	MOVW BX, 256(buffer)(off*1)
-
-	// SECOND PART:
-	// val2 := br1.peekTopBits(peekBits)
-	MOVQ br_value, AX
-	MOVQ peek_bits, CX
-	SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
-	// v2 := table[val0&mask]
-	MOVW 0(table)(AX*2), AX // AX - v0
-
-	// br1.advance(uint8(v0.entry))
-	MOVB    AH, BL // BL = uint8(v0.entry >> 8)
-	MOVBQZX AL, CX
-	SHLQ    CL, br_value     // value <<= n
-	ADDQ    CX, br_bits_read // bits_read += n
-
-	// val3 := br1.peekTopBits(peekBits)
-	MOVQ peek_bits, CX
-	MOVQ br_value, AX
-	SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
-	// v3 := table[val1&mask]
-	MOVW 0(table)(AX*2), AX // AX - v1
-
-	// br1.advance(uint8(v1.entry))
-	MOVB    AH, BH // BH = uint8(v1.entry >> 8)
-	MOVBQZX AL, CX
-	SHLQ    CX, br_value     // value <<= n
-	ADDQ    CX, br_bits_read // bits_read += n
-
-	// these two writes get coalesced
-	// buf[stream][off+2] = uint8(v2.entry >> 8)
-	// buf[stream][off+3] = uint8(v3.entry >> 8)
-	MOVW BX, 256+2(buffer)(off*1)
-
-	// update the bitrader reader structure
-	MOVB br_bits_read, bitReaderShifted_bitsRead(br1)
-	MOVQ br_value, bitReaderShifted_value(br1)
-	MOVQ br_offset, bitReaderShifted_off(br1)
-
-	// const stream = 2
-	// br2.fillFast()
-	MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read
-	MOVQ    bitReaderShifted_value(br2), br_value
-	MOVQ    bitReaderShifted_off(br2), br_offset
-
-	// if b.bitsRead >= 32 {
-	CMPQ br_bits_read, $32
-	JB   skip_fill2
-
-	SUBQ $32, br_bits_read // b.bitsRead -= 32
-	SUBQ $4, br_offset     // b.off -= 4
-
-	// v := b.in[b.off-4 : b.off]
-	// v = v[:4]
-	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
-	MOVQ bitReaderShifted_in(br2), AX
-	MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
-
-	// b.value |= uint64(low) << (b.bitsRead & 63)
-	MOVQ br_bits_read, CX
-	SHLQ CL, AX
-	ORQ  AX, br_value
-
-	// exhausted = exhausted || (br2.off < 4)
-	CMPQ  br_offset, $4
-	SETLT DL
-	ORB   DL, DH
-
-	// }
-skip_fill2:
-
-	// val0 := br2.peekTopBits(peekBits)
-	MOVQ br_value, AX
-	MOVQ peek_bits, CX
-	SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
-	// v0 := table[val0&mask]
-	MOVW 0(table)(AX*2), AX // AX - v0
-
-	// br2.advance(uint8(v0.entry))
-	MOVB    AH, BL // BL = uint8(v0.entry >> 8)
-	MOVBQZX AL, CX
-	SHLQ    CL, br_value     // value <<= n
-	ADDQ    CX, br_bits_read // bits_read += n
-
-	// val1 := br2.peekTopBits(peekBits)
-	MOVQ peek_bits, CX
-	MOVQ br_value, AX
-	SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
-	// v1 := table[val1&mask]
-	MOVW 0(table)(AX*2), AX // AX - v1
-
-	// br2.advance(uint8(v1.entry))
-	MOVB    AH, BH // BH = uint8(v1.entry >> 8)
-	MOVBQZX AL, CX
-	SHLQ    CX, br_value     // value <<= n
-	ADDQ    CX, br_bits_read // bits_read += n
-
-	// these two writes get coalesced
-	// buf[stream][off] = uint8(v0.entry >> 8)
-	// buf[stream][off+1] = uint8(v1.entry >> 8)
-	MOVW BX, 512(buffer)(off*1)
-
-	// SECOND PART:
-	// val2 := br2.peekTopBits(peekBits)
-	MOVQ br_value, AX
-	MOVQ peek_bits, CX
-	SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
-	// v2 := table[val0&mask]
-	MOVW 0(table)(AX*2), AX // AX - v0
-
-	// br2.advance(uint8(v0.entry))
-	MOVB    AH, BL // BL = uint8(v0.entry >> 8)
-	MOVBQZX AL, CX
-	SHLQ    CL, br_value     // value <<= n
-	ADDQ    CX, br_bits_read // bits_read += n
-
-	// val3 := br2.peekTopBits(peekBits)
-	MOVQ peek_bits, CX
-	MOVQ br_value, AX
-	SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
-	// v3 := table[val1&mask]
-	MOVW 0(table)(AX*2), AX // AX - v1
-
-	// br2.advance(uint8(v1.entry))
-	MOVB    AH, BH // BH = uint8(v1.entry >> 8)
-	MOVBQZX AL, CX
-	SHLQ    CX, br_value     // value <<= n
-	ADDQ    CX, br_bits_read // bits_read += n
-
-	// these two writes get coalesced
-	// buf[stream][off+2] = uint8(v2.entry >> 8)
-	// buf[stream][off+3] = uint8(v3.entry >> 8)
-	MOVW BX, 512+2(buffer)(off*1)
-
-	// update the bitrader reader structure
-	MOVB br_bits_read, bitReaderShifted_bitsRead(br2)
-	MOVQ br_value, bitReaderShifted_value(br2)
-	MOVQ br_offset, bitReaderShifted_off(br2)
-
-	// const stream = 3
-	// br3.fillFast()
-	MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read
-	MOVQ    bitReaderShifted_value(br3), br_value
-	MOVQ    bitReaderShifted_off(br3), br_offset
-
-	// if b.bitsRead >= 32 {
-	CMPQ br_bits_read, $32
-	JB   skip_fill3
-
-	SUBQ $32, br_bits_read // b.bitsRead -= 32
-	SUBQ $4, br_offset     // b.off -= 4
-
-	// v := b.in[b.off-4 : b.off]
-	// v = v[:4]
-	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
-	MOVQ bitReaderShifted_in(br3), AX
-	MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
-
-	// b.value |= uint64(low) << (b.bitsRead & 63)
-	MOVQ br_bits_read, CX
-	SHLQ CL, AX
-	ORQ  AX, br_value
-
-	// exhausted = exhausted || (br3.off < 4)
-	CMPQ  br_offset, $4
-	SETLT DL
-	ORB   DL, DH
-
-	// }
-skip_fill3:
-
-	// val0 := br3.peekTopBits(peekBits)
-	MOVQ br_value, AX
-	MOVQ peek_bits, CX
-	SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
-	// v0 := table[val0&mask]
-	MOVW 0(table)(AX*2), AX // AX - v0
-
-	// br3.advance(uint8(v0.entry))
-	MOVB    AH, BL // BL = uint8(v0.entry >> 8)
-	MOVBQZX AL, CX
-	SHLQ    CL, br_value     // value <<= n
-	ADDQ    CX, br_bits_read // bits_read += n
-
-	// val1 := br3.peekTopBits(peekBits)
-	MOVQ peek_bits, CX
-	MOVQ br_value, AX
-	SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
-	// v1 := table[val1&mask]
-	MOVW 0(table)(AX*2), AX // AX - v1
-
-	// br3.advance(uint8(v1.entry))
-	MOVB    AH, BH // BH = uint8(v1.entry >> 8)
-	MOVBQZX AL, CX
-	SHLQ    CX, br_value     // value <<= n
-	ADDQ    CX, br_bits_read // bits_read += n
-
-	// these two writes get coalesced
-	// buf[stream][off] = uint8(v0.entry >> 8)
-	// buf[stream][off+1] = uint8(v1.entry >> 8)
-	MOVW BX, 768(buffer)(off*1)
-
-	// SECOND PART:
-	// val2 := br3.peekTopBits(peekBits)
-	MOVQ br_value, AX
-	MOVQ peek_bits, CX
-	SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
-	// v2 := table[val0&mask]
-	MOVW 0(table)(AX*2), AX // AX - v0
-
-	// br3.advance(uint8(v0.entry))
-	MOVB    AH, BL // BL = uint8(v0.entry >> 8)
-	MOVBQZX AL, CX
-	SHLQ    CL, br_value     // value <<= n
-	ADDQ    CX, br_bits_read // bits_read += n
-
-	// val3 := br3.peekTopBits(peekBits)
-	MOVQ peek_bits, CX
-	MOVQ br_value, AX
-	SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
-	// v3 := table[val1&mask]
-	MOVW 0(table)(AX*2), AX // AX - v1
-
-	// br3.advance(uint8(v1.entry))
-	MOVB    AH, BH // BH = uint8(v1.entry >> 8)
-	MOVBQZX AL, CX
-	SHLQ    CX, br_value     // value <<= n
-	ADDQ    CX, br_bits_read // bits_read += n
-
-	// these two writes get coalesced
-	// buf[stream][off+2] = uint8(v2.entry >> 8)
-	// buf[stream][off+3] = uint8(v3.entry >> 8)
-	MOVW BX, 768+2(buffer)(off*1)
-
-	// update the bitrader reader structure
-	MOVB br_bits_read, bitReaderShifted_bitsRead(br3)
-	MOVQ br_value, bitReaderShifted_value(br3)
-	MOVQ br_offset, bitReaderShifted_off(br3)
-
-	ADDQ $4, off // off += 2
-
-	TESTB DH, DH // any br[i].ofs < 4?
-	JNZ   end
-
-	CMPQ off, $bufoff
-	JL   main_loop
-
-end:
-	MOVQ 0(SP), BP
-
-	MOVB off, ret+56(FP)
-	RET
-
-#undef off
-#undef buffer
-#undef table
-
-#undef br_bits_read
-#undef br_value
-#undef br_offset
-#undef peek_bits
-#undef exhausted
-
-#undef br0
-#undef br1
-#undef br2
-#undef br3
197 vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in (generated, vendored)

@@ -1,197 +0,0 @@
-// +build !appengine
-// +build gc
-// +build !noasm
-
-#include "textflag.h"
-#include "funcdata.h"
-#include "go_asm.h"
-
-
-#define bufoff 256 // see decompress.go, we're using [4][256]byte table
-
-//func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
-//	peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
-TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
-#define off R8
-#define buffer DI
-#define table SI
-
-#define br_bits_read R9
-#define br_value R10
-#define br_offset R11
-#define peek_bits R12
-#define exhausted DX
-
-#define br0 R13
-#define br1 R14
-#define br2 R15
-#define br3 BP
-
-	MOVQ BP, 0(SP)
-
-	XORQ exhausted, exhausted // exhausted = false
-	XORQ off, off             // off = 0
-
-	MOVBQZX peekBits+32(FP), peek_bits
-	MOVQ    buf+40(FP), buffer
-	MOVQ    tbl+48(FP), table
-
-	MOVQ pbr0+0(FP), br0
-	MOVQ pbr1+8(FP), br1
-	MOVQ pbr2+16(FP), br2
-	MOVQ pbr3+24(FP), br3
-
-main_loop:
-{{ define "decode_2_values_x86" }}
-	// const stream = {{ var "id" }}
-	// br{{ var "id"}}.fillFast()
-	MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
-	MOVQ    bitReaderShifted_value(br{{ var "id" }}), br_value
-	MOVQ    bitReaderShifted_off(br{{ var "id" }}), br_offset
-
-	// if b.bitsRead >= 32 {
-	CMPQ br_bits_read, $32
-	JB   skip_fill{{ var "id" }}
-
-	SUBQ $32, br_bits_read // b.bitsRead -= 32
-	SUBQ $4, br_offset     // b.off -= 4
-
-	// v := b.in[b.off-4 : b.off]
-	// v = v[:4]
-	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
-	MOVQ bitReaderShifted_in(br{{ var "id" }}), AX
-	MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
-
-	// b.value |= uint64(low) << (b.bitsRead & 63)
-	MOVQ br_bits_read, CX
-	SHLQ CL, AX
-	ORQ  AX, br_value
-
-	// exhausted = exhausted || (br{{ var "id"}}.off < 4)
-	CMPQ  br_offset, $4
-	SETLT DL
-	ORB   DL, DH
-	// }
-skip_fill{{ var "id" }}:
-
-	// val0 := br{{ var "id"}}.peekTopBits(peekBits)
-	MOVQ br_value, AX
-	MOVQ peek_bits, CX
-	SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
-	// v0 := table[val0&mask]
-	MOVW 0(table)(AX*2), AX // AX - v0
-
-	// br{{ var "id"}}.advance(uint8(v0.entry))
-	MOVB    AH, BL // BL = uint8(v0.entry >> 8)
-	MOVBQZX AL, CX
-	SHLQ    CL, br_value     // value <<= n
-	ADDQ    CX, br_bits_read // bits_read += n
-
-	// val1 := br{{ var "id"}}.peekTopBits(peekBits)
-	MOVQ peek_bits, CX
-	MOVQ br_value, AX
-	SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
-	// v1 := table[val1&mask]
-	MOVW 0(table)(AX*2), AX // AX - v1
-
-	// br{{ var "id"}}.advance(uint8(v1.entry))
-	MOVB    AH, BH // BH = uint8(v1.entry >> 8)
-	MOVBQZX AL, CX
-	SHLQ    CX, br_value     // value <<= n
-	ADDQ    CX, br_bits_read // bits_read += n
-
-
-	// these two writes get coalesced
-	// buf[stream][off] = uint8(v0.entry >> 8)
-	// buf[stream][off+1] = uint8(v1.entry >> 8)
-	MOVW BX, {{ var "bufofs" }}(buffer)(off*1)
-
-	// SECOND PART:
-	// val2 := br{{ var "id"}}.peekTopBits(peekBits)
-	MOVQ br_value, AX
-	MOVQ peek_bits, CX
-	SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
-	// v2 := table[val0&mask]
-	MOVW 0(table)(AX*2), AX // AX - v0
-
-	// br{{ var "id"}}.advance(uint8(v0.entry))
-	MOVB    AH, BL // BL = uint8(v0.entry >> 8)
-	MOVBQZX AL, CX
-	SHLQ    CL, br_value     // value <<= n
-	ADDQ    CX, br_bits_read // bits_read += n
-
-	// val3 := br{{ var "id"}}.peekTopBits(peekBits)
-	MOVQ peek_bits, CX
-	MOVQ br_value, AX
-	SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
-	// v3 := table[val1&mask]
-	MOVW 0(table)(AX*2), AX // AX - v1
-
-	// br{{ var "id"}}.advance(uint8(v1.entry))
-	MOVB    AH, BH // BH = uint8(v1.entry >> 8)
-	MOVBQZX AL, CX
-	SHLQ    CX, br_value     // value <<= n
-	ADDQ    CX, br_bits_read // bits_read += n
-
-
-	// these two writes get coalesced
-	// buf[stream][off+2] = uint8(v2.entry >> 8)
-	// buf[stream][off+3] = uint8(v3.entry >> 8)
-	MOVW BX, {{ var "bufofs" }}+2(buffer)(off*1)
-
-	// update the bitrader reader structure
-	MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
-	MOVQ br_value, bitReaderShifted_value(br{{ var "id" }})
-	MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }})
-{{ end }}
-
-{{ set "id" "0" }}
-{{ set "ofs" "0" }}
-{{ set "bufofs" "0" }} {{/* id * bufoff */}}
-{{ template "decode_2_values_x86" . }}
-
-{{ set "id" "1" }}
-{{ set "ofs" "8" }}
-{{ set "bufofs" "256" }}
-{{ template "decode_2_values_x86" . }}
-
-{{ set "id" "2" }}
-{{ set "ofs" "16" }}
-{{ set "bufofs" "512" }}
-{{ template "decode_2_values_x86" . }}
-
-{{ set "id" "3" }}
-{{ set "ofs" "24" }}
-{{ set "bufofs" "768" }}
-{{ template "decode_2_values_x86" . }}
-
-	ADDQ $4, off // off += 2
-
-	TESTB DH, DH // any br[i].ofs < 4?
-	JNZ   end
-
-	CMPQ off, $bufoff
-	JL   main_loop
-end:
-	MOVQ 0(SP), BP
-
-	MOVB off, ret+56(FP)
-	RET
-#undef off
-#undef buffer
-#undef table
-
-#undef br_bits_read
-#undef br_value
-#undef br_offset
-#undef peek_bits
-#undef exhausted
-
-#undef br0
-#undef br1
-#undef br2
-#undef br3
181 vendor/github.com/klauspost/compress/huff0/decompress_amd64.go (generated, vendored)

@@ -2,30 +2,43 @@
 // +build amd64,!appengine,!noasm,gc

 // This file contains the specialisation of Decoder.Decompress4X
-// that uses an asm implementation of its main loop.
+// and Decoder.Decompress1X that use an asm implementation of thir main loops.
 package huff0

 import (
 	"errors"
 	"fmt"

+	"github.com/klauspost/compress/internal/cpuinfo"
 )

 // decompress4x_main_loop_x86 is an x86 assembler implementation
 // of Decompress4X when tablelog > 8.
-// go:noescape
-func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
-	peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
+//go:noescape
+func decompress4x_main_loop_amd64(ctx *decompress4xContext)

 // decompress4x_8b_loop_x86 is an x86 assembler implementation
 // of Decompress4X when tablelog <= 8 which decodes 4 entries
 // per loop.
-// go:noescape
-func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
-	peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
+//go:noescape
+func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)

 // fallback8BitSize is the size where using Go version is faster.
 const fallback8BitSize = 800

+type decompress4xContext struct {
+	pbr0     *bitReaderShifted
+	pbr1     *bitReaderShifted
+	pbr2     *bitReaderShifted
+	pbr3     *bitReaderShifted
+	peekBits uint8
+	out      *byte
+	dstEvery int
+	tbl      *dEntrySingle
+	decoded  int
+	limit    *byte
+}
+
 // Decompress4X will decompress a 4X encoded stream.
 // The length of the supplied input must match the end of a block exactly.
 // The *capacity* of the dst slice must match the destination size of
@@ -42,6 +55,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
 	if cap(dst) < fallback8BitSize && use8BitTables {
 		return d.decompress4X8bit(dst, src)
 	}
+
 	var br [4]bitReaderShifted
 	// Decode "jump table"
 	start := 6
@@ -71,70 +85,28 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
 	const tlMask = tlSize - 1
 	single := d.dt.single[:tlSize]

-	// Use temp table to avoid bound checks/append penalty.
-	buf := d.buffer()
-	var off uint8
 	var decoded int

-	const debug = false
-
-	// see: bitReaderShifted.peekBitsFast()
-	peekBits := uint8((64 - d.actualTableLog) & 63)
-
-	// Decode 2 values from each decoder/loop.
-	const bufoff = 256
-	for {
-		if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
-			break
+	if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) {
+		ctx := decompress4xContext{
+			pbr0:     &br[0],
+			pbr1:     &br[1],
+			pbr2:     &br[2],
+			pbr3:     &br[3],
+			peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
+			out:      &out[0],
+			dstEvery: dstEvery,
+			tbl:      &single[0],
+			limit:    &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last.
 		}

 		if use8BitTables {
-			off = decompress4x_8b_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
+			decompress4x_8b_main_loop_amd64(&ctx)
 		} else {
-			off = decompress4x_main_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
-		}
-		if debug {
-			fmt.Print("DEBUG: ")
-			fmt.Printf("off=%d,", off)
-			for i := 0; i < 4; i++ {
-				fmt.Printf(" br[%d]={bitsRead=%d, value=%x, off=%d}",
-					i, br[i].bitsRead, br[i].value, br[i].off)
-			}
-			fmt.Println("")
+			decompress4x_main_loop_amd64(&ctx)
 		}

-		if off != 0 {
-			break
-		}
-
-		if bufoff > dstEvery {
-			d.bufs.Put(buf)
-			return nil, errors.New("corruption detected: stream overrun 1")
-		}
-		copy(out, buf[0][:])
-		copy(out[dstEvery:], buf[1][:])
-		copy(out[dstEvery*2:], buf[2][:])
-		copy(out[dstEvery*3:], buf[3][:])
-		out = out[bufoff:]
-		decoded += bufoff * 4
-		// There must at least be 3 buffers left.
-		if len(out) < dstEvery*3 {
-			d.bufs.Put(buf)
-			return nil, errors.New("corruption detected: stream overrun 2")
-		}
-	}
-	if off > 0 {
-		ioff := int(off)
-		if len(out) < dstEvery*3+ioff {
-			d.bufs.Put(buf)
-			return nil, errors.New("corruption detected: stream overrun 3")
-		}
-		copy(out, buf[0][:off])
-		copy(out[dstEvery:], buf[1][:off])
-		copy(out[dstEvery*2:], buf[2][:off])
-		copy(out[dstEvery*3:], buf[3][:off])
-		decoded += int(off) * 4
-		out = out[off:]
+		decoded = ctx.decoded
+		out = out[decoded/4:]
 	}

 	// Decode remaining.
@@ -150,7 +122,6 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
 	for bitsLeft > 0 {
 		br.fill()
 		if offset >= endsAt {
-			d.bufs.Put(buf)
 			return nil, errors.New("corruption detected: stream overrun 4")
 		}

@@ -164,7 +135,6 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
 			offset++
 		}
 		if offset != endsAt {
-			d.bufs.Put(buf)
 			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
 		}
 		decoded += offset - dstEvery*i
@@ -173,9 +143,86 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
 			return nil, err
 		}
 	}
-	d.bufs.Put(buf)
 	if dstSize != decoded {
 		return nil, errors.New("corruption detected: short output block")
 	}
 	return dst, nil
 }

+// decompress4x_main_loop_x86 is an x86 assembler implementation
+// of Decompress1X when tablelog > 8.
+//go:noescape
+func decompress1x_main_loop_amd64(ctx *decompress1xContext)
+
+// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation
+// of Decompress1X when tablelog > 8.
+//go:noescape
+func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
+
+type decompress1xContext struct {
+	pbr      *bitReaderShifted
+	peekBits uint8
+	out      *byte
+	outCap   int
+	tbl      *dEntrySingle
+	decoded  int
+}
+
+// Error reported by asm implementations
+const error_max_decoded_size_exeeded = -1
+
+// Decompress1X will decompress a 1X encoded stream.
+// The cap of the output buffer will be the maximum decompressed size.
+// The length of the supplied input must match the end of a block exactly.
+func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
+	if len(d.dt.single) == 0 {
+		return nil, errors.New("no table loaded")
+	}
+	var br bitReaderShifted
+	err := br.init(src)
+	if err != nil {
+		return dst, err
+	}
+	maxDecodedSize := cap(dst)
+	dst = dst[:maxDecodedSize]
+
+	const tlSize = 1 << tableLogMax
+	const tlMask = tlSize - 1
+
+	if maxDecodedSize >= 4 {
+		ctx := decompress1xContext{
+			pbr:      &br,
+			out:      &dst[0],
+			outCap:   maxDecodedSize,
+			peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
+			tbl:      &d.dt.single[0],
+		}
+
+		if cpuinfo.HasBMI2() {
+			decompress1x_main_loop_bmi2(&ctx)
+		} else {
+			decompress1x_main_loop_amd64(&ctx)
+		}
+		if ctx.decoded == error_max_decoded_size_exeeded {
+			return nil, ErrMaxDecodedSizeExceeded
+		}
+
+		dst = dst[:ctx.decoded]
+	}
+
+	// br < 8, so uint8 is fine
+	bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
+	for bitsLeft > 0 {
+		br.fill()
+		if len(dst) >= maxDecodedSize {
+			br.close()
+			return nil, ErrMaxDecodedSizeExceeded
+		}
+		v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
+		nBits := uint8(v.entry)
+		br.advance(nBits)
+		bitsLeft -= nBits
+		dst = append(dst, uint8(v.entry>>8))
+	}
+	return dst, br.close()
+}
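Two design points in the hunk above are worth isolating: the many scalar arguments of the old asm entry points are replaced by a single context-struct pointer (one argument slot, and the asm can both read inputs and report results such as `decoded` through it), and the BMI2 variant is chosen at runtime per call. A compact Go sketch of the same pattern with stand-in types; `context`, `loopGeneric`, `loopBMI2`, and `hasBMI2` are hypothetical names, not the vendored symbols:

```go
package main

import "fmt"

// context mirrors the role of decompress4xContext/decompress1xContext:
// inputs and outputs travel through one struct, so the inner loop takes a
// single pointer argument and stores results into fields.
type context struct {
	src     []byte
	dst     []byte
	decoded int // written by the loop, read by the caller
}

// Stand-ins for the two interchangeable assembler bodies selected below.
func loopGeneric(ctx *context) {
	ctx.dst = append(ctx.dst, ctx.src...)
	ctx.decoded = len(ctx.src)
}

func loopBMI2(ctx *context) { loopGeneric(ctx) } // same contract, faster ISA

var hasBMI2 = false // stand-in for cpuinfo.HasBMI2()

func main() {
	ctx := context{src: []byte("payload")}
	if hasBMI2 {
		loopBMI2(&ctx)
	} else {
		loopGeneric(&ctx)
	}
	fmt.Println(ctx.decoded, string(ctx.dst))
}
```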
1185 vendor/github.com/klauspost/compress/huff0/decompress_amd64.s (generated, vendored; file diff suppressed because it is too large)
195 vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in (generated, vendored)

@@ -1,195 +0,0 @@
-// +build !appengine
-// +build gc
-// +build !noasm
-
-#include "textflag.h"
-#include "funcdata.h"
-#include "go_asm.h"
-
-#ifdef GOAMD64_v4
-#ifndef GOAMD64_v3
-#define GOAMD64_v3
-#endif
-#endif
-
-#define bufoff 256 // see decompress.go, we're using [4][256]byte table
-
-//func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
-//	peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
-TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8
-#define off R8
-#define buffer DI
-#define table SI
-
-#define br_bits_read R9
-#define br_value R10
-#define br_offset R11
-#define peek_bits R12
-#define exhausted DX
-
-#define br0 R13
-#define br1 R14
-#define br2 R15
-#define br3 BP
-
-	MOVQ BP, 0(SP)
-
-	XORQ exhausted, exhausted // exhausted = false
-	XORQ off, off             // off = 0
-
-	MOVBQZX peekBits+32(FP), peek_bits
-	MOVQ    buf+40(FP), buffer
-	MOVQ    tbl+48(FP), table
-
-	MOVQ pbr0+0(FP), br0
-	MOVQ pbr1+8(FP), br1
-	MOVQ pbr2+16(FP), br2
-	MOVQ pbr3+24(FP), br3
-
-main_loop:
-{{ define "decode_2_values_x86" }}
-	// const stream = {{ var "id" }}
-	// br{{ var "id"}}.fillFast()
-	MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
-	MOVQ    bitReaderShifted_value(br{{ var "id" }}), br_value
-	MOVQ    bitReaderShifted_off(br{{ var "id" }}), br_offset
-
-	// We must have at least 2 * max tablelog left
-	CMPQ br_bits_read, $64-22
-	JBE  skip_fill{{ var "id" }}
-
-	SUBQ $32, br_bits_read // b.bitsRead -= 32
-	SUBQ $4, br_offset     // b.off -= 4
-
-	// v := b.in[b.off-4 : b.off]
-	// v = v[:4]
-	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
-	MOVQ bitReaderShifted_in(br{{ var "id" }}), AX
-
-	// b.value |= uint64(low) << (b.bitsRead & 63)
-#ifdef GOAMD64_v3
-	SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
-#else
-	MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
-	MOVQ br_bits_read, CX
-	SHLQ CL, AX
-#endif
-
-	ORQ AX, br_value
-
-	// exhausted = exhausted || (br{{ var "id"}}.off < 4)
-	CMPQ  br_offset, $4
-	SETLT DL
-	ORB   DL, DH
-	// }
-skip_fill{{ var "id" }}:
-
-	// val0 := br{{ var "id"}}.peekTopBits(peekBits)
-#ifdef GOAMD64_v3
-	SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
-#else
-	MOVQ br_value, AX
-	MOVQ peek_bits, CX
-	SHRQ CL, AX // AX = (value >> peek_bits) & mask
-#endif
-
-	// v0 := table[val0&mask]
-	MOVW 0(table)(AX*2), AX // AX - v0
-
-	// br{{ var "id"}}.advance(uint8(v0.entry))
-	MOVB AH, BL // BL = uint8(v0.entry >> 8)
-
-#ifdef GOAMD64_v3
-	MOVBQZX AL, CX
-	SHLXQ   AX, br_value, br_value // value <<= n
-#else
-	MOVBQZX AL, CX
-	SHLQ    CL, br_value // value <<= n
-#endif
-
-	ADDQ CX, br_bits_read // bits_read += n
-
-#ifdef GOAMD64_v3
-	SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
-#else
-	// val1 := br{{ var "id"}}.peekTopBits(peekBits)
-	MOVQ peek_bits, CX
-	MOVQ br_value, AX
-	SHRQ CL, AX // AX = (value >> peek_bits) & mask
-#endif
-
-	// v1 := table[val1&mask]
-	MOVW 0(table)(AX*2), AX // AX - v1
-
-	// br{{ var "id"}}.advance(uint8(v1.entry))
-	MOVB AH, BH // BH = uint8(v1.entry >> 8)
-
-#ifdef GOAMD64_v3
-	MOVBQZX AL, CX
-	SHLXQ   AX, br_value, br_value // value <<= n
-#else
-	MOVBQZX AL, CX
-	SHLQ    CL, br_value // value <<= n
-#endif
-
-	ADDQ CX, br_bits_read // bits_read += n
-
-	// these two writes get coalesced
-	// buf[stream][off] = uint8(v0.entry >> 8)
-	// buf[stream][off+1] = uint8(v1.entry >> 8)
-	MOVW BX, {{ var "bufofs" }}(buffer)(off*1)
-
-	// update the bitrader reader structure
-	MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
-	MOVQ br_value, bitReaderShifted_value(br{{ var "id" }})
-	MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }})
-{{ end }}
-
-{{ set "id" "0" }}
-{{ set "ofs" "0" }}
-{{ set "bufofs" "0" }} {{/* id * bufoff */}}
-{{ template "decode_2_values_x86" . }}
-
-{{ set "id" "1" }}
-{{ set "ofs" "8" }}
-{{ set "bufofs" "256" }}
-{{ template "decode_2_values_x86" . }}
-
-{{ set "id" "2" }}
-{{ set "ofs" "16" }}
-{{ set "bufofs" "512" }}
-{{ template "decode_2_values_x86" . }}
-
-{{ set "id" "3" }}
-{{ set "ofs" "24" }}
-{{ set "bufofs" "768" }}
-{{ template "decode_2_values_x86" . }}
-
-	ADDQ $2, off // off += 2
-
-	TESTB DH, DH // any br[i].ofs < 4?
-	JNZ   end
-
-	CMPQ off, $bufoff
-	JL   main_loop
-end:
-	MOVQ 0(SP), BP
-
-	MOVB off, ret+56(FP)
-	RET
-#undef off
-#undef buffer
-#undef table
-
-#undef br_bits_read
-#undef br_value
-#undef br_offset
-#undef peek_bits
-#undef exhausted
-
-#undef br0
-#undef br1
-#undef br2
-#undef br3
102 vendor/github.com/klauspost/compress/huff0/decompress_generic.go (generated, vendored)

@@ -191,3 +191,105 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
 	}
 	return dst, nil
 }
+
+// Decompress1X will decompress a 1X encoded stream.
+// The cap of the output buffer will be the maximum decompressed size.
+// The length of the supplied input must match the end of a block exactly.
+func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
+	if len(d.dt.single) == 0 {
+		return nil, errors.New("no table loaded")
+	}
+	if use8BitTables && d.actualTableLog <= 8 {
+		return d.decompress1X8Bit(dst, src)
+	}
+	var br bitReaderShifted
+	err := br.init(src)
+	if err != nil {
+		return dst, err
+	}
+	maxDecodedSize := cap(dst)
+	dst = dst[:0]
+
+	// Avoid bounds check by always having full sized table.
+	const tlSize = 1 << tableLogMax
+	const tlMask = tlSize - 1
+	dt := d.dt.single[:tlSize]
+
+	// Use temp table to avoid bound checks/append penalty.
+	bufs := d.buffer()
+	buf := &bufs[0]
+	var off uint8
+
+	for br.off >= 8 {
+		br.fillFast()
+		v := dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+		br.advance(uint8(v.entry))
+		buf[off+0] = uint8(v.entry >> 8)
+
+		v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+		br.advance(uint8(v.entry))
+		buf[off+1] = uint8(v.entry >> 8)
+
+		// Refill
+		br.fillFast()
+
+		v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+		br.advance(uint8(v.entry))
+		buf[off+2] = uint8(v.entry >> 8)
+
+		v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+		br.advance(uint8(v.entry))
+		buf[off+3] = uint8(v.entry >> 8)
+
+		off += 4
+		if off == 0 {
+			if len(dst)+256 > maxDecodedSize {
+				br.close()
+				d.bufs.Put(bufs)
+				return nil, ErrMaxDecodedSizeExceeded
+			}
+			dst = append(dst, buf[:]...)
+		}
+	}
+
+	if len(dst)+int(off) > maxDecodedSize {
+		d.bufs.Put(bufs)
+		br.close()
+		return nil, ErrMaxDecodedSizeExceeded
+	}
+	dst = append(dst, buf[:off]...)
+
+	// br < 8, so uint8 is fine
+	bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
+	for bitsLeft > 0 {
+		br.fill()
+		if false && br.bitsRead >= 32 {
+			if br.off >= 4 {
+				v := br.in[br.off-4:]
+				v = v[:4]
+				low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+				br.value = (br.value << 32) | uint64(low)
+				br.bitsRead -= 32
+				br.off -= 4
+			} else {
+				for br.off > 0 {
+					br.value = (br.value << 8) | uint64(br.in[br.off-1])
+					br.bitsRead -= 8
+					br.off--
+				}
+			}
+		}
+		if len(dst) >= maxDecodedSize {
+			d.bufs.Put(bufs)
+			br.close()
+			return nil, ErrMaxDecodedSizeExceeded
+		}
+		v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
+		nBits := uint8(v.entry)
+		br.advance(nBits)
+		bitsLeft -= nBits
+		dst = append(dst, uint8(v.entry>>8))
+	}
+	d.bufs.Put(bufs)
+	return dst, br.close()
+}
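The hot loop added above relies on a small buffering trick: symbols are decoded into a fixed [256]byte scratch buffer indexed by a uint8 offset, so `off += 4` wrapping back to zero is exactly the "buffer full" signal, and dst grows by 256-byte appends instead of one append per symbol. A standalone sketch of just that mechanism (`emit` is an illustrative helper, not part of huff0):

```go
package main

import "fmt"

func main() {
	var buf [256]byte
	var off uint8
	var dst []byte

	emit := func(b byte) {
		buf[off] = b
		off++
		if off == 0 { // uint8 wrapped: exactly 256 bytes are buffered
			dst = append(dst, buf[:]...)
		}
	}

	for i := 0; i < 600; i++ {
		emit(byte(i))
	}
	dst = append(dst, buf[:off]...) // flush the partial tail, as the code above does
	fmt.Println(len(dst))           // 600
}
```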
34 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go (generated, vendored, new file)

@@ -0,0 +1,34 @@
+// Package cpuinfo gives runtime info about the current CPU.
+//
+// This is a very limited module meant for use internally
+// in this project. For more versatile solution check
+// https://github.com/klauspost/cpuid.
+package cpuinfo
+
+// HasBMI1 checks whether an x86 CPU supports the BMI1 extension.
+func HasBMI1() bool {
+	return hasBMI1
+}
+
+// HasBMI2 checks whether an x86 CPU supports the BMI2 extension.
+func HasBMI2() bool {
+	return hasBMI2
+}
+
+// DisableBMI2 will disable BMI2, for testing purposes.
+// Call returned function to restore previous state.
+func DisableBMI2() func() {
+	old := hasBMI2
+	hasBMI2 = false
+	return func() {
+		hasBMI2 = old
+	}
+}
+
+// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions.
+func HasBMI() bool {
+	return HasBMI1() && HasBMI2()
+}
+
+var hasBMI1 bool
+var hasBMI2 bool
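DisableBMI2 above exists so tests can force the non-BMI2 code path and then restore the detected state. A sketch of the intended call pattern; note cpuinfo is an internal package, so a test like this can only live inside the klauspost/compress module itself (TestFallbackPath is a hypothetical test name):

```go
package huff0

import (
	"testing"

	"github.com/klauspost/compress/internal/cpuinfo"
)

// Force the generic (non-BMI2) decode loop for one test, then restore
// the originally detected CPU state via the returned function.
func TestFallbackPath(t *testing.T) {
	restore := cpuinfo.DisableBMI2()
	defer restore()

	if cpuinfo.HasBMI2() {
		t.Fatal("BMI2 should be reported as unavailable here")
	}
	// ... exercise Decompress1X here; it will take the amd64 (non-BMI2) loop ...
}
```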
11 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go (generated, vendored, new file)

@@ -0,0 +1,11 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+package cpuinfo
+
+// go:noescape
+func x86extensions() (bmi1, bmi2 bool)
+
+func init() {
+	hasBMI1, hasBMI2 = x86extensions()
+}
36 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s (generated, vendored, new file)

@@ -0,0 +1,36 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "go_asm.h"
+
+TEXT ·x86extensions(SB), NOSPLIT, $0
+	// 1. determine max EAX value
+	XORQ AX, AX
+	CPUID
+
+	CMPQ AX, $7
+	JB   unsupported
+
+	// 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction"
+	MOVQ $7, AX
+	MOVQ $0, CX
+	CPUID
+
+	BTQ   $3, BX // bit 3 = BMI1
+	SETCS AL
+
+	BTQ   $8, BX // bit 8 = BMI2
+	SETCS AH
+
+	MOVB AL, bmi1+0(FP)
+	MOVB AH, bmi2+1(FP)
+	RET
+
+unsupported:
+	XORQ AX, AX
+	MOVB AL, bmi1+0(FP)
+	MOVB AL, bmi2+1(FP)
+	RET
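The probe above queries CPUID leaf 7 (EAX=7, ECX=0) and tests EBX bit 3 for BMI1 and bit 8 for BMI2. For reference, the standard golang.org/x/sys/cpu package exposes the same feature bits; a sketch of the equivalent check (the vendored code carries its own small assembler probe instead, avoiding the extra dependency):

```go
package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	// These fields correspond to the same CPUID leaf-7 EBX bits the
	// assembler above reads with BTQ $3, BX and BTQ $8, BX.
	fmt.Println("BMI1:", cpu.X86.HasBMI1)
	fmt.Println("BMI2:", cpu.X86.HasBMI2)
}
```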
54 vendor/github.com/klauspost/compress/zstd/README.md generated vendored
@@ -386,47 +386,31 @@ In practice this means that concurrency is often limited to utilizing about 3 cores.
 
 ### Benchmarks
 
-These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd).
-
 The first two are streaming decodes and the last are smaller inputs.
 
+Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used.
+
 ```
-BenchmarkDecoderSilesia-8          3     385000067 ns/op     550.51 MB/s      5498 B/op        8 allocs/op
-BenchmarkDecoderSilesiaCgo-8       6     197666567 ns/op    1072.25 MB/s    270672 B/op        8 allocs/op
-BenchmarkDecoderEnwik9-8           1    2027001600 ns/op     493.34 MB/s     10496 B/op       18 allocs/op
-BenchmarkDecoderEnwik9Cgo-8        2     979499200 ns/op    1020.93 MB/s    270672 B/op        8 allocs/op
+BenchmarkDecoderSilesia-32         5     206878840 ns/op    1024.50 MB/s     49808 B/op       43 allocs/op
+BenchmarkDecoderEnwik9-32          1    1271809000 ns/op     786.28 MB/s     72048 B/op       52 allocs/op
 
-Concurrent performance:
+Concurrent blocks, performance:
 
-BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-16            28915      42469 ns/op     4340.07 MB/s     114 B/op    0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-16       116505       9965 ns/op    11900.16 MB/s      16 B/op    0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-16          8952     134272 ns/op     3588.70 MB/s     915 B/op    0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-16           11820     102538 ns/op     4161.90 MB/s     594 B/op    0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-16         34782      34184 ns/op     3661.88 MB/s      60 B/op    0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-16          27712      43447 ns/op     3500.58 MB/s      99 B/op    0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-16             62826      18750 ns/op    21845.10 MB/s     104 B/op    0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-16      631545       1794 ns/op    57078.74 MB/s       2 B/op    0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-16     1690140        712 ns/op   172938.13 MB/s       1 B/op    0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-16             10432     113593 ns/op     6180.73 MB/s    1143 B/op    0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/html.zst-16                113206      10671 ns/op     9596.27 MB/s      15 B/op    0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-16      1530615        779 ns/op     5229.49 MB/s       0 B/op    0 allocs/op
-
-BenchmarkDecoder_DecodeAllParallelCgo/kppkn.gtb.zst-16         65217      16192 ns/op    11383.34 MB/s      46 B/op    0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/geo.protodata.zst-16    292671       4039 ns/op    29363.19 MB/s       6 B/op    0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/plrabn12.txt.zst-16      26314      46021 ns/op    10470.43 MB/s     293 B/op    0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/lcet10.txt.zst-16        33897      34900 ns/op    12227.96 MB/s     205 B/op    0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/asyoulik.txt.zst-16     104348      11433 ns/op    10949.01 MB/s      20 B/op    0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/alice29.txt.zst-16       75949      15510 ns/op     9805.60 MB/s      32 B/op    0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/html_x_4.zst-16         173910       6756 ns/op    60624.29 MB/s      37 B/op    0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/paper-100k.pdf.zst-16   923076       1339 ns/op    76474.87 MB/s       1 B/op    0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/fireworks.jpeg.zst-16   922920       1351 ns/op    91102.57 MB/s       2 B/op    0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/urls.10K.zst-16          27649      43618 ns/op    16096.19 MB/s     407 B/op    0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/html.zst-16             279073       4160 ns/op    24614.18 MB/s       6 B/op    0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16    749938       1579 ns/op     2581.71 MB/s       0 B/op    0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32          67356      17857 ns/op    10321.96 MB/s    22.48 pct     102 B/op    0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32     266656       4421 ns/op    26823.21 MB/s    11.89 pct      19 B/op    0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32       20992      56842 ns/op     8477.17 MB/s    39.90 pct     754 B/op    0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32         27456      43932 ns/op     9714.01 MB/s    33.27 pct     524 B/op    0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32       78432      15047 ns/op     8319.15 MB/s    40.34 pct      66 B/op    0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32        65800      18436 ns/op     8249.63 MB/s    37.75 pct      88 B/op    0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32          102993      11523 ns/op    35546.09 MB/s    3.637 pct     143 B/op    0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32   1000000       1070 ns/op    95720.98 MB/s    80.53 pct       3 B/op    0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32    749802       1752 ns/op    70272.35 MB/s    100.0 pct       5 B/op    0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32           22640      52934 ns/op    13263.37 MB/s    26.25 pct    1014 B/op    0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html.zst-32              226412       5232 ns/op    19572.27 MB/s    14.49 pct      20 B/op    0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32     923041       1276 ns/op     3194.71 MB/s    31.26 pct       0 B/op    0 allocs/op
 ```
 
-This reflects the performance around May 2020, but this may be out of date.
+This reflects the performance around May 2022, but this may be out of date.
 
 ## Zstd inside ZIP files
 
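The table above comes from `Decoder.DecodeAll` benchmarks. A minimal harness for this kind of measurement looks like the sketch below; the input file name is a placeholder, and only documented entry points of the package are used.

```go
package main

import (
	"fmt"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Read a .zst file into memory (placeholder path).
	compressed, err := os.ReadFile("silesia.tar.zst")
	if err != nil {
		panic(err)
	}
	// A single Decoder is safe for concurrent DecodeAll calls;
	// a nil reader is fine when only DecodeAll is used.
	dec, err := zstd.NewReader(nil)
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	out, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("decompressed", len(out), "bytes")
}
```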
7 vendor/github.com/klauspost/compress/zstd/bitreader.go generated vendored
@@ -63,13 +63,6 @@ func (b *bitReader) get32BitsFast(n uint8) uint32 {
 	return v
 }
 
-func (b *bitReader) get16BitsFast(n uint8) uint16 {
-	const regMask = 64 - 1
-	v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
-	b.bitsRead += n
-	return v
-}
-
 // fillFast() will make sure at least 32 bits are available.
 // There must be at least 4 bytes available.
 func (b *bitReader) fillFast() {
76 vendor/github.com/klauspost/compress/zstd/bitwriter.go generated vendored
@@ -5,8 +5,6 @@
 
 package zstd
 
-import "fmt"
-
 // bitWriter will write bits.
 // First bit will be LSB of the first byte of output.
 type bitWriter struct {
@@ -73,80 +71,6 @@ func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
 	b.nBits += bits
 }
 
-// flush will flush all pending full bytes.
-// There will be at least 56 bits available for writing when this has been called.
-// Using flush32 is faster, but leaves less space for writing.
-func (b *bitWriter) flush() {
-	v := b.nBits >> 3
-	switch v {
-	case 0:
-	case 1:
-		b.out = append(b.out,
-			byte(b.bitContainer),
-		)
-	case 2:
-		b.out = append(b.out,
-			byte(b.bitContainer),
-			byte(b.bitContainer>>8),
-		)
-	case 3:
-		b.out = append(b.out,
-			byte(b.bitContainer),
-			byte(b.bitContainer>>8),
-			byte(b.bitContainer>>16),
-		)
-	case 4:
-		b.out = append(b.out,
-			byte(b.bitContainer),
-			byte(b.bitContainer>>8),
-			byte(b.bitContainer>>16),
-			byte(b.bitContainer>>24),
-		)
-	case 5:
-		b.out = append(b.out,
-			byte(b.bitContainer),
-			byte(b.bitContainer>>8),
-			byte(b.bitContainer>>16),
-			byte(b.bitContainer>>24),
-			byte(b.bitContainer>>32),
-		)
-	case 6:
-		b.out = append(b.out,
-			byte(b.bitContainer),
-			byte(b.bitContainer>>8),
-			byte(b.bitContainer>>16),
-			byte(b.bitContainer>>24),
-			byte(b.bitContainer>>32),
-			byte(b.bitContainer>>40),
-		)
-	case 7:
-		b.out = append(b.out,
-			byte(b.bitContainer),
-			byte(b.bitContainer>>8),
-			byte(b.bitContainer>>16),
-			byte(b.bitContainer>>24),
-			byte(b.bitContainer>>32),
-			byte(b.bitContainer>>40),
-			byte(b.bitContainer>>48),
-		)
-	case 8:
-		b.out = append(b.out,
-			byte(b.bitContainer),
-			byte(b.bitContainer>>8),
-			byte(b.bitContainer>>16),
-			byte(b.bitContainer>>24),
-			byte(b.bitContainer>>32),
-			byte(b.bitContainer>>40),
-			byte(b.bitContainer>>48),
-			byte(b.bitContainer>>56),
-		)
-	default:
-		panic(fmt.Errorf("bits (%d) > 64", b.nBits))
-	}
-	b.bitContainer >>= v << 3
-	b.nBits &= 7
-}
-
 // flush32 will flush out, so there are at least 32 bits available for writing.
 func (b *bitWriter) flush32() {
 	if b.nBits < 32 {
73 vendor/github.com/klauspost/compress/zstd/blockdec.go generated vendored
@@ -5,9 +5,14 @@
 package zstd
 
 import (
+	"bytes"
+	"encoding/binary"
 	"errors"
 	"fmt"
 	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
 	"sync"
 
 	"github.com/klauspost/compress/huff0"
@@ -38,14 +43,14 @@ const (
 	// maxCompressedBlockSize is the biggest allowed compressed block size (128KB)
 	maxCompressedBlockSize = 128 << 10
 
+	compressedBlockOverAlloc    = 16
+	maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc
+
 	// Maximum possible block size (all Raw+Uncompressed).
 	maxBlockSize = (1 << 21) - 1
 
-	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#literals_section_header
-	maxCompressedLiteralSize = 1 << 18
-	maxRLELiteralSize        = 1 << 20
-	maxMatchLen              = 131074
-	maxSequences             = 0x7f00 + 0xffff
+	maxMatchLen  = 131074
+	maxSequences = 0x7f00 + 0xffff
 
 	// We support slightly less than the reference decoder to be able to
 	// use ints on 32 bit archs.
@@ -97,7 +102,6 @@ type blockDec struct {
 
 	// Block is RLE, this is the size.
 	RLESize uint32
-	tmp     [4]byte
 
 	Type blockType
 
@@ -136,7 +140,7 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
 	b.Type = blockType((bh >> 1) & 3)
 	// find size.
 	cSize := int(bh >> 3)
-	maxSize := maxBlockSize
+	maxSize := maxCompressedBlockSizeAlloc
 	switch b.Type {
 	case blockTypeReserved:
 		return ErrReservedBlockType
@@ -157,9 +161,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
 			println("Data size on stream:", cSize)
 		}
 		b.RLESize = 0
-		maxSize = maxCompressedBlockSize
+		maxSize = maxCompressedBlockSizeAlloc
 		if windowSize < maxCompressedBlockSize && b.lowMem {
-			maxSize = int(windowSize)
+			maxSize = int(windowSize) + compressedBlockOverAlloc
 		}
 		if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize {
 			if debugDecoder {
@@ -190,9 +194,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
 	// Read block data.
 	if cap(b.dataStorage) < cSize {
 		if b.lowMem || cSize > maxCompressedBlockSize {
-			b.dataStorage = make([]byte, 0, cSize)
+			b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc)
 		} else {
-			b.dataStorage = make([]byte, 0, maxCompressedBlockSize)
+			b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc)
 		}
 	}
 	if cap(b.dst) <= maxSize {
@@ -360,14 +364,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) {
 		}
 		if cap(b.literalBuf) < litRegenSize {
 			if b.lowMem {
-				b.literalBuf = make([]byte, litRegenSize)
+				b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc)
 			} else {
-				if litRegenSize > maxCompressedLiteralSize {
-					// Exceptional
-					b.literalBuf = make([]byte, litRegenSize)
-				} else {
-					b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize)
-				}
+				b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc)
 			}
 		}
 		literals = b.literalBuf[:litRegenSize]
@@ -397,14 +396,14 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) {
 		// Ensure we have space to store it.
 		if cap(b.literalBuf) < litRegenSize {
 			if b.lowMem {
-				b.literalBuf = make([]byte, 0, litRegenSize)
+				b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc)
 			} else {
-				b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
+				b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc)
 			}
 		}
 		var err error
 		// Use our out buffer.
-		huff.MaxDecodedSize = maxCompressedBlockSize
+		huff.MaxDecodedSize = litRegenSize
 		if fourStreams {
 			literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
 		} else {
@@ -429,9 +428,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) {
 		// Ensure we have space to store it.
 		if cap(b.literalBuf) < litRegenSize {
 			if b.lowMem {
-				b.literalBuf = make([]byte, 0, litRegenSize)
+				b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc)
 			} else {
-				b.literalBuf = make([]byte, 0, maxCompressedBlockSize)
+				b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc)
 			}
 		}
 		huff := hist.huffTree
@@ -448,7 +447,7 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) {
 			return in, err
 		}
 		hist.huffTree = huff
-		huff.MaxDecodedSize = maxCompressedBlockSize
+		huff.MaxDecodedSize = litRegenSize
 		// Use our out buffer.
 		if fourStreams {
 			literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
@@ -463,6 +462,8 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) {
 		if len(literals) != litRegenSize {
 			return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
 		}
+		// Re-cap to get extra size.
+		literals = b.literalBuf[:len(literals)]
 		if debugDecoder {
 			printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize)
 		}
@@ -486,10 +487,15 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 		b.dst = append(b.dst, hist.decoders.literals...)
 		return nil
 	}
-	err = hist.decoders.decodeSync(hist)
+	before := len(hist.decoders.out)
+	err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:])
 	if err != nil {
 		return err
 	}
+	if hist.decoders.maxSyncLen > 0 {
+		hist.decoders.maxSyncLen += uint64(before)
+		hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out))
+	}
 	b.dst = hist.decoders.out
 	hist.recentOffsets = hist.decoders.prevOffset
 	return nil
@@ -632,6 +638,22 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
 		println("initializing sequences:", err)
 		return err
 	}
+	// Extract blocks...
+	if false && hist.dict == nil {
+		fatalErr := func(err error) {
+			if err != nil {
+				panic(err)
+			}
+		}
+		fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize)
+		var buf bytes.Buffer
+		fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse))
+		fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse))
+		fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse))
+		buf.Write(in)
+		ioutil.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
+	}
 
 	return nil
 }
@@ -650,6 +672,7 @@ func (b *blockDec) decodeSequences(hist *history) error {
 	}
 	hist.decoders.windowSize = hist.windowSize
 	hist.decoders.prevOffset = hist.recentOffsets
+
	err := hist.decoders.decode(b.sequence)
 	hist.recentOffsets = hist.decoders.prevOffset
 	return err
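Most of the allocation changes above add `compressedBlockOverAlloc` (16 bytes) of hidden capacity to decode buffers, so assembly routines may read slightly past the logical end without going out of bounds. The pattern, reduced to a generic sketch (illustrative, not the package's code):

```go
// overAlloc sizes a buffer with a small safety margin so vectorized or
// assembly routines can read a few bytes past the logical end without
// out-of-bounds access. The extra bytes live only in the capacity.
const overAlloc = 16

func newDecodeBuf(logicalSize int) []byte {
	// len is the logical size; cap carries the hidden margin.
	return make([]byte, logicalSize, logicalSize+overAlloc)
}
```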
4 vendor/github.com/klauspost/compress/zstd/bytebuf.go generated vendored
@@ -52,10 +52,6 @@ func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) {
 	return r, nil
 }
 
-func (b *byteBuf) remain() []byte {
-	return *b
-}
-
 func (b *byteBuf) readByte() (byte, error) {
 	bb := *b
 	if len(bb) < 1 {
6 vendor/github.com/klauspost/compress/zstd/bytereader.go generated vendored
@@ -13,12 +13,6 @@ type byteReader struct {
 	off int
 }
 
-// init will initialize the reader and set the input.
-func (b *byteReader) init(in []byte) {
-	b.b = in
-	b.off = 0
-}
-
 // advance the stream b n bytes.
 func (b *byteReader) advance(n uint) {
 	b.off += int(n)
121 vendor/github.com/klauspost/compress/zstd/decoder.go generated vendored
@@ -347,18 +347,20 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
 			}
 			frame.history.setDict(&dict)
 		}
-		if frame.FrameContentSize != fcsUnknown && frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
-			return dst, ErrDecoderSizeExceeded
+		if frame.WindowSize > d.o.maxWindowSize {
+			return dst, ErrWindowSizeExceeded
 		}
-		if frame.FrameContentSize < 1<<30 {
-			// Never preallocate more than 1 GB up front.
+		if frame.FrameContentSize != fcsUnknown {
+			if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
+				return dst, ErrDecoderSizeExceeded
+			}
 			if cap(dst)-len(dst) < int(frame.FrameContentSize) {
-				dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize))
+				dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc)
 				copy(dst2, dst)
 				dst = dst2
 			}
 		}
 
 		if cap(dst) == 0 {
 			// Allocate len(input) * 2 by default if nothing is provided
 			// and we didn't get frame content size.
@@ -437,7 +439,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
 			println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp)
 		}
 
-		if len(next.b) > 0 {
+		if !d.o.ignoreChecksum && len(next.b) > 0 {
 			n, err := d.current.crc.Write(next.b)
 			if err == nil {
 				if n != len(next.b) {
@@ -449,7 +451,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
 			got := d.current.crc.Sum64()
 			var tmp [4]byte
 			binary.LittleEndian.PutUint32(tmp[:], uint32(got))
-			if !bytes.Equal(tmp[:], next.d.checkCRC) && !ignoreCRC {
+			if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) {
 				if debugDecoder {
 					println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)")
 				}
@@ -533,9 +535,15 @@ func (d *Decoder) nextBlockSync() (ok bool) {
 
 		// Update/Check CRC
 		if d.frame.HasCheckSum {
-			d.frame.crc.Write(d.current.b)
+			if !d.o.ignoreChecksum {
+				d.frame.crc.Write(d.current.b)
+			}
 			if d.current.d.Last {
-				d.current.err = d.frame.checkCRC()
+				if !d.o.ignoreChecksum {
+					d.current.err = d.frame.checkCRC()
+				} else {
+					d.current.err = d.frame.consumeCRC()
+				}
 				if d.current.err != nil {
 					println("CRC error:", d.current.err)
 					return false
@@ -629,60 +637,18 @@ func (d *Decoder) startSyncDecoder(r io.Reader) error {
 
 // Create Decoder:
 // ASYNC:
-// Spawn 4 go routines.
-// 0: Read frames and decode blocks.
-// 1: Decode block and literals. Receives hufftree and seqdecs, returns seqdecs and huff tree.
-// 2: Wait for recentOffsets if needed. Decode sequences, send recentOffsets.
-// 3: Wait for stream history, execute sequences, send stream history.
+// Spawn 3 go routines.
+// 0: Read frames and decode block literals.
+// 1: Decode sequences.
+// 2: Execute sequences, send to output.
 func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) {
 	defer d.streamWg.Done()
 	br := readerWrapper{r: r}
 
-	var seqPrepare = make(chan *blockDec, d.o.concurrent)
 	var seqDecode = make(chan *blockDec, d.o.concurrent)
 	var seqExecute = make(chan *blockDec, d.o.concurrent)
 
-	// Async 1: Prepare blocks...
-	go func() {
-		var hist history
-		var hasErr bool
-		for block := range seqPrepare {
-			if hasErr {
-				if block != nil {
-					seqDecode <- block
-				}
-				continue
-			}
-			if block.async.newHist != nil {
-				if debugDecoder {
-					println("Async 1: new history")
-				}
-				hist.reset()
-				if block.async.newHist.dict != nil {
-					hist.setDict(block.async.newHist.dict)
-				}
-			}
-			if block.err != nil || block.Type != blockTypeCompressed {
-				hasErr = block.err != nil
-				seqDecode <- block
-				continue
-			}
-
-			remain, err := block.decodeLiterals(block.data, &hist)
-			block.err = err
-			hasErr = block.err != nil
-			if err == nil {
-				block.async.literals = hist.decoders.literals
-				block.async.seqData = remain
-			} else if debugDecoder {
-				println("decodeLiterals error:", err)
-			}
-			seqDecode <- block
-		}
-		close(seqDecode)
-	}()
-
-	// Async 2: Decode sequences...
+	// Async 1: Decode sequences...
 	go func() {
 		var hist history
 		var hasErr bool
@@ -696,7 +662,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) {
 			}
 			if block.async.newHist != nil {
 				if debugDecoder {
-					println("Async 2: new history, recent:", block.async.newHist.recentOffsets)
+					println("Async 1: new history, recent:", block.async.newHist.recentOffsets)
 				}
 				hist.decoders = block.async.newHist.decoders
 				hist.recentOffsets = block.async.newHist.recentOffsets
@@ -750,7 +716,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) {
 			}
 			if block.async.newHist != nil {
 				if debugDecoder {
-					println("Async 3: new history")
+					println("Async 2: new history")
 				}
 				hist.windowSize = block.async.newHist.windowSize
 				hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer
@@ -837,6 +803,33 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) {
 
 decodeStream:
 	for {
+		var hist history
+		var hasErr bool
+
+		decodeBlock := func(block *blockDec) {
+			if hasErr {
+				if block != nil {
+					seqDecode <- block
+				}
+				return
+			}
+			if block.err != nil || block.Type != blockTypeCompressed {
+				hasErr = block.err != nil
+				seqDecode <- block
+				return
+			}
+
+			remain, err := block.decodeLiterals(block.data, &hist)
+			block.err = err
+			hasErr = block.err != nil
+			if err == nil {
+				block.async.literals = hist.decoders.literals
+				block.async.seqData = remain
+			} else if debugDecoder {
+				println("decodeLiterals error:", err)
+			}
+			seqDecode <- block
+		}
 		frame := d.frame
 		if debugDecoder {
 			println("New frame...")
@@ -863,7 +856,7 @@ decodeStream:
 			case <-ctx.Done():
 			case dec := <-d.decoders:
 				dec.sendErr(err)
-				seqPrepare <- dec
+				decodeBlock(dec)
 			}
 			break decodeStream
 		}
@@ -883,6 +876,10 @@ decodeStream:
 			if debugDecoder {
 				println("Alloc History:", h.allocFrameBuffer)
 			}
+			hist.reset()
+			if h.dict != nil {
+				hist.setDict(h.dict)
+			}
 			dec.async.newHist = &h
 			dec.async.fcs = frame.FrameContentSize
 			historySent = true
@@ -909,7 +906,7 @@ decodeStream:
 			}
 			err = dec.err
 			last := dec.Last
-			seqPrepare <- dec
+			decodeBlock(dec)
 			if err != nil {
 				break decodeStream
 			}
@@ -918,7 +915,7 @@ decodeStream:
 		}
 	}
-	close(seqPrepare)
+	close(seqDecode)
 	wg.Wait()
 	d.frame.history.b = frameHistCache
 }
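The restructured stream decoder above folds the old "prepare blocks" goroutine into the reader loop, leaving three goroutines chained by channels. The shape of that pipeline, reduced to a runnable generic sketch (names are illustrative, not the package's):

```go
package main

import "fmt"

// A three-stage channel pipeline in the same shape as the decoder above:
// the main loop produces work, stage 1 and stage 2 transform it in order.
func main() {
	seqDecode := make(chan int, 4)
	seqExecute := make(chan int, 4)
	done := make(chan struct{})

	// Stage 1: "decode sequences".
	go func() {
		for v := range seqDecode {
			seqExecute <- v * 2
		}
		close(seqExecute)
	}()
	// Stage 2: "execute sequences, send to output".
	go func() {
		for v := range seqExecute {
			fmt.Println("output:", v)
		}
		close(done)
	}()
	// Stage 0: read input and feed the pipeline, then shut it down.
	for i := 0; i < 3; i++ {
		seqDecode <- i
	}
	close(seqDecode)
	<-done
}
```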
13 vendor/github.com/klauspost/compress/zstd/decoder_options.go generated vendored
@@ -19,6 +19,7 @@ type decoderOptions struct {
 	maxDecodedSize uint64
 	maxWindowSize  uint64
 	dicts          []dict
+	ignoreChecksum bool
 }
 
 func (o *decoderOptions) setDefault() {
@@ -31,7 +32,7 @@ func (o *decoderOptions) setDefault() {
 	if o.concurrent > 4 {
 		o.concurrent = 4
 	}
-	o.maxDecodedSize = 1 << 63
+	o.maxDecodedSize = 64 << 30
 }
 
 // WithDecoderLowmem will set whether to use a lower amount of memory,
@@ -66,7 +67,7 @@ func WithDecoderConcurrency(n int) DOption {
 // WithDecoderMaxMemory allows to set a maximum decoded size for in-memory
 // non-streaming operations or maximum window size for streaming operations.
 // This can be used to control memory usage of potentially hostile content.
-// Maximum and default is 1 << 63 bytes.
+// Maximum is 1 << 63 bytes. Default is 64GiB.
 func WithDecoderMaxMemory(n uint64) DOption {
 	return func(o *decoderOptions) error {
 		if n == 0 {
@@ -112,3 +113,11 @@ func WithDecoderMaxWindow(size uint64) DOption {
 		return nil
 	}
 }
+
+// IgnoreChecksum allows to forcibly ignore checksum checking.
+func IgnoreChecksum(b bool) DOption {
+	return func(o *decoderOptions) error {
+		o.ignoreChecksum = b
+		return nil
+	}
+}
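Both options touched above are exercised like this (a minimal sketch using only the options shown in this diff):

```go
package main

import "github.com/klauspost/compress/zstd"

func main() {
	// Forcibly skip checksum checking (the new IgnoreChecksum option)
	// and make the new 64 GiB decoded-size default explicit.
	dec, err := zstd.NewReader(nil,
		zstd.IgnoreChecksum(true),
		zstd.WithDecoderMaxMemory(64<<30),
	)
	if err != nil {
		panic(err)
	}
	defer dec.Close()
}
```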
8 vendor/github.com/klauspost/compress/zstd/enc_better.go generated vendored
@@ -156,8 +156,8 @@ encodeLoop:
 				panic("offset0 was 0")
 			}
 
-			nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
 			nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
+			nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
 			candidateL := e.longTable[nextHashL]
 			candidateS := e.table[nextHashS]
 
@@ -518,8 +518,8 @@ encodeLoop:
 			}
 
 			// Store this, since we have it.
-			nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
 			nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
+			nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
 
 			// We have at least 4 byte match.
 			// No need to check backwards. We come straight from a match
@@ -674,8 +674,8 @@ encodeLoop:
 				panic("offset0 was 0")
 			}
 
-			nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
 			nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
+			nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
 			candidateL := e.longTable[nextHashL]
 			candidateS := e.table[nextHashS]
 
@@ -1047,8 +1047,8 @@ encodeLoop:
 			}
 
 			// Store this, since we have it.
-			nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
 			nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
+			nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
 
 			// We have at least 4 byte match.
 			// No need to check backwards. We come straight from a match
10 vendor/github.com/klauspost/compress/zstd/enc_dfast.go generated vendored
@@ -127,8 +127,8 @@ encodeLoop:
 				panic("offset0 was 0")
 			}
 
-			nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
 			nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+			nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
 			candidateL := e.longTable[nextHashL]
 			candidateS := e.table[nextHashS]
 
@@ -439,8 +439,8 @@ encodeLoop:
 		var t int32
 		for {
 
-			nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
 			nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+			nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
 			candidateL := e.longTable[nextHashL]
 			candidateS := e.table[nextHashS]
 
@@ -785,8 +785,8 @@ encodeLoop:
 				panic("offset0 was 0")
 			}
 
-			nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
 			nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+			nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
 			candidateL := e.longTable[nextHashL]
 			candidateS := e.table[nextHashS]
 
@@ -969,7 +969,7 @@ encodeLoop:
 		te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
 		te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
 		longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen)
-		longHash2 := hashLen(cv0, dFastLongTableBits, dFastLongLen)
+		longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen)
 		e.longTable[longHash1] = te0
 		e.longTable[longHash2] = te1
 		e.markLongShardDirty(longHash1)
@@ -1002,8 +1002,8 @@ encodeLoop:
 		}
 
 		// Store this, since we have it.
-		nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
 		nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+		nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
 
 		// We have at least 4 byte match.
 		// No need to check backwards. We come straight from a match
|
2
vendor/github.com/klauspost/compress/zstd/encoder.go
generated
vendored
2
vendor/github.com/klauspost/compress/zstd/encoder.go
generated
vendored
@@ -551,7 +551,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// If we can do everything in one block, prefer that.
|
// If we can do everything in one block, prefer that.
|
||||||
if len(src) <= maxCompressedBlockSize {
|
if len(src) <= e.o.blockSize {
|
||||||
enc.Reset(e.o.dict, true)
|
enc.Reset(e.o.dict, true)
|
||||||
// Slightly faster with no history and everything in one block.
|
// Slightly faster with no history and everything in one block.
|
||||||
if e.o.crc {
|
if e.o.crc {
|
||||||
|
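Inputs no larger than the configured block size now take the single-block fast path changed above. Typical `EncodeAll` usage, as a small self-contained sketch:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		panic(err)
	}
	defer enc.Close()

	// Small enough to fit in one block, so the fast path applies.
	src := []byte("payload small enough for a single block")
	dst := enc.EncodeAll(src, nil)
	fmt.Println("compressed to", len(dst), "bytes")
}
```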
70 vendor/github.com/klauspost/compress/zstd/framedec.go generated vendored
@@ -253,10 +253,11 @@ func (d *frameDec) reset(br byteBuffer) error {
 		return ErrWindowSizeTooSmall
 	}
 	d.history.windowSize = int(d.WindowSize)
-	if d.o.lowMem && d.history.windowSize < maxBlockSize {
+	if !d.o.lowMem || d.history.windowSize < maxBlockSize {
+		// Alloc 2x window size if not low-mem, or very small window size.
 		d.history.allocFrameBuffer = d.history.windowSize * 2
-		// TODO: Maybe use FrameContent size
 	} else {
+		// Alloc with one additional block
 		d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
 	}
 
@@ -290,13 +291,6 @@ func (d *frameDec) checkCRC() error {
 	if !d.HasCheckSum {
 		return nil
 	}
-	var tmp [4]byte
-	got := d.crc.Sum64()
-	// Flip to match file order.
-	tmp[0] = byte(got >> 0)
-	tmp[1] = byte(got >> 8)
-	tmp[2] = byte(got >> 16)
-	tmp[3] = byte(got >> 24)
 
 	// We can overwrite upper tmp now
 	want, err := d.rawInput.readSmall(4)
@@ -305,7 +299,19 @@ func (d *frameDec) checkCRC() error {
 		return err
 	}
 
-	if !bytes.Equal(tmp[:], want) && !ignoreCRC {
+	if d.o.ignoreChecksum {
+		return nil
+	}
+
+	var tmp [4]byte
+	got := d.crc.Sum64()
+	// Flip to match file order.
+	tmp[0] = byte(got >> 0)
+	tmp[1] = byte(got >> 8)
+	tmp[2] = byte(got >> 16)
+	tmp[3] = byte(got >> 24)
+
+	if !bytes.Equal(tmp[:], want) {
 		if debugDecoder {
 			println("CRC Check Failed:", tmp[:], "!=", want)
 		}
@@ -317,6 +323,19 @@ func (d *frameDec) checkCRC() error {
 	return nil
 }
 
+// consumeCRC reads the checksum data if the frame has one.
+func (d *frameDec) consumeCRC() error {
+	if d.HasCheckSum {
+		_, err := d.rawInput.readSmall(4)
+		if err != nil {
+			println("CRC missing?", err)
+			return err
+		}
+	}
+
+	return nil
+}
+
 // runDecoder will create a sync decoder that will decode a block of data.
 func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
 	saved := d.history.b
@@ -326,6 +345,19 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
 	d.history.ignoreBuffer = len(dst)
 	// Store input length, so we only check new data.
 	crcStart := len(dst)
+	d.history.decoders.maxSyncLen = 0
+	if d.FrameContentSize != fcsUnknown {
+		d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst))
+		if d.history.decoders.maxSyncLen > d.o.maxDecodedSize {
+			return dst, ErrDecoderSizeExceeded
+		}
+		if uint64(cap(dst)) < d.history.decoders.maxSyncLen {
+			// Alloc for output
+			dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc)
+			copy(dst2, dst)
+			dst = dst2
+		}
+	}
 	var err error
 	for {
 		err = dec.reset(d.rawInput, d.WindowSize)
@@ -360,13 +392,17 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
 		if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize {
 			err = ErrFrameSizeMismatch
 		} else if d.HasCheckSum {
-			var n int
-			n, err = d.crc.Write(dst[crcStart:])
-			if err == nil {
-				if n != len(dst)-crcStart {
-					err = io.ErrShortWrite
-				} else {
-					err = d.checkCRC()
+			if d.o.ignoreChecksum {
+				err = d.consumeCRC()
+			} else {
+				var n int
+				n, err = d.crc.Write(dst[crcStart:])
+				if err == nil {
+					if n != len(dst)-crcStart {
+						err = io.ErrShortWrite
+					} else {
+						err = d.checkCRC()
+					}
 				}
 			}
 		}
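The `runDecoder` change above preallocates output from the frame content size, but only when it stays under the configured maximum. On the caller's side, the usual way to keep a hostile stream from forcing large buffers is to cap the window, as in this sketch (placeholder file name; `WithDecoderMaxWindow` is the option shown earlier in this diff):

```go
package main

import (
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	f, err := os.Open("stream.zst") // placeholder input
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Cap the window so an adversarial stream cannot force large buffers.
	dec, err := zstd.NewReader(f, zstd.WithDecoderMaxWindow(8<<20))
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	if _, err := io.Copy(io.Discard, dec); err != nil {
		panic(err)
	}
}
```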
128 vendor/github.com/klauspost/compress/zstd/fse_decoder.go generated vendored
@@ -5,8 +5,10 @@
 package zstd
 
 import (
+	"encoding/binary"
 	"errors"
 	"fmt"
+	"io"
 )
 
 const (
@@ -178,10 +180,32 @@ func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error {
 		return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
 	}
 	b.advance((bitCount + 7) >> 3)
-	// println(s.norm[:s.symbolLen], s.symbolLen)
 	return s.buildDtable()
 }
 
+func (s *fseDecoder) mustReadFrom(r io.Reader) {
+	fatalErr := func(err error) {
+		if err != nil {
+			panic(err)
+		}
+	}
+	// 	dt             [maxTablesize]decSymbol // Decompression table.
+	//	symbolLen      uint16                  // Length of active part of the symbol table.
+	//	actualTableLog uint8                   // Selected tablelog.
+	//	maxBits        uint8                   // Maximum number of additional bits
+	//	// used for table creation to avoid allocations.
+	//	stateTable [256]uint16
+	//	norm       [maxSymbolValue + 1]int16
+	//	preDefined bool
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.dt))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.norm))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined))
+}
+
 // decSymbol contains information about a state entry,
 // Including the state offset base, the output symbol and
 // the number of bits to read for the low part of the destination state.
@@ -204,18 +228,10 @@ func (d decSymbol) newState() uint16 {
 	return uint16(d >> 16)
 }
 
-func (d decSymbol) baseline() uint32 {
-	return uint32(d >> 32)
-}
-
 func (d decSymbol) baselineInt() int {
 	return int(d >> 32)
 }
 
-func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) {
-	*d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
-}
-
 func (d *decSymbol) setNBits(nBits uint8) {
 	const mask = 0xffffffffffffff00
 	*d = (*d & mask) | decSymbol(nBits)
@@ -231,11 +247,6 @@ func (d *decSymbol) setNewState(state uint16) {
 	*d = (*d & mask) | decSymbol(state)<<16
 }
 
-func (d *decSymbol) setBaseline(baseline uint32) {
-	const mask = 0xffffffff
-	*d = (*d & mask) | decSymbol(baseline)<<32
-}
-
 func (d *decSymbol) setExt(addBits uint8, baseline uint32) {
 	const mask = 0xffff00ff
 	*d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32)
@@ -257,68 +268,6 @@ func (s *fseDecoder) setRLE(symbol decSymbol) {
 	s.dt[0] = symbol
 }
 
-// buildDtable will build the decoding table.
-func (s *fseDecoder) buildDtable() error {
-	tableSize := uint32(1 << s.actualTableLog)
-	highThreshold := tableSize - 1
-	symbolNext := s.stateTable[:256]
-
-	// Init, lay down lowprob symbols
-	{
-		for i, v := range s.norm[:s.symbolLen] {
-			if v == -1 {
-				s.dt[highThreshold].setAddBits(uint8(i))
-				highThreshold--
-				symbolNext[i] = 1
-			} else {
-				symbolNext[i] = uint16(v)
-			}
-		}
-	}
-	// Spread symbols
-	{
-		tableMask := tableSize - 1
-		step := tableStep(tableSize)
-		position := uint32(0)
-		for ss, v := range s.norm[:s.symbolLen] {
-			for i := 0; i < int(v); i++ {
-				s.dt[position].setAddBits(uint8(ss))
-				position = (position + step) & tableMask
-				for position > highThreshold {
-					// lowprob area
-					position = (position + step) & tableMask
-				}
-			}
-		}
-		if position != 0 {
-			// position must reach all cells once, otherwise normalizedCounter is incorrect
-			return errors.New("corrupted input (position != 0)")
-		}
-	}
-
-	// Build Decoding table
-	{
-		tableSize := uint16(1 << s.actualTableLog)
-		for u, v := range s.dt[:tableSize] {
-			symbol := v.addBits()
-			nextState := symbolNext[symbol]
-			symbolNext[symbol] = nextState + 1
-			nBits := s.actualTableLog - byte(highBits(uint32(nextState)))
-			s.dt[u&maxTableMask].setNBits(nBits)
-			newState := (nextState << nBits) - tableSize
-			if newState > tableSize {
-				return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize)
-			}
-			if newState == uint16(u) && nBits == 0 {
-				// Seems weird that this is possible with nbits > 0.
-				return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u)
-			}
-			s.dt[u&maxTableMask].setNewState(newState)
-		}
-	}
-	return nil
-}
-
 // transform will transform the decoder table into a table usable for
 // decoding without having to apply the transformation while decoding.
 // The state will contain the base value and the number of bits to read.
@@ -352,34 +301,7 @@ func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) {
 	s.state = dt[br.getBits(tableLog)]
 }
 
-// next returns the current symbol and sets the next state.
-// At least tablelog bits must be available in the bit reader.
-func (s *fseState) next(br *bitReader) {
-	lowBits := uint16(br.getBits(s.state.nbBits()))
-	s.state = s.dt[s.state.newState()+lowBits]
-}
-
-// finished returns true if all bits have been read from the bitstream
-// and the next state would require reading bits from the input.
-func (s *fseState) finished(br *bitReader) bool {
-	return br.finished() && s.state.nbBits() > 0
-}
-
-// final returns the current state symbol without decoding the next.
-func (s *fseState) final() (int, uint8) {
-	return s.state.baselineInt(), s.state.addBits()
-}
-
 // final returns the current state symbol without decoding the next.
 func (s decSymbol) final() (int, uint8) {
 	return s.baselineInt(), s.addBits()
 }
-
-// nextFast returns the next symbol and sets the next state.
-// This can only be used if no symbols are 0 bits.
-// At least tablelog bits must be available in the bit reader.
-func (s *fseState) nextFast(br *bitReader) (uint32, uint8) {
-	lowBits := br.get16BitsFast(s.state.nbBits())
-	s.state = s.dt[s.state.newState()+lowBits]
-	return s.state.baseline(), s.state.addBits()
-}
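The new `mustReadFrom` above deserializes the decoder's fixed-size fields one at a time with `binary.Read`, panicking on any error. The general pattern, as a standalone sketch with hypothetical field names:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// Fixed-size state read field-by-field, mirroring mustReadFrom above.
type state struct {
	SymbolLen      uint16
	ActualTableLog uint8
}

func main() {
	mustRead := func(r *bytes.Reader, v interface{}) {
		if err := binary.Read(r, binary.LittleEndian, v); err != nil {
			panic(err)
		}
	}
	r := bytes.NewReader([]byte{0x34, 0x12, 0x07})
	var s state
	mustRead(r, &s.SymbolLen)      // little-endian: 0x1234
	mustRead(r, &s.ActualTableLog) // 0x07
	fmt.Printf("%+v\n", s)
}
```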
64 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go generated vendored Normal file
@@ -0,0 +1,64 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+package zstd
+
+import (
+	"fmt"
+)
+
+type buildDtableAsmContext struct {
+	// inputs
+	stateTable *uint16
+	norm       *int16
+	dt         *uint64
+
+	// outputs --- set by the procedure in the case of error;
+	// for interpretation please see the error handling part below
+	errParam1 uint64
+	errParam2 uint64
+}
+
+// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable.
+// Function returns non-zero exit code on error.
+// go:noescape
+func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
+
+// please keep in sync with _generate/gen_fse.go
+const (
+	errorCorruptedNormalizedCounter = 1
+	errorNewStateTooBig             = 2
+	errorNewStateNoBits             = 3
+)
+
+// buildDtable will build the decoding table.
+func (s *fseDecoder) buildDtable() error {
+	ctx := buildDtableAsmContext{
+		stateTable: (*uint16)(&s.stateTable[0]),
+		norm:       (*int16)(&s.norm[0]),
+		dt:         (*uint64)(&s.dt[0]),
+	}
+	code := buildDtable_asm(s, &ctx)
+
+	if code != 0 {
+		switch code {
+		case errorCorruptedNormalizedCounter:
+			position := ctx.errParam1
+			return fmt.Errorf("corrupted input (position=%d, expected 0)", position)
+
+		case errorNewStateTooBig:
+			newState := decSymbol(ctx.errParam1)
+			size := ctx.errParam2
+			return fmt.Errorf("newState (%d) outside table size (%d)", newState, size)
+
+		case errorNewStateNoBits:
+			newState := decSymbol(ctx.errParam1)
+			oldState := decSymbol(ctx.errParam2)
+			return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState)
+
+		default:
+			return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code)
+		}
+	}
+	return nil
+}
127 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s generated vendored Normal file
@@ -0,0 +1,127 @@
+// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT.
+
+//go:build !appengine && !noasm && gc && !noasm
+// +build !appengine,!noasm,gc,!noasm
+
+// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
+TEXT ·buildDtable_asm(SB), $0-24
+	MOVQ ctx+8(FP), CX
+	MOVQ s+0(FP), DI
+
+	// Load values
+	MOVBQZX 4098(DI), DX
+	XORQ    AX, AX
+	BTSQ    DX, AX
+	MOVQ    (CX), BX
+	MOVQ    16(CX), SI
+	LEAQ    -1(AX), R8
+	MOVQ    8(CX), CX
+	MOVWQZX 4096(DI), DI
+
+	// End load values
+	// Init, lay down lowprob symbols
+	XORQ R9, R9
+	JMP  init_main_loop_condition
+
+init_main_loop:
+	MOVWQSX (CX)(R9*2), R10
+	CMPW    R10, $-1
+	JNE     do_not_update_high_threshold
+	MOVB    R9, 1(SI)(R8*8)
+	DECQ    R8
+	MOVQ    $0x0000000000000001, R10
+
+do_not_update_high_threshold:
+	MOVW R10, (BX)(R9*2)
+	INCQ R9
+
+init_main_loop_condition:
+	CMPQ R9, DI
+	JL   init_main_loop
+
+	// Spread symbols
+	// Calculate table step
+	MOVQ AX, R9
+	SHRQ $0x01, R9
+	MOVQ AX, R10
+	SHRQ $0x03, R10
+	LEAQ 3(R9)(R10*1), R9
+
+	// Fill add bits values
+	LEAQ -1(AX), R10
+	XORQ R11, R11
+	XORQ R12, R12
+	JMP  spread_main_loop_condition
+
+spread_main_loop:
+	XORQ    R13, R13
+	MOVWQSX (CX)(R12*2), R14
+	JMP     spread_inner_loop_condition
+
+spread_inner_loop:
+	MOVB R12, 1(SI)(R11*8)
+
+adjust_position:
+	ADDQ R9, R11
+	ANDQ R10, R11
+	CMPQ R11, R8
+	JG   adjust_position
+	INCQ R13
+
+spread_inner_loop_condition:
+	CMPQ R13, R14
+	JL   spread_inner_loop
+	INCQ R12
+
+spread_main_loop_condition:
+	CMPQ  R12, DI
+	JL    spread_main_loop
+	TESTQ R11, R11
+	JZ    spread_check_ok
+	MOVQ  ctx+8(FP), AX
+	MOVQ  R11, 24(AX)
+	MOVQ  $+1, ret+16(FP)
+	RET
+
+spread_check_ok:
+	// Build Decoding table
+	XORQ DI, DI
+
+build_table_main_table:
+	MOVBQZX 1(SI)(DI*8), CX
+	MOVWQZX (BX)(CX*2), R8
+	LEAQ    1(R8), R9
+	MOVW    R9, (BX)(CX*2)
+	MOVQ    R8, R9
+	BSRQ    R9, R9
+	MOVQ    DX, CX
+	SUBQ    R9, CX
+	SHLQ    CL, R8
+	SUBQ    AX, R8
+	MOVB    CL, (SI)(DI*8)
+	MOVW    R8, 2(SI)(DI*8)
+	CMPQ    R8, AX
+	JLE     build_table_check1_ok
+	MOVQ    ctx+8(FP), CX
+	MOVQ    R8, 24(CX)
+	MOVQ    AX, 32(CX)
+	MOVQ    $+2, ret+16(FP)
+	RET
+
+build_table_check1_ok:
+	TESTB CL, CL
+	JNZ   build_table_check2_ok
+	CMPW  R8, DI
+	JNE   build_table_check2_ok
+	MOVQ  ctx+8(FP), AX
+	MOVQ  R8, 24(AX)
+	MOVQ  DI, 32(AX)
+	MOVQ  $+3, ret+16(FP)
+	RET
+
+build_table_check2_ok:
+	INCQ DI
+	CMPQ DI, AX
+	JL   build_table_main_table
+	MOVQ $+0, ret+16(FP)
+	RET
72 vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go generated vendored Normal file
@@ -0,0 +1,72 @@
//go:build !amd64 || appengine || !gc || noasm
// +build !amd64 appengine !gc noasm

package zstd

import (
	"errors"
	"fmt"
)

// buildDtable will build the decoding table.
func (s *fseDecoder) buildDtable() error {
	tableSize := uint32(1 << s.actualTableLog)
	highThreshold := tableSize - 1
	symbolNext := s.stateTable[:256]

	// Init, lay down lowprob symbols
	{
		for i, v := range s.norm[:s.symbolLen] {
			if v == -1 {
				s.dt[highThreshold].setAddBits(uint8(i))
				highThreshold--
				symbolNext[i] = 1
			} else {
				symbolNext[i] = uint16(v)
			}
		}
	}

	// Spread symbols
	{
		tableMask := tableSize - 1
		step := tableStep(tableSize)
		position := uint32(0)
		for ss, v := range s.norm[:s.symbolLen] {
			for i := 0; i < int(v); i++ {
				s.dt[position].setAddBits(uint8(ss))
				position = (position + step) & tableMask
				for position > highThreshold {
					// lowprob area
					position = (position + step) & tableMask
				}
			}
		}
		if position != 0 {
			// position must reach all cells once, otherwise normalizedCounter is incorrect
			return errors.New("corrupted input (position != 0)")
		}
	}

	// Build Decoding table
	{
		tableSize := uint16(1 << s.actualTableLog)
		for u, v := range s.dt[:tableSize] {
			symbol := v.addBits()
			nextState := symbolNext[symbol]
			symbolNext[symbol] = nextState + 1
			nBits := s.actualTableLog - byte(highBits(uint32(nextState)))
			s.dt[u&maxTableMask].setNBits(nBits)
			newState := (nextState << nBits) - tableSize
			if newState > tableSize {
				return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize)
			}
			if newState == uint16(u) && nBits == 0 {
				// Seems weird that this is possible with nbits > 0.
				return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u)
			}
			s.dt[u&maxTableMask].setNewState(newState)
		}
	}
	return nil
}
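Both the assembly's "Calculate table step" block above and the generic spread loop advance by the same stride. Assuming the package's tableStep helper (defined elsewhere in this vendored file set) follows the usual FSE convention, it is equivalent to:

// tableStep returns the stride used when spreading symbols over the
// table; for power-of-two table sizes this value is coprime with the
// size, so the walk visits every cell exactly once before wrapping.
func tableStep(tableSize uint32) uint32 {
	return (tableSize >> 1) + (tableSize >> 3) + 3
}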
23 vendor/github.com/klauspost/compress/zstd/fse_encoder.go generated vendored
@@ -76,21 +76,6 @@ func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) {
 	s.clearCount = maxCount != 0
 }
 
-// prepare will prepare and allocate scratch tables used for both compression and decompression.
-func (s *fseEncoder) prepare() (*fseEncoder, error) {
-	if s == nil {
-		s = &fseEncoder{}
-	}
-	s.useRLE = false
-	if s.clearCount && s.maxCount == 0 {
-		for i := range s.count {
-			s.count[i] = 0
-		}
-		s.clearCount = false
-	}
-	return s, nil
-}
-
 // allocCtable will allocate tables needed for compression.
 // If existing tables a re big enough, they are simply re-used.
 func (s *fseEncoder) allocCtable() {
@@ -709,14 +694,6 @@ func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) {
 	c.state = c.stateTable[lu]
 }
 
-// encode the output symbol provided and write it to the bitstream.
-func (c *cState) encode(symbolTT symbolTransform) {
-	nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
-	dstState := int32(c.state>>(nbBitsOut&15)) + int32(symbolTT.deltaFindState)
-	c.bw.addBits16NC(c.state, uint8(nbBitsOut))
-	c.state = c.stateTable[dstState]
-}
-
 // flush will write the tablelog to the output and flush the remaining full bytes.
 func (c *cState) flush(tableLog uint8) {
 	c.bw.flush32()
11 vendor/github.com/klauspost/compress/zstd/fuzz.go generated vendored
@@ -1,11 +0,0 @@
-//go:build ignorecrc
-// +build ignorecrc
-
-// Copyright 2019+ Klaus Post. All rights reserved.
-// License information can be found in the LICENSE file.
-// Based on work by Yann Collet, released under BSD License.
-
-package zstd
-
-// ignoreCRC can be used for fuzz testing to ignore CRC values...
-const ignoreCRC = true
11 vendor/github.com/klauspost/compress/zstd/fuzz_none.go generated vendored
@@ -1,11 +0,0 @@
-//go:build !ignorecrc
-// +build !ignorecrc
-
-// Copyright 2019+ Klaus Post. All rights reserved.
-// License information can be found in the LICENSE file.
-// Based on work by Yann Collet, released under BSD License.
-
-package zstd
-
-// ignoreCRC can be used for fuzz testing to ignore CRC values...
-const ignoreCRC = false
6 vendor/github.com/klauspost/compress/zstd/hash.go generated vendored
@@ -33,9 +33,3 @@ func hashLen(u uint64, length, mls uint8) uint32 {
 		return (uint32(u) * prime4bytes) >> (32 - length)
 	}
 }
-
-// hash3 returns the hash of the lower 3 bytes of u to fit in a hash table with h bits.
-// Preferably h should be a constant and should always be <32.
-func hash3(u uint32, h uint8) uint32 {
-	return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31)
-}
260 vendor/github.com/klauspost/compress/zstd/seqdec.go generated vendored
@@ -73,6 +73,7 @@ type sequenceDecs struct {
 	seqSize      int
 	windowSize   int
 	maxBits      uint8
+	maxSyncLen   uint64
 }
 
 // initialize all 3 decoders from the stream input.
@@ -98,153 +99,13 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) erro
 	return nil
 }
 
-// decode sequences from the stream with the provided history.
-func (s *sequenceDecs) decode(seqs []seqVals) error {
-	br := s.br
-
-	// Grab full sizes tables, to avoid bounds checks.
-	llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
-	llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
-	s.seqSize = 0
-	litRemain := len(s.literals)
-	maxBlockSize := maxCompressedBlockSize
-	if s.windowSize < maxBlockSize {
-		maxBlockSize = s.windowSize
-	}
-	for i := range seqs {
-		var ll, mo, ml int
-		if br.off > 4+((maxOffsetBits+16+16)>>3) {
-			// inlined function:
-			// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
-
-			// Final will not read from stream.
-			var llB, mlB, moB uint8
-			ll, llB = llState.final()
-			ml, mlB = mlState.final()
-			mo, moB = ofState.final()
-
-			// extra bits are stored in reverse order.
-			br.fillFast()
-			mo += br.getBits(moB)
-			if s.maxBits > 32 {
-				br.fillFast()
-			}
-			ml += br.getBits(mlB)
-			ll += br.getBits(llB)
-
-			if moB > 1 {
-				s.prevOffset[2] = s.prevOffset[1]
-				s.prevOffset[1] = s.prevOffset[0]
-				s.prevOffset[0] = mo
-			} else {
-				// mo = s.adjustOffset(mo, ll, moB)
-				// Inlined for rather big speedup
-				if ll == 0 {
-					// There is an exception though, when current sequence's literals_length = 0.
-					// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
-					// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
-					mo++
-				}
-
-				if mo == 0 {
-					mo = s.prevOffset[0]
-				} else {
-					var temp int
-					if mo == 3 {
-						temp = s.prevOffset[0] - 1
-					} else {
-						temp = s.prevOffset[mo]
-					}
-
-					if temp == 0 {
-						// 0 is not valid; input is corrupted; force offset to 1
-						println("WARNING: temp was 0")
-						temp = 1
-					}
-
-					if mo != 1 {
-						s.prevOffset[2] = s.prevOffset[1]
-					}
-					s.prevOffset[1] = s.prevOffset[0]
-					s.prevOffset[0] = temp
-					mo = temp
-				}
-			}
-			br.fillFast()
-		} else {
-			if br.overread() {
-				if debugDecoder {
-					printf("reading sequence %d, exceeded available data\n", i)
-				}
-				return io.ErrUnexpectedEOF
-			}
-			ll, mo, ml = s.next(br, llState, mlState, ofState)
-			br.fill()
-		}
-
-		if debugSequences {
-			println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
-		}
-		// Evaluate.
-		// We might be doing this async, so do it early.
-		if mo == 0 && ml > 0 {
-			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
-		}
-		if ml > maxMatchLen {
-			return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
-		}
-		s.seqSize += ll + ml
-		if s.seqSize > maxBlockSize {
-			return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
-		}
-		litRemain -= ll
-		if litRemain < 0 {
-			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll)
-		}
-		seqs[i] = seqVals{
-			ll: ll,
-			ml: ml,
-			mo: mo,
-		}
-		if i == len(seqs)-1 {
-			// This is the last sequence, so we shouldn't update state.
-			break
-		}
-
-		// Manually inlined, ~ 5-20% faster
-		// Update all 3 states at once. Approx 20% faster.
-		nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
-		if nBits == 0 {
-			llState = llTable[llState.newState()&maxTableMask]
-			mlState = mlTable[mlState.newState()&maxTableMask]
-			ofState = ofTable[ofState.newState()&maxTableMask]
-		} else {
-			bits := br.get32BitsFast(nBits)
-			lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
-			llState = llTable[(llState.newState()+lowBits)&maxTableMask]
-
-			lowBits = uint16(bits >> (ofState.nbBits() & 31))
-			lowBits &= bitMask[mlState.nbBits()&15]
-			mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]
-
-			lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
-			ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
-		}
-	}
-	s.seqSize += litRemain
-	if s.seqSize > maxBlockSize {
-		return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
-	}
-	err := br.close()
-	if err != nil {
-		printf("Closing sequences: %v, %+v\n", err, *br)
-	}
-	return err
-}
-
 // execute will execute the decoded sequence with the provided history.
 // The sequence must be evaluated before being sent.
 func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
+	if len(s.dict) == 0 {
+		return s.executeSimple(seqs, hist)
+	}
+
 	// Ensure we have enough output size...
 	if len(s.out)+s.seqSize > cap(s.out) {
 		addBytes := s.seqSize + len(s.out)
@@ -327,6 +188,7 @@ func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
 			}
 		}
 	}
+
 	// Add final literals
 	copy(out[t:], s.literals)
 	if debugDecoder {
@@ -341,14 +203,18 @@ func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
 }
 
 // decode sequences from the stream with the provided history.
-func (s *sequenceDecs) decodeSync(history *history) error {
+func (s *sequenceDecs) decodeSync(hist []byte) error {
+	supported, err := s.decodeSyncSimple(hist)
+	if supported {
+		return err
+	}
+
 	br := s.br
 	seqs := s.nSeqs
 	startSize := len(s.out)
 	// Grab full sizes tables, to avoid bounds checks.
 	llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
 	llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
-	hist := history.b[history.ignoreBuffer:]
 	out := s.out
 	maxBlockSize := maxCompressedBlockSize
 	if s.windowSize < maxBlockSize {
@@ -433,7 +299,7 @@ func (s *sequenceDecs) decodeSync(history *history) error {
 		}
 		size := ll + ml + len(out)
 		if size-startSize > maxBlockSize {
-			return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
+			return fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
 		}
 		if size > cap(out) {
 			// Not enough size, which can happen under high volume block streaming conditions
@@ -463,13 +329,13 @@ func (s *sequenceDecs) decodeSync(history *history) error {
 
 		if mo > len(out)+len(hist) || mo > s.windowSize {
 			if len(s.dict) == 0 {
-				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist))
+				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
 			}
 
 			// we may be in dictionary.
 			dictO := len(s.dict) - (mo - (len(out) + len(hist)))
 			if dictO < 0 || dictO >= len(s.dict) {
-				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist))
+				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
 			}
 			end := dictO + ml
 			if end > len(s.dict) {
@@ -530,6 +396,7 @@ func (s *sequenceDecs) decodeSync(history *history) error {
 			ofState = ofTable[ofState.newState()&maxTableMask]
 		} else {
 			bits := br.get32BitsFast(nBits)
+
 			lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
 			llState = llTable[(llState.newState()+lowBits)&maxTableMask]
 
@@ -543,8 +410,8 @@ func (s *sequenceDecs) decodeSync(history *history) error {
 	}
 
 	// Check if space for literals
-	if len(s.literals)+len(s.out)-startSize > maxBlockSize {
-		return fmt.Errorf("output (%d) bigger than max block size (%d)", len(s.out), maxBlockSize)
+	if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize {
+		return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
 	}
 
 	// Add final literals
@@ -552,16 +419,6 @@ func (s *sequenceDecs) decodeSync(history *history) error {
 	return br.close()
 }
 
-// update states, at least 27 bits must be available.
-func (s *sequenceDecs) update(br *bitReader) {
-	// Max 8 bits
-	s.litLengths.state.next(br)
-	// Max 9 bits
-	s.matchLengths.state.next(br)
-	// Max 8 bits
-	s.offsets.state.next(br)
-}
-
 var bitMask [16]uint16
 
 func init() {
@@ -570,87 +427,6 @@ func init() {
 	}
 }
 
-// update states, at least 27 bits must be available.
-func (s *sequenceDecs) updateAlt(br *bitReader) {
-	// Update all 3 states at once. Approx 20% faster.
-	a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
-
-	nBits := a.nbBits() + b.nbBits() + c.nbBits()
-	if nBits == 0 {
-		s.litLengths.state.state = s.litLengths.state.dt[a.newState()]
-		s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()]
-		s.offsets.state.state = s.offsets.state.dt[c.newState()]
-		return
-	}
-	bits := br.get32BitsFast(nBits)
-	lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31))
-	s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits]
-
-	lowBits = uint16(bits >> (c.nbBits() & 31))
-	lowBits &= bitMask[b.nbBits()&15]
-	s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits]
-
-	lowBits = uint16(bits) & bitMask[c.nbBits()&15]
-	s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits]
-}
-
-// nextFast will return new states when there are at least 4 unused bytes left on the stream when done.
-func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
-	// Final will not read from stream.
-	ll, llB := llState.final()
-	ml, mlB := mlState.final()
-	mo, moB := ofState.final()
-
-	// extra bits are stored in reverse order.
-	br.fillFast()
-	mo += br.getBits(moB)
-	if s.maxBits > 32 {
-		br.fillFast()
-	}
-	ml += br.getBits(mlB)
-	ll += br.getBits(llB)
-
-	if moB > 1 {
-		s.prevOffset[2] = s.prevOffset[1]
-		s.prevOffset[1] = s.prevOffset[0]
-		s.prevOffset[0] = mo
-		return
-	}
-	// mo = s.adjustOffset(mo, ll, moB)
-	// Inlined for rather big speedup
-	if ll == 0 {
-		// There is an exception though, when current sequence's literals_length = 0.
-		// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
-		// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
-		mo++
-	}
-
-	if mo == 0 {
-		mo = s.prevOffset[0]
-		return
-	}
-	var temp int
-	if mo == 3 {
-		temp = s.prevOffset[0] - 1
-	} else {
-		temp = s.prevOffset[mo]
-	}
-
-	if temp == 0 {
-		// 0 is not valid; input is corrupted; force offset to 1
-		println("temp was 0")
-		temp = 1
-	}
-
-	if mo != 1 {
-		s.prevOffset[2] = s.prevOffset[1]
-	}
-	s.prevOffset[1] = s.prevOffset[0]
-	s.prevOffset[0] = temp
-	mo = temp
-	return
-}
-
 func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
 	// Final will not read from stream.
 	ll, llB := llState.final()
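The inlined repeat-offset handling above follows the zstd spec rules quoted in its comments. A standalone sketch of that logic with hypothetical values (mo here is the decoded offset value, where 0 selects the most recent offset):

package main

import "fmt"

// adjustOffset mirrors the inlined "mo = s.adjustOffset(...)" branch above.
func adjustOffset(prev *[3]int, mo, ll int) int {
	if ll == 0 {
		mo++ // repeated offsets are shifted by one when literals_length == 0
	}
	if mo == 0 {
		return prev[0] // most recent offset, no rotation
	}
	var temp int
	if mo == 3 {
		temp = prev[0] - 1
	} else {
		temp = prev[mo]
	}
	if temp == 0 {
		temp = 1 // corrupted input; the decoder forces offset 1
	}
	if mo != 1 {
		prev[2] = prev[1]
	}
	prev[1] = prev[0]
	prev[0] = temp
	return temp
}

func main() {
	prev := [3]int{8, 4, 2}
	// Offset value 0 with literals_length 0 selects the second most
	// recent offset (Repeated_Offset2) and rotates it to the front.
	fmt.Println(adjustOffset(&prev, 0, 0)) // 4
	fmt.Println(prev)                      // [4 8 2]
}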
362 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go generated vendored Normal file
@@ -0,0 +1,362 @@
//go:build amd64 && !appengine && !noasm && gc
// +build amd64,!appengine,!noasm,gc

package zstd

import (
	"fmt"

	"github.com/klauspost/compress/internal/cpuinfo"
)

type decodeSyncAsmContext struct {
	llTable     []decSymbol
	mlTable     []decSymbol
	ofTable     []decSymbol
	llState     uint64
	mlState     uint64
	ofState     uint64
	iteration   int
	litRemain   int
	out         []byte
	outPosition int
	literals    []byte
	litPosition int
	history     []byte
	windowSize  int
	ll          int // set on error (not for all errors, please refer to _generate/gen.go)
	ml          int // set on error (not for all errors, please refer to _generate/gen.go)
	mo          int // set on error (not for all errors, please refer to _generate/gen.go)
}

// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm.
//
// Please refer to seqdec_generic.go for the reference implementation.
//go:noescape
func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int

// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions.
//go:noescape
func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int

// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer.
//go:noescape
func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int

// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer.
//go:noescape
func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int

// decode sequences from the stream with the provided history but without a dictionary.
func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
	if len(s.dict) > 0 {
		return false, nil
	}
	if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize {
		return false, nil
	}
	useSafe := false
	if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc {
		useSafe = true
	}
	if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) {
		useSafe = true
	}
	if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
		useSafe = true
	}

	br := s.br

	maxBlockSize := maxCompressedBlockSize
	if s.windowSize < maxBlockSize {
		maxBlockSize = s.windowSize
	}

	ctx := decodeSyncAsmContext{
		llTable:     s.litLengths.fse.dt[:maxTablesize],
		mlTable:     s.matchLengths.fse.dt[:maxTablesize],
		ofTable:     s.offsets.fse.dt[:maxTablesize],
		llState:     uint64(s.litLengths.state.state),
		mlState:     uint64(s.matchLengths.state.state),
		ofState:     uint64(s.offsets.state.state),
		iteration:   s.nSeqs - 1,
		litRemain:   len(s.literals),
		out:         s.out,
		outPosition: len(s.out),
		literals:    s.literals,
		windowSize:  s.windowSize,
		history:     hist,
	}

	s.seqSize = 0
	startSize := len(s.out)

	var errCode int
	if cpuinfo.HasBMI2() {
		if useSafe {
			errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx)
		} else {
			errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx)
		}
	} else {
		if useSafe {
			errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx)
		} else {
			errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx)
		}
	}
	switch errCode {
	case noError:
		break

	case errorMatchLenOfsMismatch:
		return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml)

	case errorMatchLenTooBig:
		return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml)

	case errorMatchOffTooBig:
		return true, fmt.Errorf("match offset (%d) bigger than current history (%d)",
			ctx.mo, ctx.outPosition+len(hist)-startSize)

	case errorNotEnoughLiterals:
		return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available",
			ctx.ll, ctx.litRemain+ctx.ll)

	case errorNotEnoughSpace:
		size := ctx.outPosition + ctx.ll + ctx.ml
		if debugDecoder {
			println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize)
		}
		return true, fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)

	default:
		return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode)
	}

	s.seqSize += ctx.litRemain
	if s.seqSize > maxBlockSize {
		return true, fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
	}
	err := br.close()
	if err != nil {
		printf("Closing sequences: %v, %+v\n", err, *br)
		return true, err
	}

	s.literals = s.literals[ctx.litPosition:]
	t := ctx.outPosition
	s.out = s.out[:t]

	// Add final literals
	s.out = append(s.out, s.literals...)
	if debugDecoder {
		t += len(s.literals)
		if t != len(s.out) {
			panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t))
		}
	}

	return true, nil
}

// --------------------------------------------------------------------------------

type decodeAsmContext struct {
	llTable   []decSymbol
	mlTable   []decSymbol
	ofTable   []decSymbol
	llState   uint64
	mlState   uint64
	ofState   uint64
	iteration int
	seqs      []seqVals
	litRemain int
}

const noError = 0

// error reported when mo == 0 && ml > 0
const errorMatchLenOfsMismatch = 1

// error reported when ml > maxMatchLen
const errorMatchLenTooBig = 2

// error reported when mo > available history or mo > s.windowSize
const errorMatchOffTooBig = 3

// error reported when the sum of literal lengths exeeceds the literal buffer size
const errorNotEnoughLiterals = 4

// error reported when capacity of `out` is too small
const errorNotEnoughSpace = 5

// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
//
// Please refer to seqdec_generic.go for the reference implementation.
//go:noescape
func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int

// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
//
// Please refer to seqdec_generic.go for the reference implementation.
//go:noescape
func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int

// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
//go:noescape
func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int

// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
//go:noescape
func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int

// decode sequences from the stream without the provided history.
func (s *sequenceDecs) decode(seqs []seqVals) error {
	br := s.br

	maxBlockSize := maxCompressedBlockSize
	if s.windowSize < maxBlockSize {
		maxBlockSize = s.windowSize
	}

	ctx := decodeAsmContext{
		llTable:   s.litLengths.fse.dt[:maxTablesize],
		mlTable:   s.matchLengths.fse.dt[:maxTablesize],
		ofTable:   s.offsets.fse.dt[:maxTablesize],
		llState:   uint64(s.litLengths.state.state),
		mlState:   uint64(s.matchLengths.state.state),
		ofState:   uint64(s.offsets.state.state),
		seqs:      seqs,
		iteration: len(seqs) - 1,
		litRemain: len(s.literals),
	}

	s.seqSize = 0
	lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56
	var errCode int
	if cpuinfo.HasBMI2() {
		if lte56bits {
			errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx)
		} else {
			errCode = sequenceDecs_decode_bmi2(s, br, &ctx)
		}
	} else {
		if lte56bits {
			errCode = sequenceDecs_decode_56_amd64(s, br, &ctx)
		} else {
			errCode = sequenceDecs_decode_amd64(s, br, &ctx)
		}
	}
	if errCode != 0 {
		i := len(seqs) - ctx.iteration - 1
		switch errCode {
		case errorMatchLenOfsMismatch:
			ml := ctx.seqs[i].ml
			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)

		case errorMatchLenTooBig:
			ml := ctx.seqs[i].ml
			return fmt.Errorf("match len (%d) bigger than max allowed length", ml)

		case errorNotEnoughLiterals:
			ll := ctx.seqs[i].ll
			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll)
		}

		return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode)
	}

	if ctx.litRemain < 0 {
		return fmt.Errorf("literal count is too big: total available %d, total requested %d",
			len(s.literals), len(s.literals)-ctx.litRemain)
	}

	s.seqSize += ctx.litRemain
	if s.seqSize > maxBlockSize {
		return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
	}
	err := br.close()
	if err != nil {
		printf("Closing sequences: %v, %+v\n", err, *br)
	}
	return err
}

// --------------------------------------------------------------------------------

type executeAsmContext struct {
	seqs        []seqVals
	seqIndex    int
	out         []byte
	history     []byte
	literals    []byte
	outPosition int
	litPosition int
	windowSize  int
}

// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm.
//
// Returns false if a match offset is too big.
//
// Please refer to seqdec_generic.go for the reference implementation.
//go:noescape
func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool

// Same as above, but with safe memcopies
//go:noescape
func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool

// executeSimple handles cases when dictionary is not used.
func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
	// Ensure we have enough output size...
	if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) {
		addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc
		s.out = append(s.out, make([]byte, addBytes)...)
		s.out = s.out[:len(s.out)-addBytes]
	}

	if debugDecoder {
		printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize)
	}

	var t = len(s.out)
	out := s.out[:t+s.seqSize]

	ctx := executeAsmContext{
		seqs:        seqs,
		seqIndex:    0,
		out:         out,
		history:     hist,
		outPosition: t,
		litPosition: 0,
		literals:    s.literals,
		windowSize:  s.windowSize,
	}
	var ok bool
	if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
		ok = sequenceDecs_executeSimple_safe_amd64(&ctx)
	} else {
		ok = sequenceDecs_executeSimple_amd64(&ctx)
	}
	if !ok {
		return fmt.Errorf("match offset (%d) bigger than current history (%d)",
			seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist))
	}
	s.literals = s.literals[ctx.litPosition:]
	t = ctx.outPosition

	// Add final literals
	copy(out[t:], s.literals)
	if debugDecoder {
		t += len(s.literals)
		if t != len(out) {
			panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
		}
	}
	s.out = out

	return nil
}
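decodeSyncSimple and decode above branch on cpuinfo.HasBMI2 on every call; that helper lives in the module-internal internal/cpuinfo package, so code outside klauspost/compress would reach for golang.org/x/sys/cpu instead. A minimal sketch of the same dispatch pattern (the two stand-in functions are placeholders, not the real entry points):

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

// pickImpl chooses between an accelerated and a portable implementation,
// mirroring the per-call branch in the decoder above.
func pickImpl(bmi2, portable func([]byte) int) func([]byte) int {
	if cpu.X86.HasBMI2 {
		return bmi2
	}
	return portable
}

func main() {
	count := pickImpl(
		func(b []byte) int { return len(b) }, // stand-in for the BMI2 path
		func(b []byte) int { return len(b) }, // stand-in for the fallback
	)
	fmt.Println(count([]byte("example")))
}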
3689 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s generated vendored Normal file
File diff suppressed because it is too large
237 vendor/github.com/klauspost/compress/zstd/seqdec_generic.go generated vendored Normal file
@@ -0,0 +1,237 @@
//go:build !amd64 || appengine || !gc || noasm
// +build !amd64 appengine !gc noasm

package zstd

import (
	"fmt"
	"io"
)

// decode sequences from the stream with the provided history but without dictionary.
func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
	return false, nil
}

// decode sequences from the stream without the provided history.
func (s *sequenceDecs) decode(seqs []seqVals) error {
	br := s.br

	// Grab full sizes tables, to avoid bounds checks.
	llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
	llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
	s.seqSize = 0
	litRemain := len(s.literals)

	maxBlockSize := maxCompressedBlockSize
	if s.windowSize < maxBlockSize {
		maxBlockSize = s.windowSize
	}
	for i := range seqs {
		var ll, mo, ml int
		if br.off > 4+((maxOffsetBits+16+16)>>3) {
			// inlined function:
			// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)

			// Final will not read from stream.
			var llB, mlB, moB uint8
			ll, llB = llState.final()
			ml, mlB = mlState.final()
			mo, moB = ofState.final()

			// extra bits are stored in reverse order.
			br.fillFast()
			mo += br.getBits(moB)
			if s.maxBits > 32 {
				br.fillFast()
			}
			ml += br.getBits(mlB)
			ll += br.getBits(llB)

			if moB > 1 {
				s.prevOffset[2] = s.prevOffset[1]
				s.prevOffset[1] = s.prevOffset[0]
				s.prevOffset[0] = mo
			} else {
				// mo = s.adjustOffset(mo, ll, moB)
				// Inlined for rather big speedup
				if ll == 0 {
					// There is an exception though, when current sequence's literals_length = 0.
					// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
					// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
					mo++
				}

				if mo == 0 {
					mo = s.prevOffset[0]
				} else {
					var temp int
					if mo == 3 {
						temp = s.prevOffset[0] - 1
					} else {
						temp = s.prevOffset[mo]
					}

					if temp == 0 {
						// 0 is not valid; input is corrupted; force offset to 1
						println("WARNING: temp was 0")
						temp = 1
					}

					if mo != 1 {
						s.prevOffset[2] = s.prevOffset[1]
					}
					s.prevOffset[1] = s.prevOffset[0]
					s.prevOffset[0] = temp
					mo = temp
				}
			}
			br.fillFast()
		} else {
			if br.overread() {
				if debugDecoder {
					printf("reading sequence %d, exceeded available data\n", i)
				}
				return io.ErrUnexpectedEOF
			}
			ll, mo, ml = s.next(br, llState, mlState, ofState)
			br.fill()
		}

		if debugSequences {
			println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
		}
		// Evaluate.
		// We might be doing this async, so do it early.
		if mo == 0 && ml > 0 {
			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
		}
		if ml > maxMatchLen {
			return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
		}
		s.seqSize += ll + ml
		if s.seqSize > maxBlockSize {
			return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
		}
		litRemain -= ll
		if litRemain < 0 {
			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll)
		}
		seqs[i] = seqVals{
			ll: ll,
			ml: ml,
			mo: mo,
		}
		if i == len(seqs)-1 {
			// This is the last sequence, so we shouldn't update state.
			break
		}

		// Manually inlined, ~ 5-20% faster
		// Update all 3 states at once. Approx 20% faster.
		nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
		if nBits == 0 {
			llState = llTable[llState.newState()&maxTableMask]
			mlState = mlTable[mlState.newState()&maxTableMask]
			ofState = ofTable[ofState.newState()&maxTableMask]
		} else {
			bits := br.get32BitsFast(nBits)
			lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
			llState = llTable[(llState.newState()+lowBits)&maxTableMask]

			lowBits = uint16(bits >> (ofState.nbBits() & 31))
			lowBits &= bitMask[mlState.nbBits()&15]
			mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]

			lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
			ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
		}
	}
	s.seqSize += litRemain
	if s.seqSize > maxBlockSize {
		return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
	}
	err := br.close()
	if err != nil {
		printf("Closing sequences: %v, %+v\n", err, *br)
	}
	return err
}

// executeSimple handles cases when a dictionary is not used.
func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
	// Ensure we have enough output size...
	if len(s.out)+s.seqSize > cap(s.out) {
		addBytes := s.seqSize + len(s.out)
		s.out = append(s.out, make([]byte, addBytes)...)
		s.out = s.out[:len(s.out)-addBytes]
	}

	if debugDecoder {
		printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize)
	}

	var t = len(s.out)
	out := s.out[:t+s.seqSize]

	for _, seq := range seqs {
		// Add literals
		copy(out[t:], s.literals[:seq.ll])
		t += seq.ll
		s.literals = s.literals[seq.ll:]

		// Malformed input
		if seq.mo > t+len(hist) || seq.mo > s.windowSize {
			return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist))
		}

		// Copy from history.
		if v := seq.mo - t; v > 0 {
			// v is the start position in history from end.
			start := len(hist) - v
			if seq.ml > v {
				// Some goes into the current block.
				// Copy remainder of history
				copy(out[t:], hist[start:])
				t += v
				seq.ml -= v
			} else {
				copy(out[t:], hist[start:start+seq.ml])
				t += seq.ml
				continue
			}
		}

		// We must be in the current buffer now
		if seq.ml > 0 {
			start := t - seq.mo
			if seq.ml <= t-start {
				// No overlap
				copy(out[t:], out[start:start+seq.ml])
				t += seq.ml
			} else {
				// Overlapping copy
				// Extend destination slice and copy one byte at the time.
				src := out[start : start+seq.ml]
				dst := out[t:]
				dst = dst[:len(src)]
				t += len(src)
				// Destination is the space we just added.
				for i := range src {
					dst[i] = src[i]
				}
			}
		}
	}
	// Add final literals
	copy(out[t:], s.literals)
	if debugDecoder {
		t += len(s.literals)
		if t != len(out) {
			panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
		}
	}
	s.out = out

	return nil
}
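The "Overlapping copy" branch above cannot use Go's copy, which behaves like memmove: a match whose offset is smaller than its length must re-read bytes it has just written so a short pattern repeats. A self-contained sketch with hypothetical values:

package main

import "fmt"

// overlapCopy appends ml bytes taken mo bytes back from the end of out,
// one byte at a time - the same semantics as the loop in executeSimple.
func overlapCopy(out []byte, mo, ml int) []byte {
	start := len(out) - mo
	for i := 0; i < ml; i++ {
		out = append(out, out[start+i])
	}
	return out
}

func main() {
	// Offset 2, match length 6: the two-byte pattern repeats three times.
	fmt.Println(string(overlapCopy([]byte("ab"), 2, 6))) // abababab
}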
59 vendor/github.com/klauspost/compress/zstd/zip.go generated vendored
@@ -18,26 +18,44 @@ const ZipMethodWinZip = 93
 // See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT
 const ZipMethodPKWare = 20
 
-var zipReaderPool sync.Pool
+// zipReaderPool is the default reader pool.
+var zipReaderPool = sync.Pool{New: func() interface{} {
+	z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1))
+	if err != nil {
+		panic(err)
+	}
+	return z
+}}
 
 // newZipReader creates a pooled zip decompressor.
-func newZipReader(r io.Reader) io.ReadCloser {
-	dec, ok := zipReaderPool.Get().(*Decoder)
-	if ok {
-		dec.Reset(r)
-	} else {
-		d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true))
-		if err != nil {
-			panic(err)
-		}
-		dec = d
-	}
-	return &pooledZipReader{dec: dec}
+func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser {
+	pool := &zipReaderPool
+	if len(opts) > 0 {
+		opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...)
+		// Force concurrency 1
+		opts = append(opts, WithDecoderConcurrency(1))
+		// Create our own pool
+		pool = &sync.Pool{}
+	}
+	return func(r io.Reader) io.ReadCloser {
+		dec, ok := pool.Get().(*Decoder)
+		if ok {
+			dec.Reset(r)
+		} else {
+			d, err := NewReader(r, opts...)
+			if err != nil {
+				panic(err)
+			}
+			dec = d
+		}
+		return &pooledZipReader{dec: dec, pool: pool}
+	}
 }
 
 type pooledZipReader struct {
 	mu  sync.Mutex // guards Close and Read
-	dec *Decoder
+	pool *sync.Pool
+	dec  *Decoder
 }
 
 func (r *pooledZipReader) Read(p []byte) (n int, err error) {
@@ -48,8 +66,8 @@ func (r *pooledZipReader) Read(p []byte) (n int, err error) {
 	}
 	dec, err := r.dec.Read(p)
 	if err == io.EOF {
-		err = r.dec.Reset(nil)
-		zipReaderPool.Put(r.dec)
+		r.dec.Reset(nil)
+		r.pool.Put(r.dec)
 		r.dec = nil
 	}
 	return dec, err
@@ -61,7 +79,7 @@ func (r *pooledZipReader) Close() error {
 	var err error
 	if r.dec != nil {
 		err = r.dec.Reset(nil)
-		zipReaderPool.Put(r.dec)
+		r.pool.Put(r.dec)
 		r.dec = nil
 	}
 	return err
@@ -115,6 +133,9 @@ func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) {
 
 // ZipDecompressor returns a decompressor that can be registered with zip libraries.
 // See ZipCompressor for example.
-func ZipDecompressor() func(r io.Reader) io.ReadCloser {
-	return newZipReader
+// Options can be specified. WithDecoderConcurrency(1) is forced,
+// and by default a 128MB maximum decompression window is specified.
+// The window size can be overridden if required.
+func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser {
+	return newZipReader(opts...)
 }
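For context, ZipDecompressor (with ZipCompressor, earlier in this file) plugs into archive/zip's codec registration, and after this change extra decoder options can be passed through. A sketch of reading a method-93 (WinZip zstd) archive; the file name is hypothetical:

package main

import (
	"archive/zip"
	"fmt"
	"io"
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	f, err := os.Open("archive.zip") // hypothetical input
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	st, err := f.Stat()
	if err != nil {
		log.Fatal(err)
	}
	zr, err := zip.NewReader(f, st.Size())
	if err != nil {
		log.Fatal(err)
	}
	// Register the pooled zstd decompressor for the WinZip zstd method.
	zr.RegisterDecompressor(zstd.ZipMethodWinZip, zstd.ZipDecompressor())
	for _, zf := range zr.File {
		rc, err := zf.Open()
		if err != nil {
			log.Fatal(err)
		}
		n, _ := io.Copy(io.Discard, rc)
		rc.Close()
		fmt.Println(zf.Name, n)
	}
}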
11 vendor/github.com/klauspost/compress/zstd/zstd.go generated vendored
@@ -110,17 +110,6 @@ func printf(format string, a ...interface{}) {
 	}
 }
 
-// matchLenFast does matching, but will not match the last up to 7 bytes.
-func matchLenFast(a, b []byte) int {
-	endI := len(a) & (math.MaxInt32 - 7)
-	for i := 0; i < endI; i += 8 {
-		if diff := load64(a, i) ^ load64(b, i); diff != 0 {
-			return i + bits.TrailingZeros64(diff)>>3
-		}
-	}
-	return endI
-}
-
 // matchLen returns the maximum length.
 // a must be the shortest of the two.
 // The function also returns whether all bytes matched.
22 vendor/github.com/moby/buildkit/client/diskusage.go generated vendored
@@ -10,18 +10,18 @@ import (
 )
 
 type UsageInfo struct {
-	ID      string
-	Mutable bool
-	InUse   bool
-	Size    int64
+	ID      string `json:"id"`
+	Mutable bool   `json:"mutable"`
+	InUse   bool   `json:"inUse"`
+	Size    int64  `json:"size"`
 
-	CreatedAt   time.Time
-	LastUsedAt  *time.Time
-	UsageCount  int
-	Parents     []string
-	Description string
-	RecordType  UsageRecordType
-	Shared      bool
+	CreatedAt   time.Time       `json:"createdAt"`
+	LastUsedAt  *time.Time      `json:"lastUsedAt"`
+	UsageCount  int             `json:"usageCount"`
+	Parents     []string        `json:"parents"`
+	Description string          `json:"description"`
+	RecordType  UsageRecordType `json:"recordType"`
+	Shared      bool            `json:"shared"`
 }
 
 func (c *Client) DiskUsage(ctx context.Context, opts ...DiskUsageOption) ([]*UsageInfo, error) {
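The tags added above switch the marshaled keys from Go field names to camelCase. A trimmed-down sketch (only a subset of the fields, copied for illustration) showing the effect:

package main

import (
	"encoding/json"
	"fmt"
)

// usageInfo is a trimmed copy of client.UsageInfo, for illustration only.
type usageInfo struct {
	ID      string `json:"id"`
	Mutable bool   `json:"mutable"`
	Size    int64  `json:"size"`
}

func main() {
	dt, _ := json.Marshal(usageInfo{ID: "abc123", Mutable: true, Size: 4096})
	fmt.Println(string(dt)) // {"id":"abc123","mutable":true,"size":4096}
}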
8 vendor/github.com/moby/buildkit/client/info.go generated vendored
@@ -9,13 +9,13 @@ import (
 )
 
 type Info struct {
-	BuildkitVersion BuildkitVersion
+	BuildkitVersion BuildkitVersion `json:"buildkitVersion"`
 }
 
 type BuildkitVersion struct {
-	Package  string
-	Version  string
-	Revision string
+	Package  string `json:"package"`
+	Version  string `json:"version"`
+	Revision string `json:"revision"`
 }
 
 func (c *Client) Info(ctx context.Context) (*Info, error) {
2 vendor/github.com/moby/buildkit/client/llb/sourcemap.go generated vendored
@@ -61,7 +61,7 @@ func (smc *sourceMapCollector) Add(dgst digest.Digest, ls []*SourceLocation) {
 		}
 		smc.index[l.SourceMap] = idx
 	}
-	smc.locations[dgst] = ls
+	smc.locations[dgst] = append(smc.locations[dgst], ls...)
 }
 
 func (smc *sourceMapCollector) Marshal(ctx context.Context, co ...ConstraintsOpt) (*pb.Source, error) {
2 vendor/github.com/moby/buildkit/client/llb/state.go generated vendored
@@ -199,10 +199,10 @@ func marshal(ctx context.Context, v Vertex, def *Definition, s *sourceMapCollect
 	if opMeta != nil {
 		def.Metadata[dgst] = mergeMetadata(def.Metadata[dgst], *opMeta)
 	}
+	s.Add(dgst, sls)
 	if _, ok := cache[dgst]; ok {
 		return def, nil
 	}
-	s.Add(dgst, sls)
 	def.Def = append(def.Def, dt)
 	cache[dgst] = struct{}{}
 	return def, nil
6 vendor/github.com/moby/buildkit/client/solve.go generated vendored
@@ -209,8 +209,10 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
 			<-time.After(3 * time.Second)
 			cancelStatus()
 		}()
-		bklog.G(ctx).Debugf("stopping session")
-		s.Close()
+		if !opt.SessionPreInitialized {
+			bklog.G(ctx).Debugf("stopping session")
+			s.Close()
+		}
 	}()
 	var pbd *pb.Definition
 	if def != nil {
81 vendor/github.com/moby/buildkit/frontend/subrequests/describe.go generated vendored Normal file
@@ -0,0 +1,81 @@
|
|||||||
|
package subrequests
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"strings"
|
||||||
|
"text/tabwriter"
|
||||||
|
|
||||||
|
"github.com/moby/buildkit/frontend/gateway/client"
|
||||||
|
gwpb "github.com/moby/buildkit/frontend/gateway/pb"
|
||||||
|
"github.com/moby/buildkit/solver/errdefs"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
const RequestSubrequestsDescribe = "frontend.subrequests.describe"
|
||||||
|
|
||||||
|
var SubrequestsDescribeDefinition = Request{
|
||||||
|
Name: RequestSubrequestsDescribe,
|
||||||
|
Version: "1.0.0",
|
||||||
|
Type: TypeRPC,
|
||||||
|
Description: "List available subrequest types",
|
||||||
|
Metadata: []Named{
|
||||||
|
{Name: "result.json"},
|
||||||
|
{Name: "result.txt"},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
func Describe(ctx context.Context, c client.Client) ([]Request, error) {
|
||||||
|
gwcaps := c.BuildOpts().Caps
|
||||||
|
|
||||||
|
if err := (&gwcaps).Supports(gwpb.CapFrontendCaps); err != nil {
|
||||||
|
return nil, errdefs.NewUnsupportedSubrequestError(RequestSubrequestsDescribe)
|
||||||
|
}
|
||||||
|
|
||||||
|
res, err := c.Solve(ctx, client.SolveRequest{
|
||||||
|
FrontendOpt: map[string]string{
|
||||||
|
"requestid": RequestSubrequestsDescribe,
|
||||||
|
"frontend.caps": "moby.buildkit.frontend.subrequests",
|
||||||
|
},
|
||||||
|
Frontend: "dockerfile.v0",
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
var reqErr *errdefs.UnsupportedSubrequestError
|
||||||
|
if errors.As(err, &reqErr) {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var capErr *errdefs.UnsupportedFrontendCapError
|
||||||
|
if errors.As(err, &capErr) {
|
||||||
|
return nil, errdefs.NewUnsupportedSubrequestError(RequestSubrequestsDescribe)
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
dt, ok := res.Metadata["result.json"]
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.Errorf("no result.json metadata in response")
|
||||||
|
}
|
||||||
|
|
||||||
|
var reqs []Request
|
||||||
|
if err := json.Unmarshal(dt, &reqs); err != nil {
|
||||||
|
return nil, errors.Wrap(err, "failed to parse describe result")
|
||||||
|
}
|
||||||
|
return reqs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func PrintDescribe(dt []byte, w io.Writer) error {
|
||||||
|
var d []Request
|
||||||
|
if err := json.Unmarshal(dt, &d); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
tw := tabwriter.NewWriter(w, 0, 0, 1, ' ', 0)
|
||||||
|
fmt.Fprintf(tw, "NAME\tVERSION\tDESCRIPTION\n")
|
||||||
|
|
||||||
|
for _, r := range d {
|
||||||
|
fmt.Fprintf(tw, "%s\t%s\t%s\n", strings.TrimPrefix(r.Name, "frontend."), r.Version, r.Description)
|
||||||
|
}
|
||||||
|
return tw.Flush()
|
||||||
|
}
|
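
Note: a minimal sketch of how a caller might use the new Describe helper; listSubrequests is a hypothetical wrapper, and c is assumed to be a gateway client obtained inside a frontend's Build callback:

package main

import (
	"context"
	"fmt"

	gwclient "github.com/moby/buildkit/frontend/gateway/client"
	"github.com/moby/buildkit/frontend/subrequests"
)

// listSubrequests is a hypothetical helper, not part of the change above.
func listSubrequests(ctx context.Context, c gwclient.Client) error {
	reqs, err := subrequests.Describe(ctx, c)
	if err != nil {
		return err // e.g. an UnsupportedSubrequestError from an older frontend
	}
	for _, r := range reqs {
		fmt.Printf("%s v%s: %s\n", r.Name, r.Version, r.Description)
	}
	return nil
}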

vendor/github.com/moby/buildkit/frontend/subrequests/outline/outline.go (generated, vendored, new file): 146 additions
@@ -0,0 +1,146 @@
+package outline
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"text/tabwriter"
+
+	"github.com/moby/buildkit/frontend/gateway/client"
+	"github.com/moby/buildkit/frontend/subrequests"
+	"github.com/moby/buildkit/solver/pb"
+)
+
+const RequestSubrequestsOutline = "frontend.outline"
+
+var SubrequestsOutlineDefinition = subrequests.Request{
+	Name:        RequestSubrequestsOutline,
+	Version:     "1.0.0",
+	Type:        subrequests.TypeRPC,
+	Description: "List all parameters current build target supports",
+	Opts: []subrequests.Named{
+		{
+			Name:        "target",
+			Description: "Target build stage",
+		},
+	},
+	Metadata: []subrequests.Named{
+		{Name: "result.json"},
+		{Name: "result.txt"},
+	},
+}
+
+type Outline struct {
+	Name        string       `json:"name,omitempty"`
+	Description string       `json:"description,omitempty"`
+	Args        []Arg        `json:"args,omitempty"`
+	Secrets     []Secret     `json:"secrets,omitempty"`
+	SSH         []SSH        `json:"ssh,omitempty"`
+	Cache       []CacheMount `json:"cache,omitempty"`
+	Sources     [][]byte     `json:"sources,omitempty"`
+}
+
+func (o Outline) ToResult() (*client.Result, error) {
+	res := client.NewResult()
+	dt, err := json.MarshalIndent(o, "", "  ")
+	if err != nil {
+		return nil, err
+	}
+	res.AddMeta("result.json", dt)
+
+	b := bytes.NewBuffer(nil)
+	if err := PrintOutline(dt, b); err != nil {
+		return nil, err
+	}
+	res.AddMeta("result.txt", b.Bytes())
+
+	res.AddMeta("version", []byte(SubrequestsOutlineDefinition.Version))
+	return res, nil
+}
+
+type Arg struct {
+	Name        string       `json:"name"`
+	Description string       `json:"description,omitempty"`
+	Value       string       `json:"value,omitempty"`
+	Location    *pb.Location `json:"location,omitempty"`
+}
+
+type Secret struct {
+	Name     string       `json:"name"`
+	Required bool         `json:"required,omitempty"`
+	Location *pb.Location `json:"location,omitempty"`
+}
+
+type SSH struct {
+	Name     string       `json:"name"`
+	Required bool         `json:"required,omitempty"`
+	Location *pb.Location `json:"location,omitempty"`
+}
+
+type CacheMount struct {
+	ID       string       `json:"ID"`
+	Location *pb.Location `json:"location,omitempty"`
+}
+
+func PrintOutline(dt []byte, w io.Writer) error {
+	var o Outline
+
+	if err := json.Unmarshal(dt, &o); err != nil {
+		return err
+	}
+
+	if o.Name != "" || o.Description != "" {
+		tw := tabwriter.NewWriter(w, 0, 0, 1, ' ', 0)
+		name := o.Name
+		if o.Name == "" {
+			name = "(default)"
+		}
+		fmt.Fprintf(tw, "TARGET:\t%s\n", name)
+		if o.Description != "" {
+			fmt.Fprintf(tw, "DESCRIPTION:\t%s\n", o.Description)
+		}
+		tw.Flush()
+		fmt.Fprintln(tw)
+	}
+
+	if len(o.Args) > 0 {
+		tw := tabwriter.NewWriter(w, 0, 0, 3, ' ', 0)
+		fmt.Fprintf(tw, "BUILD ARG\tVALUE\tDESCRIPTION\n")
+		for _, a := range o.Args {
+			fmt.Fprintf(tw, "%s\t%s\t%s\n", a.Name, a.Value, a.Description)
+		}
+		tw.Flush()
+		fmt.Fprintln(tw)
+	}
+
+	if len(o.Secrets) > 0 {
+		tw := tabwriter.NewWriter(w, 0, 0, 3, ' ', 0)
+		fmt.Fprintf(tw, "SECRET\tREQUIRED\n")
+		for _, s := range o.Secrets {
+			b := ""
+			if s.Required {
+				b = "true"
+			}
+			fmt.Fprintf(tw, "%s\t%s\n", s.Name, b)
+		}
+		tw.Flush()
+		fmt.Fprintln(tw)
+	}
+
+	if len(o.SSH) > 0 {
+		tw := tabwriter.NewWriter(w, 0, 0, 3, ' ', 0)
+		fmt.Fprintf(tw, "SSH\tREQUIRED\n")
+		for _, s := range o.SSH {
+			b := ""
+			if s.Required {
+				b = "true"
+			}
+			fmt.Fprintf(tw, "%s\t%s\n", s.Name, b)
+		}
+		tw.Flush()
+		fmt.Fprintln(tw)
+	}
+
+	return nil
+}
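
Note: a minimal sketch of the Outline round trip (marshal to the result.json form, then render with PrintOutline); the target name and build arg below are made up for illustration:

package main

import (
	"encoding/json"
	"os"

	"github.com/moby/buildkit/frontend/subrequests/outline"
)

func main() {
	o := outline.Outline{ // hypothetical values
		Name:        "release",
		Description: "Build the release binaries",
		Args:        []outline.Arg{{Name: "GO_VERSION", Value: "1.19", Description: "Go toolchain version"}},
	}
	dt, err := json.Marshal(o)
	if err != nil {
		panic(err)
	}
	// Prints the TARGET/DESCRIPTION header and a BUILD ARG table.
	if err := outline.PrintOutline(dt, os.Stdout); err != nil {
		panic(err)
	}
}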

vendor/github.com/moby/buildkit/frontend/subrequests/targets/targets.go (generated, vendored, new file): 84 additions
@@ -0,0 +1,84 @@
+package targets
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"text/tabwriter"
+
+	"github.com/moby/buildkit/frontend/gateway/client"
+	"github.com/moby/buildkit/frontend/subrequests"
+	"github.com/moby/buildkit/solver/pb"
+)
+
+const RequestTargets = "frontend.targets"
+
+var SubrequestsTargetsDefinition = subrequests.Request{
+	Name:        RequestTargets,
+	Version:     "1.0.0",
+	Type:        subrequests.TypeRPC,
+	Description: "List all targets current build supports",
+	Opts:        []subrequests.Named{},
+	Metadata: []subrequests.Named{
+		{Name: "result.json"},
+		{Name: "result.txt"},
+	},
+}
+
+type List struct {
+	Targets []Target `json:"targets"`
+	Sources [][]byte `json:"sources"`
+}
+
+func (l List) ToResult() (*client.Result, error) {
+	res := client.NewResult()
+	dt, err := json.MarshalIndent(l, "", "  ")
+	if err != nil {
+		return nil, err
+	}
+	res.AddMeta("result.json", dt)
+
+	b := bytes.NewBuffer(nil)
+	if err := PrintTargets(dt, b); err != nil {
+		return nil, err
+	}
+	res.AddMeta("result.txt", b.Bytes())
+
+	res.AddMeta("version", []byte(SubrequestsTargetsDefinition.Version))
+	return res, nil
+}
+
+type Target struct {
+	Name        string       `json:"name,omitempty"`
+	Default     bool         `json:"default,omitempty"`
+	Description string       `json:"description,omitempty"`
+	Base        string       `json:"base,omitempty"`
+	Platform    string       `json:"platform,omitempty"`
+	Location    *pb.Location `json:"location,omitempty"`
+}
+
+func PrintTargets(dt []byte, w io.Writer) error {
+	var l List
+
+	if err := json.Unmarshal(dt, &l); err != nil {
+		return err
+	}
+
+	tw := tabwriter.NewWriter(w, 0, 0, 1, ' ', 0)
+	fmt.Fprintf(tw, "TARGET\tDESCRIPTION\n")
+
+	for _, t := range l.Targets {
+		name := t.Name
+		if name == "" && t.Default {
+			name = "(default)"
+		} else {
+			if t.Default {
+				name = fmt.Sprintf("%s (default)", name)
+			}
+		}
+		fmt.Fprintf(tw, "%s\t%s\n", name, t.Description)
+	}
+
+	return tw.Flush()
+}
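
Note: a minimal sketch of PrintTargets, showing how an unnamed default stage is rendered as "(default)"; the stage names below are made up:

package main

import (
	"encoding/json"
	"os"

	"github.com/moby/buildkit/frontend/subrequests/targets"
)

func main() {
	l := targets.List{Targets: []targets.Target{ // hypothetical stages
		{Name: "", Default: true, Description: "final runtime image"},
		{Name: "build", Description: "compile the binaries"},
	}}
	dt, err := json.Marshal(l)
	if err != nil {
		panic(err)
	}
	// TARGET    DESCRIPTION
	// (default) final runtime image
	// build     compile the binaries
	if err := targets.PrintTargets(dt, os.Stdout); err != nil {
		panic(err)
	}
}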

vendor/github.com/moby/buildkit/frontend/subrequests/types.go (generated, vendored, new file): 21 additions
@@ -0,0 +1,21 @@
+package subrequests
+
+type Request struct {
+	Name        string      `json:"name"`
+	Version     string      `json:"version"`
+	Type        RequestType `json:"type"`
+	Description string      `json:"description"`
+	Opts        []Named     `json:"opts"`
+	Inputs      []Named     `json:"inputs"`
+	Metadata    []Named     `json:"metadata"`
+	Refs        []Named     `json:"refs"`
+}
+
+type Named struct {
+	Name        string `json:"name"`
+	Description string `json:"description"`
+}
+
+type RequestType string
+
+const TypeRPC RequestType = "rpc"

vendor/github.com/moby/buildkit/solver/pb/ops.pb.go (generated, vendored): 324 changes
@@ -1586,8 +1586,8 @@ func (m *Range) GetEnd() Position {
 
 // Position is single location in a source file
 type Position struct {
-	Line      int32 `protobuf:"varint,1,opt,name=Line,proto3" json:"Line,omitempty"`
-	Character int32 `protobuf:"varint,2,opt,name=Character,proto3" json:"Character,omitempty"`
+	Line      int32 `protobuf:"varint,1,opt,name=line,proto3" json:"line,omitempty"`
+	Character int32 `protobuf:"varint,2,opt,name=character,proto3" json:"character,omitempty"`
 }
 
 func (m *Position) Reset() { *m = Position{} }
@@ -2831,166 +2831,166 @@ func init() {
 func init() { proto.RegisterFile("ops.proto", fileDescriptor_8de16154b2733812) }
 
 var fileDescriptor_8de16154b2733812 = []byte{
-	// 2538 bytes of a gzipped FileDescriptorProto
+	// 2535 bytes of a gzipped FileDescriptorProto
 	[about 160 lines of regenerated gzipped descriptor bytes omitted; the bytes
 	change only because the descriptor now carries the renamed line/character
 	field names shown above]
 }
 
 func (m *Op) Marshal() (dAtA []byte, err error) {

vendor/github.com/moby/buildkit/solver/pb/ops.proto (generated, vendored): 4 changes
@@ -243,8 +243,8 @@ message Range {
 
 // Position is single location in a source file
 message Position {
-	int32 Line = 1;
-	int32 Character = 2;
+	int32 line = 1;
+	int32 character = 2;
 }
 
 message ExportCache {
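
Note: the rename is wire-compatible for binary protobuf (the field numbers are unchanged) but changes the JSON field names. A minimal sketch of the new JSON form, with made-up coordinates:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/moby/buildkit/solver/pb"
)

func main() {
	dt, err := json.Marshal(pb.Position{Line: 2, Character: 5})
	if err != nil {
		panic(err)
	}
	// After the rename the keys are lower-case: {"line":2,"character":5}
	fmt.Println(string(dt))
}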

vendor/github.com/moby/buildkit/util/gitutil/git_ref.go (generated, vendored, new file): 95 additions
@@ -0,0 +1,95 @@
+package gitutil
+
+import (
+	"regexp"
+	"strings"
+
+	"github.com/containerd/containerd/errdefs"
+)
+
+// GitRef represents a git ref.
+//
+// Examples:
+// - "https://github.com/foo/bar.git#baz/qux:quux/quuz" is parsed into:
+//   {Remote: "https://github.com/foo/bar.git", ShortName: "bar", Commit:"baz/qux", SubDir: "quux/quuz"}.
+type GitRef struct {
+	// Remote is the remote repository path.
+	Remote string
+
+	// ShortName is the directory name of the repo.
+	// e.g., "bar" for "https://github.com/foo/bar.git"
+	ShortName string
+
+	// Commit is a commit hash, a tag, or branch name.
+	// Commit is optional.
+	Commit string
+
+	// SubDir is a directory path inside the repo.
+	// SubDir is optional.
+	SubDir string
+
+	// IndistinguishableFromLocal is true for a ref that is indistinguishable from a local file path,
+	// e.g., "github.com/foo/bar".
+	//
+	// Deprecated.
+	// Instead, use a distinguishable form such as "https://github.com/foo/bar.git".
+	//
+	// The dockerfile frontend still accepts this form only for build contexts.
+	IndistinguishableFromLocal bool
+
+	// UnencryptedTCP is true for a ref that needs an unencrypted TCP connection,
+	// e.g., "git://..." and "http://..." .
+	//
+	// Discouraged, although not deprecated.
+	// Instead, consider using an encrypted TCP connection such as "git@github.com/foo/bar.git" or "https://github.com/foo/bar.git".
+	UnencryptedTCP bool
+}
+
+// var gitURLPathWithFragmentSuffix = regexp.MustCompile(`\.git(?:#.+)?$`)
+
+// ParseGitRef parses a git ref.
+func ParseGitRef(ref string) (*GitRef, error) {
+	res := &GitRef{}
+
+	if strings.HasPrefix(ref, "github.com/") {
+		res.IndistinguishableFromLocal = true // Deprecated
+	} else {
+		_, proto := ParseProtocol(ref)
+		switch proto {
+		case UnknownProtocol:
+			return nil, errdefs.ErrInvalidArgument
+		}
+		switch proto {
+		case HTTPProtocol, GitProtocol:
+			res.UnencryptedTCP = true // Discouraged, but not deprecated
+		}
+		switch proto {
+		// An HTTP(S) URL is considered to be a valid git ref only when it has the ".git[...]" suffix.
+		case HTTPProtocol, HTTPSProtocol:
+			var gitURLPathWithFragmentSuffix = regexp.MustCompile(`\.git(?:#.+)?$`)
+			if !gitURLPathWithFragmentSuffix.MatchString(ref) {
+				return nil, errdefs.ErrInvalidArgument
+			}
+		}
+	}
+
+	refSplitBySharp := strings.SplitN(ref, "#", 2)
+	res.Remote = refSplitBySharp[0]
+	if len(res.Remote) == 0 {
+		return res, errdefs.ErrInvalidArgument
+	}
+
+	if len(refSplitBySharp) > 1 {
+		refSplitBySharpSplitByColon := strings.SplitN(refSplitBySharp[1], ":", 2)
+		res.Commit = refSplitBySharpSplitByColon[0]
+		if len(res.Commit) == 0 {
+			return res, errdefs.ErrInvalidArgument
+		}
+		if len(refSplitBySharpSplitByColon) > 1 {
+			res.SubDir = refSplitBySharpSplitByColon[1]
+		}
+	}
+	repoSplitBySlash := strings.Split(res.Remote, "/")
+	res.ShortName = strings.TrimSuffix(repoSplitBySlash[len(repoSplitBySlash)-1], ".git")
+	return res, nil
+}
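
Note: a minimal sketch of ParseGitRef, using the example ref from the doc comment above:

package main

import (
	"fmt"

	"github.com/moby/buildkit/util/gitutil"
)

func main() {
	ref, err := gitutil.ParseGitRef("https://github.com/foo/bar.git#baz/qux:quux/quuz")
	if err != nil {
		panic(err)
	}
	// Remote=https://github.com/foo/bar.git ShortName=bar Commit=baz/qux SubDir=quux/quuz
	fmt.Printf("Remote=%s ShortName=%s Commit=%s SubDir=%s\n", ref.Remote, ref.ShortName, ref.Commit, ref.SubDir)
}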

vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go (generated, vendored): 8 changes
@@ -42,6 +42,14 @@ func ToGRPC(err error) error {
 		st = status.FromProto(pb)
 	}
 
+	// If the original error was wrapped with more context than the GRPCStatus error,
+	// copy the original message to the GRPCStatus error
+	if err.Error() != st.Message() {
+		pb := st.Proto()
+		pb.Message = err.Error()
+		st = status.FromProto(pb)
+	}
+
 	var details []proto.Message
 
 	for _, st := range stack.Traces(err) {
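
Note: a minimal sketch of the behavior this adds, assuming ToGRPC locates the embedded status inside a wrapped error chain; before this change the wrapping context ("fetching layer" below) would be dropped from the status message:

package main

import (
	"fmt"

	"github.com/moby/buildkit/util/grpcerrors"
	"github.com/pkg/errors"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Wrap a gRPC status error with extra context.
	err := errors.Wrap(status.Error(codes.NotFound, "blob not found"), "fetching layer")
	out := grpcerrors.ToGRPC(err)
	// With this change the wrapped context is preserved in the status message:
	// "fetching layer: blob not found"
	fmt.Println(status.Convert(out).Message())
}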

vendor/github.com/moby/buildkit/util/progress/progress.go (generated, vendored): 17 changes
@@ -274,3 +274,20 @@ func (pw *noOpWriter) Write(_ string, _ interface{}) error {
 func (pw *noOpWriter) Close() error {
 	return nil
 }
+
+func OneOff(ctx context.Context, id string) func(err error) error {
+	pw, _, _ := NewFromContext(ctx)
+	now := time.Now()
+	st := Status{
+		Started: &now,
+	}
+	pw.Write(id, st)
+	return func(err error) error {
+		// TODO: set error on status
+		now := time.Now()
+		st.Completed = &now
+		pw.Write(id, st)
+		pw.Close()
+		return err
+	}
+}
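
Note: a minimal sketch of the new OneOff helper. OneOff writes a "started" status immediately and returns a closure that records completion, so call sites stay to one line; doWork is a hypothetical work function, and ctx would normally carry a progress writer (NewFromContext falls back to a no-op writer otherwise):

package main

import (
	"context"

	"github.com/moby/buildkit/util/progress"
)

// doWork stands in for any single unit of work (hypothetical).
func doWork(ctx context.Context) error { return nil }

func main() {
	ctx := context.Background()
	done := progress.OneOff(ctx, "resolving image config")
	// done records the completion time and passes the work's error through.
	_ = done(doWork(ctx))
}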
Some files were not shown because too many files have changed in this diff.