test: add basic integration tests

Signed-off-by: Justin Chadwell <me@jedevc.com>
Justin Chadwell
2023-05-15 18:48:58 +01:00
parent e61a8cf637
commit 2d124e0ce9
73 changed files with 8537 additions and 2 deletions


@ -0,0 +1,16 @@
package dockerd
type Config struct {
Features map[string]bool `json:"features,omitempty"`
Mirrors []string `json:"registry-mirrors,omitempty"`
Builder BuilderConfig `json:"builder,omitempty"`
}
type BuilderEntitlements struct {
NetworkHost bool `json:"network-host,omitempty"`
SecurityInsecure bool `json:"security-insecure,omitempty"`
}
type BuilderConfig struct {
Entitlements BuilderEntitlements `json:",omitempty"`
}
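
This Config mirrors a subset of dockerd's daemon.json. As a minimal sketch of how such a file can be produced from the struct (the field values below are illustrative, not taken from the tests), marshaling it yields the JSON that the harness later writes to disk:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/moby/buildkit/util/testutil/dockerd"
)

func main() {
	cfg := dockerd.Config{
		Features: map[string]bool{"containerd-snapshotter": true},
		Mirrors:  []string{"localhost:5000"},
		Builder: dockerd.BuilderConfig{
			Entitlements: dockerd.BuilderEntitlements{NetworkHost: true},
		},
	}
	dt, err := json.MarshalIndent(cfg, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(dt)) // candidate contents for the test daemon's daemon.json
}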


@ -0,0 +1,243 @@
package dockerd
import (
"bytes"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
"time"
"github.com/moby/buildkit/identity"
"github.com/pkg/errors"
)
type LogT interface {
Logf(string, ...interface{})
}
type nopLog struct{}
func (nopLog) Logf(string, ...interface{}) {}
const (
shortLen = 12
defaultDockerdBinary = "dockerd"
)
type Option func(*Daemon)
type Daemon struct {
root string
folder string
Wait chan error
id string
cmd *exec.Cmd
storageDriver string
execRoot string
dockerdBinary string
Log LogT
pidFile string
sockPath string
args []string
}
var sockRoot = filepath.Join(os.TempDir(), "docker-integration")
func NewDaemon(workingDir string, ops ...Option) (*Daemon, error) {
if err := os.MkdirAll(sockRoot, 0700); err != nil {
return nil, errors.Wrapf(err, "failed to create daemon socket root %q", sockRoot)
}
id := "d" + identity.NewID()[:shortLen]
daemonFolder, err := filepath.Abs(filepath.Join(workingDir, id))
if err != nil {
return nil, err
}
daemonRoot := filepath.Join(daemonFolder, "root")
if err := os.MkdirAll(daemonRoot, 0755); err != nil {
return nil, errors.Wrapf(err, "failed to create daemon root %q", daemonRoot)
}
d := &Daemon{
id: id,
folder: daemonFolder,
root: daemonRoot,
storageDriver: os.Getenv("DOCKER_GRAPHDRIVER"),
// dxr stands for docker-execroot (shortened to avoid the unix(7) path length limitation)
execRoot: filepath.Join(os.TempDir(), "dxr", id),
dockerdBinary: defaultDockerdBinary,
Log: nopLog{},
sockPath: filepath.Join(sockRoot, id+".sock"),
}
for _, op := range ops {
op(d)
}
return d, nil
}
func (d *Daemon) Sock() string {
return "unix://" + d.sockPath
}
func (d *Daemon) StartWithError(daemonLogs map[string]*bytes.Buffer, providedArgs ...string) error {
dockerdBinary, err := exec.LookPath(d.dockerdBinary)
if err != nil {
return errors.Wrapf(err, "[%s] could not find docker binary in $PATH", d.id)
}
if d.pidFile == "" {
d.pidFile = filepath.Join(d.folder, "docker.pid")
}
d.args = []string{
"--data-root", d.root,
"--exec-root", d.execRoot,
"--pidfile", d.pidFile,
"--containerd-namespace", d.id,
"--containerd-plugins-namespace", d.id + "p",
"--host", d.Sock(),
}
if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" {
d.args = append(d.args, "--userns-remap", root)
}
// If we don't explicitly set the log level or the debug flag (-D), then
// turn on debug mode
var foundLog, foundSd bool
for _, a := range providedArgs {
if strings.Contains(a, "--log-level") || strings.Contains(a, "-D") || strings.Contains(a, "--debug") {
foundLog = true
}
if strings.Contains(a, "--storage-driver") {
foundSd = true
}
}
if !foundLog {
d.args = append(d.args, "--debug")
}
if d.storageDriver != "" && !foundSd {
d.args = append(d.args, "--storage-driver", d.storageDriver)
}
d.args = append(d.args, providedArgs...)
d.cmd = exec.Command(dockerdBinary, d.args...)
d.cmd.Env = append(os.Environ(), "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE=1", "BUILDKIT_DEBUG_EXEC_OUTPUT=1", "BUILDKIT_DEBUG_PANIC_ON_ERROR=1")
if daemonLogs != nil {
b := new(bytes.Buffer)
daemonLogs["stdout: "+d.cmd.Path] = b
d.cmd.Stdout = &lockingWriter{Writer: b}
b = new(bytes.Buffer)
daemonLogs["stderr: "+d.cmd.Path] = b
d.cmd.Stderr = &lockingWriter{Writer: b}
}
fmt.Fprintf(d.cmd.Stderr, "> startCmd %v %+v\n", time.Now(), d.cmd.String())
if err := d.cmd.Start(); err != nil {
return errors.Wrapf(err, "[%s] could not start daemon container", d.id)
}
wait := make(chan error, 1)
go func() {
ret := d.cmd.Wait()
d.Log.Logf("[%s] exiting daemon", d.id)
// If we send before logging, we might accidentally log _after_ the test is done.
// As of Go 1.12, this incurs a panic instead of silently being dropped.
wait <- ret
close(wait)
}()
d.Wait = wait
d.Log.Logf("[%s] daemon started\n", d.id)
return nil
}
var errDaemonNotStarted = errors.New("daemon not started")
func (d *Daemon) StopWithError() (err error) {
if d.cmd == nil || d.Wait == nil {
return errDaemonNotStarted
}
defer func() {
if err != nil {
d.Log.Logf("[%s] error while stopping daemon: %v", d.id, err)
} else {
d.Log.Logf("[%s] daemon stopped", d.id)
if d.pidFile != "" {
_ = os.Remove(d.pidFile)
}
}
d.cmd = nil
}()
i := 1
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
tick := ticker.C
d.Log.Logf("[%s] stopping daemon", d.id)
if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
if strings.Contains(err.Error(), "os: process already finished") {
return errDaemonNotStarted
}
return errors.Wrapf(err, "[%s] could not send signal", d.id)
}
out1:
for {
select {
case err := <-d.Wait:
return err
case <-time.After(20 * time.Second):
// allow time for stopping jobs and running onShutdown hooks
d.Log.Logf("[%s] daemon stop timed out after 20 seconds", d.id)
break out1
}
}
out2:
for {
select {
case err := <-d.Wait:
return err
case <-tick:
i++
if i > 5 {
d.Log.Logf("[%s] tried to interrupt daemon for %d times, now try to kill it", d.id, i)
break out2
}
d.Log.Logf("[%d] attempt #%d/5: daemon is still running with pid %d", i, d.cmd.Process.Pid)
if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
return errors.Wrapf(err, "[%s] attempt #%d/5 could not send signal", d.id, i)
}
}
}
if err := d.cmd.Process.Kill(); err != nil {
d.Log.Logf("[%s] failed to kill daemon: %v", d.id, err)
return err
}
return nil
}
type lockingWriter struct {
mu sync.Mutex
io.Writer
}
func (w *lockingWriter) Write(dt []byte) (int, error) {
w.mu.Lock()
n, err := w.Writer.Write(dt)
w.mu.Unlock()
return n, err
}
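
A minimal sketch of the intended lifecycle, assuming a hypothetical helper in a test package that imports this dockerd package: create the daemon under the test's temp dir, start it with a logs map so its stdout/stderr are captured, and stop it on cleanup.

package mytests

import (
	"bytes"
	"testing"

	"github.com/moby/buildkit/util/testutil/dockerd"
)

// startTestDaemon is a hypothetical helper, not part of this commit.
func startTestDaemon(t *testing.T) *dockerd.Daemon {
	t.Helper()
	logs := map[string]*bytes.Buffer{}
	d, err := dockerd.NewDaemon(t.TempDir())
	if err != nil {
		t.Fatal(err)
	}
	if err := d.StartWithError(logs, "--debug"); err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() { _ = d.StopWithError() })
	// d.Sock() returns the unix:// address clients should connect to.
	return d
}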


@ -0,0 +1,130 @@
package testutil
import (
"context"
"encoding/json"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/platforms"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
type ImageInfo struct {
Desc ocispecs.Descriptor
Manifest ocispecs.Manifest
Img ocispecs.Image
Layers []map[string]*TarItem
LayersRaw [][]byte
descPlatform string
}
type ImagesInfo struct {
Desc ocispecs.Descriptor
Index ocispecs.Index
Images []*ImageInfo
}
func (idx ImagesInfo) Find(platform string) *ImageInfo {
result := idx.Filter(platform)
if len(result.Images) == 0 {
return nil
}
return result.Images[0]
}
func (idx ImagesInfo) Filter(platform string) *ImagesInfo {
result := &ImagesInfo{Desc: idx.Desc}
for _, info := range idx.Images {
if info.descPlatform == platform {
result.Images = append(result.Images, info)
}
}
return result
}
func (idx ImagesInfo) FindAttestation(platform string) *ImageInfo {
img := idx.Find(platform)
if img == nil {
return nil
}
for _, info := range idx.Images {
if info.Desc.Annotations["vnd.docker.reference.digest"] == string(img.Desc.Digest) {
return info
}
}
return nil
}
func ReadImages(ctx context.Context, p content.Provider, desc ocispecs.Descriptor) (*ImagesInfo, error) {
idx := &ImagesInfo{Desc: desc}
dt, err := content.ReadBlob(ctx, p, desc)
if err != nil {
return nil, err
}
if err := json.Unmarshal(dt, &idx.Index); err != nil {
return nil, err
}
if !images.IsIndexType(idx.Index.MediaType) {
img, err := ReadImage(ctx, p, desc)
if err != nil {
return nil, err
}
img.descPlatform = platforms.Format(img.Img.Platform)
idx.Images = append(idx.Images, img)
return idx, nil
}
for _, m := range idx.Index.Manifests {
img, err := ReadImage(ctx, p, m)
if err != nil {
return nil, err
}
img.descPlatform = platforms.Format(*m.Platform)
idx.Images = append(idx.Images, img)
}
return idx, nil
}
func ReadImage(ctx context.Context, p content.Provider, desc ocispecs.Descriptor) (*ImageInfo, error) {
ii := &ImageInfo{Desc: desc}
dt, err := content.ReadBlob(ctx, p, desc)
if err != nil {
return nil, err
}
if err := json.Unmarshal(dt, &ii.Manifest); err != nil {
return nil, err
}
if !images.IsManifestType(ii.Manifest.MediaType) {
return nil, errors.Errorf("invalid manifest type %s", ii.Manifest.MediaType)
}
dt, err = content.ReadBlob(ctx, p, ii.Manifest.Config)
if err != nil {
return nil, err
}
if err := json.Unmarshal(dt, &ii.Img); err != nil {
return nil, err
}
ii.Layers = make([]map[string]*TarItem, len(ii.Manifest.Layers))
ii.LayersRaw = make([][]byte, len(ii.Manifest.Layers))
for i, l := range ii.Manifest.Layers {
dt, err := content.ReadBlob(ctx, p, l)
if err != nil {
return nil, err
}
ii.LayersRaw[i] = dt
if images.IsLayerType(l.MediaType) {
m, err := ReadTarToMap(dt, true)
if err != nil {
return nil, err
}
ii.Layers[i] = m
}
}
return ii, nil
}
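
A sketch of how these helpers are meant to be used, assuming a descriptor and content.Provider obtained elsewhere (here via contentutil.ProviderFromRef, which this vendored tree also uses); the ref and platform strings are illustrative.

package mytests

import (
	"context"

	"github.com/moby/buildkit/util/contentutil"
	"github.com/moby/buildkit/util/testutil"
)

func inspectImage(ctx context.Context, ref string) error {
	desc, provider, err := contentutil.ProviderFromRef(ref)
	if err != nil {
		return err
	}
	imgs, err := testutil.ReadImages(ctx, provider, desc)
	if err != nil {
		return err
	}
	if img := imgs.Find("linux/amd64"); img != nil {
		_ = img.Img.Config.Env // decoded image config
		_ = img.Layers         // per-layer file maps (nil entries for non-layer media types)
	}
	if att := imgs.FindAttestation("linux/amd64"); att != nil {
		_ = att.LayersRaw // raw attestation payloads referencing the amd64 manifest
	}
	return nil
}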


@ -0,0 +1,89 @@
package integration
import (
"fmt"
"net"
"net/http"
"os"
"os/exec"
"testing"
"time"
"github.com/pkg/errors"
)
const (
azuriteBin = "azurite-blob"
)
type AzuriteOpts struct {
AccountName string
AccountKey string
}
func NewAzuriteServer(t *testing.T, sb Sandbox, opts AzuriteOpts) (address string, cl func() error, err error) {
t.Helper()
if _, err := exec.LookPath(azuriteBin); err != nil {
return "", nil, errors.Wrapf(err, "failed to lookup %s binary", azuriteBin)
}
deferF := &multiCloser{}
cl = deferF.F()
defer func() {
if err != nil {
deferF.F()()
cl = nil
}
}()
l, err := net.Listen("tcp", "localhost:0")
if err != nil {
return "", nil, err
}
addr := l.Addr().String()
if err = l.Close(); err != nil {
return "", nil, err
}
host, port, err := net.SplitHostPort(addr)
if err != nil {
return "", nil, err
}
address = fmt.Sprintf("http://%s/%s", addr, opts.AccountName)
// start server
cmd := exec.Command(azuriteBin, "--disableProductStyleUrl", "--blobHost", host, "--blobPort", port, "--location", t.TempDir())
cmd.Env = append(os.Environ(), []string{
"AZURITE_ACCOUNTS=" + opts.AccountName + ":" + opts.AccountKey,
}...)
azuriteStop, err := startCmd(cmd, sb.Logs())
if err != nil {
return "", nil, err
}
if err = waitAzurite(address, 15*time.Second); err != nil {
azuriteStop()
return "", nil, errors.Wrapf(err, "azurite did not start up: %s", formatLogs(sb.Logs()))
}
deferF.append(azuriteStop)
return
}
func waitAzurite(address string, d time.Duration) error {
step := 1 * time.Second
i := 0
for {
if resp, err := http.Get(fmt.Sprintf("%s?comp=list", address)); err == nil {
resp.Body.Close()
break
}
i++
if time.Duration(i)*step > d {
return errors.Errorf("failed dialing: %s", address)
}
time.Sleep(step)
}
return nil
}
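
A sketch of wiring this helper into a sandboxed cache test; the account name and key are placeholders (azurite expects the key to be base64-encoded), and the surrounding test is assumed to run under the integration harness defined later in this commit.

package mytests

import (
	"testing"

	"github.com/moby/buildkit/util/testutil/integration"
	"github.com/stretchr/testify/require"
)

func testAzblobCacheBackend(t *testing.T, sb integration.Sandbox) {
	addr, cleanup, err := integration.NewAzuriteServer(t, sb, integration.AzuriteOpts{
		AccountName: "azblobcacheaccount",
		AccountKey:  "QQ==", // placeholder base64 key
	})
	require.NoError(t, err)
	defer func() { _ = cleanup() }()
	// addr is the account endpoint, e.g. http://127.0.0.1:<port>/azblobcacheaccount,
	// to be passed to the azblob cache exporter/importer under test.
	_ = addr
}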


@ -0,0 +1,241 @@
package integration
import (
"bytes"
"context"
"fmt"
"log"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
"github.com/moby/buildkit/util/bklog"
"github.com/pkg/errors"
)
func InitContainerdWorker() {
Register(&Containerd{
ID: "containerd",
Containerd: "containerd",
})
// defined in Dockerfile
// e.g. `containerd-1.1=/opt/containerd-1.1/bin,containerd-42.0=/opt/containerd-42.0/bin`
if s := os.Getenv("BUILDKIT_INTEGRATION_CONTAINERD_EXTRA"); s != "" {
entries := strings.Split(s, ",")
for _, entry := range entries {
pair := strings.Split(strings.TrimSpace(entry), "=")
if len(pair) != 2 {
panic(errors.Errorf("unexpected BUILDKIT_INTEGRATION_CONTAINERD_EXTRA: %q", s))
}
name, bin := pair[0], pair[1]
Register(&Containerd{
ID: name,
Containerd: filepath.Join(bin, "containerd"),
// override PATH to make sure that the expected version of the shim binary is used
ExtraEnv: []string{fmt.Sprintf("PATH=%s:%s", bin, os.Getenv("PATH"))},
})
}
}
// the rootless uid is defined in Dockerfile
if s := os.Getenv("BUILDKIT_INTEGRATION_ROOTLESS_IDPAIR"); s != "" {
var uid, gid int
if _, err := fmt.Sscanf(s, "%d:%d", &uid, &gid); err != nil {
bklog.L.Fatalf("unexpected BUILDKIT_INTEGRATION_ROOTLESS_IDPAIR: %q", s)
}
if rootlessSupported(uid) {
Register(&Containerd{
ID: "containerd-rootless",
Containerd: "containerd",
UID: uid,
GID: gid,
Snapshotter: "native", // TODO: test with fuse-overlayfs as well, or automatically determine snapshotter
})
}
}
if s := os.Getenv("BUILDKIT_INTEGRATION_SNAPSHOTTER"); s != "" {
Register(&Containerd{
ID: fmt.Sprintf("containerd-snapshotter-%s", s),
Containerd: "containerd",
Snapshotter: s,
})
}
}
type Containerd struct {
ID string
Containerd string
Snapshotter string
UID int
GID int
ExtraEnv []string // e.g. "PATH=/opt/containerd-1.4/bin:/usr/bin:..."
}
func (c *Containerd) Name() string {
return c.ID
}
func (c *Containerd) Rootless() bool {
return c.UID != 0
}
func (c *Containerd) New(ctx context.Context, cfg *BackendConfig) (b Backend, cl func() error, err error) {
if err := lookupBinary(c.Containerd); err != nil {
return nil, nil, err
}
if err := lookupBinary("buildkitd"); err != nil {
return nil, nil, err
}
if err := requireRoot(); err != nil {
return nil, nil, err
}
deferF := &multiCloser{}
cl = deferF.F()
defer func() {
if err != nil {
deferF.F()()
cl = nil
}
}()
rootless := false
if c.UID != 0 {
if c.GID == 0 {
return nil, nil, errors.Errorf("unsupported id pair: uid=%d, gid=%d", c.UID, c.GID)
}
rootless = true
}
tmpdir, err := os.MkdirTemp("", "bktest_containerd")
if err != nil {
return nil, nil, err
}
if rootless {
if err := os.Chown(tmpdir, c.UID, c.GID); err != nil {
return nil, nil, err
}
}
deferF.append(func() error { return os.RemoveAll(tmpdir) })
address := filepath.Join(tmpdir, "containerd.sock")
config := fmt.Sprintf(`root = %q
state = %q
# The CRI plugin listens on 10010/tcp for its stream server.
# We disable the CRI plugin so that multiple instances can run simultaneously.
disabled_plugins = ["cri"]
[grpc]
address = %q
[debug]
level = "debug"
address = %q
`, filepath.Join(tmpdir, "root"), filepath.Join(tmpdir, "state"), address, filepath.Join(tmpdir, "debug.sock"))
var snBuildkitdArgs []string
if c.Snapshotter != "" {
snBuildkitdArgs = append(snBuildkitdArgs,
fmt.Sprintf("--containerd-worker-snapshotter=%s", c.Snapshotter))
if c.Snapshotter == "stargz" {
snPath, snCl, err := runStargzSnapshotter(cfg)
if err != nil {
return nil, nil, err
}
deferF.append(snCl)
config = fmt.Sprintf(`%s
[proxy_plugins]
[proxy_plugins.stargz]
type = "snapshot"
address = %q
`, config, snPath)
}
}
configFile := filepath.Join(tmpdir, "config.toml")
if err := os.WriteFile(configFile, []byte(config), 0644); err != nil {
return nil, nil, err
}
containerdArgs := []string{c.Containerd, "--config", configFile}
rootlessKitState := filepath.Join(tmpdir, "rootlesskit-containerd")
if rootless {
containerdArgs = append(append([]string{"sudo", "-u", fmt.Sprintf("#%d", c.UID), "-i",
fmt.Sprintf("CONTAINERD_ROOTLESS_ROOTLESSKIT_STATE_DIR=%s", rootlessKitState),
// The integration tests require access to localhost in the host network namespace.
// TODO: remove these configurations
"CONTAINERD_ROOTLESS_ROOTLESSKIT_NET=host",
"CONTAINERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=none",
"CONTAINERD_ROOTLESS_ROOTLESSKIT_FLAGS=--mtu=0",
}, c.ExtraEnv...), "containerd-rootless.sh", "-c", configFile)
}
cmd := exec.Command(containerdArgs[0], containerdArgs[1:]...) //nolint:gosec // test utility
cmd.Env = append(os.Environ(), c.ExtraEnv...)
ctdStop, err := startCmd(cmd, cfg.Logs)
if err != nil {
return nil, nil, err
}
if err := waitUnix(address, 10*time.Second, cmd); err != nil {
ctdStop()
return nil, nil, errors.Wrapf(err, "containerd did not start up: %s", formatLogs(cfg.Logs))
}
deferF.append(ctdStop)
buildkitdArgs := append([]string{"buildkitd",
"--oci-worker=false",
"--containerd-worker-gc=false",
"--containerd-worker=true",
"--containerd-worker-addr", address,
"--containerd-worker-labels=org.mobyproject.buildkit.worker.sandbox=true", // Include use of --containerd-worker-labels to trigger https://github.com/moby/buildkit/pull/603
}, snBuildkitdArgs...)
if runtime.GOOS != "windows" && c.Snapshotter != "native" {
c.ExtraEnv = append(c.ExtraEnv, "BUILDKIT_DEBUG_FORCE_OVERLAY_DIFF=true")
}
if rootless {
pidStr, err := os.ReadFile(filepath.Join(rootlessKitState, "child_pid"))
if err != nil {
return nil, nil, err
}
pid, err := strconv.ParseInt(string(pidStr), 10, 64)
if err != nil {
return nil, nil, err
}
buildkitdArgs = append([]string{"sudo", "-u", fmt.Sprintf("#%d", c.UID), "-i", "--", "exec",
"nsenter", "-U", "--preserve-credentials", "-m", "-t", fmt.Sprintf("%d", pid)},
append(buildkitdArgs, "--containerd-worker-snapshotter=native")...)
}
buildkitdSock, stop, err := runBuildkitd(ctx, cfg, buildkitdArgs, cfg.Logs, c.UID, c.GID, c.ExtraEnv)
if err != nil {
printLogs(cfg.Logs, log.Println)
return nil, nil, err
}
deferF.append(stop)
return backend{
address: buildkitdSock,
containerdAddress: address,
rootless: rootless,
snapshotter: c.Snapshotter,
}, cl, nil
}
func formatLogs(m map[string]*bytes.Buffer) string {
var ss []string
for k, b := range m {
if b != nil {
ss = append(ss, fmt.Sprintf("%q:%q", k, b.String()))
}
}
return strings.Join(ss, ",")
}


@ -0,0 +1,248 @@
package integration
import (
"context"
"encoding/json"
"io"
"net"
"os"
"path/filepath"
"strings"
"time"
"github.com/docker/docker/client"
"github.com/moby/buildkit/cmd/buildkitd/config"
"github.com/moby/buildkit/util/testutil/dockerd"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
)
// InitDockerdWorker registers a dockerd worker with the global registry.
func InitDockerdWorker() {
Register(&Moby{
ID: "dockerd",
IsRootless: false,
Unsupported: []string{
FeatureCacheExport,
FeatureCacheImport,
FeatureCacheBackendAzblob,
FeatureCacheBackendGha,
FeatureCacheBackendLocal,
FeatureCacheBackendRegistry,
FeatureCacheBackendS3,
FeatureDirectPush,
FeatureImageExporter,
FeatureMultiCacheExport,
FeatureMultiPlatform,
FeatureOCIExporter,
FeatureOCILayout,
FeatureProvenance,
FeatureSBOM,
FeatureSecurityMode,
FeatureCNINetwork,
},
})
Register(&Moby{
ID: "dockerd-containerd",
IsRootless: false,
ContainerdSnapshotter: true,
Unsupported: []string{
FeatureSecurityMode,
FeatureCNINetwork,
},
})
}
type Moby struct {
ID string
IsRootless bool
ContainerdSnapshotter bool
Unsupported []string
}
func (c Moby) Name() string {
return c.ID
}
func (c Moby) Rootless() bool {
return c.IsRootless
}
func (c Moby) New(ctx context.Context, cfg *BackendConfig) (b Backend, cl func() error, err error) {
if err := requireRoot(); err != nil {
return nil, nil, err
}
bkcfg, err := config.LoadFile(cfg.ConfigFile)
if err != nil {
return nil, nil, errors.Wrapf(err, "failed to load buildkit config file %s", cfg.ConfigFile)
}
dcfg := dockerd.Config{
Features: map[string]bool{
"containerd-snapshotter": c.ContainerdSnapshotter,
},
}
if reg, ok := bkcfg.Registries["docker.io"]; ok && len(reg.Mirrors) > 0 {
for _, m := range reg.Mirrors {
dcfg.Mirrors = append(dcfg.Mirrors, "http://"+m)
}
}
if bkcfg.Entitlements != nil {
for _, e := range bkcfg.Entitlements {
switch e {
case "network.host":
dcfg.Builder.Entitlements.NetworkHost = true
case "security.insecure":
dcfg.Builder.Entitlements.SecurityInsecure = true
}
}
}
dcfgdt, err := json.Marshal(dcfg)
if err != nil {
return nil, nil, errors.Wrapf(err, "failed to marshal dockerd config")
}
deferF := &multiCloser{}
cl = deferF.F()
defer func() {
if err != nil {
deferF.F()()
cl = nil
}
}()
var proxyGroup errgroup.Group
deferF.append(proxyGroup.Wait)
workDir, err := os.MkdirTemp("", "integration")
if err != nil {
return nil, nil, err
}
d, err := dockerd.NewDaemon(workDir)
if err != nil {
return nil, nil, errors.Errorf("new daemon error: %q, %s", err, formatLogs(cfg.Logs))
}
dockerdConfigFile := filepath.Join(workDir, "daemon.json")
if err := os.WriteFile(dockerdConfigFile, dcfgdt, 0644); err != nil {
return nil, nil, err
}
dockerdFlags := []string{
"--config-file", dockerdConfigFile,
"--userland-proxy=false",
"--debug",
}
if s := os.Getenv("BUILDKIT_INTEGRATION_DOCKERD_FLAGS"); s != "" {
dockerdFlags = append(dockerdFlags, strings.Split(strings.TrimSpace(s), "\n")...)
}
err = d.StartWithError(cfg.Logs, dockerdFlags...)
if err != nil {
return nil, nil, err
}
deferF.append(d.StopWithError)
if err := waitUnix(d.Sock(), 5*time.Second, nil); err != nil {
return nil, nil, errors.Errorf("dockerd did not start up: %q, %s", err, formatLogs(cfg.Logs))
}
dockerAPI, err := client.NewClientWithOpts(client.WithHost(d.Sock()))
if err != nil {
return nil, nil, err
}
deferF.append(dockerAPI.Close)
err = waitForAPI(ctx, dockerAPI, 5*time.Second)
if err != nil {
return nil, nil, errors.Wrapf(err, "dockerd client api timed out: %s", formatLogs(cfg.Logs))
}
// Create a temporary file to reserve a name for a Unix domain socket.
// Remove it immediately (the name will still be valid for the socket) so that
// we don't leave files all over the user's tmp tree.
f, err := os.CreateTemp("", "buildkit-integration")
if err != nil {
return
}
localPath := f.Name()
f.Close()
os.Remove(localPath)
listener, err := net.Listen("unix", localPath)
if err != nil {
return nil, nil, errors.Wrapf(err, "dockerd listener error: %s", formatLogs(cfg.Logs))
}
deferF.append(listener.Close)
proxyGroup.Go(func() error {
for {
tmpConn, err := listener.Accept()
if err != nil {
// Ignore the error from accept which is always a system error.
return nil
}
conn, err := dockerAPI.DialHijack(ctx, "/grpc", "h2c", nil)
if err != nil {
return err
}
proxyGroup.Go(func() error {
_, err := io.Copy(conn, tmpConn)
if err != nil {
return err
}
return tmpConn.Close()
})
proxyGroup.Go(func() error {
_, err := io.Copy(tmpConn, conn)
if err != nil {
return err
}
return conn.Close()
})
}
})
return backend{
address: "unix://" + listener.Addr().String(),
dockerAddress: d.Sock(),
rootless: c.IsRootless,
isDockerd: true,
unsupportedFeatures: c.Unsupported,
}, cl, nil
}
func waitForAPI(ctx context.Context, apiClient *client.Client, d time.Duration) error {
step := 50 * time.Millisecond
i := 0
for {
if _, err := apiClient.Ping(ctx); err == nil {
break
}
i++
if time.Duration(i)*step > d {
return errors.New("failed to connect to /_ping endpoint")
}
time.Sleep(step)
}
return nil
}
func IsTestDockerd() bool {
return os.Getenv("TEST_DOCKERD") == "1"
}
func IsTestDockerdMoby(sb Sandbox) bool {
b, err := getBackend(sb)
if err != nil {
return false
}
return b.isDockerd && sb.Name() == "dockerd"
}


@ -0,0 +1,56 @@
package integration
import (
"context"
"encoding/json"
"os"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/content/local"
"github.com/containerd/containerd/images/archive"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)
func providerFromBinary(fn string) (_ ocispecs.Descriptor, _ content.Provider, _ func(), err error) {
ctx := context.TODO()
tmpDir, err := os.MkdirTemp("", "buildkit-state")
if err != nil {
return ocispecs.Descriptor{}, nil, nil, err
}
close := func() {
os.RemoveAll(tmpDir)
}
defer func() {
if err != nil {
close()
}
}()
// can't use contentutil.Buffer because ImportIndex takes a content.Store even though it only requires Provider/Ingester
c, err := local.NewStore(tmpDir)
if err != nil {
return ocispecs.Descriptor{}, nil, nil, err
}
f, err := os.Open(fn)
if err != nil {
return ocispecs.Descriptor{}, nil, nil, err
}
defer f.Close()
desc, err := archive.ImportIndex(ctx, c, f)
if err != nil {
return ocispecs.Descriptor{}, nil, nil, err
}
var idx ocispecs.Index
dt, err := content.ReadBlob(ctx, c, desc)
if err != nil {
return ocispecs.Descriptor{}, nil, nil, err
}
if err := json.Unmarshal(dt, &idx); err != nil {
return ocispecs.Descriptor{}, nil, nil, err
}
return idx.Manifests[0], c, close, nil
}


@ -0,0 +1,116 @@
package integration
import (
"fmt"
"net"
"net/http"
"os"
"os/exec"
"testing"
"time"
"github.com/pkg/errors"
)
const (
minioBin = "minio"
mcBin = "mc"
)
type MinioOpts struct {
Region string
AccessKeyID string
SecretAccessKey string
}
func NewMinioServer(t *testing.T, sb Sandbox, opts MinioOpts) (address string, bucket string, cl func() error, err error) {
t.Helper()
bucket = randomString(10)
if _, err := exec.LookPath(minioBin); err != nil {
return "", "", nil, errors.Wrapf(err, "failed to lookup %s binary", minioBin)
}
if _, err := exec.LookPath(mcBin); err != nil {
return "", "", nil, errors.Wrapf(err, "failed to lookup %s binary", mcBin)
}
deferF := &multiCloser{}
cl = deferF.F()
defer func() {
if err != nil {
deferF.F()()
cl = nil
}
}()
l, err := net.Listen("tcp", "localhost:0")
if err != nil {
return "", "", nil, err
}
addr := l.Addr().String()
if err = l.Close(); err != nil {
return "", "", nil, err
}
address = "http://" + addr
// start server
cmd := exec.Command(minioBin, "server", "--json", "--address", addr, t.TempDir())
cmd.Env = append(os.Environ(), []string{
"MINIO_ROOT_USER=" + opts.AccessKeyID,
"MINIO_ROOT_PASSWORD=" + opts.SecretAccessKey,
}...)
minioStop, err := startCmd(cmd, sb.Logs())
if err != nil {
return "", "", nil, err
}
if err = waitMinio(address, 15*time.Second); err != nil {
minioStop()
return "", "", nil, errors.Wrapf(err, "minio did not start up: %s", formatLogs(sb.Logs()))
}
deferF.append(minioStop)
// create alias config
alias := randomString(10)
cmd = exec.Command(mcBin, "alias", "set", alias, address, opts.AccessKeyID, opts.SecretAccessKey)
if err := runCmd(cmd, sb.Logs()); err != nil {
return "", "", nil, err
}
deferF.append(func() error {
return exec.Command(mcBin, "alias", "rm", alias).Run()
})
// create bucket
cmd = exec.Command(mcBin, "mb", "--region", opts.Region, fmt.Sprintf("%s/%s", alias, bucket)) // #nosec G204
if err := runCmd(cmd, sb.Logs()); err != nil {
return "", "", nil, err
}
// trace
cmd = exec.Command(mcBin, "admin", "trace", "--json", alias)
traceStop, err := startCmd(cmd, sb.Logs())
if err != nil {
return "", "", nil, err
}
deferF.append(traceStop)
return
}
func waitMinio(address string, d time.Duration) error {
step := 1 * time.Second
i := 0
for {
if resp, err := http.Get(fmt.Sprintf("%s/minio/health/live", address)); err == nil {
resp.Body.Close()
break
}
i++
if time.Duration(i)*step > d {
return errors.Errorf("failed dialing: %s", address)
}
time.Sleep(step)
}
return nil
}
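
Similarly, a sketch of using the minio helper from an S3 cache test; the region and credentials are placeholders.

package mytests

import (
	"testing"

	"github.com/moby/buildkit/util/testutil/integration"
	"github.com/stretchr/testify/require"
)

func testS3CacheBackend(t *testing.T, sb integration.Sandbox) {
	addr, bucket, cleanup, err := integration.NewMinioServer(t, sb, integration.MinioOpts{
		Region:          "us-east-1",
		AccessKeyID:     "minioadmin",
		SecretAccessKey: "minioadmin",
	})
	require.NoError(t, err)
	defer func() { _ = cleanup() }()
	// addr ("http://<host>:<port>") and bucket identify the throwaway S3 endpoint
	// for the s3 cache exporter/importer under test.
	_, _ = addr, bucket
}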


@ -0,0 +1,86 @@
package integration
import (
"context"
"fmt"
"log"
"os"
"runtime"
"github.com/moby/buildkit/util/bklog"
"github.com/pkg/errors"
)
func InitOCIWorker() {
Register(&OCI{ID: "oci"})
// the rootless uid is defined in Dockerfile
if s := os.Getenv("BUILDKIT_INTEGRATION_ROOTLESS_IDPAIR"); s != "" {
var uid, gid int
if _, err := fmt.Sscanf(s, "%d:%d", &uid, &gid); err != nil {
bklog.L.Fatalf("unexpected BUILDKIT_INTEGRATION_ROOTLESS_IDPAIR: %q", s)
}
if rootlessSupported(uid) {
Register(&OCI{ID: "oci-rootless", UID: uid, GID: gid})
}
}
if s := os.Getenv("BUILDKIT_INTEGRATION_SNAPSHOTTER"); s != "" {
Register(&OCI{ID: "oci-snapshotter-" + s, Snapshotter: s})
}
}
type OCI struct {
ID string
UID int
GID int
Snapshotter string
}
func (s *OCI) Name() string {
return s.ID
}
func (s *OCI) Rootless() bool {
return s.UID != 0
}
func (s *OCI) New(ctx context.Context, cfg *BackendConfig) (Backend, func() error, error) {
if err := lookupBinary("buildkitd"); err != nil {
return nil, nil, err
}
if err := requireRoot(); err != nil {
return nil, nil, err
}
// Include use of --oci-worker-labels to trigger https://github.com/moby/buildkit/pull/603
buildkitdArgs := []string{"buildkitd", "--oci-worker=true", "--containerd-worker=false", "--oci-worker-gc=false", "--oci-worker-labels=org.mobyproject.buildkit.worker.sandbox=true"}
if s.Snapshotter != "" {
buildkitdArgs = append(buildkitdArgs,
fmt.Sprintf("--oci-worker-snapshotter=%s", s.Snapshotter))
}
if s.UID != 0 {
if s.GID == 0 {
return nil, nil, errors.Errorf("unsupported id pair: uid=%d, gid=%d", s.UID, s.GID)
}
// TODO: make sure the user exists and subuid/subgid are configured.
buildkitdArgs = append([]string{"sudo", "-u", fmt.Sprintf("#%d", s.UID), "-i", "--", "exec", "rootlesskit"}, buildkitdArgs...)
}
var extraEnv []string
if runtime.GOOS != "windows" && s.Snapshotter != "native" {
extraEnv = append(extraEnv, "BUILDKIT_DEBUG_FORCE_OVERLAY_DIFF=true")
}
buildkitdSock, stop, err := runBuildkitd(ctx, cfg, buildkitdArgs, cfg.Logs, s.UID, s.GID, extraEnv)
if err != nil {
printLogs(cfg.Logs, log.Println)
return nil, nil, err
}
return backend{
address: buildkitdSock,
rootless: s.UID != 0,
snapshotter: s.Snapshotter,
}, stop, nil
}
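
Workers register themselves into the package-level list that Run later iterates over. A sketch of how a test package is expected to opt in, gating the dockerd workers on TEST_DOCKERD via IsTestDockerd (the init placement is a convention, not required by the API):

package mytests

import "github.com/moby/buildkit/util/testutil/integration"

func init() {
	integration.InitOCIWorker()
	integration.InitContainerdWorker()
	if integration.IsTestDockerd() {
		integration.InitDockerdWorker()
	}
}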


@ -0,0 +1,15 @@
package integration
var pins = map[string]map[string]string{
// busybox is pinned to 1.35. Newer releases produce an "illegal instruction" panic on sha256sum on some of the GitHub infra
"busybox:latest": {
"amd64": "sha256:0d5a701f0ca53f38723108687add000e1922f812d4187dea7feaee85d2f5a6c5",
"arm64v8": "sha256:ffe38d75e44d8ffac4cd6d09777ffc31e94ea0ded6a0164e825a325dc17a3b68",
"library": "sha256:f4ed5f2163110c26d42741fdc92bd1710e118aed4edb19212548e8ca4e5fca22",
},
"alpine:latest": {
"amd64": "sha256:c0d488a800e4127c334ad20d61d7bc21b4097540327217dfab52262adc02380c",
"arm64v8": "sha256:af06af3514c44a964d3b905b498cf6493db8f1cde7c10e078213a89c87308ba0",
"library": "sha256:8914eb54f968791faf6a8638949e480fef81e697984fba772b3976835194c6d4",
},
}


@ -0,0 +1,109 @@
package integration
import (
"bufio"
"context"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"regexp"
"time"
"github.com/pkg/errors"
)
func NewRegistry(dir string) (url string, cl func() error, err error) {
if err := lookupBinary("registry"); err != nil {
return "", nil, err
}
deferF := &multiCloser{}
cl = deferF.F()
defer func() {
if err != nil {
deferF.F()()
cl = nil
}
}()
if dir == "" {
tmpdir, err := os.MkdirTemp("", "test-registry")
if err != nil {
return "", nil, err
}
deferF.append(func() error { return os.RemoveAll(tmpdir) })
dir = tmpdir
}
if _, err := os.Stat(filepath.Join(dir, "config.yaml")); err != nil {
if !errors.Is(err, os.ErrNotExist) {
return "", nil, err
}
template := fmt.Sprintf(`version: 0.1
loglevel: debug
storage:
  filesystem:
    rootdirectory: %s
http:
  addr: 127.0.0.1:0
`, filepath.Join(dir, "data"))
if err := os.WriteFile(filepath.Join(dir, "config.yaml"), []byte(template), 0600); err != nil {
return "", nil, err
}
}
cmd := exec.Command("registry", "serve", filepath.Join(dir, "config.yaml")) //nolint:gosec // test utility
rc, err := cmd.StderrPipe()
if err != nil {
return "", nil, err
}
stop, err := startCmd(cmd, nil)
if err != nil {
return "", nil, err
}
deferF.append(stop)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
url, err = detectPort(ctx, rc)
if err != nil {
return "", nil, err
}
return
}
func detectPort(ctx context.Context, rc io.ReadCloser) (string, error) {
r := regexp.MustCompile(`listening on 127\.0\.0\.1:(\d+)`)
s := bufio.NewScanner(rc)
found := make(chan struct{})
defer func() {
close(found)
go io.Copy(io.Discard, rc)
}()
go func() {
select {
case <-ctx.Done():
select {
case <-found:
return
default:
rc.Close()
}
case <-found:
}
}()
for s.Scan() {
res := r.FindSubmatch(s.Bytes())
if len(res) > 1 {
return "localhost:" + string(res[1]), nil
}
}
return "", errors.Errorf("no listening address found")
}
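
A sketch of booting a scratch registry directly (tests normally go through Sandbox.NewRegistry, defined further down); it assumes the "registry" binary is in PATH, and the ref is illustrative.

package mytests

import (
	"testing"

	"github.com/moby/buildkit/util/testutil/integration"
	"github.com/stretchr/testify/require"
)

func testWithScratchRegistry(t *testing.T) {
	url, cleanup, err := integration.NewRegistry(t.TempDir())
	require.NoError(t, err)
	defer func() { _ = cleanup() }()
	// url is a plain host:port (e.g. "localhost:39123"); tag test images under it.
	_ = url + "/buildkit/testimage:latest"
}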


@ -0,0 +1,459 @@
package integration
import (
"bytes"
"context"
"fmt"
"math/rand"
"os"
"os/exec"
"path/filepath"
"reflect"
"runtime"
"sort"
"strings"
"testing"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/remotes/docker"
"github.com/gofrs/flock"
"github.com/moby/buildkit/util/appcontext"
"github.com/moby/buildkit/util/contentutil"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
"golang.org/x/sync/semaphore"
)
var sandboxLimiter *semaphore.Weighted
func init() {
sandboxLimiter = semaphore.NewWeighted(int64(runtime.GOMAXPROCS(0)))
}
// Backend is the minimal interface that describes a testing backend.
type Backend interface {
Address() string
DockerAddress() string
ContainerdAddress() string
Rootless() bool
Snapshotter() string
}
type Sandbox interface {
Backend
Context() context.Context
Cmd(...string) *exec.Cmd
Logs() map[string]*bytes.Buffer
PrintLogs(*testing.T)
ClearLogs()
NewRegistry() (string, error)
Value(string) interface{} // chosen matrix value
Name() string
}
// BackendConfig is used to configure backends created by a worker.
type BackendConfig struct {
Logs map[string]*bytes.Buffer
ConfigFile string
}
type Worker interface {
New(context.Context, *BackendConfig) (Backend, func() error, error)
Name() string
Rootless() bool
}
type ConfigUpdater interface {
UpdateConfigFile(string) string
}
type Test interface {
Name() string
Run(t *testing.T, sb Sandbox)
}
type testFunc struct {
name string
run func(t *testing.T, sb Sandbox)
}
func (f testFunc) Name() string {
return f.name
}
func (f testFunc) Run(t *testing.T, sb Sandbox) {
t.Helper()
f.run(t, sb)
}
func TestFuncs(funcs ...func(t *testing.T, sb Sandbox)) []Test {
var tests []Test
names := map[string]struct{}{}
for _, f := range funcs {
name := getFunctionName(f)
if _, ok := names[name]; ok {
panic("duplicate test: " + name)
}
names[name] = struct{}{}
tests = append(tests, testFunc{name: name, run: f})
}
return tests
}
var defaultWorkers []Worker
func Register(w Worker) {
defaultWorkers = append(defaultWorkers, w)
}
func List() []Worker {
return defaultWorkers
}
// TestOpt is an option that can be used to configure a set of integration
// tests.
type TestOpt func(*testConf)
func WithMatrix(key string, m map[string]interface{}) TestOpt {
return func(tc *testConf) {
if tc.matrix == nil {
tc.matrix = map[string]map[string]interface{}{}
}
tc.matrix[key] = m
}
}
func WithMirroredImages(m map[string]string) TestOpt {
return func(tc *testConf) {
if tc.mirroredImages == nil {
tc.mirroredImages = map[string]string{}
}
for k, v := range m {
tc.mirroredImages[k] = v
}
}
}
type testConf struct {
matrix map[string]map[string]interface{}
mirroredImages map[string]string
}
func Run(t *testing.T, testCases []Test, opt ...TestOpt) {
if testing.Short() {
t.Skip("skipping in short mode")
}
if os.Getenv("SKIP_INTEGRATION_TESTS") == "1" {
t.Skip("skipping integration tests")
}
var tc testConf
for _, o := range opt {
o(&tc)
}
mirror, cleanup, err := runMirror(t, tc.mirroredImages)
require.NoError(t, err)
t.Cleanup(func() { _ = cleanup() })
matrix := prepareValueMatrix(tc)
list := List()
if os.Getenv("BUILDKIT_WORKER_RANDOM") == "1" && len(list) > 0 {
rng := rand.New(rand.NewSource(time.Now().UnixNano())) //nolint:gosec // using math/rand is fine in a test utility
list = []Worker{list[rng.Intn(len(list))]}
}
for _, br := range list {
for _, tc := range testCases {
for _, mv := range matrix {
fn := tc.Name()
name := fn + "/worker=" + br.Name() + mv.functionSuffix()
func(fn, testName string, br Worker, tc Test, mv matrixValue) {
ok := t.Run(testName, func(t *testing.T) {
if strings.Contains(fn, "NoRootless") && br.Rootless() {
// skip sandbox setup
t.Skip("rootless")
}
ctx := appcontext.Context()
if !strings.HasSuffix(fn, "NoParallel") {
t.Parallel()
}
require.NoError(t, sandboxLimiter.Acquire(context.TODO(), 1))
defer sandboxLimiter.Release(1)
sb, closer, err := newSandbox(ctx, br, mirror, mv)
require.NoError(t, err)
t.Cleanup(func() { _ = closer() })
defer func() {
if t.Failed() {
sb.PrintLogs(t)
}
}()
tc.Run(t, sb)
})
require.True(t, ok)
}(fn, name, br, tc, mv)
}
}
}
}
func getFunctionName(i interface{}) string {
fullname := runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name()
dot := strings.LastIndex(fullname, ".") + 1
return strings.Title(fullname[dot:]) //nolint:staticcheck // ignoring "SA1019: strings.Title is deprecated", as for our use we don't need full unicode support
}
var localImageCache map[string]map[string]struct{}
func copyImagesLocal(t *testing.T, host string, images map[string]string) error {
for to, from := range images {
if localImageCache == nil {
localImageCache = map[string]map[string]struct{}{}
}
if _, ok := localImageCache[host]; !ok {
localImageCache[host] = map[string]struct{}{}
}
if _, ok := localImageCache[host][to]; ok {
continue
}
localImageCache[host][to] = struct{}{}
var desc ocispecs.Descriptor
var provider content.Provider
var err error
if strings.HasPrefix(from, "local:") {
var closer func()
desc, provider, closer, err = providerFromBinary(strings.TrimPrefix(from, "local:"))
if err != nil {
return err
}
if closer != nil {
defer closer()
}
} else {
desc, provider, err = contentutil.ProviderFromRef(from)
if err != nil {
return err
}
}
// already exists check
_, _, err = docker.NewResolver(docker.ResolverOptions{}).Resolve(context.TODO(), host+"/"+to)
if err == nil {
continue
}
ingester, err := contentutil.IngesterFromRef(host + "/" + to)
if err != nil {
return err
}
if err := contentutil.CopyChain(context.TODO(), ingester, provider, desc); err != nil {
return err
}
t.Logf("copied %s to local mirror %s", from, host+"/"+to)
}
return nil
}
func OfficialImages(names ...string) map[string]string {
ns := runtime.GOARCH
if ns == "arm64" {
ns = "arm64v8"
} else if ns != "amd64" {
ns = "library"
}
m := map[string]string{}
for _, name := range names {
ref := "docker.io/" + ns + "/" + name
if pns, ok := pins[name]; ok {
if dgst, ok := pns[ns]; ok {
ref += "@" + dgst
}
}
m["library/"+name] = ref
}
return m
}
func withMirrorConfig(mirror string) ConfigUpdater {
return mirrorConfig(mirror)
}
type mirrorConfig string
func (mc mirrorConfig) UpdateConfigFile(in string) string {
return fmt.Sprintf(`%s
[registry."docker.io"]
mirrors=["%s"]
`, in, mc)
}
func writeConfig(updaters []ConfigUpdater) (string, error) {
tmpdir, err := os.MkdirTemp("", "bktest_config")
if err != nil {
return "", err
}
if err := os.Chmod(tmpdir, 0711); err != nil {
return "", err
}
s := ""
for _, upt := range updaters {
s = upt.UpdateConfigFile(s)
}
if err := os.WriteFile(filepath.Join(tmpdir, buildkitdConfigFile), []byte(s), 0644); err != nil {
return "", err
}
return tmpdir, nil
}
func runMirror(t *testing.T, mirroredImages map[string]string) (host string, _ func() error, err error) {
mirrorDir := os.Getenv("BUILDKIT_REGISTRY_MIRROR_DIR")
var lock *flock.Flock
if mirrorDir != "" {
if err := os.MkdirAll(mirrorDir, 0700); err != nil {
return "", nil, err
}
lock = flock.New(filepath.Join(mirrorDir, "lock"))
if err := lock.Lock(); err != nil {
return "", nil, err
}
defer func() {
if err != nil {
lock.Unlock()
}
}()
}
mirror, cleanup, err := NewRegistry(mirrorDir)
if err != nil {
return "", nil, err
}
defer func() {
if err != nil {
cleanup()
}
}()
if err := copyImagesLocal(t, mirror, mirroredImages); err != nil {
return "", nil, err
}
if mirrorDir != "" {
if err := lock.Unlock(); err != nil {
return "", nil, err
}
}
return mirror, cleanup, err
}
type matrixValue struct {
fn []string
values map[string]matrixValueChoice
}
func (mv matrixValue) functionSuffix() string {
if len(mv.fn) == 0 {
return ""
}
sort.Strings(mv.fn)
sb := &strings.Builder{}
for _, f := range mv.fn {
sb.Write([]byte("/" + f + "=" + mv.values[f].name))
}
return sb.String()
}
type matrixValueChoice struct {
name string
value interface{}
}
func newMatrixValue(key, name string, v interface{}) matrixValue {
return matrixValue{
fn: []string{key},
values: map[string]matrixValueChoice{
key: {
name: name,
value: v,
},
},
}
}
func prepareValueMatrix(tc testConf) []matrixValue {
m := []matrixValue{}
for featureName, values := range tc.matrix {
current := m
m = []matrixValue{}
for featureValue, v := range values {
if len(current) == 0 {
m = append(m, newMatrixValue(featureName, featureValue, v))
}
for _, c := range current {
vv := newMatrixValue(featureName, featureValue, v)
vv.fn = append(vv.fn, c.fn...)
for k, v := range c.values {
vv.values[k] = v
}
m = append(m, vv)
}
}
}
if len(m) == 0 {
m = append(m, matrixValue{})
}
return m
}
func runStargzSnapshotter(cfg *BackendConfig) (address string, cl func() error, err error) {
binary := "containerd-stargz-grpc"
if err := lookupBinary(binary); err != nil {
return "", nil, err
}
deferF := &multiCloser{}
cl = deferF.F()
defer func() {
if err != nil {
deferF.F()()
cl = nil
}
}()
tmpStargzDir, err := os.MkdirTemp("", "bktest_containerd_stargz_grpc")
if err != nil {
return "", nil, err
}
deferF.append(func() error { return os.RemoveAll(tmpStargzDir) })
address = filepath.Join(tmpStargzDir, "containerd-stargz-grpc.sock")
stargzRootDir := filepath.Join(tmpStargzDir, "root")
cmd := exec.Command(binary,
"--log-level", "debug",
"--address", address,
"--root", stargzRootDir)
snStop, err := startCmd(cmd, cfg.Logs)
if err != nil {
return "", nil, err
}
if err = waitUnix(address, 10*time.Second, cmd); err != nil {
snStop()
return "", nil, errors.Wrapf(err, "containerd-stargz-grpc did not start up: %s", formatLogs(cfg.Logs))
}
deferF.append(snStop)
return
}
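
Putting the pieces together, a sketch of the intended entry point for a test package: register test functions, mirror the pinned images once, and let Run fan each test out across every registered worker (the test body here is illustrative).

package mytests

import (
	"testing"

	"github.com/moby/buildkit/util/testutil/integration"
)

func TestIntegration(t *testing.T) {
	integration.Run(t,
		integration.TestFuncs(
			testBuildctlDiskUsage, // each entry is a func(*testing.T, integration.Sandbox)
		),
		integration.WithMirroredImages(integration.OfficialImages("busybox:latest", "alpine:latest")),
	)
}

func testBuildctlDiskUsage(t *testing.T, sb integration.Sandbox) {
	// sb.Cmd runs buildctl with BUILDKIT_HOST pointed at the sandboxed daemon.
	out, err := sb.Cmd("du").CombinedOutput()
	if err != nil {
		t.Fatalf("buildctl du failed: %v: %s", err, out)
	}
}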


@ -0,0 +1,369 @@
package integration
import (
"bufio"
"bytes"
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"testing"
"time"
"github.com/google/shlex"
"github.com/moby/buildkit/util/bklog"
"github.com/pkg/errors"
)
const buildkitdConfigFile = "buildkitd.toml"
type backend struct {
address string
dockerAddress string
containerdAddress string
rootless bool
snapshotter string
unsupportedFeatures []string
isDockerd bool
}
func (b backend) Address() string {
return b.address
}
func (b backend) DockerAddress() string {
return b.dockerAddress
}
func (b backend) ContainerdAddress() string {
return b.containerdAddress
}
func (b backend) Rootless() bool {
return b.rootless
}
func (b backend) Snapshotter() string {
return b.snapshotter
}
func (b backend) isUnsupportedFeature(feature string) bool {
if enabledFeatures := os.Getenv("BUILDKIT_TEST_ENABLE_FEATURES"); enabledFeatures != "" {
for _, enabledFeature := range strings.Split(enabledFeatures, ",") {
if feature == enabledFeature {
return false
}
}
}
if disabledFeatures := os.Getenv("BUILDKIT_TEST_DISABLE_FEATURES"); disabledFeatures != "" {
for _, disabledFeature := range strings.Split(disabledFeatures, ",") {
if feature == disabledFeature {
return true
}
}
}
for _, unsupportedFeature := range b.unsupportedFeatures {
if feature == unsupportedFeature {
return true
}
}
return false
}
type sandbox struct {
Backend
logs map[string]*bytes.Buffer
cleanup *multiCloser
mv matrixValue
ctx context.Context
name string
}
func (sb *sandbox) Name() string {
return sb.name
}
func (sb *sandbox) Context() context.Context {
return sb.ctx
}
func (sb *sandbox) Logs() map[string]*bytes.Buffer {
return sb.logs
}
func (sb *sandbox) PrintLogs(t *testing.T) {
printLogs(sb.logs, t.Log)
}
func (sb *sandbox) ClearLogs() {
sb.logs = make(map[string]*bytes.Buffer)
}
func (sb *sandbox) NewRegistry() (string, error) {
url, cl, err := NewRegistry("")
if err != nil {
return "", err
}
sb.cleanup.append(cl)
return url, nil
}
func (sb *sandbox) Cmd(args ...string) *exec.Cmd {
if len(args) == 1 {
if split, err := shlex.Split(args[0]); err == nil {
args = split
}
}
cmd := exec.Command("buildctl", args...)
cmd.Env = append(cmd.Env, os.Environ()...)
cmd.Env = append(cmd.Env, "BUILDKIT_HOST="+sb.Address())
return cmd
}
func (sb *sandbox) Value(k string) interface{} {
return sb.mv.values[k].value
}
func newSandbox(ctx context.Context, w Worker, mirror string, mv matrixValue) (s Sandbox, cl func() error, err error) {
cfg := &BackendConfig{
Logs: make(map[string]*bytes.Buffer),
}
var upt []ConfigUpdater
for _, v := range mv.values {
if u, ok := v.value.(ConfigUpdater); ok {
upt = append(upt, u)
}
}
if mirror != "" {
upt = append(upt, withMirrorConfig(mirror))
}
deferF := &multiCloser{}
cl = deferF.F()
defer func() {
if err != nil {
deferF.F()()
cl = nil
}
}()
if len(upt) > 0 {
dir, err := writeConfig(upt)
if err != nil {
return nil, nil, err
}
deferF.append(func() error {
return os.RemoveAll(dir)
})
cfg.ConfigFile = filepath.Join(dir, buildkitdConfigFile)
}
b, closer, err := w.New(ctx, cfg)
if err != nil {
return nil, nil, err
}
deferF.append(closer)
return &sandbox{
Backend: b,
logs: cfg.Logs,
cleanup: deferF,
mv: mv,
ctx: ctx,
name: w.Name(),
}, cl, nil
}
func getBuildkitdAddr(tmpdir string) string {
address := "unix://" + filepath.Join(tmpdir, "buildkitd.sock")
if runtime.GOOS == "windows" {
address = "//./pipe/buildkitd-" + filepath.Base(tmpdir)
}
return address
}
func runBuildkitd(ctx context.Context, conf *BackendConfig, args []string, logs map[string]*bytes.Buffer, uid, gid int, extraEnv []string) (address string, cl func() error, err error) {
deferF := &multiCloser{}
cl = deferF.F()
defer func() {
if err != nil {
deferF.F()()
cl = nil
}
}()
if conf.ConfigFile != "" {
args = append(args, "--config="+conf.ConfigFile)
}
tmpdir, err := os.MkdirTemp("", "bktest_buildkitd")
if err != nil {
return "", nil, err
}
if err := os.Chown(tmpdir, uid, gid); err != nil {
return "", nil, err
}
if err := os.MkdirAll(filepath.Join(tmpdir, "tmp"), 0711); err != nil {
return "", nil, err
}
if err := os.Chown(filepath.Join(tmpdir, "tmp"), uid, gid); err != nil {
return "", nil, err
}
deferF.append(func() error { return os.RemoveAll(tmpdir) })
address = getBuildkitdAddr(tmpdir)
args = append(args, "--root", tmpdir, "--addr", address, "--debug")
cmd := exec.Command(args[0], args[1:]...) //nolint:gosec // test utility
cmd.Env = append(os.Environ(), "BUILDKIT_DEBUG_EXEC_OUTPUT=1", "BUILDKIT_DEBUG_PANIC_ON_ERROR=1", "TMPDIR="+filepath.Join(tmpdir, "tmp"))
cmd.Env = append(cmd.Env, extraEnv...)
cmd.SysProcAttr = getSysProcAttr()
stop, err := startCmd(cmd, logs)
if err != nil {
return "", nil, err
}
deferF.append(stop)
if err := waitUnix(address, 15*time.Second, cmd); err != nil {
return "", nil, err
}
deferF.append(func() error {
f, err := os.Open("/proc/self/mountinfo")
if err != nil {
return errors.Wrap(err, "failed to open mountinfo")
}
defer f.Close()
s := bufio.NewScanner(f)
for s.Scan() {
if strings.Contains(s.Text(), tmpdir) {
return errors.Errorf("leaked mountpoint for %s", tmpdir)
}
}
return s.Err()
})
return address, cl, err
}
func getBackend(sb Sandbox) (*backend, error) {
sbx, ok := sb.(*sandbox)
if !ok {
return nil, errors.Errorf("invalid sandbox type %T", sb)
}
b, ok := sbx.Backend.(backend)
if !ok {
return nil, errors.Errorf("invalid backend type %T", b)
}
return &b, nil
}
func rootlessSupported(uid int) bool {
cmd := exec.Command("sudo", "-u", fmt.Sprintf("#%d", uid), "-i", "--", "exec", "unshare", "-U", "true") //nolint:gosec // test utility
b, err := cmd.CombinedOutput()
if err != nil {
bklog.L.Warnf("rootless mode is not supported on this host: %v (%s)", err, string(b))
return false
}
return true
}
func printLogs(logs map[string]*bytes.Buffer, f func(args ...interface{})) {
for name, l := range logs {
f(name)
s := bufio.NewScanner(l)
for s.Scan() {
f(s.Text())
}
}
}
const (
FeatureCacheExport = "cache_export"
FeatureCacheImport = "cache_import"
FeatureCacheBackendAzblob = "cache_backend_azblob"
FeatureCacheBackendGha = "cache_backend_gha"
FeatureCacheBackendInline = "cache_backend_inline"
FeatureCacheBackendLocal = "cache_backend_local"
FeatureCacheBackendRegistry = "cache_backend_registry"
FeatureCacheBackendS3 = "cache_backend_s3"
FeatureDirectPush = "direct_push"
FeatureFrontendOutline = "frontend_outline"
FeatureFrontendTargets = "frontend_targets"
FeatureImageExporter = "image_exporter"
FeatureInfo = "info"
FeatureMergeDiff = "merge_diff"
FeatureMultiCacheExport = "multi_cache_export"
FeatureMultiPlatform = "multi_platform"
FeatureOCIExporter = "oci_exporter"
FeatureOCILayout = "oci_layout"
FeatureProvenance = "provenance"
FeatureSBOM = "sbom"
FeatureSecurityMode = "security_mode"
FeatureSourceDateEpoch = "source_date_epoch"
FeatureCNINetwork = "cni_network"
)
var features = map[string]struct{}{
FeatureCacheExport: {},
FeatureCacheImport: {},
FeatureCacheBackendAzblob: {},
FeatureCacheBackendGha: {},
FeatureCacheBackendInline: {},
FeatureCacheBackendLocal: {},
FeatureCacheBackendRegistry: {},
FeatureCacheBackendS3: {},
FeatureDirectPush: {},
FeatureFrontendOutline: {},
FeatureFrontendTargets: {},
FeatureImageExporter: {},
FeatureInfo: {},
FeatureMergeDiff: {},
FeatureMultiCacheExport: {},
FeatureMultiPlatform: {},
FeatureOCIExporter: {},
FeatureOCILayout: {},
FeatureProvenance: {},
FeatureSBOM: {},
FeatureSecurityMode: {},
FeatureSourceDateEpoch: {},
FeatureCNINetwork: {},
}
func CheckFeatureCompat(t *testing.T, sb Sandbox, reason ...string) {
t.Helper()
if len(reason) == 0 {
t.Fatal("no reason provided")
}
b, err := getBackend(sb)
if err != nil {
t.Fatal(err)
}
if len(b.unsupportedFeatures) == 0 {
return
}
var ereasons []string
for _, r := range reason {
if _, ok := features[r]; ok {
if b.isUnsupportedFeature(r) {
ereasons = append(ereasons, r)
}
} else {
sb.ClearLogs()
t.Fatalf("unknown reason %q to skip test", r)
}
}
if len(ereasons) > 0 {
t.Skipf("%s worker cannot currently run this test due to missing features (%s)", sb.Name(), strings.Join(ereasons, ", "))
}
}
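
A sketch of gating a test on backend capabilities: with the plain dockerd worker registered earlier, direct push and provenance are listed as unsupported, so a test declared like this is skipped there but still runs on the OCI and containerd workers.

package mytests

import (
	"testing"

	"github.com/moby/buildkit/util/testutil/integration"
)

func testDirectPushProvenance(t *testing.T, sb integration.Sandbox) {
	integration.CheckFeatureCompat(t, sb,
		integration.FeatureDirectPush,
		integration.FeatureProvenance,
	)
	// ... exercise push with provenance against sb.Address() ...
}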


@ -0,0 +1,12 @@
//go:build !windows
// +build !windows
package integration
import "syscall"
func getSysProcAttr() *syscall.SysProcAttr {
return &syscall.SysProcAttr{
Setsid: true, // stretch sudo needs this for sigterm
}
}


@ -0,0 +1,10 @@
//go:build windows
// +build windows
package integration
import "syscall"
func getSysProcAttr() *syscall.SysProcAttr {
return &syscall.SysProcAttr{}
}


@ -0,0 +1,196 @@
package integration
import (
"bytes"
"context"
"crypto/rand"
"fmt"
"io"
"net"
"os"
"os/exec"
"strings"
"sync"
"syscall"
"testing"
"time"
"github.com/containerd/continuity/fs/fstest"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
)
func runCmd(cmd *exec.Cmd, logs map[string]*bytes.Buffer) error {
if logs != nil {
setCmdLogs(cmd, logs)
}
fmt.Fprintf(cmd.Stderr, "> runCmd %v %+v\n", time.Now(), cmd.String())
return cmd.Run()
}
func startCmd(cmd *exec.Cmd, logs map[string]*bytes.Buffer) (func() error, error) {
if logs != nil {
setCmdLogs(cmd, logs)
}
fmt.Fprintf(cmd.Stderr, "> startCmd %v %+v\n", time.Now(), cmd.String())
if err := cmd.Start(); err != nil {
return nil, err
}
eg, ctx := errgroup.WithContext(context.TODO())
stopped := make(chan struct{})
stop := make(chan struct{})
eg.Go(func() error {
err := cmd.Wait()
fmt.Fprintf(cmd.Stderr, "> stopped %v %+v %v\n", time.Now(), cmd.ProcessState, cmd.ProcessState.ExitCode())
close(stopped)
select {
case <-stop:
return nil
default:
return err
}
})
eg.Go(func() error {
select {
case <-ctx.Done():
case <-stopped:
case <-stop:
fmt.Fprintf(cmd.Stderr, "> sending sigterm %v\n", time.Now())
cmd.Process.Signal(syscall.SIGTERM)
go func() {
select {
case <-stopped:
case <-time.After(20 * time.Second):
cmd.Process.Kill()
}
}()
}
return nil
})
return func() error {
close(stop)
return eg.Wait()
}, nil
}
func setCmdLogs(cmd *exec.Cmd, logs map[string]*bytes.Buffer) {
b := new(bytes.Buffer)
logs["stdout: "+cmd.String()] = b
cmd.Stdout = &lockingWriter{Writer: b}
b = new(bytes.Buffer)
logs["stderr: "+cmd.String()] = b
cmd.Stderr = &lockingWriter{Writer: b}
}
func waitUnix(address string, d time.Duration, cmd *exec.Cmd) error {
address = strings.TrimPrefix(address, "unix://")
addr, err := net.ResolveUnixAddr("unix", address)
if err != nil {
return errors.Wrapf(err, "failed resolving unix addr: %s", address)
}
step := 50 * time.Millisecond
i := 0
for {
if cmd != nil && cmd.ProcessState != nil {
return errors.Errorf("process exited: %s", cmd.String())
}
if conn, err := net.DialUnix("unix", nil, addr); err == nil {
conn.Close()
break
}
i++
if time.Duration(i)*step > d {
return errors.Errorf("failed dialing: %s", address)
}
time.Sleep(step)
}
return nil
}
type multiCloser struct {
fns []func() error
}
func (mc *multiCloser) F() func() error {
return func() error {
var err error
for i := range mc.fns {
if err1 := mc.fns[len(mc.fns)-1-i](); err == nil {
err = err1
}
}
mc.fns = nil
return err
}
}
func (mc *multiCloser) append(f func() error) {
mc.fns = append(mc.fns, f)
}
var ErrRequirements = errors.Errorf("missing requirements")
func lookupBinary(name string) error {
_, err := exec.LookPath(name)
if err != nil {
return errors.Wrapf(ErrRequirements, "failed to lookup %s binary", name)
}
return nil
}
func requireRoot() error {
if os.Getuid() != 0 {
return errors.Wrap(ErrRequirements, "requires root")
}
return nil
}
type lockingWriter struct {
mu sync.Mutex
io.Writer
}
func (w *lockingWriter) Write(dt []byte) (int, error) {
w.mu.Lock()
n, err := w.Writer.Write(dt)
w.mu.Unlock()
return n, err
}
func Tmpdir(t *testing.T, appliers ...fstest.Applier) (string, error) {
// We cannot use t.TempDir() to create a temporary directory here because
// appliers might contain fstest.CreateSocket. If the test name is too long,
// t.TempDir() could return a path that is longer than 108 characters. This
// would result in "bind: invalid argument" when we listen on the socket.
tmpdir, err := os.MkdirTemp("", "buildkit")
if err != nil {
return "", err
}
t.Cleanup(func() {
require.NoError(t, os.RemoveAll(tmpdir))
})
if err := fstest.Apply(appliers...).Apply(tmpdir); err != nil {
return "", err
}
return tmpdir, nil
}
func randomString(n int) string {
chars := "abcdefghijklmnopqrstuvwxyz"
var b = make([]byte, n)
_, _ = rand.Read(b)
for k, v := range b {
b[k] = chars[v%byte(len(chars))]
}
return string(b)
}
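
A sketch of building an on-disk context with Tmpdir and fstest appliers (the file names and contents are illustrative):

package mytests

import (
	"testing"

	"github.com/containerd/continuity/fs/fstest"
	"github.com/moby/buildkit/util/testutil/integration"
)

func testLocalContext(t *testing.T, sb integration.Sandbox) {
	dir, err := integration.Tmpdir(t,
		fstest.CreateFile("Dockerfile", []byte("FROM scratch\nCOPY foo /foo\n"), 0600),
		fstest.CreateFile("foo", []byte("data"), 0600),
	)
	if err != nil {
		t.Fatal(err)
	}
	// dir is removed on test cleanup; pass it as a local source, e.g. --local context=<dir>.
	_ = dir
}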

vendor/github.com/moby/buildkit/util/testutil/tar.go

@ -0,0 +1,50 @@
package testutil
import (
"archive/tar"
"bytes"
"compress/gzip"
"io"
"github.com/pkg/errors"
)
type TarItem struct {
Header *tar.Header
Data []byte
}
func ReadTarToMap(dt []byte, compressed bool) (map[string]*TarItem, error) {
m := map[string]*TarItem{}
var r io.Reader = bytes.NewBuffer(dt)
if compressed {
gz, err := gzip.NewReader(r)
if err != nil {
return nil, errors.Wrapf(err, "error creating gzip reader")
}
defer gz.Close()
r = gz
}
tr := tar.NewReader(r)
for {
h, err := tr.Next()
if err != nil {
if err == io.EOF {
return m, nil
}
return nil, errors.Wrap(err, "error reading tar")
}
if _, ok := m[h.Name]; ok {
return nil, errors.Errorf("duplicate entries for %s", h.Name)
}
var dt []byte
if h.Typeflag == tar.TypeReg {
dt, err = io.ReadAll(tr)
if err != nil {
return nil, errors.Wrapf(err, "error reading file")
}
}
m[h.Name] = &TarItem{Header: h, Data: dt}
}
}
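
A sketch of asserting on layer contents with ReadTarToMap; dt is assumed to hold a gzip-compressed layer tarball (for example an entry of ImageInfo.LayersRaw), and the path checked is illustrative.

package mytests

import (
	"testing"

	"github.com/moby/buildkit/util/testutil"
)

func checkLayerHasPasswd(t *testing.T, dt []byte) {
	m, err := testutil.ReadTarToMap(dt, true)
	if err != nil {
		t.Fatal(err)
	}
	item, ok := m["etc/passwd"]
	if !ok {
		t.Fatal("layer does not contain etc/passwd")
	}
	t.Logf("etc/passwd: %d bytes, mode %o", len(item.Data), item.Header.Mode)
}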