
Tested with `kind` and GKE.

Note: "nodes" shown in `docker buildx ls` are unrelated to Kubernetes "nodes".
Probably buildx should come up with an alternative term.

Usage:

  $ kind create cluster
  $ export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"
  $ docker buildx create --driver kubernetes --driver-opt replicas=3 --use
  $ docker buildx build -t foo --load .

`--load` loads the image into the local Docker.

Driver opts:

- `image=IMAGE` - Sets the container image to be used for running buildkit.
- `namespace=NS` - Sets the Kubernetes namespace. Defaults to the current namespace.
- `replicas=N` - Sets the number of `Pod` replicas. Defaults to 1.
- `rootless=(true|false)` - Run the container as a non-root user without `securityContext.privileged`. Defaults to false.
- `loadbalance=(sticky|random)` - Load-balancing strategy. If set to "sticky", the pod is chosen using the hash of the context path. Defaults to "sticky".

Signed-off-by: Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
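For illustration, several of the opts above can be passed on one `create` call by repeating `--driver-opt`; a sketch only, where the `buildkit` namespace and the chosen values are arbitrary examples rather than defaults:

  $ kubectl create namespace buildkit
  $ docker buildx create --driver kubernetes \
      --driver-opt namespace=buildkit \
      --driver-opt replicas=2 \
      --driver-opt rootless=true \
      --driver-opt loadbalance=random \
      --use
  $ docker buildx build -t foo --load .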
package kubernetes

import (
	"context"
	"fmt"
	"net"
	"time"

	"github.com/docker/buildx/driver"
	"github.com/docker/buildx/driver/kubernetes/execconn"
	"github.com/docker/buildx/driver/kubernetes/podchooser"
	"github.com/docker/buildx/util/progress"
	"github.com/moby/buildkit/client"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	clientappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
	clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
)

const (
	DriverName = "kubernetes"
)

const (
	// valid values for driver-opt loadbalance
	LoadbalanceRandom = "random"
	LoadbalanceSticky = "sticky"
)

type Driver struct {
	driver.InitConfig
	factory          driver.Factory
	minReplicas      int
	deployment       *appsv1.Deployment
	clientset        *kubernetes.Clientset
	deploymentClient clientappsv1.DeploymentInterface
	podClient        clientcorev1.PodInterface
	podChooser       podchooser.PodChooser
}

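// Bootstrap creates the buildkitd Deployment if it does not exist yet, then
// blocks until the requested number of pods reports ready.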
func (d *Driver) Bootstrap(ctx context.Context, l progress.Logger) error {
	return progress.Wrap("[internal] booting buildkit", l, func(sub progress.SubLogger) error {
		_, err := d.deploymentClient.Get(d.deployment.Name, metav1.GetOptions{})
		if err != nil {
			// TODO: return err if err != ErrNotFound
			_, err = d.deploymentClient.Create(d.deployment)
			if err != nil {
				return errors.Wrapf(err, "error while calling deploymentClient.Create for %q", d.deployment.Name)
			}
		}
		return sub.Wrap(
			fmt.Sprintf("waiting for %d pods to be ready", d.minReplicas),
			func() error {
				if err := d.wait(ctx); err != nil {
					return err
				}
				return nil
			})
	})
}

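// wait polls the Deployment until ReadyReplicas reaches minReplicas, retrying
// up to 100 times with a linearly growing delay (100ms + 20ms per attempt)
// and returning early if the context is cancelled.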
func (d *Driver) wait(ctx context.Context) error {
	// TODO: use watch API
	var (
		err  error
		depl *appsv1.Deployment
	)
	for try := 0; try < 100; try++ {
		depl, err = d.deploymentClient.Get(d.deployment.Name, metav1.GetOptions{})
		if err == nil {
			if depl.Status.ReadyReplicas >= int32(d.minReplicas) {
				return nil
			}
			err = errors.Errorf("expected %d replicas to be ready, got %d",
				d.minReplicas, depl.Status.ReadyReplicas)
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(time.Duration(100+try*20) * time.Millisecond):
		}
	}
	return err
}

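// Info reports the driver status derived from the Deployment: Inactive when
// the Deployment cannot be found, Running when at least one replica is ready,
// and Stopped otherwise.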
func (d *Driver) Info(ctx context.Context) (*driver.Info, error) {
	depl, err := d.deploymentClient.Get(d.deployment.Name, metav1.GetOptions{})
	if err != nil {
		// TODO: return err if err != ErrNotFound
		return &driver.Info{
			Status: driver.Inactive,
		}, nil
	}
	if depl.Status.ReadyReplicas > 0 {
		return &driver.Info{
			Status: driver.Running,
		}, nil
	}
	return &driver.Info{
		Status: driver.Stopped,
	}, nil
}

func (d *Driver) Stop(ctx context.Context, force bool) error {
	// future version may scale the replicas to zero here
	return nil
}

func (d *Driver) Rm(ctx context.Context, force bool) error {
	if err := d.deploymentClient.Delete(d.deployment.Name, nil); err != nil {
		return errors.Wrapf(err, "error while calling deploymentClient.Delete for %q", d.deployment.Name)
	}
	return nil
}

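// Client dials buildkitd inside a pod selected by the configured PodChooser:
// it runs "buildctl dial-stdio" in the pod's first container via the
// Kubernetes exec API and hands the resulting stream to the buildkit client
// as its connection.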
func (d *Driver) Client(ctx context.Context) (*client.Client, error) {
	restClient := d.clientset.CoreV1().RESTClient()
	restClientConfig, err := d.KubeClientConfig.ClientConfig()
	if err != nil {
		return nil, err
	}
	pod, err := d.podChooser.ChoosePod(ctx)
	if err != nil {
		return nil, err
	}
	logrus.Infof("Using pod %q", pod.Name)
	if len(pod.Spec.Containers) == 0 {
		return nil, errors.Errorf("pod %s does not have any container", pod.Name)
	}
	containerName := pod.Spec.Containers[0].Name
	cmd := []string{"buildctl", "dial-stdio"}
	conn, err := execconn.ExecConn(restClient, restClientConfig,
		pod.Namespace, pod.Name, containerName, cmd)
	if err != nil {
		return nil, err
	}
	return client.New(ctx, "", client.WithDialer(func(string, time.Duration) (net.Conn, error) {
		return conn, nil
	}))
}

func (d *Driver) Factory() driver.Factory {
	return d.factory
}

func (d *Driver) Features() map[driver.Feature]bool {
	return map[driver.Feature]bool{
		driver.OCIExporter:    true,
		driver.DockerExporter: d.DockerAPI != nil,

		driver.CacheExport:   true,
		driver.MultiPlatform: true, // Untested (needs multiple Driver instances)
	}
}