mirror of https://github.com/docker/buildx.git
Merge pull request #1430 from crazy-max/builder-pkg
Refactor builder and drivers info logic
This commit is contained in:
commit
b242e3280b
|
@ -6,7 +6,7 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/docker/buildx/build"
|
"github.com/docker/buildx/builder"
|
||||||
"github.com/docker/buildx/driver"
|
"github.com/docker/buildx/driver"
|
||||||
"github.com/docker/buildx/util/progress"
|
"github.com/docker/buildx/util/progress"
|
||||||
"github.com/moby/buildkit/client"
|
"github.com/moby/buildkit/client"
|
||||||
|
@ -20,7 +20,7 @@ type Input struct {
|
||||||
URL string
|
URL string
|
||||||
}
|
}
|
||||||
|
|
||||||
func ReadRemoteFiles(ctx context.Context, dis []build.DriverInfo, url string, names []string, pw progress.Writer) ([]File, *Input, error) {
|
func ReadRemoteFiles(ctx context.Context, nodes []builder.Node, url string, names []string, pw progress.Writer) ([]File, *Input, error) {
|
||||||
var filename string
|
var filename string
|
||||||
st, ok := detectGitContext(url)
|
st, ok := detectGitContext(url)
|
||||||
if !ok {
|
if !ok {
|
||||||
|
@ -33,18 +33,18 @@ func ReadRemoteFiles(ctx context.Context, dis []build.DriverInfo, url string, na
|
||||||
inp := &Input{State: st, URL: url}
|
inp := &Input{State: st, URL: url}
|
||||||
var files []File
|
var files []File
|
||||||
|
|
||||||
var di *build.DriverInfo
|
var node *builder.Node
|
||||||
for _, d := range dis {
|
for _, n := range nodes {
|
||||||
if d.Err == nil {
|
if n.Err == nil {
|
||||||
di = &d
|
node = &n
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if di == nil {
|
if node == nil {
|
||||||
return nil, nil, nil
|
return nil, nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
c, err := driver.Boot(ctx, ctx, di.Driver, pw)
|
c, err := driver.Boot(ctx, ctx, node.Driver, pw)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
|
188
build/build.go
188
build/build.go
|
@ -22,7 +22,9 @@ import (
|
||||||
"github.com/containerd/containerd/content/local"
|
"github.com/containerd/containerd/content/local"
|
||||||
"github.com/containerd/containerd/images"
|
"github.com/containerd/containerd/images"
|
||||||
"github.com/containerd/containerd/platforms"
|
"github.com/containerd/containerd/platforms"
|
||||||
|
"github.com/docker/buildx/builder"
|
||||||
"github.com/docker/buildx/driver"
|
"github.com/docker/buildx/driver"
|
||||||
|
"github.com/docker/buildx/util/dockerutil"
|
||||||
"github.com/docker/buildx/util/imagetools"
|
"github.com/docker/buildx/util/imagetools"
|
||||||
"github.com/docker/buildx/util/progress"
|
"github.com/docker/buildx/util/progress"
|
||||||
"github.com/docker/buildx/util/resolver"
|
"github.com/docker/buildx/util/resolver"
|
||||||
|
@ -31,7 +33,6 @@ import (
|
||||||
"github.com/docker/distribution/reference"
|
"github.com/docker/distribution/reference"
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/docker/api/types"
|
||||||
"github.com/docker/docker/builder/remotecontext/urlutil"
|
"github.com/docker/docker/builder/remotecontext/urlutil"
|
||||||
dockerclient "github.com/docker/docker/client"
|
|
||||||
"github.com/docker/docker/pkg/jsonmessage"
|
"github.com/docker/docker/pkg/jsonmessage"
|
||||||
"github.com/moby/buildkit/client"
|
"github.com/moby/buildkit/client"
|
||||||
"github.com/moby/buildkit/client/llb"
|
"github.com/moby/buildkit/client/llb"
|
||||||
|
@ -109,28 +110,15 @@ type NamedContext struct {
|
||||||
State *llb.State
|
State *llb.State
|
||||||
}
|
}
|
||||||
|
|
||||||
type DriverInfo struct {
|
func filterAvailableNodes(nodes []builder.Node) ([]builder.Node, error) {
|
||||||
Driver driver.Driver
|
out := make([]builder.Node, 0, len(nodes))
|
||||||
Name string
|
|
||||||
Platform []specs.Platform
|
|
||||||
Err error
|
|
||||||
ImageOpt imagetools.Opt
|
|
||||||
ProxyConfig map[string]string
|
|
||||||
}
|
|
||||||
|
|
||||||
type DockerAPI interface {
|
|
||||||
DockerAPI(name string) (dockerclient.APIClient, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
func filterAvailableDrivers(drivers []DriverInfo) ([]DriverInfo, error) {
|
|
||||||
out := make([]DriverInfo, 0, len(drivers))
|
|
||||||
err := errors.Errorf("no drivers found")
|
err := errors.Errorf("no drivers found")
|
||||||
for _, di := range drivers {
|
for _, n := range nodes {
|
||||||
if di.Err == nil && di.Driver != nil {
|
if n.Err == nil && n.Driver != nil {
|
||||||
out = append(out, di)
|
out = append(out, n)
|
||||||
}
|
}
|
||||||
if di.Err != nil {
|
if n.Err != nil {
|
||||||
err = di.Err
|
err = n.Err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if len(out) > 0 {
|
if len(out) > 0 {
|
||||||
|
@ -169,8 +157,8 @@ func allIndexes(l int) []int {
|
||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
func ensureBooted(ctx context.Context, drivers []DriverInfo, idxs []int, pw progress.Writer) ([]*client.Client, error) {
|
func ensureBooted(ctx context.Context, nodes []builder.Node, idxs []int, pw progress.Writer) ([]*client.Client, error) {
|
||||||
clients := make([]*client.Client, len(drivers))
|
clients := make([]*client.Client, len(nodes))
|
||||||
|
|
||||||
baseCtx := ctx
|
baseCtx := ctx
|
||||||
eg, ctx := errgroup.WithContext(ctx)
|
eg, ctx := errgroup.WithContext(ctx)
|
||||||
|
@ -178,7 +166,7 @@ func ensureBooted(ctx context.Context, drivers []DriverInfo, idxs []int, pw prog
|
||||||
for _, i := range idxs {
|
for _, i := range idxs {
|
||||||
func(i int) {
|
func(i int) {
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
c, err := driver.Boot(ctx, baseCtx, drivers[i].Driver, pw)
|
c, err := driver.Boot(ctx, baseCtx, nodes[i].Driver, pw)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -219,8 +207,8 @@ func splitToDriverPairs(availablePlatforms map[string]int, opt map[string]Option
|
||||||
return m
|
return m
|
||||||
}
|
}
|
||||||
|
|
||||||
func resolveDrivers(ctx context.Context, drivers []DriverInfo, opt map[string]Options, pw progress.Writer) (map[string][]driverPair, []*client.Client, error) {
|
func resolveDrivers(ctx context.Context, nodes []builder.Node, opt map[string]Options, pw progress.Writer) (map[string][]driverPair, []*client.Client, error) {
|
||||||
dps, clients, err := resolveDriversBase(ctx, drivers, opt, pw)
|
dps, clients, err := resolveDriversBase(ctx, nodes, opt, pw)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
@ -260,10 +248,10 @@ func resolveDrivers(ctx context.Context, drivers []DriverInfo, opt map[string]Op
|
||||||
return dps, clients, nil
|
return dps, clients, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func resolveDriversBase(ctx context.Context, drivers []DriverInfo, opt map[string]Options, pw progress.Writer) (map[string][]driverPair, []*client.Client, error) {
|
func resolveDriversBase(ctx context.Context, nodes []builder.Node, opt map[string]Options, pw progress.Writer) (map[string][]driverPair, []*client.Client, error) {
|
||||||
availablePlatforms := map[string]int{}
|
availablePlatforms := map[string]int{}
|
||||||
for i, d := range drivers {
|
for i, node := range nodes {
|
||||||
for _, p := range d.Platform {
|
for _, p := range node.Platforms {
|
||||||
availablePlatforms[platforms.Format(p)] = i
|
availablePlatforms[platforms.Format(p)] = i
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -281,12 +269,12 @@ func resolveDriversBase(ctx context.Context, drivers []DriverInfo, opt map[strin
|
||||||
}
|
}
|
||||||
|
|
||||||
// fast path
|
// fast path
|
||||||
if len(drivers) == 1 || len(allPlatforms) == 0 {
|
if len(nodes) == 1 || len(allPlatforms) == 0 {
|
||||||
m := map[string][]driverPair{}
|
m := map[string][]driverPair{}
|
||||||
for k, opt := range opt {
|
for k, opt := range opt {
|
||||||
m[k] = []driverPair{{driverIndex: 0, platforms: opt.Platforms}}
|
m[k] = []driverPair{{driverIndex: 0, platforms: opt.Platforms}}
|
||||||
}
|
}
|
||||||
clients, err := ensureBooted(ctx, drivers, driverIndexes(m), pw)
|
clients, err := ensureBooted(ctx, nodes, driverIndexes(m), pw)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
@ -296,7 +284,7 @@ func resolveDriversBase(ctx context.Context, drivers []DriverInfo, opt map[strin
|
||||||
// map based on existing platforms
|
// map based on existing platforms
|
||||||
if !undetectedPlatform {
|
if !undetectedPlatform {
|
||||||
m := splitToDriverPairs(availablePlatforms, opt)
|
m := splitToDriverPairs(availablePlatforms, opt)
|
||||||
clients, err := ensureBooted(ctx, drivers, driverIndexes(m), pw)
|
clients, err := ensureBooted(ctx, nodes, driverIndexes(m), pw)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
@ -304,7 +292,7 @@ func resolveDriversBase(ctx context.Context, drivers []DriverInfo, opt map[strin
|
||||||
}
|
}
|
||||||
|
|
||||||
// boot all drivers in k
|
// boot all drivers in k
|
||||||
clients, err := ensureBooted(ctx, drivers, allIndexes(len(drivers)), pw)
|
clients, err := ensureBooted(ctx, nodes, allIndexes(len(nodes)), pw)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
@ -366,8 +354,8 @@ func toRepoOnly(in string) (string, error) {
|
||||||
return strings.Join(out, ","), nil
|
return strings.Join(out, ","), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func toSolveOpt(ctx context.Context, di DriverInfo, multiDriver bool, opt Options, bopts gateway.BuildOpts, configDir string, pw progress.Writer, dl dockerLoadCallback) (solveOpt *client.SolveOpt, release func(), err error) {
|
func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Options, bopts gateway.BuildOpts, configDir string, pw progress.Writer, dl dockerLoadCallback) (solveOpt *client.SolveOpt, release func(), err error) {
|
||||||
d := di.Driver
|
nodeDriver := node.Driver
|
||||||
defers := make([]func(), 0, 2)
|
defers := make([]func(), 0, 2)
|
||||||
releaseF := func() {
|
releaseF := func() {
|
||||||
for _, f := range defers {
|
for _, f := range defers {
|
||||||
|
@ -399,8 +387,8 @@ func toSolveOpt(ctx context.Context, di DriverInfo, multiDriver bool, opt Option
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, e := range opt.CacheTo {
|
for _, e := range opt.CacheTo {
|
||||||
if e.Type != "inline" && !d.Features()[driver.CacheExport] {
|
if e.Type != "inline" && !nodeDriver.Features()[driver.CacheExport] {
|
||||||
return nil, nil, notSupported(d, driver.CacheExport)
|
return nil, nil, notSupported(nodeDriver, driver.CacheExport)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -460,7 +448,7 @@ func toSolveOpt(ctx context.Context, di DriverInfo, multiDriver bool, opt Option
|
||||||
case 1:
|
case 1:
|
||||||
// valid
|
// valid
|
||||||
case 0:
|
case 0:
|
||||||
if d.IsMobyDriver() && !noDefaultLoad() {
|
if nodeDriver.IsMobyDriver() && !noDefaultLoad() {
|
||||||
// backwards compat for docker driver only:
|
// backwards compat for docker driver only:
|
||||||
// this ensures the build results in a docker image.
|
// this ensures the build results in a docker image.
|
||||||
opt.Exports = []client.ExportEntry{{Type: "image", Attrs: map[string]string{}}}
|
opt.Exports = []client.ExportEntry{{Type: "image", Attrs: map[string]string{}}}
|
||||||
|
@ -509,15 +497,15 @@ func toSolveOpt(ctx context.Context, di DriverInfo, multiDriver bool, opt Option
|
||||||
if (e.Type == "local" || e.Type == "tar") && opt.ImageIDFile != "" {
|
if (e.Type == "local" || e.Type == "tar") && opt.ImageIDFile != "" {
|
||||||
return nil, nil, errors.Errorf("local and tar exporters are incompatible with image ID file")
|
return nil, nil, errors.Errorf("local and tar exporters are incompatible with image ID file")
|
||||||
}
|
}
|
||||||
if e.Type == "oci" && !d.Features()[driver.OCIExporter] {
|
if e.Type == "oci" && !nodeDriver.Features()[driver.OCIExporter] {
|
||||||
return nil, nil, notSupported(d, driver.OCIExporter)
|
return nil, nil, notSupported(nodeDriver, driver.OCIExporter)
|
||||||
}
|
}
|
||||||
if e.Type == "docker" {
|
if e.Type == "docker" {
|
||||||
if len(opt.Platforms) > 1 {
|
if len(opt.Platforms) > 1 {
|
||||||
return nil, nil, errors.Errorf("docker exporter does not currently support exporting manifest lists")
|
return nil, nil, errors.Errorf("docker exporter does not currently support exporting manifest lists")
|
||||||
}
|
}
|
||||||
if e.Output == nil {
|
if e.Output == nil {
|
||||||
if d.IsMobyDriver() {
|
if nodeDriver.IsMobyDriver() {
|
||||||
e.Type = "image"
|
e.Type = "image"
|
||||||
} else {
|
} else {
|
||||||
w, cancel, err := dl(e.Attrs["context"])
|
w, cancel, err := dl(e.Attrs["context"])
|
||||||
|
@ -527,11 +515,11 @@ func toSolveOpt(ctx context.Context, di DriverInfo, multiDriver bool, opt Option
|
||||||
defers = append(defers, cancel)
|
defers = append(defers, cancel)
|
||||||
opt.Exports[i].Output = wrapWriteCloser(w)
|
opt.Exports[i].Output = wrapWriteCloser(w)
|
||||||
}
|
}
|
||||||
} else if !d.Features()[driver.DockerExporter] {
|
} else if !nodeDriver.Features()[driver.DockerExporter] {
|
||||||
return nil, nil, notSupported(d, driver.DockerExporter)
|
return nil, nil, notSupported(nodeDriver, driver.DockerExporter)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if e.Type == "image" && d.IsMobyDriver() {
|
if e.Type == "image" && nodeDriver.IsMobyDriver() {
|
||||||
opt.Exports[i].Type = "moby"
|
opt.Exports[i].Type = "moby"
|
||||||
if e.Attrs["push"] != "" {
|
if e.Attrs["push"] != "" {
|
||||||
if ok, _ := strconv.ParseBool(e.Attrs["push"]); ok {
|
if ok, _ := strconv.ParseBool(e.Attrs["push"]); ok {
|
||||||
|
@ -552,7 +540,7 @@ func toSolveOpt(ctx context.Context, di DriverInfo, multiDriver bool, opt Option
|
||||||
so.Exports = opt.Exports
|
so.Exports = opt.Exports
|
||||||
so.Session = opt.Session
|
so.Session = opt.Session
|
||||||
|
|
||||||
releaseLoad, err := LoadInputs(ctx, d, opt.Inputs, pw, &so)
|
releaseLoad, err := LoadInputs(ctx, nodeDriver, opt.Inputs, pw, &so)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
@ -584,7 +572,7 @@ func toSolveOpt(ctx context.Context, di DriverInfo, multiDriver bool, opt Option
|
||||||
so.FrontendAttrs["label:"+k] = v
|
so.FrontendAttrs["label:"+k] = v
|
||||||
}
|
}
|
||||||
|
|
||||||
for k, v := range di.ProxyConfig {
|
for k, v := range node.ProxyConfig {
|
||||||
if _, ok := opt.BuildArgs[k]; !ok {
|
if _, ok := opt.BuildArgs[k]; !ok {
|
||||||
so.FrontendAttrs["build-arg:"+k] = v
|
so.FrontendAttrs["build-arg:"+k] = v
|
||||||
}
|
}
|
||||||
|
@ -596,8 +584,8 @@ func toSolveOpt(ctx context.Context, di DriverInfo, multiDriver bool, opt Option
|
||||||
for i, p := range opt.Platforms {
|
for i, p := range opt.Platforms {
|
||||||
pp[i] = platforms.Format(p)
|
pp[i] = platforms.Format(p)
|
||||||
}
|
}
|
||||||
if len(pp) > 1 && !d.Features()[driver.MultiPlatform] {
|
if len(pp) > 1 && !nodeDriver.Features()[driver.MultiPlatform] {
|
||||||
return nil, nil, notSupported(d, driver.MultiPlatform)
|
return nil, nil, notSupported(nodeDriver, driver.MultiPlatform)
|
||||||
}
|
}
|
||||||
so.FrontendAttrs["platform"] = strings.Join(pp, ",")
|
so.FrontendAttrs["platform"] = strings.Join(pp, ",")
|
||||||
}
|
}
|
||||||
|
@ -615,7 +603,7 @@ func toSolveOpt(ctx context.Context, di DriverInfo, multiDriver bool, opt Option
|
||||||
}
|
}
|
||||||
|
|
||||||
// setup extrahosts
|
// setup extrahosts
|
||||||
extraHosts, err := toBuildkitExtraHosts(opt.ExtraHosts, d.IsMobyDriver())
|
extraHosts, err := toBuildkitExtraHosts(opt.ExtraHosts, nodeDriver.IsMobyDriver())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
@ -782,24 +770,24 @@ func Invoke(ctx context.Context, cfg ContainerConfig) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, docker DockerAPI, configDir string, w progress.Writer) (resp map[string]*client.SolveResponse, err error) {
|
func Build(ctx context.Context, nodes []builder.Node, opt map[string]Options, docker *dockerutil.Client, configDir string, w progress.Writer) (resp map[string]*client.SolveResponse, err error) {
|
||||||
return BuildWithResultHandler(ctx, drivers, opt, docker, configDir, w, nil, false)
|
return BuildWithResultHandler(ctx, nodes, opt, docker, configDir, w, nil, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
func BuildWithResultHandler(ctx context.Context, drivers []DriverInfo, opt map[string]Options, docker DockerAPI, configDir string, w progress.Writer, resultHandleFunc func(driverIndex int, rCtx *ResultContext), allowNoOutput bool) (resp map[string]*client.SolveResponse, err error) {
|
func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[string]Options, docker *dockerutil.Client, configDir string, w progress.Writer, resultHandleFunc func(driverIndex int, rCtx *ResultContext), allowNoOutput bool) (resp map[string]*client.SolveResponse, err error) {
|
||||||
if len(drivers) == 0 {
|
if len(nodes) == 0 {
|
||||||
return nil, errors.Errorf("driver required for build")
|
return nil, errors.Errorf("driver required for build")
|
||||||
}
|
}
|
||||||
|
|
||||||
drivers, err = filterAvailableDrivers(drivers)
|
nodes, err = filterAvailableNodes(nodes)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrapf(err, "no valid drivers found")
|
return nil, errors.Wrapf(err, "no valid drivers found")
|
||||||
}
|
}
|
||||||
|
|
||||||
var noMobyDriver driver.Driver
|
var noMobyDriver driver.Driver
|
||||||
for _, d := range drivers {
|
for _, n := range nodes {
|
||||||
if !d.Driver.IsMobyDriver() {
|
if !n.Driver.IsMobyDriver() {
|
||||||
noMobyDriver = d.Driver
|
noMobyDriver = n.Driver
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -823,7 +811,7 @@ func BuildWithResultHandler(ctx context.Context, drivers []DriverInfo, opt map[s
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
m, clients, err := resolveDrivers(ctx, drivers, opt, w)
|
m, clients, err := resolveDrivers(ctx, nodes, opt, w)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -857,14 +845,14 @@ func BuildWithResultHandler(ctx context.Context, drivers []DriverInfo, opt map[s
|
||||||
for k, opt := range opt {
|
for k, opt := range opt {
|
||||||
multiDriver := len(m[k]) > 1
|
multiDriver := len(m[k]) > 1
|
||||||
hasMobyDriver := false
|
hasMobyDriver := false
|
||||||
for i, dp := range m[k] {
|
for i, np := range m[k] {
|
||||||
di := drivers[dp.driverIndex]
|
node := nodes[np.driverIndex]
|
||||||
if di.Driver.IsMobyDriver() {
|
if node.Driver.IsMobyDriver() {
|
||||||
hasMobyDriver = true
|
hasMobyDriver = true
|
||||||
}
|
}
|
||||||
opt.Platforms = dp.platforms
|
opt.Platforms = np.platforms
|
||||||
so, release, err := toSolveOpt(ctx, di, multiDriver, opt, dp.bopts, configDir, w, func(name string) (io.WriteCloser, func(), error) {
|
so, release, err := toSolveOpt(ctx, node, multiDriver, opt, np.bopts, configDir, w, func(name string) (io.WriteCloser, func(), error) {
|
||||||
return newDockerLoader(ctx, docker, name, w)
|
return docker.LoadImage(ctx, name, w)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -992,7 +980,7 @@ func BuildWithResultHandler(ctx context.Context, drivers []DriverInfo, opt map[s
|
||||||
if len(descs) > 0 {
|
if len(descs) > 0 {
|
||||||
var imageopt imagetools.Opt
|
var imageopt imagetools.Opt
|
||||||
for _, dp := range dps {
|
for _, dp := range dps {
|
||||||
imageopt = drivers[dp.driverIndex].ImageOpt
|
imageopt = nodes[dp.driverIndex].ImageOpt
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
names := strings.Split(pushNames, ",")
|
names := strings.Split(pushNames, ",")
|
||||||
|
@ -1191,8 +1179,8 @@ func BuildWithResultHandler(ctx context.Context, drivers []DriverInfo, opt map[s
|
||||||
rr.ExporterResponse[k] = string(v)
|
rr.ExporterResponse[k] = string(v)
|
||||||
}
|
}
|
||||||
|
|
||||||
d := drivers[dp.driverIndex].Driver
|
node := nodes[dp.driverIndex].Driver
|
||||||
if d.IsMobyDriver() {
|
if node.IsMobyDriver() {
|
||||||
for _, e := range so.Exports {
|
for _, e := range so.Exports {
|
||||||
if e.Type == "moby" && e.Attrs["push"] != "" {
|
if e.Type == "moby" && e.Attrs["push"] != "" {
|
||||||
if ok, _ := strconv.ParseBool(e.Attrs["push"]); ok {
|
if ok, _ := strconv.ParseBool(e.Attrs["push"]); ok {
|
||||||
|
@ -1204,12 +1192,12 @@ func BuildWithResultHandler(ctx context.Context, drivers []DriverInfo, opt map[s
|
||||||
pushList := strings.Split(pushNames, ",")
|
pushList := strings.Split(pushNames, ",")
|
||||||
for _, name := range pushList {
|
for _, name := range pushList {
|
||||||
if err := progress.Wrap(fmt.Sprintf("pushing %s with docker", name), pw.Write, func(l progress.SubLogger) error {
|
if err := progress.Wrap(fmt.Sprintf("pushing %s with docker", name), pw.Write, func(l progress.SubLogger) error {
|
||||||
return pushWithMoby(ctx, d, name, l)
|
return pushWithMoby(ctx, node, name, l)
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
remoteDigest, err := remoteDigestWithMoby(ctx, d, pushList[0])
|
remoteDigest, err := remoteDigestWithMoby(ctx, node, pushList[0])
|
||||||
if err == nil && remoteDigest != "" {
|
if err == nil && remoteDigest != "" {
|
||||||
// old daemons might not have containerimage.config.digest set
|
// old daemons might not have containerimage.config.digest set
|
||||||
// in response so use containerimage.digest value for it if available
|
// in response so use containerimage.digest value for it if available
|
||||||
|
@ -1631,40 +1619,6 @@ func notSupported(d driver.Driver, f driver.Feature) error {
|
||||||
|
|
||||||
type dockerLoadCallback func(name string) (io.WriteCloser, func(), error)
|
type dockerLoadCallback func(name string) (io.WriteCloser, func(), error)
|
||||||
|
|
||||||
func newDockerLoader(ctx context.Context, d DockerAPI, name string, status progress.Writer) (io.WriteCloser, func(), error) {
|
|
||||||
c, err := d.DockerAPI(name)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
pr, pw := io.Pipe()
|
|
||||||
done := make(chan struct{})
|
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(ctx)
|
|
||||||
var w *waitingWriter
|
|
||||||
w = &waitingWriter{
|
|
||||||
PipeWriter: pw,
|
|
||||||
f: func() {
|
|
||||||
resp, err := c.ImageLoad(ctx, pr, false)
|
|
||||||
defer close(done)
|
|
||||||
if err != nil {
|
|
||||||
pr.CloseWithError(err)
|
|
||||||
w.mu.Lock()
|
|
||||||
w.err = err
|
|
||||||
w.mu.Unlock()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
prog := progress.WithPrefix(status, "", false)
|
|
||||||
progress.FromReader(prog, "importing to docker", resp.Body)
|
|
||||||
},
|
|
||||||
done: done,
|
|
||||||
cancel: cancel,
|
|
||||||
}
|
|
||||||
return w, func() {
|
|
||||||
pr.Close()
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func noDefaultLoad() bool {
|
func noDefaultLoad() bool {
|
||||||
v, ok := os.LookupEnv("BUILDX_NO_DEFAULT_LOAD")
|
v, ok := os.LookupEnv("BUILDX_NO_DEFAULT_LOAD")
|
||||||
if !ok {
|
if !ok {
|
||||||
|
@ -1677,34 +1631,6 @@ func noDefaultLoad() bool {
|
||||||
return b
|
return b
|
||||||
}
|
}
|
||||||
|
|
||||||
type waitingWriter struct {
|
|
||||||
*io.PipeWriter
|
|
||||||
f func()
|
|
||||||
once sync.Once
|
|
||||||
mu sync.Mutex
|
|
||||||
err error
|
|
||||||
done chan struct{}
|
|
||||||
cancel func()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *waitingWriter) Write(dt []byte) (int, error) {
|
|
||||||
w.once.Do(func() {
|
|
||||||
go w.f()
|
|
||||||
})
|
|
||||||
return w.PipeWriter.Write(dt)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *waitingWriter) Close() error {
|
|
||||||
err := w.PipeWriter.Close()
|
|
||||||
<-w.done
|
|
||||||
if err == nil {
|
|
||||||
w.mu.Lock()
|
|
||||||
defer w.mu.Unlock()
|
|
||||||
return w.err
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// handle https://github.com/moby/moby/pull/10858
|
// handle https://github.com/moby/moby/pull/10858
|
||||||
func handleLowercaseDockerfile(dir, p string) string {
|
func handleLowercaseDockerfile(dir, p string) string {
|
||||||
if filepath.Base(p) != "Dockerfile" {
|
if filepath.Base(p) != "Dockerfile" {
|
||||||
|
|
|
@ -0,0 +1,295 @@
|
||||||
|
package builder
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"os"
|
||||||
|
"sort"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/docker/buildx/driver"
|
||||||
|
"github.com/docker/buildx/store"
|
||||||
|
"github.com/docker/buildx/store/storeutil"
|
||||||
|
"github.com/docker/buildx/util/dockerutil"
|
||||||
|
"github.com/docker/buildx/util/imagetools"
|
||||||
|
"github.com/docker/buildx/util/progress"
|
||||||
|
"github.com/docker/cli/cli/command"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"golang.org/x/sync/errgroup"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Builder represents an active builder object
|
||||||
|
type Builder struct {
|
||||||
|
*store.NodeGroup
|
||||||
|
driverFactory driverFactory
|
||||||
|
nodes []Node
|
||||||
|
opts builderOpts
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
type builderOpts struct {
|
||||||
|
dockerCli command.Cli
|
||||||
|
name string
|
||||||
|
txn *store.Txn
|
||||||
|
contextPathHash string
|
||||||
|
validate bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Option provides a variadic option for configuring the builder.
|
||||||
|
type Option func(b *Builder)
|
||||||
|
|
||||||
|
// WithName sets builder name.
|
||||||
|
func WithName(name string) Option {
|
||||||
|
return func(b *Builder) {
|
||||||
|
b.opts.name = name
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithStore sets a store instance used at init.
|
||||||
|
func WithStore(txn *store.Txn) Option {
|
||||||
|
return func(b *Builder) {
|
||||||
|
b.opts.txn = txn
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithContextPathHash is used for determining pods in k8s driver instance.
|
||||||
|
func WithContextPathHash(contextPathHash string) Option {
|
||||||
|
return func(b *Builder) {
|
||||||
|
b.opts.contextPathHash = contextPathHash
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithSkippedValidation skips builder context validation.
|
||||||
|
func WithSkippedValidation() Option {
|
||||||
|
return func(b *Builder) {
|
||||||
|
b.opts.validate = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// New initializes a new builder client
|
||||||
|
func New(dockerCli command.Cli, opts ...Option) (_ *Builder, err error) {
|
||||||
|
b := &Builder{
|
||||||
|
opts: builderOpts{
|
||||||
|
dockerCli: dockerCli,
|
||||||
|
validate: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
if b.opts.txn == nil {
|
||||||
|
// if store instance is nil we create a short-lived one using the
|
||||||
|
// default store and ensure we release it on completion
|
||||||
|
var release func()
|
||||||
|
b.opts.txn, release, err = storeutil.GetStore(dockerCli)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer release()
|
||||||
|
}
|
||||||
|
|
||||||
|
if b.opts.name != "" {
|
||||||
|
if b.NodeGroup, err = storeutil.GetNodeGroup(b.opts.txn, dockerCli, b.opts.name); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if b.NodeGroup, err = storeutil.GetCurrentInstance(b.opts.txn, dockerCli); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if b.NodeGroup.Name == "default" && len(b.NodeGroup.Nodes) == 1 {
|
||||||
|
b.NodeGroup.Name = b.NodeGroup.Nodes[0].Endpoint
|
||||||
|
}
|
||||||
|
if b.opts.validate {
|
||||||
|
if err = b.Validate(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return b, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate validates builder context
|
||||||
|
func (b *Builder) Validate() error {
|
||||||
|
if b.NodeGroup.Name == "default" && b.NodeGroup.Name != b.opts.dockerCli.CurrentContext() {
|
||||||
|
return errors.Errorf("use `docker --context=default buildx` to switch to default context")
|
||||||
|
}
|
||||||
|
list, err := b.opts.dockerCli.ContextStore().List()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, l := range list {
|
||||||
|
if l.Name == b.NodeGroup.Name && b.NodeGroup.Name != "default" {
|
||||||
|
return errors.Errorf("use `docker --context=%s buildx` to switch to context %q", b.NodeGroup.Name, b.NodeGroup.Name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContextName returns builder context name if available.
|
||||||
|
func (b *Builder) ContextName() string {
|
||||||
|
ctxbuilders, err := b.opts.dockerCli.ContextStore().List()
|
||||||
|
if err != nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
for _, cb := range ctxbuilders {
|
||||||
|
if b.NodeGroup.Driver == "docker" && len(b.NodeGroup.Nodes) == 1 && b.NodeGroup.Nodes[0].Endpoint == cb.Name {
|
||||||
|
return cb.Name
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// ImageOpt returns registry auth configuration
|
||||||
|
func (b *Builder) ImageOpt() (imagetools.Opt, error) {
|
||||||
|
return storeutil.GetImageConfig(b.opts.dockerCli, b.NodeGroup)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Boot bootstrap a builder
|
||||||
|
func (b *Builder) Boot(ctx context.Context) (bool, error) {
|
||||||
|
toBoot := make([]int, 0, len(b.nodes))
|
||||||
|
for idx, d := range b.nodes {
|
||||||
|
if d.Err != nil || d.Driver == nil || d.DriverInfo == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if d.DriverInfo.Status != driver.Running {
|
||||||
|
toBoot = append(toBoot, idx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(toBoot) == 0 {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
printer, err := progress.NewPrinter(context.TODO(), os.Stderr, os.Stderr, progress.PrinterModeAuto)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
baseCtx := ctx
|
||||||
|
eg, _ := errgroup.WithContext(ctx)
|
||||||
|
for _, idx := range toBoot {
|
||||||
|
func(idx int) {
|
||||||
|
eg.Go(func() error {
|
||||||
|
pw := progress.WithPrefix(printer, b.NodeGroup.Nodes[idx].Name, len(toBoot) > 1)
|
||||||
|
_, err := driver.Boot(ctx, baseCtx, b.nodes[idx].Driver, pw)
|
||||||
|
if err != nil {
|
||||||
|
b.nodes[idx].Err = err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}(idx)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = eg.Wait()
|
||||||
|
err1 := printer.Wait()
|
||||||
|
if err == nil {
|
||||||
|
err = err1
|
||||||
|
}
|
||||||
|
|
||||||
|
return true, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Inactive checks if all nodes are inactive for this builder.
|
||||||
|
func (b *Builder) Inactive() bool {
|
||||||
|
for _, d := range b.nodes {
|
||||||
|
if d.DriverInfo != nil && d.DriverInfo.Status == driver.Running {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Err returns error if any.
|
||||||
|
func (b *Builder) Err() error {
|
||||||
|
return b.err
|
||||||
|
}
|
||||||
|
|
||||||
|
type driverFactory struct {
|
||||||
|
driver.Factory
|
||||||
|
once sync.Once
|
||||||
|
}
|
||||||
|
|
||||||
|
// Factory returns the driver factory.
|
||||||
|
func (b *Builder) Factory(ctx context.Context) (_ driver.Factory, err error) {
|
||||||
|
b.driverFactory.once.Do(func() {
|
||||||
|
if b.Driver != "" {
|
||||||
|
b.driverFactory.Factory, err = driver.GetFactory(b.Driver, true)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// empty driver means nodegroup was implicitly created as a default
|
||||||
|
// driver for a docker context and allows falling back to a
|
||||||
|
// docker-container driver for older daemon that doesn't support
|
||||||
|
// buildkit (< 18.06).
|
||||||
|
ep := b.nodes[0].Endpoint
|
||||||
|
var dockerapi *dockerutil.ClientAPI
|
||||||
|
dockerapi, err = dockerutil.NewClientAPI(b.opts.dockerCli, b.nodes[0].Endpoint)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// check if endpoint is healthy is needed to determine the driver type.
|
||||||
|
// if this fails then can't continue with driver selection.
|
||||||
|
if _, err = dockerapi.Ping(ctx); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
b.driverFactory.Factory, err = driver.GetDefaultFactory(ctx, ep, dockerapi, false)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
b.Driver = b.driverFactory.Factory.Name()
|
||||||
|
}
|
||||||
|
})
|
||||||
|
return b.driverFactory.Factory, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBuilders returns all builders
|
||||||
|
func GetBuilders(dockerCli command.Cli, txn *store.Txn) ([]*Builder, error) {
|
||||||
|
storeng, err := txn.List()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
builders := make([]*Builder, len(storeng))
|
||||||
|
seen := make(map[string]struct{})
|
||||||
|
for i, ng := range storeng {
|
||||||
|
b, err := New(dockerCli,
|
||||||
|
WithName(ng.Name),
|
||||||
|
WithStore(txn),
|
||||||
|
WithSkippedValidation(),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
builders[i] = b
|
||||||
|
seen[b.NodeGroup.Name] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
contexts, err := dockerCli.ContextStore().List()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
sort.Slice(contexts, func(i, j int) bool {
|
||||||
|
return contexts[i].Name < contexts[j].Name
|
||||||
|
})
|
||||||
|
|
||||||
|
for _, c := range contexts {
|
||||||
|
// if a context has the same name as an instance from the store, do not
|
||||||
|
// add it to the builders list. An instance from the store takes
|
||||||
|
// precedence over context builders.
|
||||||
|
if _, ok := seen[c.Name]; ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
b, err := New(dockerCli,
|
||||||
|
WithName(c.Name),
|
||||||
|
WithStore(txn),
|
||||||
|
WithSkippedValidation(),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
builders = append(builders, b)
|
||||||
|
}
|
||||||
|
|
||||||
|
return builders, nil
|
||||||
|
}
|
|
@ -0,0 +1,201 @@
|
||||||
|
package builder
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"github.com/docker/buildx/driver"
|
||||||
|
ctxkube "github.com/docker/buildx/driver/kubernetes/context"
|
||||||
|
"github.com/docker/buildx/store"
|
||||||
|
"github.com/docker/buildx/store/storeutil"
|
||||||
|
"github.com/docker/buildx/util/dockerutil"
|
||||||
|
"github.com/docker/buildx/util/imagetools"
|
||||||
|
"github.com/docker/buildx/util/platformutil"
|
||||||
|
"github.com/moby/buildkit/util/grpcerrors"
|
||||||
|
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"golang.org/x/sync/errgroup"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Node struct {
|
||||||
|
store.Node
|
||||||
|
Driver driver.Driver
|
||||||
|
DriverInfo *driver.Info
|
||||||
|
Platforms []ocispecs.Platform
|
||||||
|
ImageOpt imagetools.Opt
|
||||||
|
ProxyConfig map[string]string
|
||||||
|
Version string
|
||||||
|
Err error
|
||||||
|
}
|
||||||
|
|
||||||
|
// Nodes returns nodes for this builder.
|
||||||
|
func (b *Builder) Nodes() []Node {
|
||||||
|
return b.nodes
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadNodes loads and returns nodes for this builder.
|
||||||
|
// TODO: this should be a method on a Node object and lazy load data for each driver.
|
||||||
|
func (b *Builder) LoadNodes(ctx context.Context, withData bool) (_ []Node, err error) {
|
||||||
|
eg, _ := errgroup.WithContext(ctx)
|
||||||
|
b.nodes = make([]Node, len(b.NodeGroup.Nodes))
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if b.err == nil && err != nil {
|
||||||
|
b.err = err
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
factory, err := b.Factory(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
imageopt, err := b.ImageOpt()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, n := range b.NodeGroup.Nodes {
|
||||||
|
func(i int, n store.Node) {
|
||||||
|
eg.Go(func() error {
|
||||||
|
node := Node{
|
||||||
|
Node: n,
|
||||||
|
ProxyConfig: storeutil.GetProxyConfig(b.opts.dockerCli),
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
b.nodes[i] = node
|
||||||
|
}()
|
||||||
|
|
||||||
|
dockerapi, err := dockerutil.NewClientAPI(b.opts.dockerCli, n.Endpoint)
|
||||||
|
if err != nil {
|
||||||
|
node.Err = err
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
contextStore := b.opts.dockerCli.ContextStore()
|
||||||
|
|
||||||
|
var kcc driver.KubeClientConfig
|
||||||
|
kcc, err = ctxkube.ConfigFromContext(n.Endpoint, contextStore)
|
||||||
|
if err != nil {
|
||||||
|
// err is returned if n.Endpoint is non-context name like "unix:///var/run/docker.sock".
|
||||||
|
// try again with name="default".
|
||||||
|
// FIXME(@AkihiroSuda): n should retain real context name.
|
||||||
|
kcc, err = ctxkube.ConfigFromContext("default", contextStore)
|
||||||
|
if err != nil {
|
||||||
|
logrus.Error(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
tryToUseKubeConfigInCluster := false
|
||||||
|
if kcc == nil {
|
||||||
|
tryToUseKubeConfigInCluster = true
|
||||||
|
} else {
|
||||||
|
if _, err := kcc.ClientConfig(); err != nil {
|
||||||
|
tryToUseKubeConfigInCluster = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if tryToUseKubeConfigInCluster {
|
||||||
|
kccInCluster := driver.KubeClientConfigInCluster{}
|
||||||
|
if _, err := kccInCluster.ClientConfig(); err == nil {
|
||||||
|
logrus.Debug("using kube config in cluster")
|
||||||
|
kcc = kccInCluster
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
d, err := driver.GetDriver(ctx, "buildx_buildkit_"+n.Name, factory, n.Endpoint, dockerapi, imageopt.Auth, kcc, n.Flags, n.Files, n.DriverOpts, n.Platforms, b.opts.contextPathHash)
|
||||||
|
if err != nil {
|
||||||
|
node.Err = err
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
node.Driver = d
|
||||||
|
node.ImageOpt = imageopt
|
||||||
|
|
||||||
|
if withData {
|
||||||
|
if err := node.loadData(ctx); err != nil {
|
||||||
|
node.Err = err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}(i, n)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := eg.Wait(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: This should be done in the routine loading driver data
|
||||||
|
if withData {
|
||||||
|
kubernetesDriverCount := 0
|
||||||
|
for _, d := range b.nodes {
|
||||||
|
if d.DriverInfo != nil && len(d.DriverInfo.DynamicNodes) > 0 {
|
||||||
|
kubernetesDriverCount++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
isAllKubernetesDrivers := len(b.nodes) == kubernetesDriverCount
|
||||||
|
if isAllKubernetesDrivers {
|
||||||
|
var nodes []Node
|
||||||
|
var dynamicNodes []store.Node
|
||||||
|
for _, di := range b.nodes {
|
||||||
|
// dynamic nodes are used in Kubernetes driver.
|
||||||
|
// Kubernetes' pods are dynamically mapped to BuildKit Nodes.
|
||||||
|
if di.DriverInfo != nil && len(di.DriverInfo.DynamicNodes) > 0 {
|
||||||
|
for i := 0; i < len(di.DriverInfo.DynamicNodes); i++ {
|
||||||
|
diClone := di
|
||||||
|
if pl := di.DriverInfo.DynamicNodes[i].Platforms; len(pl) > 0 {
|
||||||
|
diClone.Platforms = pl
|
||||||
|
}
|
||||||
|
nodes = append(nodes, di)
|
||||||
|
}
|
||||||
|
dynamicNodes = append(dynamicNodes, di.DriverInfo.DynamicNodes...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// not append (remove the static nodes in the store)
|
||||||
|
b.NodeGroup.Nodes = dynamicNodes
|
||||||
|
b.nodes = nodes
|
||||||
|
b.NodeGroup.Dynamic = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return b.nodes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *Node) loadData(ctx context.Context) error {
|
||||||
|
if n.Driver == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
info, err := n.Driver.Info(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
n.DriverInfo = info
|
||||||
|
if n.DriverInfo.Status == driver.Running {
|
||||||
|
driverClient, err := n.Driver.Client(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
workers, err := driverClient.ListWorkers(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "listing workers")
|
||||||
|
}
|
||||||
|
for _, w := range workers {
|
||||||
|
n.Platforms = append(n.Platforms, w.Platforms...)
|
||||||
|
}
|
||||||
|
n.Platforms = platformutil.Dedupe(n.Platforms)
|
||||||
|
inf, err := driverClient.Info(ctx)
|
||||||
|
if err != nil {
|
||||||
|
if st, ok := grpcerrors.AsGRPCStatus(err); ok && st.Code() == codes.Unimplemented {
|
||||||
|
n.Version, err = n.Driver.Version(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "getting version")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
n.Version = inf.BuildkitVersion.Version
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
|
@ -9,7 +9,9 @@ import (
|
||||||
"github.com/containerd/containerd/platforms"
|
"github.com/containerd/containerd/platforms"
|
||||||
"github.com/docker/buildx/bake"
|
"github.com/docker/buildx/bake"
|
||||||
"github.com/docker/buildx/build"
|
"github.com/docker/buildx/build"
|
||||||
|
"github.com/docker/buildx/builder"
|
||||||
"github.com/docker/buildx/util/confutil"
|
"github.com/docker/buildx/util/confutil"
|
||||||
|
"github.com/docker/buildx/util/dockerutil"
|
||||||
"github.com/docker/buildx/util/progress"
|
"github.com/docker/buildx/util/progress"
|
||||||
"github.com/docker/buildx/util/tracing"
|
"github.com/docker/buildx/util/tracing"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
|
@ -89,20 +91,27 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
var dis []build.DriverInfo
|
var nodes []builder.Node
|
||||||
var files []bake.File
|
var files []bake.File
|
||||||
var inp *bake.Input
|
var inp *bake.Input
|
||||||
|
|
||||||
// instance only needed for reading remote bake files or building
|
// instance only needed for reading remote bake files or building
|
||||||
if url != "" || !in.printOnly {
|
if url != "" || !in.printOnly {
|
||||||
dis, err = getInstanceOrDefault(ctx, dockerCli, in.builder, contextPathHash)
|
b, err := builder.New(dockerCli,
|
||||||
|
builder.WithName(in.builder),
|
||||||
|
builder.WithContextPathHash(contextPathHash),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
nodes, err = b.LoadNodes(ctx, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if url != "" {
|
if url != "" {
|
||||||
files, inp, err = bake.ReadRemoteFiles(ctx, dis, url, in.files, printer)
|
files, inp, err = bake.ReadRemoteFiles(ctx, nodes, url, in.files, printer)
|
||||||
} else {
|
} else {
|
||||||
files, err = bake.ReadLocalFiles(in.files)
|
files, err = bake.ReadLocalFiles(in.files)
|
||||||
}
|
}
|
||||||
|
@ -146,7 +155,7 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
resp, err := build.Build(ctx, dis, bo, dockerAPI(dockerCli), confutil.ConfigDir(dockerCli), printer)
|
resp, err := build.Build(ctx, nodes, bo, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), printer)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return wrapBuildError(err, true)
|
return wrapBuildError(err, true)
|
||||||
}
|
}
|
||||||
|
|
|
@ -16,9 +16,11 @@ import (
|
||||||
|
|
||||||
"github.com/containerd/console"
|
"github.com/containerd/console"
|
||||||
"github.com/docker/buildx/build"
|
"github.com/docker/buildx/build"
|
||||||
|
"github.com/docker/buildx/builder"
|
||||||
"github.com/docker/buildx/monitor"
|
"github.com/docker/buildx/monitor"
|
||||||
"github.com/docker/buildx/util/buildflags"
|
"github.com/docker/buildx/util/buildflags"
|
||||||
"github.com/docker/buildx/util/confutil"
|
"github.com/docker/buildx/util/confutil"
|
||||||
|
"github.com/docker/buildx/util/dockerutil"
|
||||||
"github.com/docker/buildx/util/platformutil"
|
"github.com/docker/buildx/util/platformutil"
|
||||||
"github.com/docker/buildx/util/progress"
|
"github.com/docker/buildx/util/progress"
|
||||||
"github.com/docker/buildx/util/tracing"
|
"github.com/docker/buildx/util/tracing"
|
||||||
|
@ -237,7 +239,19 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
|
||||||
contextPathHash = in.contextPath
|
contextPathHash = in.contextPath
|
||||||
}
|
}
|
||||||
|
|
||||||
imageID, res, err := buildTargets(ctx, dockerCli, map[string]build.Options{defaultTargetName: opts}, in.progress, contextPathHash, in.builder, in.metadataFile, in.invoke != "")
|
b, err := builder.New(dockerCli,
|
||||||
|
builder.WithName(in.builder),
|
||||||
|
builder.WithContextPathHash(contextPathHash),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
nodes, err := b.LoadNodes(ctx, false)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
imageID, res, err := buildTargets(ctx, dockerCli, nodes, map[string]build.Options{defaultTargetName: opts}, in.progress, in.metadataFile, in.invoke != "")
|
||||||
err = wrapBuildError(err, false)
|
err = wrapBuildError(err, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -254,7 +268,7 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
|
||||||
return errors.Errorf("failed to configure terminal: %v", err)
|
return errors.Errorf("failed to configure terminal: %v", err)
|
||||||
}
|
}
|
||||||
err = monitor.RunMonitor(ctx, cfg, func(ctx context.Context) (*build.ResultContext, error) {
|
err = monitor.RunMonitor(ctx, cfg, func(ctx context.Context) (*build.ResultContext, error) {
|
||||||
_, rr, err := buildTargets(ctx, dockerCli, map[string]build.Options{defaultTargetName: opts}, in.progress, contextPathHash, in.builder, in.metadataFile, true)
|
_, rr, err := buildTargets(ctx, dockerCli, nodes, map[string]build.Options{defaultTargetName: opts}, in.progress, in.metadataFile, true)
|
||||||
return rr, err
|
return rr, err
|
||||||
}, io.NopCloser(os.Stdin), nopCloser{os.Stdout}, nopCloser{os.Stderr})
|
}, io.NopCloser(os.Stdin), nopCloser{os.Stdout}, nopCloser{os.Stderr})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -275,12 +289,7 @@ type nopCloser struct {
|
||||||
|
|
||||||
func (c nopCloser) Close() error { return nil }
|
func (c nopCloser) Close() error { return nil }
|
||||||
|
|
||||||
func buildTargets(ctx context.Context, dockerCli command.Cli, opts map[string]build.Options, progressMode, contextPathHash, instance string, metadataFile string, allowNoOutput bool) (imageID string, res *build.ResultContext, err error) {
|
func buildTargets(ctx context.Context, dockerCli command.Cli, nodes []builder.Node, opts map[string]build.Options, progressMode string, metadataFile string, allowNoOutput bool) (imageID string, res *build.ResultContext, err error) {
|
||||||
dis, err := getInstanceOrDefault(ctx, dockerCli, instance, contextPathHash)
|
|
||||||
if err != nil {
|
|
||||||
return "", nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx2, cancel := context.WithCancel(context.TODO())
|
ctx2, cancel := context.WithCancel(context.TODO())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
|
@ -291,7 +300,7 @@ func buildTargets(ctx context.Context, dockerCli command.Cli, opts map[string]bu
|
||||||
|
|
||||||
var mu sync.Mutex
|
var mu sync.Mutex
|
||||||
var idx int
|
var idx int
|
||||||
resp, err := build.BuildWithResultHandler(ctx, dis, opts, dockerAPI(dockerCli), confutil.ConfigDir(dockerCli), printer, func(driverIndex int, gotRes *build.ResultContext) {
|
resp, err := build.BuildWithResultHandler(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), printer, func(driverIndex int, gotRes *build.ResultContext) {
|
||||||
mu.Lock()
|
mu.Lock()
|
||||||
defer mu.Unlock()
|
defer mu.Unlock()
|
||||||
if res == nil || driverIndex < idx {
|
if res == nil || driverIndex < idx {
|
||||||
|
|
|
@ -10,13 +10,17 @@ import (
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/docker/buildx/builder"
|
||||||
"github.com/docker/buildx/driver"
|
"github.com/docker/buildx/driver"
|
||||||
|
remoteutil "github.com/docker/buildx/driver/remote/util"
|
||||||
"github.com/docker/buildx/store"
|
"github.com/docker/buildx/store"
|
||||||
"github.com/docker/buildx/store/storeutil"
|
"github.com/docker/buildx/store/storeutil"
|
||||||
"github.com/docker/buildx/util/cobrautil"
|
"github.com/docker/buildx/util/cobrautil"
|
||||||
"github.com/docker/buildx/util/confutil"
|
"github.com/docker/buildx/util/confutil"
|
||||||
|
"github.com/docker/buildx/util/dockerutil"
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
|
dopts "github.com/docker/cli/opts"
|
||||||
"github.com/google/shlex"
|
"github.com/google/shlex"
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
"github.com/moby/buildkit/util/appcontext"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
@ -204,7 +208,7 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
||||||
if dockerCli.CurrentContext() == "default" && dockerCli.DockerEndpoint().TLSData != nil {
|
if dockerCli.CurrentContext() == "default" && dockerCli.DockerEndpoint().TLSData != nil {
|
||||||
return errors.Errorf("could not create a builder instance with TLS data loaded from environment. Please use `docker context create <context-name>` to create a context for current environment and then create a builder instance with `docker buildx create <context-name>`")
|
return errors.Errorf("could not create a builder instance with TLS data loaded from environment. Please use `docker context create <context-name>` to create a context for current environment and then create a builder instance with `docker buildx create <context-name>`")
|
||||||
}
|
}
|
||||||
ep, err = storeutil.GetCurrentEndpoint(dockerCli)
|
ep, err = dockerutil.GetCurrentEndpoint(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -234,17 +238,26 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
ngi := &nginfo{ng: ng}
|
b, err := builder.New(dockerCli,
|
||||||
|
builder.WithName(ng.Name),
|
||||||
|
builder.WithStore(txn),
|
||||||
|
builder.WithSkippedValidation(),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
|
timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
if err = loadNodeGroupData(timeoutCtx, dockerCli, ngi); err != nil {
|
nodes, err := b.LoadNodes(timeoutCtx, true)
|
||||||
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
for _, info := range ngi.drivers {
|
|
||||||
if err := info.di.Err; err != nil {
|
for _, node := range nodes {
|
||||||
err := errors.Errorf("failed to initialize builder %s (%s): %s", ng.Name, info.di.Name, err)
|
if err := node.Err; err != nil {
|
||||||
|
err := errors.Errorf("failed to initialize builder %s (%s): %s", ng.Name, node.Name, err)
|
||||||
var err2 error
|
var err2 error
|
||||||
if ngOriginal == nil {
|
if ngOriginal == nil {
|
||||||
err2 = txn.Remove(ng.Name)
|
err2 = txn.Remove(ng.Name)
|
||||||
|
@ -259,7 +272,7 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
if in.use && ep != "" {
|
if in.use && ep != "" {
|
||||||
current, err := storeutil.GetCurrentEndpoint(dockerCli)
|
current, err := dockerutil.GetCurrentEndpoint(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -269,7 +282,7 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
if in.bootstrap {
|
if in.bootstrap {
|
||||||
if _, err = boot(ctx, ngi); err != nil {
|
if _, err = b.Boot(ctx); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -340,3 +353,27 @@ func csvToMap(in []string) (map[string]string, error) {
|
||||||
}
|
}
|
||||||
return m, nil
|
return m, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// validateEndpoint validates that endpoint is either a context or a docker host
|
||||||
|
func validateEndpoint(dockerCli command.Cli, ep string) (string, error) {
|
||||||
|
dem, err := dockerutil.GetDockerEndpoint(dockerCli, ep)
|
||||||
|
if err == nil && dem != nil {
|
||||||
|
if ep == "default" {
|
||||||
|
return dem.Host, nil
|
||||||
|
}
|
||||||
|
return ep, nil
|
||||||
|
}
|
||||||
|
h, err := dopts.ParseHost(true, ep)
|
||||||
|
if err != nil {
|
||||||
|
return "", errors.Wrapf(err, "failed to parse endpoint %s", ep)
|
||||||
|
}
|
||||||
|
return h, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// validateBuildkitEndpoint validates that endpoint is a valid buildkit host
|
||||||
|
func validateBuildkitEndpoint(ep string) (string, error) {
|
||||||
|
if err := remoteutil.IsValidEndpoint(ep); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return ep, nil
|
||||||
|
}
|
||||||
|
|
|
@ -8,7 +8,7 @@ import (
|
||||||
"text/tabwriter"
|
"text/tabwriter"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/docker/buildx/build"
|
"github.com/docker/buildx/builder"
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/docker/cli/opts"
|
"github.com/docker/cli/opts"
|
||||||
|
@ -33,25 +33,29 @@ func runDiskUsage(dockerCli command.Cli, opts duOptions) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
dis, err := getInstanceOrDefault(ctx, dockerCli, opts.builder, "")
|
b, err := builder.New(dockerCli, builder.WithName(opts.builder))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, di := range dis {
|
nodes, err := b.LoadNodes(ctx, false)
|
||||||
if di.Err != nil {
|
if err != nil {
|
||||||
return di.Err
|
return err
|
||||||
|
}
|
||||||
|
for _, node := range nodes {
|
||||||
|
if node.Err != nil {
|
||||||
|
return node.Err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
out := make([][]*client.UsageInfo, len(dis))
|
out := make([][]*client.UsageInfo, len(nodes))
|
||||||
|
|
||||||
eg, ctx := errgroup.WithContext(ctx)
|
eg, ctx := errgroup.WithContext(ctx)
|
||||||
for i, di := range dis {
|
for i, node := range nodes {
|
||||||
func(i int, di build.DriverInfo) {
|
func(i int, node builder.Node) {
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
if di.Driver != nil {
|
if node.Driver != nil {
|
||||||
c, err := di.Driver.Client(ctx)
|
c, err := node.Driver.Client(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -64,7 +68,7 @@ func runDiskUsage(dockerCli command.Cli, opts duOptions) error {
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
}(i, di)
|
}(i, node)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := eg.Wait(); err != nil {
|
if err := eg.Wait(); err != nil {
|
||||||
|
|
|
@ -7,8 +7,7 @@ import (
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/docker/buildx/store"
|
"github.com/docker/buildx/builder"
|
||||||
"github.com/docker/buildx/store/storeutil"
|
|
||||||
"github.com/docker/buildx/util/imagetools"
|
"github.com/docker/buildx/util/imagetools"
|
||||||
"github.com/docker/buildx/util/progress"
|
"github.com/docker/buildx/util/progress"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
|
@ -113,27 +112,11 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
||||||
|
|
||||||
ctx := appcontext.Context()
|
ctx := appcontext.Context()
|
||||||
|
|
||||||
txn, release, err := storeutil.GetStore(dockerCli)
|
b, err := builder.New(dockerCli, builder.WithName(in.builder))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer release()
|
imageopt, err := b.ImageOpt()
|
||||||
|
|
||||||
var ng *store.NodeGroup
|
|
||||||
|
|
||||||
if in.builder != "" {
|
|
||||||
ng, err = storeutil.GetNodeGroup(txn, dockerCli, in.builder)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
ng, err = storeutil.GetCurrentInstance(txn, dockerCli)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
imageopt, err := storeutil.GetImageConfig(dockerCli, ng)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,8 +1,7 @@
|
||||||
package commands
|
package commands
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/docker/buildx/store"
|
"github.com/docker/buildx/builder"
|
||||||
"github.com/docker/buildx/store/storeutil"
|
|
||||||
"github.com/docker/buildx/util/imagetools"
|
"github.com/docker/buildx/util/imagetools"
|
||||||
"github.com/docker/cli-docs-tool/annotation"
|
"github.com/docker/cli-docs-tool/annotation"
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
|
@ -25,27 +24,11 @@ func runInspect(dockerCli command.Cli, in inspectOptions, name string) error {
|
||||||
return errors.Errorf("format and raw cannot be used together")
|
return errors.Errorf("format and raw cannot be used together")
|
||||||
}
|
}
|
||||||
|
|
||||||
txn, release, err := storeutil.GetStore(dockerCli)
|
b, err := builder.New(dockerCli, builder.WithName(in.builder))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer release()
|
imageopt, err := b.ImageOpt()
|
||||||
|
|
||||||
var ng *store.NodeGroup
|
|
||||||
|
|
||||||
if in.builder != "" {
|
|
||||||
ng, err = storeutil.GetNodeGroup(txn, dockerCli, in.builder)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
ng, err = storeutil.GetCurrentInstance(txn, dockerCli)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
imageopt, err := storeutil.GetImageConfig(dockerCli, ng)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
|
@ -8,8 +8,7 @@ import (
|
||||||
"text/tabwriter"
|
"text/tabwriter"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/docker/buildx/store"
|
"github.com/docker/buildx/builder"
|
||||||
"github.com/docker/buildx/store/storeutil"
|
|
||||||
"github.com/docker/buildx/util/platformutil"
|
"github.com/docker/buildx/util/platformutil"
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
|
@ -25,71 +24,43 @@ type inspectOptions struct {
|
||||||
func runInspect(dockerCli command.Cli, in inspectOptions) error {
|
func runInspect(dockerCli command.Cli, in inspectOptions) error {
|
||||||
ctx := appcontext.Context()
|
ctx := appcontext.Context()
|
||||||
|
|
||||||
txn, release, err := storeutil.GetStore(dockerCli)
|
b, err := builder.New(dockerCli,
|
||||||
|
builder.WithName(in.builder),
|
||||||
|
builder.WithSkippedValidation(),
|
||||||
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer release()
|
|
||||||
|
|
||||||
var ng *store.NodeGroup
|
|
||||||
|
|
||||||
if in.builder != "" {
|
|
||||||
ng, err = storeutil.GetNodeGroup(txn, dockerCli, in.builder)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
ng, err = storeutil.GetCurrentInstance(txn, dockerCli)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if ng == nil {
|
|
||||||
ng = &store.NodeGroup{
|
|
||||||
Name: "default",
|
|
||||||
Nodes: []store.Node{{
|
|
||||||
Name: "default",
|
|
||||||
Endpoint: "default",
|
|
||||||
}},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ngi := &nginfo{ng: ng}
|
|
||||||
|
|
||||||
timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
|
timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
err = loadNodeGroupData(timeoutCtx, dockerCli, ngi)
|
nodes, err := b.LoadNodes(timeoutCtx, true)
|
||||||
|
|
||||||
var bootNgi *nginfo
|
|
||||||
if in.bootstrap {
|
if in.bootstrap {
|
||||||
var ok bool
|
var ok bool
|
||||||
ok, err = boot(ctx, ngi)
|
ok, err = b.Boot(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
bootNgi = ngi
|
|
||||||
if ok {
|
if ok {
|
||||||
ngi = &nginfo{ng: ng}
|
nodes, err = b.LoadNodes(timeoutCtx, true)
|
||||||
err = loadNodeGroupData(ctx, dockerCli, ngi)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
|
w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
|
||||||
fmt.Fprintf(w, "Name:\t%s\n", ngi.ng.Name)
|
fmt.Fprintf(w, "Name:\t%s\n", b.Name)
|
||||||
fmt.Fprintf(w, "Driver:\t%s\n", ngi.ng.Driver)
|
fmt.Fprintf(w, "Driver:\t%s\n", b.Driver)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Fprintf(w, "Error:\t%s\n", err.Error())
|
fmt.Fprintf(w, "Error:\t%s\n", err.Error())
|
||||||
} else if ngi.err != nil {
|
} else if b.Err() != nil {
|
||||||
fmt.Fprintf(w, "Error:\t%s\n", ngi.err.Error())
|
fmt.Fprintf(w, "Error:\t%s\n", b.Err().Error())
|
||||||
}
|
}
|
||||||
if err == nil {
|
if err == nil {
|
||||||
fmt.Fprintln(w, "")
|
fmt.Fprintln(w, "")
|
||||||
fmt.Fprintln(w, "Nodes:")
|
fmt.Fprintln(w, "Nodes:")
|
||||||
|
|
||||||
for i, n := range ngi.ng.Nodes {
|
for i, n := range nodes {
|
||||||
if i != 0 {
|
if i != 0 {
|
||||||
fmt.Fprintln(w, "")
|
fmt.Fprintln(w, "")
|
||||||
}
|
}
|
||||||
|
@ -104,21 +75,17 @@ func runInspect(dockerCli command.Cli, in inspectOptions) error {
|
||||||
fmt.Fprintf(w, "Driver Options:\t%s\n", strings.Join(driverOpts, " "))
|
fmt.Fprintf(w, "Driver Options:\t%s\n", strings.Join(driverOpts, " "))
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := ngi.drivers[i].di.Err; err != nil {
|
if err := n.Err; err != nil {
|
||||||
fmt.Fprintf(w, "Error:\t%s\n", err.Error())
|
fmt.Fprintf(w, "Error:\t%s\n", err.Error())
|
||||||
} else if err := ngi.drivers[i].err; err != nil {
|
|
||||||
fmt.Fprintf(w, "Error:\t%s\n", err.Error())
|
|
||||||
} else if bootNgi != nil && len(bootNgi.drivers) > i && bootNgi.drivers[i].err != nil {
|
|
||||||
fmt.Fprintf(w, "Error:\t%s\n", bootNgi.drivers[i].err.Error())
|
|
||||||
} else {
|
} else {
|
||||||
fmt.Fprintf(w, "Status:\t%s\n", ngi.drivers[i].info.Status)
|
fmt.Fprintf(w, "Status:\t%s\n", nodes[i].DriverInfo.Status)
|
||||||
if len(n.Flags) > 0 {
|
if len(n.Flags) > 0 {
|
||||||
fmt.Fprintf(w, "Flags:\t%s\n", strings.Join(n.Flags, " "))
|
fmt.Fprintf(w, "Flags:\t%s\n", strings.Join(n.Flags, " "))
|
||||||
}
|
}
|
||||||
if ngi.drivers[i].version != "" {
|
if nodes[i].Version != "" {
|
||||||
fmt.Fprintf(w, "Buildkit:\t%s\n", ngi.drivers[i].version)
|
fmt.Fprintf(w, "Buildkit:\t%s\n", nodes[i].Version)
|
||||||
}
|
}
|
||||||
fmt.Fprintf(w, "Platforms:\t%s\n", strings.Join(platformutil.FormatInGroups(n.Platforms, ngi.drivers[i].platforms), ", "))
|
fmt.Fprintf(w, "Platforms:\t%s\n", strings.Join(platformutil.FormatInGroups(n.Node.Platforms, n.Platforms), ", "))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
115
commands/ls.go
115
commands/ls.go
|
@ -4,12 +4,11 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"sort"
|
|
||||||
"strings"
|
"strings"
|
||||||
"text/tabwriter"
|
"text/tabwriter"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/docker/buildx/store"
|
"github.com/docker/buildx/builder"
|
||||||
"github.com/docker/buildx/store/storeutil"
|
"github.com/docker/buildx/store/storeutil"
|
||||||
"github.com/docker/buildx/util/cobrautil"
|
"github.com/docker/buildx/util/cobrautil"
|
||||||
"github.com/docker/buildx/util/platformutil"
|
"github.com/docker/buildx/util/platformutil"
|
||||||
|
@ -32,52 +31,24 @@ func runLs(dockerCli command.Cli, in lsOptions) error {
|
||||||
}
|
}
|
||||||
defer release()
|
defer release()
|
||||||
|
|
||||||
ctx, cancel := context.WithTimeout(ctx, 20*time.Second)
|
current, err := storeutil.GetCurrentInstance(txn, dockerCli)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
builders, err := builder.GetBuilders(dockerCli, txn)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
ll, err := txn.List()
|
eg, _ := errgroup.WithContext(timeoutCtx)
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
builders := make([]*nginfo, len(ll))
|
|
||||||
for i, ng := range ll {
|
|
||||||
builders[i] = &nginfo{ng: ng}
|
|
||||||
}
|
|
||||||
|
|
||||||
contexts, err := dockerCli.ContextStore().List()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
sort.Slice(contexts, func(i, j int) bool {
|
|
||||||
return contexts[i].Name < contexts[j].Name
|
|
||||||
})
|
|
||||||
for _, c := range contexts {
|
|
||||||
ngi := &nginfo{ng: &store.NodeGroup{
|
|
||||||
Name: c.Name,
|
|
||||||
Nodes: []store.Node{{
|
|
||||||
Name: c.Name,
|
|
||||||
Endpoint: c.Name,
|
|
||||||
}},
|
|
||||||
}}
|
|
||||||
// if a context has the same name as an instance from the store, do not
|
|
||||||
// add it to the builders list. An instance from the store takes
|
|
||||||
// precedence over context builders.
|
|
||||||
if hasNodeGroup(builders, ngi) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
builders = append(builders, ngi)
|
|
||||||
}
|
|
||||||
|
|
||||||
eg, _ := errgroup.WithContext(ctx)
|
|
||||||
|
|
||||||
for _, b := range builders {
|
for _, b := range builders {
|
||||||
func(b *nginfo) {
|
func(b *builder.Builder) {
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
err = loadNodeGroupData(ctx, dockerCli, b)
|
_, _ = b.LoadNodes(timeoutCtx, true)
|
||||||
if b.err == nil && err != nil {
|
|
||||||
b.err = err
|
|
||||||
}
|
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
}(b)
|
}(b)
|
||||||
|
@ -87,29 +58,15 @@ func runLs(dockerCli command.Cli, in lsOptions) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
currentName := "default"
|
|
||||||
current, err := storeutil.GetCurrentInstance(txn, dockerCli)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if current != nil {
|
|
||||||
currentName = current.Name
|
|
||||||
if current.Name == "default" {
|
|
||||||
currentName = current.Nodes[0].Endpoint
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
w := tabwriter.NewWriter(dockerCli.Out(), 0, 0, 1, ' ', 0)
|
w := tabwriter.NewWriter(dockerCli.Out(), 0, 0, 1, ' ', 0)
|
||||||
fmt.Fprintf(w, "NAME/NODE\tDRIVER/ENDPOINT\tSTATUS\tBUILDKIT\tPLATFORMS\n")
|
fmt.Fprintf(w, "NAME/NODE\tDRIVER/ENDPOINT\tSTATUS\tBUILDKIT\tPLATFORMS\n")
|
||||||
|
|
||||||
currentSet := false
|
|
||||||
printErr := false
|
printErr := false
|
||||||
for _, b := range builders {
|
for _, b := range builders {
|
||||||
if !currentSet && b.ng.Name == currentName {
|
if current.Name == b.Name {
|
||||||
b.ng.Name += " *"
|
b.Name += " *"
|
||||||
currentSet = true
|
|
||||||
}
|
}
|
||||||
if ok := printngi(w, b); !ok {
|
if ok := printBuilder(w, b); !ok {
|
||||||
printErr = true
|
printErr = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -119,19 +76,12 @@ func runLs(dockerCli command.Cli, in lsOptions) error {
|
||||||
if printErr {
|
if printErr {
|
||||||
_, _ = fmt.Fprintf(dockerCli.Err(), "\n")
|
_, _ = fmt.Fprintf(dockerCli.Err(), "\n")
|
||||||
for _, b := range builders {
|
for _, b := range builders {
|
||||||
if b.err != nil {
|
if b.Err() != nil {
|
||||||
_, _ = fmt.Fprintf(dockerCli.Err(), "Cannot load builder %s: %s\n", b.ng.Name, strings.TrimSpace(b.err.Error()))
|
_, _ = fmt.Fprintf(dockerCli.Err(), "Cannot load builder %s: %s\n", b.Name, strings.TrimSpace(b.Err().Error()))
|
||||||
} else {
|
} else {
|
||||||
for idx, n := range b.ng.Nodes {
|
for _, d := range b.Nodes() {
|
||||||
d := b.drivers[idx]
|
if d.Err != nil {
|
||||||
var nodeErr string
|
_, _ = fmt.Fprintf(dockerCli.Err(), "Failed to get status for %s (%s): %s\n", b.Name, d.Name, strings.TrimSpace(d.Err.Error()))
|
||||||
if d.err != nil {
|
|
||||||
nodeErr = d.err.Error()
|
|
||||||
} else if d.di.Err != nil {
|
|
||||||
nodeErr = d.di.Err.Error()
|
|
||||||
}
|
|
||||||
if nodeErr != "" {
|
|
||||||
_, _ = fmt.Fprintf(dockerCli.Err(), "Failed to get status for %s (%s): %s\n", b.ng.Name, n.Name, strings.TrimSpace(nodeErr))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -141,26 +91,25 @@ func runLs(dockerCli command.Cli, in lsOptions) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func printngi(w io.Writer, ngi *nginfo) (ok bool) {
|
func printBuilder(w io.Writer, b *builder.Builder) (ok bool) {
|
||||||
ok = true
|
ok = true
|
||||||
var err string
|
var err string
|
||||||
if ngi.err != nil {
|
if b.Err() != nil {
|
||||||
ok = false
|
ok = false
|
||||||
err = "error"
|
err = "error"
|
||||||
}
|
}
|
||||||
fmt.Fprintf(w, "%s\t%s\t%s\t\t\n", ngi.ng.Name, ngi.ng.Driver, err)
|
fmt.Fprintf(w, "%s\t%s\t%s\t\t\n", b.Name, b.Driver, err)
|
||||||
if ngi.err == nil {
|
if b.Err() == nil {
|
||||||
for idx, n := range ngi.ng.Nodes {
|
for _, n := range b.Nodes() {
|
||||||
d := ngi.drivers[idx]
|
|
||||||
var status string
|
var status string
|
||||||
if d.info != nil {
|
if n.DriverInfo != nil {
|
||||||
status = d.info.Status.String()
|
status = n.DriverInfo.Status.String()
|
||||||
}
|
}
|
||||||
if d.err != nil || d.di.Err != nil {
|
if n.Err != nil {
|
||||||
ok = false
|
ok = false
|
||||||
fmt.Fprintf(w, " %s\t%s\t%s\t\t\n", n.Name, n.Endpoint, "error")
|
fmt.Fprintf(w, " %s\t%s\t%s\t\t\n", n.Name, n.Endpoint, "error")
|
||||||
} else {
|
} else {
|
||||||
fmt.Fprintf(w, " %s\t%s\t%s\t%s\t%s\n", n.Name, n.Endpoint, status, d.version, strings.Join(platformutil.FormatInGroups(n.Platforms, d.platforms), ", "))
|
fmt.Fprintf(w, " %s\t%s\t%s\t%s\t%s\n", n.Name, n.Endpoint, status, n.Version, strings.Join(platformutil.FormatInGroups(n.Node.Platforms, n.Platforms), ", "))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -7,7 +7,7 @@ import (
|
||||||
"text/tabwriter"
|
"text/tabwriter"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/docker/buildx/build"
|
"github.com/docker/buildx/builder"
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/docker/cli/opts"
|
"github.com/docker/cli/opts"
|
||||||
|
@ -54,14 +54,18 @@ func runPrune(dockerCli command.Cli, opts pruneOptions) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
dis, err := getInstanceOrDefault(ctx, dockerCli, opts.builder, "")
|
b, err := builder.New(dockerCli, builder.WithName(opts.builder))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, di := range dis {
|
nodes, err := b.LoadNodes(ctx, false)
|
||||||
if di.Err != nil {
|
if err != nil {
|
||||||
return di.Err
|
return err
|
||||||
|
}
|
||||||
|
for _, node := range nodes {
|
||||||
|
if node.Err != nil {
|
||||||
|
return node.Err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -90,11 +94,11 @@ func runPrune(dockerCli command.Cli, opts pruneOptions) error {
|
||||||
}()
|
}()
|
||||||
|
|
||||||
eg, ctx := errgroup.WithContext(ctx)
|
eg, ctx := errgroup.WithContext(ctx)
|
||||||
for _, di := range dis {
|
for _, node := range nodes {
|
||||||
func(di build.DriverInfo) {
|
func(node builder.Node) {
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
if di.Driver != nil {
|
if node.Driver != nil {
|
||||||
c, err := di.Driver.Client(ctx)
|
c, err := node.Driver.Client(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -109,7 +113,7 @@ func runPrune(dockerCli command.Cli, opts pruneOptions) error {
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
}(di)
|
}(node)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := eg.Wait(); err != nil {
|
if err := eg.Wait(); err != nil {
|
||||||
|
|
|
@ -5,6 +5,7 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/docker/buildx/builder"
|
||||||
"github.com/docker/buildx/store"
|
"github.com/docker/buildx/store"
|
||||||
"github.com/docker/buildx/store/storeutil"
|
"github.com/docker/buildx/store/storeutil"
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
|
@ -44,41 +45,33 @@ func runRm(dockerCli command.Cli, in rmOptions) error {
|
||||||
return rmAllInactive(ctx, txn, dockerCli, in)
|
return rmAllInactive(ctx, txn, dockerCli, in)
|
||||||
}
|
}
|
||||||
|
|
||||||
var ng *store.NodeGroup
|
b, err := builder.New(dockerCli,
|
||||||
if in.builder != "" {
|
builder.WithName(in.builder),
|
||||||
ng, err = storeutil.GetNodeGroup(txn, dockerCli, in.builder)
|
builder.WithStore(txn),
|
||||||
if err != nil {
|
builder.WithSkippedValidation(),
|
||||||
return err
|
)
|
||||||
}
|
|
||||||
} else {
|
|
||||||
ng, err = storeutil.GetCurrentInstance(txn, dockerCli)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if ng == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
ctxbuilders, err := dockerCli.ContextStore().List()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
for _, cb := range ctxbuilders {
|
|
||||||
if ng.Driver == "docker" && len(ng.Nodes) == 1 && ng.Nodes[0].Endpoint == cb.Name {
|
nodes, err := b.LoadNodes(ctx, false)
|
||||||
return errors.Errorf("context builder cannot be removed, run `docker context rm %s` to remove this context", cb.Name)
|
if err != nil {
|
||||||
}
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
err1 := rm(ctx, dockerCli, in, ng)
|
if cb := b.ContextName(); cb != "" {
|
||||||
if err := txn.Remove(ng.Name); err != nil {
|
return errors.Errorf("context builder cannot be removed, run `docker context rm %s` to remove this context", cb)
|
||||||
|
}
|
||||||
|
|
||||||
|
err1 := rm(ctx, nodes, in)
|
||||||
|
if err := txn.Remove(b.Name); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err1 != nil {
|
if err1 != nil {
|
||||||
return err1
|
return err1
|
||||||
}
|
}
|
||||||
|
|
||||||
_, _ = fmt.Fprintf(dockerCli.Err(), "%s removed\n", ng.Name)
|
_, _ = fmt.Fprintf(dockerCli.Err(), "%s removed\n", b.Name)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -110,61 +103,56 @@ func rmCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|
||||||
func rm(ctx context.Context, dockerCli command.Cli, in rmOptions, ng *store.NodeGroup) error {
|
func rm(ctx context.Context, nodes []builder.Node, in rmOptions) (err error) {
|
||||||
dis, err := driversForNodeGroup(ctx, dockerCli, ng, "")
|
for _, node := range nodes {
|
||||||
if err != nil {
|
if node.Driver == nil {
|
||||||
return err
|
|
||||||
}
|
|
||||||
for _, di := range dis {
|
|
||||||
if di.Driver == nil {
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
// Do not stop the buildkitd daemon when --keep-daemon is provided
|
// Do not stop the buildkitd daemon when --keep-daemon is provided
|
||||||
if !in.keepDaemon {
|
if !in.keepDaemon {
|
||||||
if err := di.Driver.Stop(ctx, true); err != nil {
|
if err := node.Driver.Stop(ctx, true); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if err := di.Driver.Rm(ctx, true, !in.keepState, !in.keepDaemon); err != nil {
|
if err := node.Driver.Rm(ctx, true, !in.keepState, !in.keepDaemon); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if di.Err != nil {
|
if node.Err != nil {
|
||||||
err = di.Err
|
err = node.Err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func rmAllInactive(ctx context.Context, txn *store.Txn, dockerCli command.Cli, in rmOptions) error {
|
func rmAllInactive(ctx context.Context, txn *store.Txn, dockerCli command.Cli, in rmOptions) error {
|
||||||
ctx, cancel := context.WithTimeout(ctx, 20*time.Second)
|
builders, err := builder.GetBuilders(dockerCli, txn)
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
ll, err := txn.List()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
builders := make([]*nginfo, len(ll))
|
timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
|
||||||
for i, ng := range ll {
|
defer cancel()
|
||||||
builders[i] = &nginfo{ng: ng}
|
|
||||||
}
|
|
||||||
|
|
||||||
eg, _ := errgroup.WithContext(ctx)
|
eg, _ := errgroup.WithContext(timeoutCtx)
|
||||||
for _, b := range builders {
|
for _, b := range builders {
|
||||||
func(b *nginfo) {
|
func(b *builder.Builder) {
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
if err := loadNodeGroupData(ctx, dockerCli, b); err != nil {
|
nodes, err := b.LoadNodes(timeoutCtx, true)
|
||||||
return errors.Wrapf(err, "cannot load %s", b.ng.Name)
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "cannot load %s", b.Name)
|
||||||
}
|
}
|
||||||
if b.ng.Dynamic {
|
if cb := b.ContextName(); cb != "" {
|
||||||
|
return errors.Errorf("context builder cannot be removed, run `docker context rm %s` to remove this context", cb)
|
||||||
|
}
|
||||||
|
if b.Dynamic {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
if b.inactive() {
|
if b.Inactive() {
|
||||||
rmerr := rm(ctx, dockerCli, in, b.ng)
|
rmerr := rm(ctx, nodes, in)
|
||||||
if err := txn.Remove(b.ng.Name); err != nil {
|
if err := txn.Remove(b.Name); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
_, _ = fmt.Fprintf(dockerCli.Err(), "%s removed\n", b.ng.Name)
|
_, _ = fmt.Fprintf(dockerCli.Err(), "%s removed\n", b.Name)
|
||||||
return rmerr
|
return rmerr
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
|
|
@ -3,8 +3,7 @@ package commands
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
|
||||||
"github.com/docker/buildx/store"
|
"github.com/docker/buildx/builder"
|
||||||
"github.com/docker/buildx/store/storeutil"
|
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
"github.com/moby/buildkit/util/appcontext"
|
||||||
|
@ -18,32 +17,19 @@ type stopOptions struct {
|
||||||
func runStop(dockerCli command.Cli, in stopOptions) error {
|
func runStop(dockerCli command.Cli, in stopOptions) error {
|
||||||
ctx := appcontext.Context()
|
ctx := appcontext.Context()
|
||||||
|
|
||||||
txn, release, err := storeutil.GetStore(dockerCli)
|
b, err := builder.New(dockerCli,
|
||||||
|
builder.WithName(in.builder),
|
||||||
|
builder.WithSkippedValidation(),
|
||||||
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer release()
|
nodes, err := b.LoadNodes(ctx, false)
|
||||||
|
|
||||||
if in.builder != "" {
|
|
||||||
ng, err := storeutil.GetNodeGroup(txn, dockerCli, in.builder)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := stop(ctx, dockerCli, ng); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
ng, err := storeutil.GetCurrentInstance(txn, dockerCli)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if ng != nil {
|
|
||||||
return stop(ctx, dockerCli, ng)
|
|
||||||
}
|
|
||||||
|
|
||||||
return stopCurrent(ctx, dockerCli)
|
return stop(ctx, nodes)
|
||||||
}
|
}
|
||||||
|
|
||||||
func stopCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
func stopCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
||||||
|
@ -65,37 +51,15 @@ func stopCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|
||||||
func stop(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup) error {
|
func stop(ctx context.Context, nodes []builder.Node) (err error) {
|
||||||
dis, err := driversForNodeGroup(ctx, dockerCli, ng, "")
|
for _, node := range nodes {
|
||||||
if err != nil {
|
if node.Driver != nil {
|
||||||
return err
|
if err := node.Driver.Stop(ctx, true); err != nil {
|
||||||
}
|
|
||||||
for _, di := range dis {
|
|
||||||
if di.Driver != nil {
|
|
||||||
if err := di.Driver.Stop(ctx, true); err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if di.Err != nil {
|
if node.Err != nil {
|
||||||
err = di.Err
|
err = node.Err
|
||||||
}
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func stopCurrent(ctx context.Context, dockerCli command.Cli) error {
|
|
||||||
dis, err := getDefaultDrivers(ctx, dockerCli, false, "")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for _, di := range dis {
|
|
||||||
if di.Driver != nil {
|
|
||||||
if err := di.Driver.Stop(ctx, true); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if di.Err != nil {
|
|
||||||
err = di.Err
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
|
|
|
@ -4,6 +4,7 @@ import (
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
"github.com/docker/buildx/store/storeutil"
|
"github.com/docker/buildx/store/storeutil"
|
||||||
|
"github.com/docker/buildx/util/dockerutil"
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
@ -29,7 +30,7 @@ func runUse(dockerCli command.Cli, in useOptions) error {
|
||||||
return errors.Errorf("run `docker context use default` to switch to default context")
|
return errors.Errorf("run `docker context use default` to switch to default context")
|
||||||
}
|
}
|
||||||
if in.builder == "default" || in.builder == dockerCli.CurrentContext() {
|
if in.builder == "default" || in.builder == dockerCli.CurrentContext() {
|
||||||
ep, err := storeutil.GetCurrentEndpoint(dockerCli)
|
ep, err := dockerutil.GetCurrentEndpoint(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -52,7 +53,7 @@ func runUse(dockerCli command.Cli, in useOptions) error {
|
||||||
return errors.Wrapf(err, "failed to find instance %q", in.builder)
|
return errors.Wrapf(err, "failed to find instance %q", in.builder)
|
||||||
}
|
}
|
||||||
|
|
||||||
ep, err := storeutil.GetCurrentEndpoint(dockerCli)
|
ep, err := dockerutil.GetCurrentEndpoint(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
464
commands/util.go
464
commands/util.go
|
@ -1,464 +0,0 @@
|
||||||
package commands
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/docker/buildx/build"
|
|
||||||
"github.com/docker/buildx/driver"
|
|
||||||
ctxkube "github.com/docker/buildx/driver/kubernetes/context"
|
|
||||||
remoteutil "github.com/docker/buildx/driver/remote/util"
|
|
||||||
"github.com/docker/buildx/store"
|
|
||||||
"github.com/docker/buildx/store/storeutil"
|
|
||||||
"github.com/docker/buildx/util/platformutil"
|
|
||||||
"github.com/docker/buildx/util/progress"
|
|
||||||
"github.com/docker/cli/cli/command"
|
|
||||||
"github.com/docker/cli/cli/context/docker"
|
|
||||||
dopts "github.com/docker/cli/opts"
|
|
||||||
dockerclient "github.com/docker/docker/client"
|
|
||||||
"github.com/moby/buildkit/util/grpcerrors"
|
|
||||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
"golang.org/x/sync/errgroup"
|
|
||||||
"google.golang.org/grpc/codes"
|
|
||||||
)
|
|
||||||
|
|
||||||
// validateEndpoint validates that endpoint is either a context or a docker host
|
|
||||||
func validateEndpoint(dockerCli command.Cli, ep string) (string, error) {
|
|
||||||
de, err := storeutil.GetDockerEndpoint(dockerCli, ep)
|
|
||||||
if err == nil && de != "" {
|
|
||||||
if ep == "default" {
|
|
||||||
return de, nil
|
|
||||||
}
|
|
||||||
return ep, nil
|
|
||||||
}
|
|
||||||
h, err := dopts.ParseHost(true, ep)
|
|
||||||
if err != nil {
|
|
||||||
return "", errors.Wrapf(err, "failed to parse endpoint %s", ep)
|
|
||||||
}
|
|
||||||
return h, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// validateBuildkitEndpoint validates that endpoint is a valid buildkit host
|
|
||||||
func validateBuildkitEndpoint(ep string) (string, error) {
|
|
||||||
if err := remoteutil.IsValidEndpoint(ep); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return ep, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// driversForNodeGroup returns drivers for a nodegroup instance
|
|
||||||
func driversForNodeGroup(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, contextPathHash string) ([]build.DriverInfo, error) {
|
|
||||||
eg, _ := errgroup.WithContext(ctx)
|
|
||||||
|
|
||||||
dis := make([]build.DriverInfo, len(ng.Nodes))
|
|
||||||
|
|
||||||
var f driver.Factory
|
|
||||||
if ng.Driver != "" {
|
|
||||||
var err error
|
|
||||||
f, err = driver.GetFactory(ng.Driver, true)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// empty driver means nodegroup was implicitly created as a default
|
|
||||||
// driver for a docker context and allows falling back to a
|
|
||||||
// docker-container driver for older daemon that doesn't support
|
|
||||||
// buildkit (< 18.06).
|
|
||||||
ep := ng.Nodes[0].Endpoint
|
|
||||||
dockerapi, err := clientForEndpoint(dockerCli, ep)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
// check if endpoint is healthy is needed to determine the driver type.
|
|
||||||
// if this fails then can't continue with driver selection.
|
|
||||||
if _, err = dockerapi.Ping(ctx); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
f, err = driver.GetDefaultFactory(ctx, ep, dockerapi, false)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
ng.Driver = f.Name()
|
|
||||||
}
|
|
||||||
imageopt, err := storeutil.GetImageConfig(dockerCli, ng)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, n := range ng.Nodes {
|
|
||||||
func(i int, n store.Node) {
|
|
||||||
eg.Go(func() error {
|
|
||||||
di := build.DriverInfo{
|
|
||||||
Name: n.Name,
|
|
||||||
Platform: n.Platforms,
|
|
||||||
ProxyConfig: storeutil.GetProxyConfig(dockerCli),
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
dis[i] = di
|
|
||||||
}()
|
|
||||||
|
|
||||||
dockerapi, err := clientForEndpoint(dockerCli, n.Endpoint)
|
|
||||||
if err != nil {
|
|
||||||
di.Err = err
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
contextStore := dockerCli.ContextStore()
|
|
||||||
|
|
||||||
var kcc driver.KubeClientConfig
|
|
||||||
kcc, err = ctxkube.ConfigFromEndpoint(n.Endpoint, contextStore)
|
|
||||||
if err != nil {
|
|
||||||
// err is returned if n.Endpoint is non-context name like "unix:///var/run/docker.sock".
|
|
||||||
// try again with name="default".
|
|
||||||
// FIXME: n should retain real context name.
|
|
||||||
kcc, err = ctxkube.ConfigFromEndpoint("default", contextStore)
|
|
||||||
if err != nil {
|
|
||||||
logrus.Error(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
tryToUseKubeConfigInCluster := false
|
|
||||||
if kcc == nil {
|
|
||||||
tryToUseKubeConfigInCluster = true
|
|
||||||
} else {
|
|
||||||
if _, err := kcc.ClientConfig(); err != nil {
|
|
||||||
tryToUseKubeConfigInCluster = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if tryToUseKubeConfigInCluster {
|
|
||||||
kccInCluster := driver.KubeClientConfigInCluster{}
|
|
||||||
if _, err := kccInCluster.ClientConfig(); err == nil {
|
|
||||||
logrus.Debug("using kube config in cluster")
|
|
||||||
kcc = kccInCluster
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
d, err := driver.GetDriver(ctx, "buildx_buildkit_"+n.Name, f, n.Endpoint, dockerapi, imageopt.Auth, kcc, n.Flags, n.Files, n.DriverOpts, n.Platforms, contextPathHash)
|
|
||||||
if err != nil {
|
|
||||||
di.Err = err
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
di.Driver = d
|
|
||||||
di.ImageOpt = imageopt
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}(i, n)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := eg.Wait(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return dis, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// clientForEndpoint returns a docker client for an endpoint
|
|
||||||
func clientForEndpoint(dockerCli command.Cli, name string) (dockerclient.APIClient, error) {
|
|
||||||
list, err := dockerCli.ContextStore().List()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
for _, l := range list {
|
|
||||||
if l.Name == name {
|
|
||||||
epm, err := docker.EndpointFromContext(l)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
ep, err := docker.WithTLSData(dockerCli.ContextStore(), name, epm)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
clientOpts, err := ep.ClientOpts()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return dockerclient.NewClientWithOpts(clientOpts...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ep := docker.Endpoint{
|
|
||||||
EndpointMeta: docker.EndpointMeta{
|
|
||||||
Host: name,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
clientOpts, err := ep.ClientOpts()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return dockerclient.NewClientWithOpts(clientOpts...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func getInstanceOrDefault(ctx context.Context, dockerCli command.Cli, instance, contextPathHash string) ([]build.DriverInfo, error) {
|
|
||||||
var defaultOnly bool
|
|
||||||
|
|
||||||
if instance == "default" && instance != dockerCli.CurrentContext() {
|
|
||||||
return nil, errors.Errorf("use `docker --context=default buildx` to switch to default context")
|
|
||||||
}
|
|
||||||
if instance == "default" || instance == dockerCli.CurrentContext() {
|
|
||||||
instance = ""
|
|
||||||
defaultOnly = true
|
|
||||||
}
|
|
||||||
list, err := dockerCli.ContextStore().List()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
for _, l := range list {
|
|
||||||
if l.Name == instance {
|
|
||||||
return nil, errors.Errorf("use `docker --context=%s buildx` to switch to context %s", instance, instance)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if instance != "" {
|
|
||||||
return getInstanceByName(ctx, dockerCli, instance, contextPathHash)
|
|
||||||
}
|
|
||||||
return getDefaultDrivers(ctx, dockerCli, defaultOnly, contextPathHash)
|
|
||||||
}
|
|
||||||
|
|
||||||
func getInstanceByName(ctx context.Context, dockerCli command.Cli, instance, contextPathHash string) ([]build.DriverInfo, error) {
|
|
||||||
txn, release, err := storeutil.GetStore(dockerCli)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer release()
|
|
||||||
|
|
||||||
ng, err := txn.NodeGroupByName(instance)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return driversForNodeGroup(ctx, dockerCli, ng, contextPathHash)
|
|
||||||
}
|
|
||||||
|
|
||||||
// getDefaultDrivers returns drivers based on current cli config
|
|
||||||
func getDefaultDrivers(ctx context.Context, dockerCli command.Cli, defaultOnly bool, contextPathHash string) ([]build.DriverInfo, error) {
|
|
||||||
txn, release, err := storeutil.GetStore(dockerCli)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer release()
|
|
||||||
|
|
||||||
if !defaultOnly {
|
|
||||||
ng, err := storeutil.GetCurrentInstance(txn, dockerCli)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if ng != nil {
|
|
||||||
return driversForNodeGroup(ctx, dockerCli, ng, contextPathHash)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
imageopt, err := storeutil.GetImageConfig(dockerCli, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
d, err := driver.GetDriver(ctx, "buildx_buildkit_default", nil, "", dockerCli.Client(), imageopt.Auth, nil, nil, nil, nil, nil, contextPathHash)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return []build.DriverInfo{
|
|
||||||
{
|
|
||||||
Name: "default",
|
|
||||||
Driver: d,
|
|
||||||
ImageOpt: imageopt,
|
|
||||||
ProxyConfig: storeutil.GetProxyConfig(dockerCli),
|
|
||||||
},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func loadInfoData(ctx context.Context, d *dinfo) error {
|
|
||||||
if d.di.Driver == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
info, err := d.di.Driver.Info(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
d.info = info
|
|
||||||
if info.Status == driver.Running {
|
|
||||||
c, err := d.di.Driver.Client(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
workers, err := c.ListWorkers(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrap(err, "listing workers")
|
|
||||||
}
|
|
||||||
for _, w := range workers {
|
|
||||||
d.platforms = append(d.platforms, w.Platforms...)
|
|
||||||
}
|
|
||||||
d.platforms = platformutil.Dedupe(d.platforms)
|
|
||||||
inf, err := c.Info(ctx)
|
|
||||||
if err != nil {
|
|
||||||
if st, ok := grpcerrors.AsGRPCStatus(err); ok && st.Code() == codes.Unimplemented {
|
|
||||||
d.version, err = d.di.Driver.Version(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrap(err, "getting version")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
d.version = inf.BuildkitVersion.Version
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func loadNodeGroupData(ctx context.Context, dockerCli command.Cli, ngi *nginfo) error {
|
|
||||||
eg, _ := errgroup.WithContext(ctx)
|
|
||||||
|
|
||||||
dis, err := driversForNodeGroup(ctx, dockerCli, ngi.ng, "")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
ngi.drivers = make([]dinfo, len(dis))
|
|
||||||
for i, di := range dis {
|
|
||||||
d := di
|
|
||||||
ngi.drivers[i].di = &d
|
|
||||||
func(d *dinfo) {
|
|
||||||
eg.Go(func() error {
|
|
||||||
if err := loadInfoData(ctx, d); err != nil {
|
|
||||||
d.err = err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}(&ngi.drivers[i])
|
|
||||||
}
|
|
||||||
|
|
||||||
if eg.Wait(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
kubernetesDriverCount := 0
|
|
||||||
|
|
||||||
for _, di := range ngi.drivers {
|
|
||||||
if di.info != nil && len(di.info.DynamicNodes) > 0 {
|
|
||||||
kubernetesDriverCount++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
isAllKubernetesDrivers := len(ngi.drivers) == kubernetesDriverCount
|
|
||||||
|
|
||||||
if isAllKubernetesDrivers {
|
|
||||||
var drivers []dinfo
|
|
||||||
var dynamicNodes []store.Node
|
|
||||||
|
|
||||||
for _, di := range ngi.drivers {
|
|
||||||
// dynamic nodes are used in Kubernetes driver.
|
|
||||||
// Kubernetes pods are dynamically mapped to BuildKit Nodes.
|
|
||||||
if di.info != nil && len(di.info.DynamicNodes) > 0 {
|
|
||||||
for i := 0; i < len(di.info.DynamicNodes); i++ {
|
|
||||||
// all []dinfo share *build.DriverInfo and *driver.Info
|
|
||||||
diClone := di
|
|
||||||
if pl := di.info.DynamicNodes[i].Platforms; len(pl) > 0 {
|
|
||||||
diClone.platforms = pl
|
|
||||||
}
|
|
||||||
drivers = append(drivers, di)
|
|
||||||
}
|
|
||||||
dynamicNodes = append(dynamicNodes, di.info.DynamicNodes...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// not append (remove the static nodes in the store)
|
|
||||||
ngi.ng.Nodes = dynamicNodes
|
|
||||||
ngi.drivers = drivers
|
|
||||||
ngi.ng.Dynamic = true
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func hasNodeGroup(list []*nginfo, ngi *nginfo) bool {
|
|
||||||
for _, l := range list {
|
|
||||||
if ngi.ng.Name == l.ng.Name {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func dockerAPI(dockerCli command.Cli) *api {
|
|
||||||
return &api{dockerCli: dockerCli}
|
|
||||||
}
|
|
||||||
|
|
||||||
// api resolves docker API clients by context name on top of the docker CLI.
type api struct {
	dockerCli command.Cli
}
|
|
||||||
|
|
||||||
func (a *api) DockerAPI(name string) (dockerclient.APIClient, error) {
|
|
||||||
if name == "" {
|
|
||||||
name = a.dockerCli.CurrentContext()
|
|
||||||
}
|
|
||||||
return clientForEndpoint(a.dockerCli, name)
|
|
||||||
}
|
|
||||||
|
|
||||||
// dinfo pairs a node's static driver configuration with the runtime data
// loaded from its BuildKit instance.
type dinfo struct {
	di        *build.DriverInfo // static driver/node configuration
	info      *driver.Info      // runtime status reported by the driver
	platforms []specs.Platform  // platforms reported by the node's workers (deduped)
	version   string            // BuildKit daemon version
	err       error             // error hit while loading this node's data
}
|
|
||||||
|
|
||||||
// nginfo groups a builder (node group) with per-node driver data and any
// error encountered while collecting it.
type nginfo struct {
	ng      *store.NodeGroup
	drivers []dinfo // one entry per node in ng, indexed in step with ng.Nodes
	err     error
}
|
|
||||||
|
|
||||||
// inactive checks if all nodes are inactive for this builder
|
|
||||||
func (n *nginfo) inactive() bool {
|
|
||||||
for idx := range n.ng.Nodes {
|
|
||||||
d := n.drivers[idx]
|
|
||||||
if d.info != nil && d.info.Status == driver.Running {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func boot(ctx context.Context, ngi *nginfo) (bool, error) {
|
|
||||||
toBoot := make([]int, 0, len(ngi.drivers))
|
|
||||||
for i, d := range ngi.drivers {
|
|
||||||
if d.err != nil || d.di.Err != nil || d.di.Driver == nil || d.info == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if d.info.Status != driver.Running {
|
|
||||||
toBoot = append(toBoot, i)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(toBoot) == 0 {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
printer, err := progress.NewPrinter(context.TODO(), os.Stderr, os.Stderr, progress.PrinterModeAuto)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
baseCtx := ctx
|
|
||||||
eg, _ := errgroup.WithContext(ctx)
|
|
||||||
for _, idx := range toBoot {
|
|
||||||
func(idx int) {
|
|
||||||
eg.Go(func() error {
|
|
||||||
pw := progress.WithPrefix(printer, ngi.ng.Nodes[idx].Name, len(toBoot) > 1)
|
|
||||||
_, err := driver.Boot(ctx, baseCtx, ngi.drivers[idx].di.Driver, pw)
|
|
||||||
if err != nil {
|
|
||||||
ngi.drivers[idx].err = err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}(idx)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = eg.Wait()
|
|
||||||
err1 := printer.Wait()
|
|
||||||
if err == nil {
|
|
||||||
err = err1
|
|
||||||
}
|
|
||||||
|
|
||||||
return true, err
|
|
||||||
}
|
|
|
@ -7,10 +7,10 @@ import (
|
||||||
|
|
||||||
"github.com/docker/buildx/store"
|
"github.com/docker/buildx/store"
|
||||||
"github.com/docker/buildx/util/confutil"
|
"github.com/docker/buildx/util/confutil"
|
||||||
|
"github.com/docker/buildx/util/dockerutil"
|
||||||
"github.com/docker/buildx/util/imagetools"
|
"github.com/docker/buildx/util/imagetools"
|
||||||
"github.com/docker/buildx/util/resolver"
|
"github.com/docker/buildx/util/resolver"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/docker/cli/cli/context/docker"
|
|
||||||
buildkitdconfig "github.com/moby/buildkit/cmd/buildkitd/config"
|
buildkitdconfig "github.com/moby/buildkit/cmd/buildkitd/config"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
@ -24,19 +24,6 @@ func GetStore(dockerCli command.Cli) (*store.Txn, func(), error) {
|
||||||
return s.Txn()
|
return s.Txn()
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetCurrentEndpoint returns the current default endpoint value
|
|
||||||
func GetCurrentEndpoint(dockerCli command.Cli) (string, error) {
|
|
||||||
name := dockerCli.CurrentContext()
|
|
||||||
if name != "default" {
|
|
||||||
return name, nil
|
|
||||||
}
|
|
||||||
de, err := GetDockerEndpoint(dockerCli, name)
|
|
||||||
if err != nil {
|
|
||||||
return "", errors.Errorf("docker endpoint for %q not found", name)
|
|
||||||
}
|
|
||||||
return de, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func GetProxyConfig(dockerCli command.Cli) map[string]string {
|
func GetProxyConfig(dockerCli command.Cli) map[string]string {
|
||||||
cfg := dockerCli.ConfigFile()
|
cfg := dockerCli.ConfigFile()
|
||||||
host := dockerCli.Client().DaemonHost()
|
host := dockerCli.Client().DaemonHost()
|
||||||
|
@ -63,31 +50,9 @@ func GetProxyConfig(dockerCli command.Cli) map[string]string {
|
||||||
return m
|
return m
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetDockerEndpoint returns docker endpoint string for given context
|
|
||||||
func GetDockerEndpoint(dockerCli command.Cli, name string) (string, error) {
|
|
||||||
list, err := dockerCli.ContextStore().List()
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
for _, l := range list {
|
|
||||||
if l.Name == name {
|
|
||||||
ep, ok := l.Endpoints["docker"]
|
|
||||||
if !ok {
|
|
||||||
return "", errors.Errorf("context %q does not have a Docker endpoint", name)
|
|
||||||
}
|
|
||||||
typed, ok := ep.(docker.EndpointMeta)
|
|
||||||
if !ok {
|
|
||||||
return "", errors.Errorf("endpoint %q is not of type EndpointMeta, %T", ep, ep)
|
|
||||||
}
|
|
||||||
return typed.Host, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetCurrentInstance finds the current builder instance
|
// GetCurrentInstance finds the current builder instance
|
||||||
func GetCurrentInstance(txn *store.Txn, dockerCli command.Cli) (*store.NodeGroup, error) {
|
func GetCurrentInstance(txn *store.Txn, dockerCli command.Cli) (*store.NodeGroup, error) {
|
||||||
ep, err := GetCurrentEndpoint(dockerCli)
|
ep, err := dockerutil.GetCurrentEndpoint(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,45 @@
|
||||||
|
package dockerutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/docker/cli/cli/command"
|
||||||
|
"github.com/docker/cli/cli/context/docker"
|
||||||
|
"github.com/docker/docker/client"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ClientAPI represents an active docker API object.
// It embeds client.APIClient, so a ClientAPI can be used directly wherever a
// docker API client is expected.
type ClientAPI struct {
	client.APIClient
}
|
||||||
|
|
||||||
|
func NewClientAPI(cli command.Cli, ep string) (*ClientAPI, error) {
|
||||||
|
ca := &ClientAPI{}
|
||||||
|
|
||||||
|
var dep docker.Endpoint
|
||||||
|
dem, err := GetDockerEndpoint(cli, ep)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
} else if dem != nil {
|
||||||
|
dep, err = docker.WithTLSData(cli.ContextStore(), ep, *dem)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
dep = docker.Endpoint{
|
||||||
|
EndpointMeta: docker.EndpointMeta{
|
||||||
|
Host: ep,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
clientOpts, err := dep.ClientOpts()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ca.APIClient, err = client.NewClientWithOpts(clientOpts...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return ca, nil
|
||||||
|
}
|
|
@ -0,0 +1,92 @@
|
||||||
|
package dockerutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/docker/buildx/util/progress"
|
||||||
|
"github.com/docker/cli/cli/command"
|
||||||
|
"github.com/docker/docker/client"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Client represents an active docker object.
type Client struct {
	cli command.Cli // docker CLI used to resolve contexts and endpoints
}
|
||||||
|
|
||||||
|
// NewClient initializes a new docker client.
|
||||||
|
func NewClient(cli command.Cli) *Client {
|
||||||
|
return &Client{cli: cli}
|
||||||
|
}
|
||||||
|
|
||||||
|
// API returns a new docker API client.
|
||||||
|
func (c *Client) API(name string) (client.APIClient, error) {
|
||||||
|
if name == "" {
|
||||||
|
name = c.cli.CurrentContext()
|
||||||
|
}
|
||||||
|
return NewClientAPI(c.cli, name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadImage imports an image to docker.
// It returns a WriteCloser the caller streams the image tarball into, and a
// cleanup func that closes the read side of the pipe. The ImageLoad request
// itself is started lazily by the first Write (see waitingWriter).
func (c *Client) LoadImage(ctx context.Context, name string, status progress.Writer) (io.WriteCloser, func(), error) {
	dapi, err := c.API(name)
	if err != nil {
		return nil, nil, err
	}

	pr, pw := io.Pipe()
	done := make(chan struct{})

	ctx, cancel := context.WithCancel(ctx)
	var w *waitingWriter
	w = &waitingWriter{
		PipeWriter: pw,
		// f runs in its own goroutine (started by waitingWriter.Write) and
		// feeds the pipe's read end into the docker image-load endpoint.
		f: func() {
			resp, err := dapi.ImageLoad(ctx, pr, false)
			defer close(done) // unblocks waitingWriter.Close
			if err != nil {
				// Surface the error to the writer side and record it so
				// Close can report it.
				pr.CloseWithError(err)
				w.mu.Lock()
				w.err = err
				w.mu.Unlock()
				return
			}
			prog := progress.WithPrefix(status, "", false)
			progress.FromReader(prog, "importing to docker", resp.Body)
		},
		done:   done,
		cancel: cancel,
	}
	return w, func() {
		pr.Close()
	}, nil
}
|
||||||
|
|
||||||
|
// waitingWriter is a pipe writer that lazily starts its consumer: the first
// Write launches f in a goroutine, and Close waits for it to finish.
type waitingWriter struct {
	*io.PipeWriter
	f      func()        // consumer, started once on first Write
	once   sync.Once     // guards the single start of f
	mu     sync.Mutex    // protects err
	err    error         // error recorded by f
	done   chan struct{} // closed when f returns
	cancel func()        // cancels the load context (see LoadImage)
}
|
||||||
|
|
||||||
|
// Write starts the consumer goroutine on the first call, then forwards dt to
// the underlying pipe.
func (w *waitingWriter) Write(dt []byte) (int, error) {
	w.once.Do(func() {
		go w.f()
	})
	return w.PipeWriter.Write(dt)
}
|
||||||
|
|
||||||
|
func (w *waitingWriter) Close() error {
|
||||||
|
err := w.PipeWriter.Close()
|
||||||
|
<-w.done
|
||||||
|
if err == nil {
|
||||||
|
w.mu.Lock()
|
||||||
|
defer w.mu.Unlock()
|
||||||
|
return w.err
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
|
@ -0,0 +1,40 @@
|
||||||
|
package dockerutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/docker/cli/cli/command"
|
||||||
|
"github.com/docker/cli/cli/context/docker"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetDockerEndpoint returns docker endpoint meta for given context
|
||||||
|
func GetDockerEndpoint(dockerCli command.Cli, name string) (*docker.EndpointMeta, error) {
|
||||||
|
list, err := dockerCli.ContextStore().List()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
for _, l := range list {
|
||||||
|
if l.Name == name {
|
||||||
|
epm, err := docker.EndpointFromContext(l)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &epm, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetCurrentEndpoint returns the current default endpoint value
|
||||||
|
func GetCurrentEndpoint(dockerCli command.Cli) (string, error) {
|
||||||
|
name := dockerCli.CurrentContext()
|
||||||
|
if name != "default" {
|
||||||
|
return name, nil
|
||||||
|
}
|
||||||
|
dem, err := GetDockerEndpoint(dockerCli, name)
|
||||||
|
if err != nil {
|
||||||
|
return "", errors.Errorf("docker endpoint for %q not found", name)
|
||||||
|
} else if dem != nil {
|
||||||
|
return dem.Host, nil
|
||||||
|
}
|
||||||
|
return "", nil
|
||||||
|
}
|
Loading…
Reference in New Issue