mirror of https://github.com/docker/buildx.git
Update to pass DockerfileMappingSrc and DockerfileMappingDst in with Inputs, and return Inputs through Build
Signed-off-by: Talon Bowler <talon.bowler@docker.com>
This commit is contained in:
parent f1b92e9e6c
commit 671bd1b54d
@@ -101,6 +101,9 @@ type Inputs struct {
 	ContextState     *llb.State
 	DockerfileInline string
 	NamedContexts    map[string]NamedContext
+	// DockerfileMappingSrc and DockerfileMappingDst are filled in by the builder.
+	DockerfileMappingSrc string
+	DockerfileMappingDst string
 }
 
 type NamedContext struct {
@@ -147,19 +150,18 @@ func toRepoOnly(in string) (string, error) {
 	return strings.Join(out, ","), nil
 }
 
-func Build(ctx context.Context, nodes []builder.Node, opt map[string]Options, docker *dockerutil.Client, configDir string, w progress.Writer) (resp map[string]*client.SolveResponse, dockerfileMappings map[string]string, err error) {
-	return BuildWithResultHandler(ctx, nodes, opt, docker, configDir, w, nil)
+func Build(ctx context.Context, nodes []builder.Node, opts map[string]Options, docker *dockerutil.Client, configDir string, w progress.Writer) (resp map[string]*client.SolveResponse, err error) {
+	return BuildWithResultHandler(ctx, nodes, opts, docker, configDir, w, nil)
 }
 
-func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[string]Options, docker *dockerutil.Client, configDir string, w progress.Writer, resultHandleFunc func(driverIndex int, rCtx *ResultHandle)) (resp map[string]*client.SolveResponse, dockerfileMappings map[string]string, err error) {
-	dockerfileMappings = map[string]string{}
+func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[string]Options, docker *dockerutil.Client, configDir string, w progress.Writer, resultHandleFunc func(driverIndex int, rCtx *ResultHandle)) (resp map[string]*client.SolveResponse, err error) {
 	if len(nodes) == 0 {
-		return nil, nil, errors.Errorf("driver required for build")
+		return nil, errors.Errorf("driver required for build")
 	}
 
 	nodes, err = filterAvailableNodes(nodes)
 	if err != nil {
-		return nil, nil, errors.Wrapf(err, "no valid drivers found")
+		return nil, errors.Wrapf(err, "no valid drivers found")
 	}
 
 	var noMobyDriver *driver.DriverHandle
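
With this change, Build and BuildWithResultHandler no longer hand the Dockerfile mapping back as a separate return value; the builder records it on each target's Inputs instead. A minimal before/after sketch of a caller; the opts map and the "default" target key are assumptions for illustration:

	func printMapping(ctx context.Context, nodes []builder.Node, opts map[string]build.Options,
		docker *dockerutil.Client, configDir string, w progress.Writer) error {
		// Before (sketch): resp, dfmap, err := build.Build(...)
		//                  src := dfmap["default:Dockerfile"]
		// After: the builder writes the mapping back into the passed-in Options,
		// so the caller reads it from the same map it supplied.
		resp, err := build.Build(ctx, nodes, opts, docker, configDir, w)
		if err != nil {
			return err
		}
		in := opts["default"].Inputs
		fmt.Printf("dockerfile %q mapped to %q (%d targets built)\n",
			in.DockerfileMappingSrc, in.DockerfileMappingDst, len(resp))
		return nil
	}
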
@@ -170,9 +172,9 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 		}
 	}
 
-	if noMobyDriver != nil && !noDefaultLoad() && noCallFunc(opt) {
+	if noMobyDriver != nil && !noDefaultLoad() && noCallFunc(opts) {
 		var noOutputTargets []string
-		for name, opt := range opt {
+		for name, opt := range opts {
 			if noMobyDriver.Features(ctx)[driver.DefaultLoad] {
 				continue
 			}
@@ -193,9 +195,9 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 		}
 	}
 
-	drivers, err := resolveDrivers(ctx, nodes, opt, w)
+	drivers, err := resolveDrivers(ctx, nodes, opts, w)
 	if err != nil {
-		return nil, nil, err
+		return nil, err
 	}
 
 	defers := make([]func(), 0, 2)
@@ -210,7 +212,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 	reqForNodes := make(map[string][]*reqForNode)
 	eg, ctx := errgroup.WithContext(ctx)
 
-	for k, opt := range opt {
+	for k, opt := range opts {
 		multiDriver := len(drivers[k]) > 1
 		hasMobyDriver := false
 		addGitAttrs, err := getGitAttributes(ctx, opt.Inputs.ContextPath, opt.Inputs.DockerfilePath)
@@ -228,14 +230,16 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 			opt.Platforms = np.platforms
 			gatewayOpts, err := np.BuildOpts(ctx)
 			if err != nil {
-				return nil, nil, err
+				return nil, err
 			}
-			so, release, dockerfileMapping, err := toSolveOpt(ctx, np.Node(), multiDriver, opt, gatewayOpts, configDir, w, docker)
+			localOpt := opt
+			so, release, err := toSolveOpt(ctx, np.Node(), multiDriver, &localOpt, gatewayOpts, configDir, w, docker)
+			opts[k] = localOpt
 			if err != nil {
-				return nil, nil, err
+				return nil, err
 			}
 			if err := saveLocalState(so, k, opt, np.Node(), configDir); err != nil {
-				return nil, nil, err
+				return nil, err
 			}
 			addGitAttrs(so)
 			defers = append(defers, release)
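
The localOpt copy above is what makes the write-back possible: Go map values are not addressable, so toSolveOpt cannot receive a pointer into the map entry directly. The entry is copied out, mutated through the pointer, and stored back under the same key. A standalone sketch of the constraint:

	func example() {
		opts := map[string]build.Options{"default": {}}
		// opts["default"].Inputs.DockerfileMappingSrc = "x" // does not compile:
		// "cannot assign to struct field ... in map" (map values are not addressable)
		localOpt := opts["default"]                    // copy the entry out
		localOpt.Inputs.DockerfileMappingSrc = "stdin" // mutate the copy
		opts["default"] = localOpt                     // store it back
	}

Note that opts[k] is stored back before the error check, so the caller's map reflects whatever toSolveOpt changed through the pointer.
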
@@ -243,7 +247,6 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 				resolvedNode: np,
 				so:           so,
 			})
-			dockerfileMappings[k+":"+dockerfileMapping.Dst] = dockerfileMapping.Src
 		}
 		reqForNodes[k] = reqn
 		for _, at := range opt.Session {
@@ -262,7 +265,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 		for _, e := range np.so.Exports {
 			if e.Type == "moby" {
 				if ok, _ := strconv.ParseBool(e.Attrs["push"]); ok {
-					return nil, nil, errors.Errorf("multi-node push can't currently be performed with the docker driver, please switch to a different driver")
+					return nil, errors.Errorf("multi-node push can't currently be performed with the docker driver, please switch to a different driver")
 				}
 			}
 		}
@@ -271,7 +274,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 	}
 
 	// validate that all links between targets use same drivers
-	for name := range opt {
+	for name := range opts {
 		dps := reqForNodes[name]
 		for i, dp := range dps {
 			so := reqForNodes[name][i].so
@@ -280,7 +283,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 				k2 := strings.TrimPrefix(v, "target:")
 				dps2, ok := drivers[k2]
 				if !ok {
-					return nil, nil, errors.Errorf("failed to find target %s for context %s", k2, strings.TrimPrefix(k, "context:")) // should be validated before already
+					return nil, errors.Errorf("failed to find target %s for context %s", k2, strings.TrimPrefix(k, "context:")) // should be validated before already
 				}
 				var found bool
 				for _, dp2 := range dps2 {
@@ -290,7 +293,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 					}
 				}
 				if !found {
-					return nil, nil, errors.Errorf("failed to use %s as context %s for %s because targets build with different drivers", k2, strings.TrimPrefix(k, "context:"), name)
+					return nil, errors.Errorf("failed to use %s as context %s for %s because targets build with different drivers", k2, strings.TrimPrefix(k, "context:"), name)
 				}
 			}
 		}
@@ -299,7 +302,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 
 	sharedSessions, err := detectSharedMounts(ctx, reqForNodes)
 	if err != nil {
-		return nil, nil, err
+		return nil, err
 	}
 	sharedSessionsWG := map[string]*sync.WaitGroup{}
 
@@ -307,10 +310,10 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 	var respMu sync.Mutex
 	results := waitmap.New()
 
-	multiTarget := len(opt) > 1
-	childTargets := calculateChildTargets(reqForNodes, opt)
+	multiTarget := len(opts) > 1
+	childTargets := calculateChildTargets(reqForNodes, opts)
 
-	for k, opt := range opt {
+	for k, opt := range opts {
 		err := func(k string) (err error) {
 			opt := opt
 			dps := drivers[k]
@@ -704,15 +707,15 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 			return nil
 		}(k)
 		if err != nil {
-			return nil, nil, err
+			return nil, err
 		}
 	}
 
 	if err := eg.Wait(); err != nil {
-		return nil, nil, err
+		return nil, err
 	}
 
-	return resp, dockerfileMappings, nil
+	return resp, nil
 }
 
 func extractIndexAnnotations(exports []client.ExportEntry) (map[exptypes.AnnotationKey]string, error) {

build/opt.go

@@ -35,7 +35,7 @@ import (
 	"github.com/tonistiigi/fsutil"
 )
 
-func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Options, bopts gateway.BuildOpts, configDir string, pw progress.Writer, docker *dockerutil.Client) (_ *client.SolveOpt, release func(), dockerfileMapping *DockerfileMapping, err error) {
+func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt *Options, bopts gateway.BuildOpts, configDir string, pw progress.Writer, docker *dockerutil.Client) (_ *client.SolveOpt, release func(), err error) {
 	nodeDriver := node.Driver
 	defers := make([]func(), 0, 2)
 	releaseF := func() {
@@ -62,7 +62,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 
 	for _, e := range opt.CacheTo {
 		if e.Type != "inline" && !nodeDriver.Features(ctx)[driver.CacheExport] {
-			return nil, nil, nil, notSupported(driver.CacheExport, nodeDriver, "https://docs.docker.com/go/build-cache-backends/")
+			return nil, nil, notSupported(driver.CacheExport, nodeDriver, "https://docs.docker.com/go/build-cache-backends/")
 		}
 	}
 
@@ -131,9 +131,9 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 	if len(attests) > 0 {
 		if !supportAttestations {
 			if !nodeDriver.Features(ctx)[driver.MultiPlatform] {
-				return nil, nil, nil, notSupported("Attestation", nodeDriver, "https://docs.docker.com/go/attestations/")
+				return nil, nil, notSupported("Attestation", nodeDriver, "https://docs.docker.com/go/attestations/")
 			}
-			return nil, nil, nil, errors.Errorf("Attestations are not supported by the current BuildKit daemon")
+			return nil, nil, errors.Errorf("Attestations are not supported by the current BuildKit daemon")
 		}
 		for k, v := range attests {
 			so.FrontendAttrs["attest:"+k] = v
@@ -146,7 +146,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 		if v, ok := os.LookupEnv(noAttestEnv); ok {
 			noProv, err = strconv.ParseBool(v)
 			if err != nil {
-				return nil, nil, nil, errors.Wrap(err, "invalid "+noAttestEnv)
+				return nil, nil, errors.Wrap(err, "invalid "+noAttestEnv)
 			}
 		}
 		if !noProv {
@@ -169,7 +169,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 		}
 	default:
 		if err := bopts.LLBCaps.Supports(pb.CapMultipleExporters); err != nil {
-			return nil, nil, nil, errors.Errorf("multiple outputs currently unsupported by the current BuildKit daemon, please upgrade to version v0.13+ or use a single output")
+			return nil, nil, errors.Errorf("multiple outputs currently unsupported by the current BuildKit daemon, please upgrade to version v0.13+ or use a single output")
 		}
 	}
 
@@ -179,7 +179,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 	for i, tag := range opt.Tags {
 		ref, err := reference.Parse(tag)
 		if err != nil {
-			return nil, nil, nil, errors.Wrapf(err, "invalid tag %q", tag)
+			return nil, nil, errors.Wrapf(err, "invalid tag %q", tag)
 		}
 		tags[i] = ref.String()
 	}
@@ -193,7 +193,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 	for _, e := range opt.Exports {
 		if e.Type == "image" && e.Attrs["name"] == "" && e.Attrs["push"] != "" {
 			if ok, _ := strconv.ParseBool(e.Attrs["push"]); ok {
-				return nil, nil, nil, errors.Errorf("tag is needed when pushing to registry")
+				return nil, nil, errors.Errorf("tag is needed when pushing to registry")
 			}
 		}
 	}
@@ -211,7 +211,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 	// set up exporters
 	for i, e := range opt.Exports {
 		if e.Type == "oci" && !nodeDriver.Features(ctx)[driver.OCIExporter] {
-			return nil, nil, nil, notSupported(driver.OCIExporter, nodeDriver, "https://docs.docker.com/go/build-exporters/")
+			return nil, nil, notSupported(driver.OCIExporter, nodeDriver, "https://docs.docker.com/go/build-exporters/")
 		}
 		if e.Type == "docker" {
 			features := docker.Features(ctx, e.Attrs["context"])
@@ -221,9 +221,9 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 				opt.Exports[i].Type = "oci"
 			} else if len(opt.Platforms) > 1 || len(attests) > 0 {
 				if e.Output != nil {
-					return nil, nil, nil, errors.Errorf("docker exporter does not support exporting manifest lists, use the oci exporter instead")
+					return nil, nil, errors.Errorf("docker exporter does not support exporting manifest lists, use the oci exporter instead")
 				}
-				return nil, nil, nil, errors.Errorf("docker exporter does not currently support exporting manifest lists")
+				return nil, nil, errors.Errorf("docker exporter does not currently support exporting manifest lists")
 			}
 			if e.Output == nil {
 				if nodeDriver.IsMobyDriver() {
@@ -231,7 +231,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 				} else {
 					w, cancel, err := docker.LoadImage(ctx, e.Attrs["context"], pw)
 					if err != nil {
-						return nil, nil, nil, err
+						return nil, nil, err
 					}
 					defers = append(defers, cancel)
 					opt.Exports[i].Output = func(_ map[string]string) (io.WriteCloser, error) {
@@ -239,7 +239,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 					}
 				}
 			} else if !nodeDriver.Features(ctx)[driver.DockerExporter] {
-				return nil, nil, nil, notSupported(driver.DockerExporter, nodeDriver, "https://docs.docker.com/go/build-exporters/")
+				return nil, nil, notSupported(driver.DockerExporter, nodeDriver, "https://docs.docker.com/go/build-exporters/")
 			}
 		}
 		if e.Type == "image" && nodeDriver.IsMobyDriver() {
@@ -247,7 +247,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 			if e.Attrs["push"] != "" {
 				if ok, _ := strconv.ParseBool(e.Attrs["push"]); ok {
 					if ok, _ := strconv.ParseBool(e.Attrs["push-by-digest"]); ok {
-						return nil, nil, nil, errors.Errorf("push-by-digest is currently not implemented for docker driver, please create a new builder instance")
+						return nil, nil, errors.Errorf("push-by-digest is currently not implemented for docker driver, please create a new builder instance")
 					}
 				}
 			}
@@ -263,9 +263,9 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 	so.Exports = opt.Exports
 	so.Session = slices.Clone(opt.Session)
 
-	releaseLoad, dockerfileMapping, err := loadInputs(ctx, nodeDriver, opt.Inputs, pw, &so)
+	releaseLoad, err := loadInputs(ctx, nodeDriver, &opt.Inputs, pw, &so)
 	if err != nil {
-		return nil, nil, nil, err
+		return nil, nil, err
 	}
 	defers = append(defers, releaseLoad)
 
@@ -309,7 +309,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 			pp[i] = platforms.Format(p)
 		}
 		if len(pp) > 1 && !nodeDriver.Features(ctx)[driver.MultiPlatform] {
-			return nil, nil, nil, notSupported(driver.MultiPlatform, nodeDriver, "https://docs.docker.com/go/build-multi-platform/")
+			return nil, nil, notSupported(driver.MultiPlatform, nodeDriver, "https://docs.docker.com/go/build-multi-platform/")
 		}
 		so.FrontendAttrs["platform"] = strings.Join(pp, ",")
 	}
@@ -323,13 +323,13 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 		so.FrontendAttrs["force-network-mode"] = opt.NetworkMode
 	case "", "default":
 	default:
-		return nil, nil, nil, errors.Errorf("network mode %q not supported by buildkit - you can define a custom network for your builder using the network driver-opt in buildx create", opt.NetworkMode)
+		return nil, nil, errors.Errorf("network mode %q not supported by buildkit - you can define a custom network for your builder using the network driver-opt in buildx create", opt.NetworkMode)
 	}
 
 	// setup extrahosts
 	extraHosts, err := toBuildkitExtraHosts(ctx, opt.ExtraHosts, nodeDriver)
 	if err != nil {
-		return nil, nil, nil, err
+		return nil, nil, err
 	}
 	if len(extraHosts) > 0 {
 		so.FrontendAttrs["add-hosts"] = extraHosts
@@ -343,7 +343,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 	// setup ulimits
 	ulimits, err := toBuildkitUlimits(opt.Ulimits)
 	if err != nil {
-		return nil, nil, nil, err
+		return nil, nil, err
 	} else if len(ulimits) > 0 {
 		so.FrontendAttrs["ulimit"] = ulimits
 	}
@@ -353,17 +353,12 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 		so.Internal = true
 	}
 
-	return &so, releaseF, dockerfileMapping, nil
+	return &so, releaseF, nil
 }
 
-type DockerfileMapping struct {
-	Src string
-	Dst string
-}
-
-func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw progress.Writer, target *client.SolveOpt) (func(), *DockerfileMapping, error) {
+func loadInputs(ctx context.Context, d *driver.DriverHandle, inp *Inputs, pw progress.Writer, target *client.SolveOpt) (func(), error) {
 	if inp.ContextPath == "" {
-		return nil, nil, errors.New("please specify build context (e.g. \".\" for the current directory)")
+		return nil, errors.New("please specify build context (e.g. \".\" for the current directory)")
 	}
 
 	// TODO: handle stdin, symlinks, remote contexts, check files exist
@@ -376,11 +371,6 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw prog
 		dockerfileSrcName = inp.DockerfilePath
 		toRemove          []string
 	)
-	if inp.DockerfilePath == "-" {
-		dockerfileSrcName = "stdin"
-	} else if inp.DockerfilePath == "" {
-		dockerfileSrcName = filepath.Join(inp.ContextPath, "Dockerfile")
-	}
 
 	switch {
 	case inp.ContextState != nil:
@@ -391,13 +381,13 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw prog
 		target.FrontendInputs["dockerfile"] = *inp.ContextState
 	case inp.ContextPath == "-":
 		if inp.DockerfilePath == "-" {
-			return nil, nil, errors.Errorf("invalid argument: can't use stdin for both build context and dockerfile")
+			return nil, errors.Errorf("invalid argument: can't use stdin for both build context and dockerfile")
 		}
 
 		rc := inp.InStream.NewReadCloser()
 		magic, err := inp.InStream.Peek(archiveHeaderSize * 2)
 		if err != nil && err != io.EOF {
-			return nil, nil, errors.Wrap(err, "failed to peek context header from STDIN")
+			return nil, errors.Wrap(err, "failed to peek context header from STDIN")
 		}
 		if !(err == io.EOF && len(magic) == 0) {
 			if isArchive(magic) {
@@ -407,20 +397,20 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw prog
 				target.Session = append(target.Session, up)
 			} else {
 				if inp.DockerfilePath != "" {
-					return nil, nil, errors.Errorf("ambiguous Dockerfile source: both stdin and flag correspond to Dockerfiles")
+					return nil, errors.Errorf("ambiguous Dockerfile source: both stdin and flag correspond to Dockerfiles")
 				}
 				// stdin is dockerfile
 				dockerfileReader = rc
 				inp.ContextPath, _ = os.MkdirTemp("", "empty-dir")
 				toRemove = append(toRemove, inp.ContextPath)
 				if err := setLocalMount("context", inp.ContextPath, target); err != nil {
-					return nil, nil, err
+					return nil, err
 				}
 			}
 		}
 	case osutil.IsLocalDir(inp.ContextPath):
 		if err := setLocalMount("context", inp.ContextPath, target); err != nil {
-			return nil, nil, err
+			return nil, err
 		}
 		sharedKey := inp.ContextPath
 		if p, err := filepath.Abs(sharedKey); err == nil {
@@ -446,17 +436,22 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw prog
 		}
 		target.FrontendAttrs["context"] = inp.ContextPath
 	default:
-		return nil, nil, errors.Errorf("unable to prepare context: path %q not found", inp.ContextPath)
+		return nil, errors.Errorf("unable to prepare context: path %q not found", inp.ContextPath)
 	}
 
 	if inp.DockerfileInline != "" {
 		dockerfileReader = io.NopCloser(strings.NewReader(inp.DockerfileInline))
+		dockerfileSrcName = "inline"
+	} else if inp.DockerfilePath == "-" {
+		dockerfileSrcName = "stdin"
+	} else if inp.DockerfilePath == "" {
+		dockerfileSrcName = filepath.Join(inp.ContextPath, "Dockerfile")
 	}
 
 	if dockerfileReader != nil {
 		dockerfileDir, err = createTempDockerfile(dockerfileReader, inp.InStream)
 		if err != nil {
-			return nil, nil, err
+			return nil, err
 		}
 		toRemove = append(toRemove, dockerfileDir)
 		dockerfileName = "Dockerfile"
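
The relocated branch gives the user-facing source name a single home: "inline" when the Dockerfile text arrives via DockerfileInline, "stdin" when it is read from standard input, and a filesystem path otherwise (defaulting to <context>/Dockerfile). A sketch of the same decision as a standalone helper; the function is hypothetical, not part of the commit:

	// dockerfileSrcName mirrors the logic above: a display name for where
	// the Dockerfile came from.
	func dockerfileSrcName(inp build.Inputs) string {
		switch {
		case inp.DockerfileInline != "":
			return "inline"
		case inp.DockerfilePath == "-":
			return "stdin"
		case inp.DockerfilePath == "":
			return filepath.Join(inp.ContextPath, "Dockerfile")
		default:
			return inp.DockerfilePath
		}
	}
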
@@ -465,7 +460,7 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw prog
 	if isHTTPURL(inp.DockerfilePath) {
 		dockerfileDir, err = createTempDockerfileFromURL(ctx, d, inp.DockerfilePath, pw)
 		if err != nil {
-			return nil, nil, err
+			return nil, err
 		}
 		toRemove = append(toRemove, dockerfileDir)
 		dockerfileName = "Dockerfile"
@@ -479,7 +474,7 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw prog
 
 	if dockerfileDir != "" {
 		if err := setLocalMount("dockerfile", dockerfileDir, target); err != nil {
-			return nil, nil, err
+			return nil, err
 		}
 		dockerfileName = handleLowercaseDockerfile(dockerfileDir, dockerfileName)
 	}
@@ -513,12 +508,12 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw prog
 		if !hasDigest {
 			dig, err = resolveDigest(localPath, tag)
 			if err != nil {
-				return nil, nil, errors.Wrapf(err, "oci-layout reference %q could not be resolved", v.Path)
+				return nil, errors.Wrapf(err, "oci-layout reference %q could not be resolved", v.Path)
 			}
 		}
 		store, err := local.NewStore(localPath)
 		if err != nil {
-			return nil, nil, errors.Wrapf(err, "invalid store at %s", localPath)
+			return nil, errors.Wrapf(err, "invalid store at %s", localPath)
 		}
 		storeName := identity.NewID()
 		if target.OCIStores == nil {
@@ -531,17 +526,17 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw prog
 		}
 		st, err := os.Stat(v.Path)
 		if err != nil {
-			return nil, nil, errors.Wrapf(err, "failed to get build context %v", k)
+			return nil, errors.Wrapf(err, "failed to get build context %v", k)
 		}
 		if !st.IsDir() {
-			return nil, nil, errors.Wrapf(syscall.ENOTDIR, "failed to get build context path %v", v)
+			return nil, errors.Wrapf(syscall.ENOTDIR, "failed to get build context path %v", v)
 		}
 		localName := k
 		if k == "context" || k == "dockerfile" {
 			localName = "_" + k // underscore to avoid collisions
 		}
 		if err := setLocalMount(localName, v.Path, target); err != nil {
-			return nil, nil, err
+			return nil, err
 		}
 		target.FrontendAttrs["context:"+k] = "local:" + localName
 	}
@@ -551,7 +546,10 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw prog
 			_ = os.RemoveAll(dir)
 		}
 	}
-	return release, &DockerfileMapping{Src: dockerfileSrcName, Dst: dockerfileName}, nil
+
+	inp.DockerfileMappingSrc = dockerfileSrcName
+	inp.DockerfileMappingDst = dockerfileName
+	return release, nil
 }
 
 func resolveDigest(localPath, tag string) (dig string, _ error) {
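
loadInputs now takes *Inputs so the two assignments at its end survive the call; with the old by-value signature they would land on a copy. That is also why toSolveOpt switched to *Options: &opt.Inputs must point at storage the caller keeps. A minimal illustration, with fill as a hypothetical stand-in for the tail of loadInputs:

	func example() {
		fill := func(inp *build.Inputs) { // stand-in for the tail of loadInputs
			inp.DockerfileMappingSrc = "stdin"
			inp.DockerfileMappingDst = "Dockerfile"
		}
		var opt build.Options
		fill(&opt.Inputs) // the caller's opt.Inputs now carries the mapping
		// a by-value parameter would have received, and discarded, a copy
	}
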

@@ -265,7 +265,7 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
 	}
 
 	done := timeBuildCommand(mp, attributes)
-	resp, dfmap, retErr := build.Build(ctx, nodes, bo, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), printer)
+	resp, retErr := build.Build(ctx, nodes, bo, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), printer)
 	if err := printer.Wait(); retErr == nil {
 		retErr = err
 	}
@@ -335,7 +335,7 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
 		if callFormatJSON {
 			jsonResults[name] = map[string]any{}
 			buf := &bytes.Buffer{}
-			if code, err := printResult(buf, pf, res, name, dfmap); err != nil {
+			if code, err := printResult(buf, pf, res, name, &req.Inputs); err != nil {
 				jsonResults[name]["error"] = err.Error()
 				exitCode = 1
 			} else if code != 0 && exitCode == 0 {
@@ -361,7 +361,7 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
 		}
 
 		fmt.Fprintln(dockerCli.Out())
-		if code, err := printResult(dockerCli.Out(), pf, res, name, dfmap); err != nil {
+		if code, err := printResult(dockerCli.Out(), pf, res, name, &req.Inputs); err != nil {
 			fmt.Fprintf(dockerCli.Out(), "error: %v\n", err)
 			exitCode = 1
 		} else if code != 0 && exitCode == 0 {

@@ -347,12 +347,12 @@ func runBuild(ctx context.Context, dockerCli command.Cli, options buildOptions)
 
 	done := timeBuildCommand(mp, attributes)
 	var resp *client.SolveResponse
+	var inputs *build.Inputs
 	var retErr error
-	var dfmap map[string]string
 	if confutil.IsExperimental() {
-		resp, dfmap, retErr = runControllerBuild(ctx, dockerCli, opts, options, printer)
+		resp, inputs, retErr = runControllerBuild(ctx, dockerCli, opts, options, printer)
 	} else {
-		resp, dfmap, retErr = runBasicBuild(ctx, dockerCli, opts, printer)
+		resp, inputs, retErr = runBasicBuild(ctx, dockerCli, opts, printer)
 	}
 
 	if err := printer.Wait(); retErr == nil {
@@ -389,7 +389,7 @@ func runBuild(ctx context.Context, dockerCli command.Cli, options buildOptions)
 		}
 	}
 	if opts.CallFunc != nil {
-		if exitcode, err := printResult(dockerCli.Out(), opts.CallFunc, resp.ExporterResponse, options.target, dfmap); err != nil {
+		if exitcode, err := printResult(dockerCli.Out(), opts.CallFunc, resp.ExporterResponse, options.target, inputs); err != nil {
			return err
 		} else if exitcode != 0 {
 			os.Exit(exitcode)
@@ -407,7 +407,7 @@ func getImageID(resp map[string]string) string {
 	return dgst
 }
 
-func runBasicBuild(ctx context.Context, dockerCli command.Cli, opts *controllerapi.BuildOptions, printer *progress.Printer) (*client.SolveResponse, map[string]string, error) {
+func runBasicBuild(ctx context.Context, dockerCli command.Cli, opts *controllerapi.BuildOptions, printer *progress.Printer) (*client.SolveResponse, *build.Inputs, error) {
 	resp, res, dfmap, err := cbuild.RunBuild(ctx, dockerCli, *opts, dockerCli.In(), printer, false)
 	if res != nil {
 		res.Done()
@@ -415,7 +415,7 @@ func runBasicBuild(ctx context.Context, dockerCli command.Cli, opts *controllera
 	return resp, dfmap, err
 }
 
-func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *controllerapi.BuildOptions, options buildOptions, printer *progress.Printer) (*client.SolveResponse, map[string]string, error) {
+func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *controllerapi.BuildOptions, options buildOptions, printer *progress.Printer) (*client.SolveResponse, *build.Inputs, error) {
 	if options.invokeConfig != nil && (options.dockerfileName == "-" || options.contextPath == "-") {
 		// stdin must be usable for monitor
 		return nil, nil, errors.Errorf("Dockerfile or context from stdin is not supported with invoke")
@@ -440,7 +440,7 @@ func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *contro
 	var ref string
 	var retErr error
 	var resp *client.SolveResponse
-	var dfmap map[string]string
+	var inputs *build.Inputs
 
 	var f *ioset.SingleForwarder
 	var pr io.ReadCloser
@@ -458,7 +458,7 @@ func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *contro
 		})
 	}
 
-	ref, resp, dfmap, err = c.Build(ctx, *opts, pr, printer)
+	ref, resp, inputs, err = c.Build(ctx, *opts, pr, printer)
 	if err != nil {
 		var be *controllererrors.BuildError
 		if errors.As(err, &be) {
@@ -507,7 +507,7 @@ func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *contro
 		}
 	}
 
-	return resp, dfmap, retErr
+	return resp, inputs, retErr
 }
 
 func printError(err error, printer *progress.Printer) error {
@@ -885,7 +885,7 @@ func printWarnings(w io.Writer, warnings []client.VertexWarning, mode progressui
 	}
 }
 
-func printResult(w io.Writer, f *controllerapi.CallFunc, res map[string]string, target string, dfmap map[string]string) (int, error) {
+func printResult(w io.Writer, f *controllerapi.CallFunc, res map[string]string, target string, inp *build.Inputs) (int, error) {
 	switch f.Name {
 	case "outline":
 		return 0, printValue(w, outline.PrintOutline, outline.SubrequestsOutlineDefinition.Version, f.Format, res)
@@ -912,16 +912,16 @@ func printResult(w io.Writer, f *controllerapi.CallFunc, res map[string]string,
 			fmt.Fprintf(w, "Check complete, %s\n", warningCountMsg)
 		}
 		sourceInfoMap := func(sourceInfo *solverpb.SourceInfo) *solverpb.SourceInfo {
-			if sourceInfo == nil || dfmap == nil {
+			if sourceInfo == nil || inp == nil {
 				return sourceInfo
 			}
 			if target == "" {
 				target = "default"
 			}
 
-			if dfsrcname, ok := dfmap[target+":"+sourceInfo.Filename]; ok {
+			if inp.DockerfileMappingSrc != "" {
 				newSourceInfo := *sourceInfo
-				newSourceInfo.Filename = dfsrcname
+				newSourceInfo.Filename = inp.DockerfileMappingSrc
 				return &newSourceInfo
 			}
 			return sourceInfo
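
sourceInfoMap lets lint output point at the Dockerfile name the user actually supplied: results arrive from the frontend under the in-build name (DockerfileMappingDst, typically "Dockerfile"), and the hook swaps in DockerfileMappingSrc, such as "stdin", "inline", or the real path. A sketch of the hook in isolation, with hypothetical inputs:

	func newSourceInfoMap(inp *build.Inputs) func(*solverpb.SourceInfo) *solverpb.SourceInfo {
		return func(si *solverpb.SourceInfo) *solverpb.SourceInfo {
			if si == nil || inp == nil || inp.DockerfileMappingSrc == "" {
				return si
			}
			out := *si // copy so the original lint result stays untouched
			out.Filename = inp.DockerfileMappingSrc
			return &out
		}
	}
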
@@ -947,7 +947,7 @@ func printResult(w io.Writer, f *controllerapi.CallFunc, res map[string]string,
 			fmt.Fprintln(w)
 		}
 		lintBuf := bytes.NewBuffer(nil)
-		lintResults.PrintErrorTo(lintBuf)
+		lintResults.PrintErrorTo(lintBuf, sourceInfoMap)
 		return 0, errors.New(lintBuf.String())
 	} else if len(lintResults.Warnings) == 0 && f.Format != "json" {
 		fmt.Fprintln(w, "Check complete, no warnings found.")

@@ -34,7 +34,7 @@ const defaultTargetName = "default"
 // NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultHandle,
 // this function returns it in addition to the error (i.e. it does "return nil, res, err"). The caller can
 // inspect the result and debug the cause of that error.
-func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.BuildOptions, inStream io.Reader, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultHandle, map[string]string, error) {
+func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.BuildOptions, inStream io.Reader, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultHandle, *build.Inputs, error) {
 	if in.NoCache && len(in.NoCacheFilter) > 0 {
 		return nil, nil, nil, errors.Errorf("--no-cache and --no-cache-filter cannot currently be used together")
 	}
@@ -188,13 +188,18 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
 		return nil, nil, nil, err
 	}
 
-	resp, res, dockerfileMappings, err := buildTargets(ctx, dockerCli, nodes, map[string]build.Options{defaultTargetName: opts}, progress, generateResult)
+	var inputs *build.Inputs
+	buildOptions := map[string]build.Options{defaultTargetName: opts}
+	resp, res, err := buildTargets(ctx, dockerCli, nodes, buildOptions, progress, generateResult)
 	err = wrapBuildError(err, false)
 	if err != nil {
 		// NOTE: buildTargets can return *build.ResultHandle even on error.
-		return nil, res, dockerfileMappings, err
+		return nil, res, nil, err
 	}
-	return resp, res, dockerfileMappings, nil
+	if i, ok := buildOptions[defaultTargetName]; ok {
+		inputs = &i.Inputs
+	}
+	return resp, res, inputs, nil
 }
 
 // buildTargets runs the specified build and returns the result.
@@ -202,15 +207,14 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
 // NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultHandle,
 // this function returns it in addition to the error (i.e. it does "return nil, res, err"). The caller can
 // inspect the result and debug the cause of that error.
-func buildTargets(ctx context.Context, dockerCli command.Cli, nodes []builder.Node, opts map[string]build.Options, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultHandle, map[string]string, error) {
+func buildTargets(ctx context.Context, dockerCli command.Cli, nodes []builder.Node, opts map[string]build.Options, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultHandle, error) {
 	var res *build.ResultHandle
 	var resp map[string]*client.SolveResponse
 	var err error
-	var dfmappings map[string]string
 	if generateResult {
 		var mu sync.Mutex
 		var idx int
-		resp, dfmappings, err = build.BuildWithResultHandler(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress, func(driverIndex int, gotRes *build.ResultHandle) {
+		resp, err = build.BuildWithResultHandler(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress, func(driverIndex int, gotRes *build.ResultHandle) {
 			mu.Lock()
 			defer mu.Unlock()
 			if res == nil || driverIndex < idx {
@@ -218,12 +222,12 @@ func buildTargets(ctx context.Context, dockerCli command.Cli, nodes []builder.No
 			}
 		})
 	} else {
-		resp, dfmappings, err = build.Build(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress)
+		resp, err = build.Build(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress)
 	}
 	if err != nil {
-		return nil, res, nil, err
+		return nil, res, err
 	}
-	return resp[defaultTargetName], res, dfmappings, err
+	return resp[defaultTargetName], res, err
 }
 
 func wrapBuildError(err error, bake bool) error {
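
Re-reading buildOptions[defaultTargetName] after the build (in RunBuild above) works because the same map object travels through buildTargets into BuildWithResultHandler, which stores the updated Options back under the key (opts[k] = localOpt). Maps are reference-like in Go, so the caller observes the update through its own variable. Sketch:

	func example() {
		buildOptions := map[string]build.Options{"default": {}}
		builderSide := func(m map[string]build.Options) { // stand-in for the builder's write-back
			o := m["default"]
			o.Inputs.DockerfileMappingDst = "Dockerfile"
			m["default"] = o
		}
		builderSide(buildOptions)
		fmt.Println(buildOptions["default"].Inputs.DockerfileMappingDst) // "Dockerfile"
	}
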

@@ -4,13 +4,14 @@ import (
 	"context"
 	"io"
 
+	"github.com/docker/buildx/build"
 	controllerapi "github.com/docker/buildx/controller/pb"
 	"github.com/docker/buildx/util/progress"
 	"github.com/moby/buildkit/client"
 )
 
 type BuildxController interface {
-	Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (ref string, resp *client.SolveResponse, dockerfileMappings map[string]string, err error)
+	Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (ref string, resp *client.SolveResponse, inputs *build.Inputs, err error)
 	// Invoke starts an IO session into the specified process.
 	// If pid doesn't matche to any running processes, it starts a new process with the specified config.
 	// If there is no container running or InvokeConfig.Rollback is speicfied, the process will start in a newly created container.
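
Changing Build on the BuildxController interface forces every implementation touched by this commit (the local controller, the remote client, and the monitor wrapper) to move in lockstep. Compile-time assertions are the usual guard against missing one; a sketch, and these lines are not part of the commit:

	var _ control.BuildxController = (*localController)(nil) // controller/local
	var _ control.BuildxController = (*Client)(nil)          // controller/remote
	var _ control.BuildxController = (*monitor)(nil)         // monitor
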

@@ -42,7 +42,7 @@ type localController struct {
 	buildOnGoing atomic.Bool
 }
 
-func (b *localController) Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, *client.SolveResponse, map[string]string, error) {
+func (b *localController) Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, *client.SolveResponse, *build.Inputs, error) {
 	if !b.buildOnGoing.CompareAndSwap(false, true) {
 		return "", nil, nil, errors.New("build ongoing")
 	}

@@ -8,6 +8,7 @@ import (
 
 	"github.com/containerd/containerd/defaults"
 	"github.com/containerd/containerd/pkg/dialer"
+	"github.com/docker/buildx/build"
 	"github.com/docker/buildx/controller/pb"
 	"github.com/docker/buildx/util/progress"
 	"github.com/moby/buildkit/client"
@@ -113,7 +114,7 @@ func (c *Client) Inspect(ctx context.Context, ref string) (*pb.InspectResponse,
 	return c.client().Inspect(ctx, &pb.InspectRequest{Ref: ref})
 }
 
-func (c *Client) Build(ctx context.Context, options pb.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, *client.SolveResponse, map[string]string, error) {
+func (c *Client) Build(ctx context.Context, options pb.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, *client.SolveResponse, *build.Inputs, error) {
 	ref := identity.NewID()
 	statusChan := make(chan *client.SolveStatus)
 	eg, egCtx := errgroup.WithContext(ctx)

@@ -148,7 +148,7 @@ func serveCmd(dockerCli command.Cli) *cobra.Command {
 	}()
 
 	// prepare server
-	b := NewServer(func(ctx context.Context, options *controllerapi.BuildOptions, stdin io.Reader, progress progress.Writer) (*client.SolveResponse, *build.ResultHandle, map[string]string, error) {
+	b := NewServer(func(ctx context.Context, options *controllerapi.BuildOptions, stdin io.Reader, progress progress.Writer) (*client.SolveResponse, *build.ResultHandle, *build.Inputs, error) {
 		return cbuild.RunBuild(ctx, dockerCli, *options, stdin, progress, true)
 	})
 	defer b.Close()

@@ -19,7 +19,7 @@ import (
 	"golang.org/x/sync/errgroup"
 )
 
-type BuildFunc func(ctx context.Context, options *pb.BuildOptions, stdin io.Reader, progress progress.Writer) (resp *client.SolveResponse, res *build.ResultHandle, dfmappping map[string]string, err error)
+type BuildFunc func(ctx context.Context, options *pb.BuildOptions, stdin io.Reader, progress progress.Writer) (resp *client.SolveResponse, res *build.ResultHandle, inp *build.Inputs, err error)
 
 func NewServer(buildFunc BuildFunc) *Server {
 	return &Server{

@@ -10,6 +10,7 @@ import (
 	"text/tabwriter"
 
 	"github.com/containerd/console"
+	"github.com/docker/buildx/build"
 	"github.com/docker/buildx/controller/control"
 	controllerapi "github.com/docker/buildx/controller/pb"
 	"github.com/docker/buildx/monitor/commands"
@@ -243,7 +244,7 @@ type monitor struct {
 	lastBuildResult *MonitorBuildResult
 }
 
-func (m *monitor) Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (ref string, resp *client.SolveResponse, dockerMappings map[string]string, err error) {
+func (m *monitor) Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (ref string, resp *client.SolveResponse, input *build.Inputs, err error) {
 	ref, resp, _, err = m.BuildxController.Build(ctx, options, in, progress)
 	m.lastBuildResult = &MonitorBuildResult{Resp: resp, Err: err} // Record build result
 	return