mirror of https://github.com/docker/buildx.git
update Build commands to return dockerfile mapping for use in printing rule check warnings
Signed-off-by: Talon Bowler <talon.bowler@docker.com>
This commit is contained in:
parent: db117855da
commit: f1b92e9e6c
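In outline: `loadInputs` now reports where the Dockerfile actually came from, `Build`/`BuildWithResultHandler` aggregate that into a `dockerfileMappings` map keyed by `target + ":" + dst`, and `printResult` uses the map to rewrite filenames in `--call=check` warnings. A minimal illustration of the convention, reconstructed from the hunks below (example values are invented):

// Sketch only, not part of the commit. Shows the key convention of the
// new dockerfileMappings map ("<target>:<dst>" -> "<src>").
package main

import "fmt"

func main() {
	// Populated in BuildWithResultHandler as:
	//   dockerfileMappings[k+":"+dockerfileMapping.Dst] = dockerfileMapping.Src
	dfmap := map[string]string{
		"default:Dockerfile": "subdir/Dockerfile", // example entry
	}

	// Looked up in printResult's sourceInfoMap closure as:
	//   dfmap[target+":"+sourceInfo.Filename]
	target, filename := "default", "Dockerfile"
	if src, ok := dfmap[target+":"+filename]; ok {
		filename = src
	}
	fmt.Println(filename) // prints "subdir/Dockerfile"
}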
@@ -147,18 +147,19 @@ func toRepoOnly(in string) (string, error) {
 	return strings.Join(out, ","), nil
 }
 
-func Build(ctx context.Context, nodes []builder.Node, opt map[string]Options, docker *dockerutil.Client, configDir string, w progress.Writer) (resp map[string]*client.SolveResponse, err error) {
+func Build(ctx context.Context, nodes []builder.Node, opt map[string]Options, docker *dockerutil.Client, configDir string, w progress.Writer) (resp map[string]*client.SolveResponse, dockerfileMappings map[string]string, err error) {
 	return BuildWithResultHandler(ctx, nodes, opt, docker, configDir, w, nil)
 }
 
-func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[string]Options, docker *dockerutil.Client, configDir string, w progress.Writer, resultHandleFunc func(driverIndex int, rCtx *ResultHandle)) (resp map[string]*client.SolveResponse, err error) {
+func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[string]Options, docker *dockerutil.Client, configDir string, w progress.Writer, resultHandleFunc func(driverIndex int, rCtx *ResultHandle)) (resp map[string]*client.SolveResponse, dockerfileMappings map[string]string, err error) {
+	dockerfileMappings = map[string]string{}
 	if len(nodes) == 0 {
-		return nil, errors.Errorf("driver required for build")
+		return nil, nil, errors.Errorf("driver required for build")
 	}
 
 	nodes, err = filterAvailableNodes(nodes)
 	if err != nil {
-		return nil, errors.Wrapf(err, "no valid drivers found")
+		return nil, nil, errors.Wrapf(err, "no valid drivers found")
 	}
 
 	var noMobyDriver *driver.DriverHandle
@@ -194,7 +195,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 
 	drivers, err := resolveDrivers(ctx, nodes, opt, w)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 
 	defers := make([]func(), 0, 2)
@@ -227,14 +228,14 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 			opt.Platforms = np.platforms
 			gatewayOpts, err := np.BuildOpts(ctx)
 			if err != nil {
-				return nil, err
+				return nil, nil, err
 			}
-			so, release, err := toSolveOpt(ctx, np.Node(), multiDriver, opt, gatewayOpts, configDir, w, docker)
+			so, release, dockerfileMapping, err := toSolveOpt(ctx, np.Node(), multiDriver, opt, gatewayOpts, configDir, w, docker)
 			if err != nil {
-				return nil, err
+				return nil, nil, err
 			}
 			if err := saveLocalState(so, k, opt, np.Node(), configDir); err != nil {
-				return nil, err
+				return nil, nil, err
 			}
 			addGitAttrs(so)
 			defers = append(defers, release)
@@ -242,6 +243,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 				resolvedNode: np,
 				so: so,
 			})
+			dockerfileMappings[k+":"+dockerfileMapping.Dst] = dockerfileMapping.Src
 		}
 		reqForNodes[k] = reqn
 		for _, at := range opt.Session {
@@ -260,7 +262,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 			for _, e := range np.so.Exports {
 				if e.Type == "moby" {
 					if ok, _ := strconv.ParseBool(e.Attrs["push"]); ok {
-						return nil, errors.Errorf("multi-node push can't currently be performed with the docker driver, please switch to a different driver")
+						return nil, nil, errors.Errorf("multi-node push can't currently be performed with the docker driver, please switch to a different driver")
 					}
 				}
 			}
@@ -278,7 +280,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 				k2 := strings.TrimPrefix(v, "target:")
 				dps2, ok := drivers[k2]
 				if !ok {
-					return nil, errors.Errorf("failed to find target %s for context %s", k2, strings.TrimPrefix(k, "context:")) // should be validated before already
+					return nil, nil, errors.Errorf("failed to find target %s for context %s", k2, strings.TrimPrefix(k, "context:")) // should be validated before already
 				}
 				var found bool
 				for _, dp2 := range dps2 {
@@ -288,7 +290,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 					}
 				}
 				if !found {
-					return nil, errors.Errorf("failed to use %s as context %s for %s because targets build with different drivers", k2, strings.TrimPrefix(k, "context:"), name)
+					return nil, nil, errors.Errorf("failed to use %s as context %s for %s because targets build with different drivers", k2, strings.TrimPrefix(k, "context:"), name)
 				}
 			}
 		}
@@ -297,7 +299,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 
 	sharedSessions, err := detectSharedMounts(ctx, reqForNodes)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 	sharedSessionsWG := map[string]*sync.WaitGroup{}
 
@@ -702,15 +704,15 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 			return nil
 		}(k)
 		if err != nil {
-			return nil, err
+			return nil, nil, err
 		}
 	}
 
 	if err := eg.Wait(); err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 
-	return resp, nil
+	return resp, dockerfileMappings, nil
 }
 
 func extractIndexAnnotations(exports []client.ExportEntry) (map[exptypes.AnnotationKey]string, error) {
build/opt.go (87 changed lines)
@@ -35,7 +35,7 @@ import (
 	"github.com/tonistiigi/fsutil"
 )
 
-func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Options, bopts gateway.BuildOpts, configDir string, pw progress.Writer, docker *dockerutil.Client) (_ *client.SolveOpt, release func(), err error) {
+func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Options, bopts gateway.BuildOpts, configDir string, pw progress.Writer, docker *dockerutil.Client) (_ *client.SolveOpt, release func(), dockerfileMapping *DockerfileMapping, err error) {
 	nodeDriver := node.Driver
 	defers := make([]func(), 0, 2)
 	releaseF := func() {
@@ -62,7 +62,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 
 	for _, e := range opt.CacheTo {
 		if e.Type != "inline" && !nodeDriver.Features(ctx)[driver.CacheExport] {
-			return nil, nil, notSupported(driver.CacheExport, nodeDriver, "https://docs.docker.com/go/build-cache-backends/")
+			return nil, nil, nil, notSupported(driver.CacheExport, nodeDriver, "https://docs.docker.com/go/build-cache-backends/")
 		}
 	}
 
@@ -131,9 +131,9 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 	if len(attests) > 0 {
 		if !supportAttestations {
 			if !nodeDriver.Features(ctx)[driver.MultiPlatform] {
-				return nil, nil, notSupported("Attestation", nodeDriver, "https://docs.docker.com/go/attestations/")
+				return nil, nil, nil, notSupported("Attestation", nodeDriver, "https://docs.docker.com/go/attestations/")
 			}
-			return nil, nil, errors.Errorf("Attestations are not supported by the current BuildKit daemon")
+			return nil, nil, nil, errors.Errorf("Attestations are not supported by the current BuildKit daemon")
 		}
 		for k, v := range attests {
 			so.FrontendAttrs["attest:"+k] = v
@@ -146,7 +146,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 		if v, ok := os.LookupEnv(noAttestEnv); ok {
 			noProv, err = strconv.ParseBool(v)
 			if err != nil {
-				return nil, nil, errors.Wrap(err, "invalid "+noAttestEnv)
+				return nil, nil, nil, errors.Wrap(err, "invalid "+noAttestEnv)
 			}
 		}
 		if !noProv {
@@ -169,7 +169,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 		}
 	default:
 		if err := bopts.LLBCaps.Supports(pb.CapMultipleExporters); err != nil {
-			return nil, nil, errors.Errorf("multiple outputs currently unsupported by the current BuildKit daemon, please upgrade to version v0.13+ or use a single output")
+			return nil, nil, nil, errors.Errorf("multiple outputs currently unsupported by the current BuildKit daemon, please upgrade to version v0.13+ or use a single output")
 		}
 	}
 
@@ -179,7 +179,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 	for i, tag := range opt.Tags {
 		ref, err := reference.Parse(tag)
 		if err != nil {
-			return nil, nil, errors.Wrapf(err, "invalid tag %q", tag)
+			return nil, nil, nil, errors.Wrapf(err, "invalid tag %q", tag)
 		}
 		tags[i] = ref.String()
 	}
@@ -193,7 +193,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 	for _, e := range opt.Exports {
 		if e.Type == "image" && e.Attrs["name"] == "" && e.Attrs["push"] != "" {
 			if ok, _ := strconv.ParseBool(e.Attrs["push"]); ok {
-				return nil, nil, errors.Errorf("tag is needed when pushing to registry")
+				return nil, nil, nil, errors.Errorf("tag is needed when pushing to registry")
 			}
 		}
 	}
@@ -211,7 +211,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 	// set up exporters
 	for i, e := range opt.Exports {
 		if e.Type == "oci" && !nodeDriver.Features(ctx)[driver.OCIExporter] {
-			return nil, nil, notSupported(driver.OCIExporter, nodeDriver, "https://docs.docker.com/go/build-exporters/")
+			return nil, nil, nil, notSupported(driver.OCIExporter, nodeDriver, "https://docs.docker.com/go/build-exporters/")
 		}
 		if e.Type == "docker" {
 			features := docker.Features(ctx, e.Attrs["context"])
@@ -221,9 +221,9 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 				opt.Exports[i].Type = "oci"
 			} else if len(opt.Platforms) > 1 || len(attests) > 0 {
 				if e.Output != nil {
-					return nil, nil, errors.Errorf("docker exporter does not support exporting manifest lists, use the oci exporter instead")
+					return nil, nil, nil, errors.Errorf("docker exporter does not support exporting manifest lists, use the oci exporter instead")
 				}
-				return nil, nil, errors.Errorf("docker exporter does not currently support exporting manifest lists")
+				return nil, nil, nil, errors.Errorf("docker exporter does not currently support exporting manifest lists")
 			}
 			if e.Output == nil {
 				if nodeDriver.IsMobyDriver() {
@@ -231,7 +231,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 				} else {
 					w, cancel, err := docker.LoadImage(ctx, e.Attrs["context"], pw)
 					if err != nil {
-						return nil, nil, err
+						return nil, nil, nil, err
 					}
 					defers = append(defers, cancel)
 					opt.Exports[i].Output = func(_ map[string]string) (io.WriteCloser, error) {
@@ -239,7 +239,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 					}
 				}
 			} else if !nodeDriver.Features(ctx)[driver.DockerExporter] {
-				return nil, nil, notSupported(driver.DockerExporter, nodeDriver, "https://docs.docker.com/go/build-exporters/")
+				return nil, nil, nil, notSupported(driver.DockerExporter, nodeDriver, "https://docs.docker.com/go/build-exporters/")
 			}
 		}
 		if e.Type == "image" && nodeDriver.IsMobyDriver() {
@@ -247,7 +247,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 			if e.Attrs["push"] != "" {
 				if ok, _ := strconv.ParseBool(e.Attrs["push"]); ok {
 					if ok, _ := strconv.ParseBool(e.Attrs["push-by-digest"]); ok {
-						return nil, nil, errors.Errorf("push-by-digest is currently not implemented for docker driver, please create a new builder instance")
+						return nil, nil, nil, errors.Errorf("push-by-digest is currently not implemented for docker driver, please create a new builder instance")
 					}
 				}
 			}
@@ -263,9 +263,9 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 	so.Exports = opt.Exports
 	so.Session = slices.Clone(opt.Session)
 
-	releaseLoad, err := loadInputs(ctx, nodeDriver, opt.Inputs, pw, &so)
+	releaseLoad, dockerfileMapping, err := loadInputs(ctx, nodeDriver, opt.Inputs, pw, &so)
 	if err != nil {
-		return nil, nil, err
+		return nil, nil, nil, err
 	}
 	defers = append(defers, releaseLoad)
 
@@ -309,7 +309,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 			pp[i] = platforms.Format(p)
 		}
 		if len(pp) > 1 && !nodeDriver.Features(ctx)[driver.MultiPlatform] {
-			return nil, nil, notSupported(driver.MultiPlatform, nodeDriver, "https://docs.docker.com/go/build-multi-platform/")
+			return nil, nil, nil, notSupported(driver.MultiPlatform, nodeDriver, "https://docs.docker.com/go/build-multi-platform/")
 		}
 		so.FrontendAttrs["platform"] = strings.Join(pp, ",")
 	}
@@ -323,13 +323,13 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 		so.FrontendAttrs["force-network-mode"] = opt.NetworkMode
 	case "", "default":
 	default:
-		return nil, nil, errors.Errorf("network mode %q not supported by buildkit - you can define a custom network for your builder using the network driver-opt in buildx create", opt.NetworkMode)
+		return nil, nil, nil, errors.Errorf("network mode %q not supported by buildkit - you can define a custom network for your builder using the network driver-opt in buildx create", opt.NetworkMode)
 	}
 
 	// setup extrahosts
 	extraHosts, err := toBuildkitExtraHosts(ctx, opt.ExtraHosts, nodeDriver)
 	if err != nil {
-		return nil, nil, err
+		return nil, nil, nil, err
 	}
 	if len(extraHosts) > 0 {
 		so.FrontendAttrs["add-hosts"] = extraHosts
@@ -343,7 +343,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 	// setup ulimits
 	ulimits, err := toBuildkitUlimits(opt.Ulimits)
 	if err != nil {
-		return nil, nil, err
+		return nil, nil, nil, err
 	} else if len(ulimits) > 0 {
 		so.FrontendAttrs["ulimit"] = ulimits
 	}
@@ -353,12 +353,17 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
 		so.Internal = true
 	}
 
-	return &so, releaseF, nil
+	return &so, releaseF, dockerfileMapping, nil
 }
 
-func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw progress.Writer, target *client.SolveOpt) (func(), error) {
+type DockerfileMapping struct {
+	Src string
+	Dst string
+}
+
+func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw progress.Writer, target *client.SolveOpt) (func(), *DockerfileMapping, error) {
 	if inp.ContextPath == "" {
-		return nil, errors.New("please specify build context (e.g. \".\" for the current directory)")
+		return nil, nil, errors.New("please specify build context (e.g. \".\" for the current directory)")
 	}
 
 	// TODO: handle stdin, symlinks, remote contexts, check files exist
@@ -368,8 +373,14 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw prog
 		dockerfileReader io.ReadCloser
 		dockerfileDir string
 		dockerfileName = inp.DockerfilePath
+		dockerfileSrcName = inp.DockerfilePath
 		toRemove []string
 	)
+	if inp.DockerfilePath == "-" {
+		dockerfileSrcName = "stdin"
+	} else if inp.DockerfilePath == "" {
+		dockerfileSrcName = filepath.Join(inp.ContextPath, "Dockerfile")
+	}
 
 	switch {
 	case inp.ContextState != nil:
@@ -380,13 +391,13 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw prog
 		target.FrontendInputs["dockerfile"] = *inp.ContextState
 	case inp.ContextPath == "-":
 		if inp.DockerfilePath == "-" {
-			return nil, errors.Errorf("invalid argument: can't use stdin for both build context and dockerfile")
+			return nil, nil, errors.Errorf("invalid argument: can't use stdin for both build context and dockerfile")
 		}
 
 		rc := inp.InStream.NewReadCloser()
 		magic, err := inp.InStream.Peek(archiveHeaderSize * 2)
 		if err != nil && err != io.EOF {
-			return nil, errors.Wrap(err, "failed to peek context header from STDIN")
+			return nil, nil, errors.Wrap(err, "failed to peek context header from STDIN")
 		}
 		if !(err == io.EOF && len(magic) == 0) {
 			if isArchive(magic) {
@@ -396,20 +407,20 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw prog
 				target.Session = append(target.Session, up)
 			} else {
 				if inp.DockerfilePath != "" {
-					return nil, errors.Errorf("ambiguous Dockerfile source: both stdin and flag correspond to Dockerfiles")
+					return nil, nil, errors.Errorf("ambiguous Dockerfile source: both stdin and flag correspond to Dockerfiles")
 				}
 				// stdin is dockerfile
 				dockerfileReader = rc
 				inp.ContextPath, _ = os.MkdirTemp("", "empty-dir")
 				toRemove = append(toRemove, inp.ContextPath)
 				if err := setLocalMount("context", inp.ContextPath, target); err != nil {
-					return nil, err
+					return nil, nil, err
 				}
 			}
 		}
 	case osutil.IsLocalDir(inp.ContextPath):
 		if err := setLocalMount("context", inp.ContextPath, target); err != nil {
-			return nil, err
+			return nil, nil, err
 		}
 		sharedKey := inp.ContextPath
 		if p, err := filepath.Abs(sharedKey); err == nil {
@@ -435,7 +446,7 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw prog
 		}
 		target.FrontendAttrs["context"] = inp.ContextPath
 	default:
-		return nil, errors.Errorf("unable to prepare context: path %q not found", inp.ContextPath)
+		return nil, nil, errors.Errorf("unable to prepare context: path %q not found", inp.ContextPath)
 	}
 
 	if inp.DockerfileInline != "" {
@@ -445,7 +456,7 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw prog
 	if dockerfileReader != nil {
 		dockerfileDir, err = createTempDockerfile(dockerfileReader, inp.InStream)
 		if err != nil {
-			return nil, err
+			return nil, nil, err
 		}
 		toRemove = append(toRemove, dockerfileDir)
 		dockerfileName = "Dockerfile"
@@ -454,7 +465,7 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw prog
 	if isHTTPURL(inp.DockerfilePath) {
 		dockerfileDir, err = createTempDockerfileFromURL(ctx, d, inp.DockerfilePath, pw)
 		if err != nil {
-			return nil, err
+			return nil, nil, err
 		}
 		toRemove = append(toRemove, dockerfileDir)
 		dockerfileName = "Dockerfile"
@@ -468,7 +479,7 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw prog
 
 	if dockerfileDir != "" {
 		if err := setLocalMount("dockerfile", dockerfileDir, target); err != nil {
-			return nil, err
+			return nil, nil, err
 		}
 		dockerfileName = handleLowercaseDockerfile(dockerfileDir, dockerfileName)
 	}
@@ -502,12 +513,12 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw prog
 			if !hasDigest {
 				dig, err = resolveDigest(localPath, tag)
 				if err != nil {
-					return nil, errors.Wrapf(err, "oci-layout reference %q could not be resolved", v.Path)
+					return nil, nil, errors.Wrapf(err, "oci-layout reference %q could not be resolved", v.Path)
 				}
 			}
 			store, err := local.NewStore(localPath)
 			if err != nil {
-				return nil, errors.Wrapf(err, "invalid store at %s", localPath)
+				return nil, nil, errors.Wrapf(err, "invalid store at %s", localPath)
 			}
 			storeName := identity.NewID()
 			if target.OCIStores == nil {
@@ -520,17 +531,17 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw prog
 		}
 		st, err := os.Stat(v.Path)
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to get build context %v", k)
+			return nil, nil, errors.Wrapf(err, "failed to get build context %v", k)
 		}
 		if !st.IsDir() {
-			return nil, errors.Wrapf(syscall.ENOTDIR, "failed to get build context path %v", v)
+			return nil, nil, errors.Wrapf(syscall.ENOTDIR, "failed to get build context path %v", v)
 		}
 		localName := k
 		if k == "context" || k == "dockerfile" {
			localName = "_" + k // underscore to avoid collisions
 		}
 		if err := setLocalMount(localName, v.Path, target); err != nil {
-			return nil, err
+			return nil, nil, err
 		}
 		target.FrontendAttrs["context:"+k] = "local:" + localName
 	}
@@ -540,7 +551,7 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw prog
 			_ = os.RemoveAll(dir)
 		}
 	}
-	return release, nil
+	return release, &DockerfileMapping{Src: dockerfileSrcName, Dst: dockerfileName}, nil
 }
 
 func resolveDigest(localPath, tag string) (dig string, _ error) {
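The `DockerfileMapping` returned by `loadInputs` pairs the user-facing path (`Src`) with the name BuildKit will report (`Dst`). A runnable sketch of how `Src` is derived, per the hunk above (a local-directory context is assumed; the helper name is illustrative, not the commit's):

// Sketch only: mirrors the dockerfileSrcName logic added to loadInputs.
package main

import (
	"fmt"
	"path/filepath"
)

func dockerfileSrcName(contextPath, dockerfilePath string) string {
	switch dockerfilePath {
	case "-":
		return "stdin" // Dockerfile piped on stdin
	case "":
		return filepath.Join(contextPath, "Dockerfile") // default location
	default:
		return dockerfilePath // path given with -f
	}
}

func main() {
	fmt.Println(dockerfileSrcName(".", ""))               // Dockerfile
	fmt.Println(dockerfileSrcName("app", ""))             // app/Dockerfile
	fmt.Println(dockerfileSrcName(".", "-"))              // stdin
	fmt.Println(dockerfileSrcName(".", "sub/Dockerfile")) // sub/Dockerfile
}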
@@ -265,7 +265,7 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
 	}
 
 	done := timeBuildCommand(mp, attributes)
-	resp, retErr := build.Build(ctx, nodes, bo, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), printer)
+	resp, dfmap, retErr := build.Build(ctx, nodes, bo, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), printer)
 	if err := printer.Wait(); retErr == nil {
 		retErr = err
 	}
@@ -335,7 +335,7 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
 		if callFormatJSON {
 			jsonResults[name] = map[string]any{}
 			buf := &bytes.Buffer{}
-			if code, err := printResult(buf, pf, res); err != nil {
+			if code, err := printResult(buf, pf, res, name, dfmap); err != nil {
 				jsonResults[name]["error"] = err.Error()
 				exitCode = 1
 			} else if code != 0 && exitCode == 0 {
@@ -361,7 +361,7 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
 		}
 
 		fmt.Fprintln(dockerCli.Out())
-		if code, err := printResult(dockerCli.Out(), pf, res); err != nil {
+		if code, err := printResult(dockerCli.Out(), pf, res, name, dfmap); err != nil {
 			fmt.Fprintf(dockerCli.Out(), "error: %v\n", err)
 			exitCode = 1
 		} else if code != 0 && exitCode == 0 {
@@ -49,6 +49,7 @@ import (
 	"github.com/moby/buildkit/frontend/subrequests/outline"
 	"github.com/moby/buildkit/frontend/subrequests/targets"
 	"github.com/moby/buildkit/solver/errdefs"
+	solverpb "github.com/moby/buildkit/solver/pb"
 	"github.com/moby/buildkit/util/grpcerrors"
 	"github.com/moby/buildkit/util/progress/progressui"
 	"github.com/morikuni/aec"
@@ -347,10 +348,11 @@ func runBuild(ctx context.Context, dockerCli command.Cli, options buildOptions)
 	done := timeBuildCommand(mp, attributes)
 	var resp *client.SolveResponse
 	var retErr error
+	var dfmap map[string]string
 	if confutil.IsExperimental() {
-		resp, retErr = runControllerBuild(ctx, dockerCli, opts, options, printer)
+		resp, dfmap, retErr = runControllerBuild(ctx, dockerCli, opts, options, printer)
 	} else {
-		resp, retErr = runBasicBuild(ctx, dockerCli, opts, printer)
+		resp, dfmap, retErr = runBasicBuild(ctx, dockerCli, opts, printer)
 	}
 
 	if err := printer.Wait(); retErr == nil {
@@ -387,7 +389,7 @@ func runBuild(ctx context.Context, dockerCli command.Cli, options buildOptions)
 		}
 	}
 	if opts.CallFunc != nil {
-		if exitcode, err := printResult(dockerCli.Out(), opts.CallFunc, resp.ExporterResponse); err != nil {
+		if exitcode, err := printResult(dockerCli.Out(), opts.CallFunc, resp.ExporterResponse, options.target, dfmap); err != nil {
 			return err
 		} else if exitcode != 0 {
 			os.Exit(exitcode)
@@ -405,22 +407,22 @@ func getImageID(resp map[string]string) string {
 	return dgst
 }
 
-func runBasicBuild(ctx context.Context, dockerCli command.Cli, opts *controllerapi.BuildOptions, printer *progress.Printer) (*client.SolveResponse, error) {
-	resp, res, err := cbuild.RunBuild(ctx, dockerCli, *opts, dockerCli.In(), printer, false)
+func runBasicBuild(ctx context.Context, dockerCli command.Cli, opts *controllerapi.BuildOptions, printer *progress.Printer) (*client.SolveResponse, map[string]string, error) {
+	resp, res, dfmap, err := cbuild.RunBuild(ctx, dockerCli, *opts, dockerCli.In(), printer, false)
 	if res != nil {
 		res.Done()
 	}
-	return resp, err
+	return resp, dfmap, err
 }
 
-func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *controllerapi.BuildOptions, options buildOptions, printer *progress.Printer) (*client.SolveResponse, error) {
+func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *controllerapi.BuildOptions, options buildOptions, printer *progress.Printer) (*client.SolveResponse, map[string]string, error) {
 	if options.invokeConfig != nil && (options.dockerfileName == "-" || options.contextPath == "-") {
 		// stdin must be usable for monitor
-		return nil, errors.Errorf("Dockerfile or context from stdin is not supported with invoke")
+		return nil, nil, errors.Errorf("Dockerfile or context from stdin is not supported with invoke")
 	}
 	c, err := controller.NewController(ctx, options.ControlOptions, dockerCli, printer)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 	defer func() {
 		if err := c.Close(); err != nil {
@@ -432,12 +434,13 @@ func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *contro
 	// so we need to resolve paths to abosolute ones in the client.
 	opts, err = controllerapi.ResolveOptionPaths(opts)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 
 	var ref string
 	var retErr error
 	var resp *client.SolveResponse
+	var dfmap map[string]string
 
 	var f *ioset.SingleForwarder
 	var pr io.ReadCloser
@@ -455,7 +458,7 @@ func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *contro
 		})
 	}
 
-	ref, resp, err = c.Build(ctx, *opts, pr, printer)
+	ref, resp, dfmap, err = c.Build(ctx, *opts, pr, printer)
 	if err != nil {
 		var be *controllererrors.BuildError
 		if errors.As(err, &be) {
@@ -463,7 +466,7 @@ func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *contro
 			retErr = err
 			// We can proceed to monitor
 		} else {
-			return nil, errors.Wrapf(err, "failed to build")
+			return nil, nil, errors.Wrapf(err, "failed to build")
 		}
 	}
 
@@ -504,7 +507,7 @@ func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *contro
 		}
 	}
 
-	return resp, retErr
+	return resp, dfmap, retErr
 }
 
 func printError(err error, printer *progress.Printer) error {
@@ -882,7 +885,7 @@ func printWarnings(w io.Writer, warnings []client.VertexWarning, mode progressui
 	}
 }
 
-func printResult(w io.Writer, f *controllerapi.CallFunc, res map[string]string) (int, error) {
+func printResult(w io.Writer, f *controllerapi.CallFunc, res map[string]string, target string, dfmap map[string]string) (int, error) {
 	switch f.Name {
 	case "outline":
 		return 0, printValue(w, outline.PrintOutline, outline.SubrequestsOutlineDefinition.Version, f.Format, res)
@@ -908,8 +911,27 @@ func printResult(w io.Writer, f *controllerapi.CallFunc, res map[string]string)
 		}
 		fmt.Fprintf(w, "Check complete, %s\n", warningCountMsg)
 	}
+	sourceInfoMap := func(sourceInfo *solverpb.SourceInfo) *solverpb.SourceInfo {
+		if sourceInfo == nil || dfmap == nil {
+			return sourceInfo
+		}
+		if target == "" {
+			target = "default"
+		}
 
-	err := printValue(w, printLintViolationsWrapper, lint.SubrequestLintDefinition.Version, f.Format, res)
+		if dfsrcname, ok := dfmap[target+":"+sourceInfo.Filename]; ok {
+			newSourceInfo := *sourceInfo
+			newSourceInfo.Filename = dfsrcname
+			return &newSourceInfo
+		}
+		return sourceInfo
+	}
+
+	printLintWarnings := func(dt []byte, w io.Writer) error {
+		return lintResults.PrintTo(w, sourceInfoMap)
+	}
+
+	err := printValue(w, printLintWarnings, lint.SubrequestLintDefinition.Version, f.Format, res)
 	if err != nil {
 		return 0, err
 	}
@@ -924,13 +946,8 @@ func printResult(w io.Writer, f *controllerapi.CallFunc, res map[string]string)
 		if f.Format != "json" && len(lintResults.Warnings) > 0 {
 			fmt.Fprintln(w)
 		}
-		lintBuf := bytes.NewBuffer([]byte(lintResults.Error.Message + "\n"))
-		sourceInfo := lintResults.Sources[lintResults.Error.Location.SourceIndex]
-		source := errdefs.Source{
-			Info: sourceInfo,
-			Ranges: lintResults.Error.Location.Ranges,
-		}
-		source.Print(lintBuf)
+		lintBuf := bytes.NewBuffer(nil)
+		lintResults.PrintErrorTo(lintBuf)
 		return 0, errors.New(lintBuf.String())
 	} else if len(lintResults.Warnings) == 0 && f.Format != "json" {
 		fmt.Fprintln(w, "Check complete, no warnings found.")
@@ -968,11 +985,6 @@ func printValue(w io.Writer, printer callFunc, version string, format string, re
 	return printer([]byte(res["result.json"]), w)
 }
 
-// FIXME: remove once https://github.com/docker/buildx/pull/2672 is sorted
-func printLintViolationsWrapper(dt []byte, w io.Writer) error {
-	return lint.PrintLintViolations(dt, w, nil)
-}
-
 type invokeConfig struct {
 	controllerapi.InvokeConfig
 	onFlag string
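For readers skimming the `printResult` hunk above: the remapping happens in a `SourceInfo`-mapping closure handed to `lintResults.PrintTo`, which copies the struct rather than mutating it. A standalone sketch of that pattern (simplified stand-in type, not the commit's `*solverpb.SourceInfo`):

// Sketch only: the copy-then-rewrite filename mapping used by printResult.
package main

import "fmt"

type SourceInfo struct{ Filename string }

func makeSourceInfoMap(target string, dfmap map[string]string) func(*SourceInfo) *SourceInfo {
	return func(si *SourceInfo) *SourceInfo {
		if si == nil || dfmap == nil {
			return si
		}
		if target == "" {
			target = "default" // single-target builds report under "default"
		}
		if src, ok := dfmap[target+":"+si.Filename]; ok {
			cp := *si // copy, then rewrite the copy
			cp.Filename = src
			return &cp
		}
		return si
	}
}

func main() {
	remap := makeSourceInfoMap("", map[string]string{"default:Dockerfile": "subdir/Dockerfile"})
	fmt.Println(remap(&SourceInfo{Filename: "Dockerfile"}).Filename) // subdir/Dockerfile
}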
@@ -34,9 +34,9 @@ const defaultTargetName = "default"
 // NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultHandle,
 // this function returns it in addition to the error (i.e. it does "return nil, res, err"). The caller can
 // inspect the result and debug the cause of that error.
-func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.BuildOptions, inStream io.Reader, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultHandle, error) {
+func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.BuildOptions, inStream io.Reader, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultHandle, map[string]string, error) {
 	if in.NoCache && len(in.NoCacheFilter) > 0 {
-		return nil, nil, errors.Errorf("--no-cache and --no-cache-filter cannot currently be used together")
+		return nil, nil, nil, errors.Errorf("--no-cache and --no-cache-filter cannot currently be used together")
 	}
 
 	contexts := map[string]build.NamedContext{}
@@ -70,7 +70,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
 
 	platforms, err := platformutil.Parse(in.Platforms)
 	if err != nil {
-		return nil, nil, err
+		return nil, nil, nil, err
 	}
 	opts.Platforms = platforms
 
@@ -79,7 +79,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
 
 	secrets, err := controllerapi.CreateSecrets(in.Secrets)
 	if err != nil {
-		return nil, nil, err
+		return nil, nil, nil, err
 	}
 	opts.Session = append(opts.Session, secrets)
 
@@ -89,13 +89,13 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
 	}
 	ssh, err := controllerapi.CreateSSH(sshSpecs)
 	if err != nil {
-		return nil, nil, err
+		return nil, nil, nil, err
 	}
 	opts.Session = append(opts.Session, ssh)
 
 	outputs, err := controllerapi.CreateExports(in.Exports)
 	if err != nil {
-		return nil, nil, err
+		return nil, nil, nil, err
 	}
 	if in.ExportPush {
 		var pushUsed bool
@@ -134,7 +134,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
 
 	annotations, err := buildflags.ParseAnnotations(in.Annotations)
 	if err != nil {
-		return nil, nil, errors.Wrap(err, "parse annotations")
+		return nil, nil, nil, errors.Wrap(err, "parse annotations")
 	}
 
 	for _, o := range outputs {
@@ -154,7 +154,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
 
 	allow, err := buildflags.ParseEntitlements(in.Allow)
 	if err != nil {
-		return nil, nil, err
+		return nil, nil, nil, err
 	}
 	opts.Allow = allow
 
@@ -178,23 +178,23 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
 		builder.WithContextPathHash(contextPathHash),
 	)
 	if err != nil {
-		return nil, nil, err
+		return nil, nil, nil, err
 	}
 	if err = updateLastActivity(dockerCli, b.NodeGroup); err != nil {
-		return nil, nil, errors.Wrapf(err, "failed to update builder last activity time")
+		return nil, nil, nil, errors.Wrapf(err, "failed to update builder last activity time")
 	}
 	nodes, err := b.LoadNodes(ctx)
 	if err != nil {
-		return nil, nil, err
+		return nil, nil, nil, err
 	}
 
-	resp, res, err := buildTargets(ctx, dockerCli, nodes, map[string]build.Options{defaultTargetName: opts}, progress, generateResult)
+	resp, res, dockerfileMappings, err := buildTargets(ctx, dockerCli, nodes, map[string]build.Options{defaultTargetName: opts}, progress, generateResult)
 	err = wrapBuildError(err, false)
 	if err != nil {
 		// NOTE: buildTargets can return *build.ResultHandle even on error.
-		return nil, res, err
+		return nil, res, dockerfileMappings, err
 	}
-	return resp, res, nil
+	return resp, res, dockerfileMappings, nil
 }
 
 // buildTargets runs the specified build and returns the result.
@@ -202,14 +202,15 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
 // NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultHandle,
 // this function returns it in addition to the error (i.e. it does "return nil, res, err"). The caller can
 // inspect the result and debug the cause of that error.
-func buildTargets(ctx context.Context, dockerCli command.Cli, nodes []builder.Node, opts map[string]build.Options, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultHandle, error) {
+func buildTargets(ctx context.Context, dockerCli command.Cli, nodes []builder.Node, opts map[string]build.Options, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultHandle, map[string]string, error) {
 	var res *build.ResultHandle
 	var resp map[string]*client.SolveResponse
 	var err error
+	var dfmappings map[string]string
 	if generateResult {
 		var mu sync.Mutex
 		var idx int
-		resp, err = build.BuildWithResultHandler(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress, func(driverIndex int, gotRes *build.ResultHandle) {
+		resp, dfmappings, err = build.BuildWithResultHandler(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress, func(driverIndex int, gotRes *build.ResultHandle) {
 			mu.Lock()
 			defer mu.Unlock()
 			if res == nil || driverIndex < idx {
@@ -217,12 +218,12 @@ func buildTargets(ctx context.Context, dockerCli command.Cli, nodes []builder.No
 			}
 		})
 	} else {
-		resp, err = build.Build(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress)
+		resp, dfmappings, err = build.Build(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress)
 	}
 	if err != nil {
-		return nil, res, err
+		return nil, res, nil, err
 	}
-	return resp[defaultTargetName], res, err
+	return resp[defaultTargetName], res, dfmappings, err
 }
 
 func wrapBuildError(err error, bake bool) error {
@@ -10,7 +10,7 @@ import (
 )
 
 type BuildxController interface {
-	Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (ref string, resp *client.SolveResponse, err error)
+	Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (ref string, resp *client.SolveResponse, dockerfileMappings map[string]string, err error)
 	// Invoke starts an IO session into the specified process.
 	// If pid doesn't matche to any running processes, it starts a new process with the specified config.
 	// If there is no container running or InvokeConfig.Rollback is speicfied, the process will start in a newly created container.
@@ -42,13 +42,13 @@ type localController struct {
 	buildOnGoing atomic.Bool
 }
 
-func (b *localController) Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, *client.SolveResponse, error) {
+func (b *localController) Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, *client.SolveResponse, map[string]string, error) {
 	if !b.buildOnGoing.CompareAndSwap(false, true) {
-		return "", nil, errors.New("build ongoing")
+		return "", nil, nil, errors.New("build ongoing")
 	}
 	defer b.buildOnGoing.Store(false)
 
-	resp, res, buildErr := cbuild.RunBuild(ctx, b.dockerCli, options, in, progress, true)
+	resp, res, dockerfileMappings, buildErr := cbuild.RunBuild(ctx, b.dockerCli, options, in, progress, true)
 	// NOTE: RunBuild can return *build.ResultHandle even on error.
 	if res != nil {
 		b.buildConfig = buildConfig{
@@ -60,9 +60,9 @@ func (b *localController) Build(ctx context.Context, options controllerapi.Build
 		}
 	}
 	if buildErr != nil {
-		return "", nil, buildErr
+		return "", nil, nil, buildErr
 	}
-	return b.ref, resp, nil
+	return b.ref, resp, dockerfileMappings, nil
 }
 
 func (b *localController) ListProcesses(ctx context.Context, ref string) (infos []*controllerapi.ProcessInfo, retErr error) {
@@ -113,7 +113,7 @@ func (c *Client) Inspect(ctx context.Context, ref string) (*pb.InspectResponse,
 	return c.client().Inspect(ctx, &pb.InspectRequest{Ref: ref})
 }
 
-func (c *Client) Build(ctx context.Context, options pb.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, *client.SolveResponse, error) {
+func (c *Client) Build(ctx context.Context, options pb.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, *client.SolveResponse, map[string]string, error) {
 	ref := identity.NewID()
 	statusChan := make(chan *client.SolveStatus)
 	eg, egCtx := errgroup.WithContext(ctx)
@@ -131,7 +131,7 @@ func (c *Client) Build(ctx context.Context, options pb.BuildOptions, in io.ReadC
 		}
 		return nil
 	})
-	return ref, resp, eg.Wait()
+	return ref, resp, nil, eg.Wait()
 }
 
 func (c *Client) build(ctx context.Context, ref string, options pb.BuildOptions, in io.ReadCloser, statusChan chan *client.SolveStatus) (*client.SolveResponse, error) {
@@ -148,7 +148,7 @@ func serveCmd(dockerCli command.Cli) *cobra.Command {
 		}()
 
 		// prepare server
-		b := NewServer(func(ctx context.Context, options *controllerapi.BuildOptions, stdin io.Reader, progress progress.Writer) (*client.SolveResponse, *build.ResultHandle, error) {
+		b := NewServer(func(ctx context.Context, options *controllerapi.BuildOptions, stdin io.Reader, progress progress.Writer) (*client.SolveResponse, *build.ResultHandle, map[string]string, error) {
 			return cbuild.RunBuild(ctx, dockerCli, *options, stdin, progress, true)
 		})
 		defer b.Close()
@@ -19,7 +19,7 @@ import (
 	"golang.org/x/sync/errgroup"
 )
 
-type BuildFunc func(ctx context.Context, options *pb.BuildOptions, stdin io.Reader, progress progress.Writer) (resp *client.SolveResponse, res *build.ResultHandle, err error)
+type BuildFunc func(ctx context.Context, options *pb.BuildOptions, stdin io.Reader, progress progress.Writer) (resp *client.SolveResponse, res *build.ResultHandle, dfmappping map[string]string, err error)
 
 func NewServer(buildFunc BuildFunc) *Server {
 	return &Server{
@@ -200,7 +200,7 @@ func (m *Server) Build(ctx context.Context, req *pb.BuildRequest) (*pb.BuildResp
 	// Build the specified request
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
-	resp, res, buildErr := m.buildFunc(ctx, req.Options, inR, pw)
+	resp, res, _, buildErr := m.buildFunc(ctx, req.Options, inR, pw)
 	m.sessionMu.Lock()
 	if s, ok := m.session[ref]; ok {
 		// NOTE: buildFunc can return *build.ResultHandle even on error (e.g. when it's implemented using (github.com/docker/buildx/controller/build).RunBuild).
@@ -61,7 +61,7 @@ func (cm *ReloadCmd) Exec(ctx context.Context, args []string) error {
 	}
 	var resultUpdated bool
 	cm.progress.Unpause()
-	ref, _, err := cm.m.Build(ctx, *bo, nil, cm.progress) // TODO: support stdin, hold build ref
+	ref, _, _, err := cm.m.Build(ctx, *bo, nil, cm.progress) // TODO: support stdin, hold build ref
 	cm.progress.Pause()
 	if err != nil {
 		var be *controllererrors.BuildError
@@ -243,8 +243,8 @@ type monitor struct {
 	lastBuildResult *MonitorBuildResult
 }
 
-func (m *monitor) Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (ref string, resp *client.SolveResponse, err error) {
-	ref, resp, err = m.BuildxController.Build(ctx, options, in, progress)
+func (m *monitor) Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (ref string, resp *client.SolveResponse, dockerMappings map[string]string, err error) {
+	ref, resp, _, err = m.BuildxController.Build(ctx, options, in, progress)
 	m.lastBuildResult = &MonitorBuildResult{Resp: resp, Err: err} // Record build result
 	return
 }
@@ -1398,4 +1398,48 @@ target "second" {
 		require.Contains(t, stdout.String(), "Check complete, 1 warning has been found!")
 		require.Contains(t, stdout.String(), "Check complete, 2 warnings have been found!")
 	})
+	t.Run("check for Dockerfile path printed with context when displaying rule check warnings with multiple build targets", func(t *testing.T) {
+		dockerfile := []byte(`
+FROM busybox
+copy Dockerfile .
+`)
+		bakefile := []byte(`
+target "first" {
+	dockerfile = "Dockerfile"
+}
+target "second" {
+	dockerfile = "subdir/Dockerfile"
+}
+target "third" {
+	dockerfile = "subdir/subsubdir/Dockerfile"
+}
+`)
+		dir := tmpdir(
+			t,
+			fstest.CreateDir("subdir", 0700),
+			fstest.CreateDir("subdir/subsubdir", 0700),
+			fstest.CreateFile("Dockerfile", dockerfile, 0600),
+			fstest.CreateFile("subdir/Dockerfile", dockerfile, 0600),
+			fstest.CreateFile("subdir/subsubdir/Dockerfile", dockerfile, 0600),
+			fstest.CreateFile("docker-bake.hcl", bakefile, 0600),
+		)
+
+		dockerfilePathFirst := filepath.Join("Dockerfile")
+		dockerfilePathSecond := filepath.Join("subdir", "Dockerfile")
+		dockerfilePathThird := filepath.Join("subdir", "subsubdir", "Dockerfile")
+
+		cmd := buildxCmd(
+			sb,
+			withDir(dir),
+			withArgs("bake", "--call", "check", "first", "second", "third"),
+		)
+		stdout := bytes.Buffer{}
+		stderr := bytes.Buffer{}
+		cmd.Stdout = &stdout
+		cmd.Stderr = &stderr
+		require.Error(t, cmd.Run(), stdout.String(), stderr.String())
+		require.Contains(t, stdout.String(), dockerfilePathFirst+":3")
+		require.Contains(t, stdout.String(), dockerfilePathSecond+":3")
+		require.Contains(t, stdout.String(), dockerfilePathThird+":3")
+	})
 }
@@ -1291,6 +1291,29 @@ cOpy Dockerfile .
 		require.Error(t, cmd.Run(), stdout.String(), stderr.String())
 		require.Contains(t, stdout.String(), "Check complete, 2 warnings have been found!")
 	})
+
+	t.Run("check for Dockerfile path printed with context when displaying rule check warnings", func(t *testing.T) {
+		dockerfile := []byte(`
+frOM busybox as base
+cOpy Dockerfile .
+`)
+		dir := tmpdir(
+			t,
+			fstest.CreateDir("subdir", 0700),
+			fstest.CreateFile("subdir/Dockerfile", dockerfile, 0600),
+		)
+		dockerfilePath := filepath.Join(dir, "subdir", "Dockerfile")
+
+		cmd := buildxCmd(sb, withArgs("build", "--call=check", "-f", dockerfilePath, dir))
+		stdout := bytes.Buffer{}
+		stderr := bytes.Buffer{}
+		cmd.Stdout = &stdout
+		cmd.Stderr = &stderr
+		require.Error(t, cmd.Run(), stdout.String(), stderr.String())
+		require.Contains(t, stdout.String(), "Check complete, 2 warnings have been found!")
+		require.Contains(t, stdout.String(), dockerfilePath+":2")
+		require.Contains(t, stdout.String(), dockerfilePath+":3")
+	})
 }
 
 func createTestProject(t *testing.T) string {