Merge pull request #2672 from daghack/dockerfile-path-on-warnings

build: display Dockerfile path on check warnings
This commit is contained in:
Tõnis Tiigi 2024-09-19 08:30:48 -07:00 committed by GitHub
commit f102ad73a8
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
14 changed files with 178 additions and 77 deletions

View File

@ -101,6 +101,9 @@ type Inputs struct {
ContextState *llb.State ContextState *llb.State
DockerfileInline string DockerfileInline string
NamedContexts map[string]NamedContext NamedContexts map[string]NamedContext
// DockerfileMappingSrc and DockerfileMappingDst are filled in by the builder.
DockerfileMappingSrc string
DockerfileMappingDst string
} }
type NamedContext struct { type NamedContext struct {
@ -147,11 +150,11 @@ func toRepoOnly(in string) (string, error) {
return strings.Join(out, ","), nil return strings.Join(out, ","), nil
} }
func Build(ctx context.Context, nodes []builder.Node, opt map[string]Options, docker *dockerutil.Client, configDir string, w progress.Writer) (resp map[string]*client.SolveResponse, err error) { func Build(ctx context.Context, nodes []builder.Node, opts map[string]Options, docker *dockerutil.Client, configDir string, w progress.Writer) (resp map[string]*client.SolveResponse, err error) {
return BuildWithResultHandler(ctx, nodes, opt, docker, configDir, w, nil) return BuildWithResultHandler(ctx, nodes, opts, docker, configDir, w, nil)
} }
func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[string]Options, docker *dockerutil.Client, configDir string, w progress.Writer, resultHandleFunc func(driverIndex int, rCtx *ResultHandle)) (resp map[string]*client.SolveResponse, err error) { func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[string]Options, docker *dockerutil.Client, configDir string, w progress.Writer, resultHandleFunc func(driverIndex int, rCtx *ResultHandle)) (resp map[string]*client.SolveResponse, err error) {
if len(nodes) == 0 { if len(nodes) == 0 {
return nil, errors.Errorf("driver required for build") return nil, errors.Errorf("driver required for build")
} }
@ -169,9 +172,9 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
} }
} }
if noMobyDriver != nil && !noDefaultLoad() && noCallFunc(opt) { if noMobyDriver != nil && !noDefaultLoad() && noCallFunc(opts) {
var noOutputTargets []string var noOutputTargets []string
for name, opt := range opt { for name, opt := range opts {
if noMobyDriver.Features(ctx)[driver.DefaultLoad] { if noMobyDriver.Features(ctx)[driver.DefaultLoad] {
continue continue
} }
@ -192,7 +195,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
} }
} }
drivers, err := resolveDrivers(ctx, nodes, opt, w) drivers, err := resolveDrivers(ctx, nodes, opts, w)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -209,7 +212,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
reqForNodes := make(map[string][]*reqForNode) reqForNodes := make(map[string][]*reqForNode)
eg, ctx := errgroup.WithContext(ctx) eg, ctx := errgroup.WithContext(ctx)
for k, opt := range opt { for k, opt := range opts {
multiDriver := len(drivers[k]) > 1 multiDriver := len(drivers[k]) > 1
hasMobyDriver := false hasMobyDriver := false
addGitAttrs, err := getGitAttributes(ctx, opt.Inputs.ContextPath, opt.Inputs.DockerfilePath) addGitAttrs, err := getGitAttributes(ctx, opt.Inputs.ContextPath, opt.Inputs.DockerfilePath)
@ -229,7 +232,9 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
if err != nil { if err != nil {
return nil, err return nil, err
} }
so, release, err := toSolveOpt(ctx, np.Node(), multiDriver, opt, gatewayOpts, configDir, w, docker) localOpt := opt
so, release, err := toSolveOpt(ctx, np.Node(), multiDriver, &localOpt, gatewayOpts, configDir, w, docker)
opts[k] = localOpt
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -269,7 +274,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
} }
// validate that all links between targets use same drivers // validate that all links between targets use same drivers
for name := range opt { for name := range opts {
dps := reqForNodes[name] dps := reqForNodes[name]
for i, dp := range dps { for i, dp := range dps {
so := reqForNodes[name][i].so so := reqForNodes[name][i].so
@ -305,10 +310,10 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
var respMu sync.Mutex var respMu sync.Mutex
results := waitmap.New() results := waitmap.New()
multiTarget := len(opt) > 1 multiTarget := len(opts) > 1
childTargets := calculateChildTargets(reqForNodes, opt) childTargets := calculateChildTargets(reqForNodes, opts)
for k, opt := range opt { for k, opt := range opts {
err := func(k string) (err error) { err := func(k string) (err error) {
opt := opt opt := opt
dps := drivers[k] dps := drivers[k]

View File

@ -35,7 +35,7 @@ import (
"github.com/tonistiigi/fsutil" "github.com/tonistiigi/fsutil"
) )
func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Options, bopts gateway.BuildOpts, configDir string, pw progress.Writer, docker *dockerutil.Client) (_ *client.SolveOpt, release func(), err error) { func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt *Options, bopts gateway.BuildOpts, configDir string, pw progress.Writer, docker *dockerutil.Client) (_ *client.SolveOpt, release func(), err error) {
nodeDriver := node.Driver nodeDriver := node.Driver
defers := make([]func(), 0, 2) defers := make([]func(), 0, 2)
releaseF := func() { releaseF := func() {
@ -263,7 +263,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
so.Exports = opt.Exports so.Exports = opt.Exports
so.Session = slices.Clone(opt.Session) so.Session = slices.Clone(opt.Session)
releaseLoad, err := loadInputs(ctx, nodeDriver, opt.Inputs, pw, &so) releaseLoad, err := loadInputs(ctx, nodeDriver, &opt.Inputs, pw, &so)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
@ -356,7 +356,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
return &so, releaseF, nil return &so, releaseF, nil
} }
func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw progress.Writer, target *client.SolveOpt) (func(), error) { func loadInputs(ctx context.Context, d *driver.DriverHandle, inp *Inputs, pw progress.Writer, target *client.SolveOpt) (func(), error) {
if inp.ContextPath == "" { if inp.ContextPath == "" {
return nil, errors.New("please specify build context (e.g. \".\" for the current directory)") return nil, errors.New("please specify build context (e.g. \".\" for the current directory)")
} }
@ -364,11 +364,12 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw prog
// TODO: handle stdin, symlinks, remote contexts, check files exist // TODO: handle stdin, symlinks, remote contexts, check files exist
var ( var (
err error err error
dockerfileReader io.ReadCloser dockerfileReader io.ReadCloser
dockerfileDir string dockerfileDir string
dockerfileName = inp.DockerfilePath dockerfileName = inp.DockerfilePath
toRemove []string dockerfileSrcName = inp.DockerfilePath
toRemove []string
) )
switch { switch {
@ -440,6 +441,11 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw prog
if inp.DockerfileInline != "" { if inp.DockerfileInline != "" {
dockerfileReader = io.NopCloser(strings.NewReader(inp.DockerfileInline)) dockerfileReader = io.NopCloser(strings.NewReader(inp.DockerfileInline))
dockerfileSrcName = "inline"
} else if inp.DockerfilePath == "-" {
dockerfileSrcName = "stdin"
} else if inp.DockerfilePath == "" {
dockerfileSrcName = filepath.Join(inp.ContextPath, "Dockerfile")
} }
if dockerfileReader != nil { if dockerfileReader != nil {
@ -540,6 +546,9 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw prog
_ = os.RemoveAll(dir) _ = os.RemoveAll(dir)
} }
} }
inp.DockerfileMappingSrc = dockerfileSrcName
inp.DockerfileMappingDst = dockerfileName
return release, nil return release, nil
} }

View File

@ -335,7 +335,7 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
if callFormatJSON { if callFormatJSON {
jsonResults[name] = map[string]any{} jsonResults[name] = map[string]any{}
buf := &bytes.Buffer{} buf := &bytes.Buffer{}
if code, err := printResult(buf, pf, res); err != nil { if code, err := printResult(buf, pf, res, name, &req.Inputs); err != nil {
jsonResults[name]["error"] = err.Error() jsonResults[name]["error"] = err.Error()
exitCode = 1 exitCode = 1
} else if code != 0 && exitCode == 0 { } else if code != 0 && exitCode == 0 {
@ -361,7 +361,7 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
} }
fmt.Fprintln(dockerCli.Out()) fmt.Fprintln(dockerCli.Out())
if code, err := printResult(dockerCli.Out(), pf, res); err != nil { if code, err := printResult(dockerCli.Out(), pf, res, name, &req.Inputs); err != nil {
fmt.Fprintf(dockerCli.Out(), "error: %v\n", err) fmt.Fprintf(dockerCli.Out(), "error: %v\n", err)
exitCode = 1 exitCode = 1
} else if code != 0 && exitCode == 0 { } else if code != 0 && exitCode == 0 {

View File

@ -49,6 +49,7 @@ import (
"github.com/moby/buildkit/frontend/subrequests/outline" "github.com/moby/buildkit/frontend/subrequests/outline"
"github.com/moby/buildkit/frontend/subrequests/targets" "github.com/moby/buildkit/frontend/subrequests/targets"
"github.com/moby/buildkit/solver/errdefs" "github.com/moby/buildkit/solver/errdefs"
solverpb "github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/grpcerrors" "github.com/moby/buildkit/util/grpcerrors"
"github.com/moby/buildkit/util/progress/progressui" "github.com/moby/buildkit/util/progress/progressui"
"github.com/morikuni/aec" "github.com/morikuni/aec"
@ -346,11 +347,12 @@ func runBuild(ctx context.Context, dockerCli command.Cli, options buildOptions)
done := timeBuildCommand(mp, attributes) done := timeBuildCommand(mp, attributes)
var resp *client.SolveResponse var resp *client.SolveResponse
var inputs *build.Inputs
var retErr error var retErr error
if confutil.IsExperimental() { if confutil.IsExperimental() {
resp, retErr = runControllerBuild(ctx, dockerCli, opts, options, printer) resp, inputs, retErr = runControllerBuild(ctx, dockerCli, opts, options, printer)
} else { } else {
resp, retErr = runBasicBuild(ctx, dockerCli, opts, printer) resp, inputs, retErr = runBasicBuild(ctx, dockerCli, opts, printer)
} }
if err := printer.Wait(); retErr == nil { if err := printer.Wait(); retErr == nil {
@ -387,7 +389,7 @@ func runBuild(ctx context.Context, dockerCli command.Cli, options buildOptions)
} }
} }
if opts.CallFunc != nil { if opts.CallFunc != nil {
if exitcode, err := printResult(dockerCli.Out(), opts.CallFunc, resp.ExporterResponse); err != nil { if exitcode, err := printResult(dockerCli.Out(), opts.CallFunc, resp.ExporterResponse, options.target, inputs); err != nil {
return err return err
} else if exitcode != 0 { } else if exitcode != 0 {
os.Exit(exitcode) os.Exit(exitcode)
@ -405,22 +407,22 @@ func getImageID(resp map[string]string) string {
return dgst return dgst
} }
func runBasicBuild(ctx context.Context, dockerCli command.Cli, opts *controllerapi.BuildOptions, printer *progress.Printer) (*client.SolveResponse, error) { func runBasicBuild(ctx context.Context, dockerCli command.Cli, opts *controllerapi.BuildOptions, printer *progress.Printer) (*client.SolveResponse, *build.Inputs, error) {
resp, res, err := cbuild.RunBuild(ctx, dockerCli, *opts, dockerCli.In(), printer, false) resp, res, dfmap, err := cbuild.RunBuild(ctx, dockerCli, *opts, dockerCli.In(), printer, false)
if res != nil { if res != nil {
res.Done() res.Done()
} }
return resp, err return resp, dfmap, err
} }
func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *controllerapi.BuildOptions, options buildOptions, printer *progress.Printer) (*client.SolveResponse, error) { func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *controllerapi.BuildOptions, options buildOptions, printer *progress.Printer) (*client.SolveResponse, *build.Inputs, error) {
if options.invokeConfig != nil && (options.dockerfileName == "-" || options.contextPath == "-") { if options.invokeConfig != nil && (options.dockerfileName == "-" || options.contextPath == "-") {
// stdin must be usable for monitor // stdin must be usable for monitor
return nil, errors.Errorf("Dockerfile or context from stdin is not supported with invoke") return nil, nil, errors.Errorf("Dockerfile or context from stdin is not supported with invoke")
} }
c, err := controller.NewController(ctx, options.ControlOptions, dockerCli, printer) c, err := controller.NewController(ctx, options.ControlOptions, dockerCli, printer)
if err != nil { if err != nil {
return nil, err return nil, nil, err
} }
defer func() { defer func() {
if err := c.Close(); err != nil { if err := c.Close(); err != nil {
@ -432,12 +434,13 @@ func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *contro
// so we need to resolve paths to absolute ones in the client. // so we need to resolve paths to absolute ones in the client.
opts, err = controllerapi.ResolveOptionPaths(opts) opts, err = controllerapi.ResolveOptionPaths(opts)
if err != nil { if err != nil {
return nil, err return nil, nil, err
} }
var ref string var ref string
var retErr error var retErr error
var resp *client.SolveResponse var resp *client.SolveResponse
var inputs *build.Inputs
var f *ioset.SingleForwarder var f *ioset.SingleForwarder
var pr io.ReadCloser var pr io.ReadCloser
@ -455,7 +458,7 @@ func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *contro
}) })
} }
ref, resp, err = c.Build(ctx, *opts, pr, printer) ref, resp, inputs, err = c.Build(ctx, *opts, pr, printer)
if err != nil { if err != nil {
var be *controllererrors.BuildError var be *controllererrors.BuildError
if errors.As(err, &be) { if errors.As(err, &be) {
@ -463,7 +466,7 @@ func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *contro
retErr = err retErr = err
// We can proceed to monitor // We can proceed to monitor
} else { } else {
return nil, errors.Wrapf(err, "failed to build") return nil, nil, errors.Wrapf(err, "failed to build")
} }
} }
@ -504,7 +507,7 @@ func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *contro
} }
} }
return resp, retErr return resp, inputs, retErr
} }
func printError(err error, printer *progress.Printer) error { func printError(err error, printer *progress.Printer) error {
@ -882,7 +885,7 @@ func printWarnings(w io.Writer, warnings []client.VertexWarning, mode progressui
} }
} }
func printResult(w io.Writer, f *controllerapi.CallFunc, res map[string]string) (int, error) { func printResult(w io.Writer, f *controllerapi.CallFunc, res map[string]string, target string, inp *build.Inputs) (int, error) {
switch f.Name { switch f.Name {
case "outline": case "outline":
return 0, printValue(w, outline.PrintOutline, outline.SubrequestsOutlineDefinition.Version, f.Format, res) return 0, printValue(w, outline.PrintOutline, outline.SubrequestsOutlineDefinition.Version, f.Format, res)
@ -908,8 +911,27 @@ func printResult(w io.Writer, f *controllerapi.CallFunc, res map[string]string)
} }
fmt.Fprintf(w, "Check complete, %s\n", warningCountMsg) fmt.Fprintf(w, "Check complete, %s\n", warningCountMsg)
} }
sourceInfoMap := func(sourceInfo *solverpb.SourceInfo) *solverpb.SourceInfo {
if sourceInfo == nil || inp == nil {
return sourceInfo
}
if target == "" {
target = "default"
}
err := printValue(w, printLintViolationsWrapper, lint.SubrequestLintDefinition.Version, f.Format, res) if inp.DockerfileMappingSrc != "" {
newSourceInfo := *sourceInfo
newSourceInfo.Filename = inp.DockerfileMappingSrc
return &newSourceInfo
}
return sourceInfo
}
printLintWarnings := func(dt []byte, w io.Writer) error {
return lintResults.PrintTo(w, sourceInfoMap)
}
err := printValue(w, printLintWarnings, lint.SubrequestLintDefinition.Version, f.Format, res)
if err != nil { if err != nil {
return 0, err return 0, err
} }
@ -924,13 +946,8 @@ func printResult(w io.Writer, f *controllerapi.CallFunc, res map[string]string)
if f.Format != "json" && len(lintResults.Warnings) > 0 { if f.Format != "json" && len(lintResults.Warnings) > 0 {
fmt.Fprintln(w) fmt.Fprintln(w)
} }
lintBuf := bytes.NewBuffer([]byte(lintResults.Error.Message + "\n")) lintBuf := bytes.NewBuffer(nil)
sourceInfo := lintResults.Sources[lintResults.Error.Location.SourceIndex] lintResults.PrintErrorTo(lintBuf, sourceInfoMap)
source := errdefs.Source{
Info: sourceInfo,
Ranges: lintResults.Error.Location.Ranges,
}
source.Print(lintBuf)
return 0, errors.New(lintBuf.String()) return 0, errors.New(lintBuf.String())
} else if len(lintResults.Warnings) == 0 && f.Format != "json" { } else if len(lintResults.Warnings) == 0 && f.Format != "json" {
fmt.Fprintln(w, "Check complete, no warnings found.") fmt.Fprintln(w, "Check complete, no warnings found.")
@ -968,11 +985,6 @@ func printValue(w io.Writer, printer callFunc, version string, format string, re
return printer([]byte(res["result.json"]), w) return printer([]byte(res["result.json"]), w)
} }
// FIXME: remove once https://github.com/docker/buildx/pull/2672 is sorted
func printLintViolationsWrapper(dt []byte, w io.Writer) error {
return lint.PrintLintViolations(dt, w, nil)
}
type invokeConfig struct { type invokeConfig struct {
controllerapi.InvokeConfig controllerapi.InvokeConfig
onFlag string onFlag string

View File

@ -34,9 +34,9 @@ const defaultTargetName = "default"
// NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultHandle, // NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultHandle,
// this function returns it in addition to the error (i.e. it does "return nil, res, err"). The caller can // this function returns it in addition to the error (i.e. it does "return nil, res, err"). The caller can
// inspect the result and debug the cause of that error. // inspect the result and debug the cause of that error.
func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.BuildOptions, inStream io.Reader, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultHandle, error) { func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.BuildOptions, inStream io.Reader, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultHandle, *build.Inputs, error) {
if in.NoCache && len(in.NoCacheFilter) > 0 { if in.NoCache && len(in.NoCacheFilter) > 0 {
return nil, nil, errors.Errorf("--no-cache and --no-cache-filter cannot currently be used together") return nil, nil, nil, errors.Errorf("--no-cache and --no-cache-filter cannot currently be used together")
} }
contexts := map[string]build.NamedContext{} contexts := map[string]build.NamedContext{}
@ -70,7 +70,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
platforms, err := platformutil.Parse(in.Platforms) platforms, err := platformutil.Parse(in.Platforms)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, nil, err
} }
opts.Platforms = platforms opts.Platforms = platforms
@ -79,7 +79,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
secrets, err := controllerapi.CreateSecrets(in.Secrets) secrets, err := controllerapi.CreateSecrets(in.Secrets)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, nil, err
} }
opts.Session = append(opts.Session, secrets) opts.Session = append(opts.Session, secrets)
@ -89,13 +89,13 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
} }
ssh, err := controllerapi.CreateSSH(sshSpecs) ssh, err := controllerapi.CreateSSH(sshSpecs)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, nil, err
} }
opts.Session = append(opts.Session, ssh) opts.Session = append(opts.Session, ssh)
outputs, err := controllerapi.CreateExports(in.Exports) outputs, err := controllerapi.CreateExports(in.Exports)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, nil, err
} }
if in.ExportPush { if in.ExportPush {
var pushUsed bool var pushUsed bool
@ -134,7 +134,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
annotations, err := buildflags.ParseAnnotations(in.Annotations) annotations, err := buildflags.ParseAnnotations(in.Annotations)
if err != nil { if err != nil {
return nil, nil, errors.Wrap(err, "parse annotations") return nil, nil, nil, errors.Wrap(err, "parse annotations")
} }
for _, o := range outputs { for _, o := range outputs {
@ -154,7 +154,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
allow, err := buildflags.ParseEntitlements(in.Allow) allow, err := buildflags.ParseEntitlements(in.Allow)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, nil, err
} }
opts.Allow = allow opts.Allow = allow
@ -178,23 +178,28 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
builder.WithContextPathHash(contextPathHash), builder.WithContextPathHash(contextPathHash),
) )
if err != nil { if err != nil {
return nil, nil, err return nil, nil, nil, err
} }
if err = updateLastActivity(dockerCli, b.NodeGroup); err != nil { if err = updateLastActivity(dockerCli, b.NodeGroup); err != nil {
return nil, nil, errors.Wrapf(err, "failed to update builder last activity time") return nil, nil, nil, errors.Wrapf(err, "failed to update builder last activity time")
} }
nodes, err := b.LoadNodes(ctx) nodes, err := b.LoadNodes(ctx)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, nil, err
} }
resp, res, err := buildTargets(ctx, dockerCli, nodes, map[string]build.Options{defaultTargetName: opts}, progress, generateResult) var inputs *build.Inputs
buildOptions := map[string]build.Options{defaultTargetName: opts}
resp, res, err := buildTargets(ctx, dockerCli, nodes, buildOptions, progress, generateResult)
err = wrapBuildError(err, false) err = wrapBuildError(err, false)
if err != nil { if err != nil {
// NOTE: buildTargets can return *build.ResultHandle even on error. // NOTE: buildTargets can return *build.ResultHandle even on error.
return nil, res, err return nil, res, nil, err
} }
return resp, res, nil if i, ok := buildOptions[defaultTargetName]; ok {
inputs = &i.Inputs
}
return resp, res, inputs, nil
} }
// buildTargets runs the specified build and returns the result. // buildTargets runs the specified build and returns the result.

View File

@ -4,13 +4,14 @@ import (
"context" "context"
"io" "io"
"github.com/docker/buildx/build"
controllerapi "github.com/docker/buildx/controller/pb" controllerapi "github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/util/progress" "github.com/docker/buildx/util/progress"
"github.com/moby/buildkit/client" "github.com/moby/buildkit/client"
) )
type BuildxController interface { type BuildxController interface {
Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (ref string, resp *client.SolveResponse, err error) Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (ref string, resp *client.SolveResponse, inputs *build.Inputs, err error)
// Invoke starts an IO session into the specified process. // Invoke starts an IO session into the specified process.
// If pid doesn't match any running processes, it starts a new process with the specified config. // If pid doesn't match any running processes, it starts a new process with the specified config.
// If there is no container running or InvokeConfig.Rollback is specified, the process will start in a newly created container. // If there is no container running or InvokeConfig.Rollback is specified, the process will start in a newly created container.

View File

@ -42,13 +42,13 @@ type localController struct {
buildOnGoing atomic.Bool buildOnGoing atomic.Bool
} }
func (b *localController) Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, *client.SolveResponse, error) { func (b *localController) Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, *client.SolveResponse, *build.Inputs, error) {
if !b.buildOnGoing.CompareAndSwap(false, true) { if !b.buildOnGoing.CompareAndSwap(false, true) {
return "", nil, errors.New("build ongoing") return "", nil, nil, errors.New("build ongoing")
} }
defer b.buildOnGoing.Store(false) defer b.buildOnGoing.Store(false)
resp, res, buildErr := cbuild.RunBuild(ctx, b.dockerCli, options, in, progress, true) resp, res, dockerfileMappings, buildErr := cbuild.RunBuild(ctx, b.dockerCli, options, in, progress, true)
// NOTE: RunBuild can return *build.ResultHandle even on error. // NOTE: RunBuild can return *build.ResultHandle even on error.
if res != nil { if res != nil {
b.buildConfig = buildConfig{ b.buildConfig = buildConfig{
@ -60,9 +60,9 @@ func (b *localController) Build(ctx context.Context, options controllerapi.Build
} }
} }
if buildErr != nil { if buildErr != nil {
return "", nil, buildErr return "", nil, nil, buildErr
} }
return b.ref, resp, nil return b.ref, resp, dockerfileMappings, nil
} }
func (b *localController) ListProcesses(ctx context.Context, ref string) (infos []*controllerapi.ProcessInfo, retErr error) { func (b *localController) ListProcesses(ctx context.Context, ref string) (infos []*controllerapi.ProcessInfo, retErr error) {

View File

@ -8,6 +8,7 @@ import (
"github.com/containerd/containerd/defaults" "github.com/containerd/containerd/defaults"
"github.com/containerd/containerd/pkg/dialer" "github.com/containerd/containerd/pkg/dialer"
"github.com/docker/buildx/build"
"github.com/docker/buildx/controller/pb" "github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/util/progress" "github.com/docker/buildx/util/progress"
"github.com/moby/buildkit/client" "github.com/moby/buildkit/client"
@ -113,7 +114,7 @@ func (c *Client) Inspect(ctx context.Context, ref string) (*pb.InspectResponse,
return c.client().Inspect(ctx, &pb.InspectRequest{Ref: ref}) return c.client().Inspect(ctx, &pb.InspectRequest{Ref: ref})
} }
func (c *Client) Build(ctx context.Context, options pb.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, *client.SolveResponse, error) { func (c *Client) Build(ctx context.Context, options pb.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, *client.SolveResponse, *build.Inputs, error) {
ref := identity.NewID() ref := identity.NewID()
statusChan := make(chan *client.SolveStatus) statusChan := make(chan *client.SolveStatus)
eg, egCtx := errgroup.WithContext(ctx) eg, egCtx := errgroup.WithContext(ctx)
@ -131,7 +132,7 @@ func (c *Client) Build(ctx context.Context, options pb.BuildOptions, in io.ReadC
} }
return nil return nil
}) })
return ref, resp, eg.Wait() return ref, resp, nil, eg.Wait()
} }
func (c *Client) build(ctx context.Context, ref string, options pb.BuildOptions, in io.ReadCloser, statusChan chan *client.SolveStatus) (*client.SolveResponse, error) { func (c *Client) build(ctx context.Context, ref string, options pb.BuildOptions, in io.ReadCloser, statusChan chan *client.SolveStatus) (*client.SolveResponse, error) {

View File

@ -148,7 +148,7 @@ func serveCmd(dockerCli command.Cli) *cobra.Command {
}() }()
// prepare server // prepare server
b := NewServer(func(ctx context.Context, options *controllerapi.BuildOptions, stdin io.Reader, progress progress.Writer) (*client.SolveResponse, *build.ResultHandle, error) { b := NewServer(func(ctx context.Context, options *controllerapi.BuildOptions, stdin io.Reader, progress progress.Writer) (*client.SolveResponse, *build.ResultHandle, *build.Inputs, error) {
return cbuild.RunBuild(ctx, dockerCli, *options, stdin, progress, true) return cbuild.RunBuild(ctx, dockerCli, *options, stdin, progress, true)
}) })
defer b.Close() defer b.Close()

View File

@ -19,7 +19,7 @@ import (
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
) )
type BuildFunc func(ctx context.Context, options *pb.BuildOptions, stdin io.Reader, progress progress.Writer) (resp *client.SolveResponse, res *build.ResultHandle, err error) type BuildFunc func(ctx context.Context, options *pb.BuildOptions, stdin io.Reader, progress progress.Writer) (resp *client.SolveResponse, res *build.ResultHandle, inp *build.Inputs, err error)
func NewServer(buildFunc BuildFunc) *Server { func NewServer(buildFunc BuildFunc) *Server {
return &Server{ return &Server{
@ -200,7 +200,7 @@ func (m *Server) Build(ctx context.Context, req *pb.BuildRequest) (*pb.BuildResp
// Build the specified request // Build the specified request
ctx, cancel := context.WithCancel(ctx) ctx, cancel := context.WithCancel(ctx)
defer cancel() defer cancel()
resp, res, buildErr := m.buildFunc(ctx, req.Options, inR, pw) resp, res, _, buildErr := m.buildFunc(ctx, req.Options, inR, pw)
m.sessionMu.Lock() m.sessionMu.Lock()
if s, ok := m.session[ref]; ok { if s, ok := m.session[ref]; ok {
// NOTE: buildFunc can return *build.ResultHandle even on error (e.g. when it's implemented using (github.com/docker/buildx/controller/build).RunBuild). // NOTE: buildFunc can return *build.ResultHandle even on error (e.g. when it's implemented using (github.com/docker/buildx/controller/build).RunBuild).

View File

@ -61,7 +61,7 @@ func (cm *ReloadCmd) Exec(ctx context.Context, args []string) error {
} }
var resultUpdated bool var resultUpdated bool
cm.progress.Unpause() cm.progress.Unpause()
ref, _, err := cm.m.Build(ctx, *bo, nil, cm.progress) // TODO: support stdin, hold build ref ref, _, _, err := cm.m.Build(ctx, *bo, nil, cm.progress) // TODO: support stdin, hold build ref
cm.progress.Pause() cm.progress.Pause()
if err != nil { if err != nil {
var be *controllererrors.BuildError var be *controllererrors.BuildError

View File

@ -10,6 +10,7 @@ import (
"text/tabwriter" "text/tabwriter"
"github.com/containerd/console" "github.com/containerd/console"
"github.com/docker/buildx/build"
"github.com/docker/buildx/controller/control" "github.com/docker/buildx/controller/control"
controllerapi "github.com/docker/buildx/controller/pb" controllerapi "github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/monitor/commands" "github.com/docker/buildx/monitor/commands"
@ -243,8 +244,8 @@ type monitor struct {
lastBuildResult *MonitorBuildResult lastBuildResult *MonitorBuildResult
} }
func (m *monitor) Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (ref string, resp *client.SolveResponse, err error) { func (m *monitor) Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (ref string, resp *client.SolveResponse, input *build.Inputs, err error) {
ref, resp, err = m.BuildxController.Build(ctx, options, in, progress) ref, resp, _, err = m.BuildxController.Build(ctx, options, in, progress)
m.lastBuildResult = &MonitorBuildResult{Resp: resp, Err: err} // Record build result m.lastBuildResult = &MonitorBuildResult{Resp: resp, Err: err} // Record build result
return return
} }

View File

@ -1418,4 +1418,48 @@ target "second" {
require.Contains(t, stdout.String(), "Check complete, 1 warning has been found!") require.Contains(t, stdout.String(), "Check complete, 1 warning has been found!")
require.Contains(t, stdout.String(), "Check complete, 2 warnings have been found!") require.Contains(t, stdout.String(), "Check complete, 2 warnings have been found!")
}) })
t.Run("check for Dockerfile path printed with context when displaying rule check warnings with multiple build targets", func(t *testing.T) {
dockerfile := []byte(`
FROM busybox
copy Dockerfile .
`)
bakefile := []byte(`
target "first" {
dockerfile = "Dockerfile"
}
target "second" {
dockerfile = "subdir/Dockerfile"
}
target "third" {
dockerfile = "subdir/subsubdir/Dockerfile"
}
`)
dir := tmpdir(
t,
fstest.CreateDir("subdir", 0700),
fstest.CreateDir("subdir/subsubdir", 0700),
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("subdir/Dockerfile", dockerfile, 0600),
fstest.CreateFile("subdir/subsubdir/Dockerfile", dockerfile, 0600),
fstest.CreateFile("docker-bake.hcl", bakefile, 0600),
)
dockerfilePathFirst := filepath.Join("Dockerfile")
dockerfilePathSecond := filepath.Join("subdir", "Dockerfile")
dockerfilePathThird := filepath.Join("subdir", "subsubdir", "Dockerfile")
cmd := buildxCmd(
sb,
withDir(dir),
withArgs("bake", "--call", "check", "first", "second", "third"),
)
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
cmd.Stdout = &stdout
cmd.Stderr = &stderr
require.Error(t, cmd.Run(), stdout.String(), stderr.String())
require.Contains(t, stdout.String(), dockerfilePathFirst+":3")
require.Contains(t, stdout.String(), dockerfilePathSecond+":3")
require.Contains(t, stdout.String(), dockerfilePathThird+":3")
})
} }

View File

@ -1291,6 +1291,29 @@ cOpy Dockerfile .
require.Error(t, cmd.Run(), stdout.String(), stderr.String()) require.Error(t, cmd.Run(), stdout.String(), stderr.String())
require.Contains(t, stdout.String(), "Check complete, 2 warnings have been found!") require.Contains(t, stdout.String(), "Check complete, 2 warnings have been found!")
}) })
t.Run("check for Dockerfile path printed with context when displaying rule check warnings", func(t *testing.T) {
dockerfile := []byte(`
frOM busybox as base
cOpy Dockerfile .
`)
dir := tmpdir(
t,
fstest.CreateDir("subdir", 0700),
fstest.CreateFile("subdir/Dockerfile", dockerfile, 0600),
)
dockerfilePath := filepath.Join(dir, "subdir", "Dockerfile")
cmd := buildxCmd(sb, withArgs("build", "--call=check", "-f", dockerfilePath, dir))
stdout := bytes.Buffer{}
stderr := bytes.Buffer{}
cmd.Stdout = &stdout
cmd.Stderr = &stderr
require.Error(t, cmd.Run(), stdout.String(), stderr.String())
require.Contains(t, stdout.String(), "Check complete, 2 warnings have been found!")
require.Contains(t, stdout.String(), dockerfilePath+":2")
require.Contains(t, stdout.String(), dockerfilePath+":3")
})
} }
func createTestProject(t *testing.T) string { func createTestProject(t *testing.T) string {