mirror of https://github.com/docker/buildx.git
Merge pull request #1804 from jedevc/fixup-solve
commit 7cef021a8a

@@ -65,7 +65,7 @@ var (
 )
 
 const (
-	printFallbackImage = "docker/dockerfile-upstream:1.4-outline@sha256:627443ff4e2d0f635d429cfc1da5388bcd5a70949c38adcd3cd7c4e5df67c73c"
+	printFallbackImage = "docker/dockerfile:1.5.2-labs@sha256:f2e91734a84c0922ff47aa4098ab775f1dfa932430d2888dd5cad5251fafdac4"
 )
 
 type Options struct {
@@ -667,7 +667,7 @@ func Build(ctx context.Context, nodes []builder.Node, opt map[string]Options, do
 	return BuildWithResultHandler(ctx, nodes, opt, docker, configDir, w, nil)
 }
 
-func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[string]Options, docker *dockerutil.Client, configDir string, w progress.Writer, resultHandleFunc func(driverIndex int, rCtx *ResultContext)) (resp map[string]*client.SolveResponse, err error) {
+func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[string]Options, docker *dockerutil.Client, configDir string, w progress.Writer, resultHandleFunc func(driverIndex int, rCtx *ResultHandle)) (resp map[string]*client.SolveResponse, err error) {
 	if len(nodes) == 0 {
 		return nil, errors.Errorf("driver required for build")
 	}
@@ -885,61 +885,61 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 			cc := c
 			var printRes map[string][]byte
-			rr, err := c.Build(ctx, so, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
-				var isFallback bool
-				var origErr error
-				for {
-					if opt.PrintFunc != nil {
-						if _, ok := req.FrontendOpt["frontend.caps"]; !ok {
-							req.FrontendOpt["frontend.caps"] = "moby.buildkit.frontend.subrequests+forward"
-						} else {
-							req.FrontendOpt["frontend.caps"] += ",moby.buildkit.frontend.subrequests+forward"
-						}
-						req.FrontendOpt["requestid"] = "frontend." + opt.PrintFunc.Name
-						if isFallback {
-							req.FrontendOpt["build-arg:BUILDKIT_SYNTAX"] = printFallbackImage
-						}
-					}
-					res, err := c.Solve(ctx, req)
-					if err != nil {
-						if origErr != nil {
-							return nil, err
-						}
-						var reqErr *errdefs.UnsupportedSubrequestError
-						if !isFallback {
-							if errors.As(err, &reqErr) {
-								switch reqErr.Name {
-								case "frontend.outline", "frontend.targets":
-									isFallback = true
-									origErr = err
-									continue
-								}
-								return nil, err
-							}
-							// buildkit v0.8 vendored in Docker 20.10 does not support typed errors
-							if strings.Contains(err.Error(), "unsupported request frontend.outline") || strings.Contains(err.Error(), "unsupported request frontend.targets") {
-								isFallback = true
-								origErr = err
-								continue
-							}
-						}
-						return nil, err
-					}
-					if opt.PrintFunc != nil {
-						printRes = res.Metadata
-					}
-					results.Set(resultKey(dp.driverIndex, k), res)
-					if resultHandleFunc != nil {
-						resultCtx, err := NewResultContext(ctx, cc, so, res)
-						if err == nil {
-							resultHandleFunc(dp.driverIndex, resultCtx)
-						} else {
-							logrus.Warnf("failed to record result: %s", err)
-						}
-					}
-					return res, nil
-				}
-			}, ch)
+			buildFunc := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+				if opt.PrintFunc != nil {
+					if _, ok := req.FrontendOpt["frontend.caps"]; !ok {
+						req.FrontendOpt["frontend.caps"] = "moby.buildkit.frontend.subrequests+forward"
+					} else {
+						req.FrontendOpt["frontend.caps"] += ",moby.buildkit.frontend.subrequests+forward"
+					}
+					req.FrontendOpt["requestid"] = "frontend." + opt.PrintFunc.Name
+				}
+
+				res, err := c.Solve(ctx, req)
+				if err != nil {
+					fallback := false
+					var reqErr *errdefs.UnsupportedSubrequestError
+					if errors.As(err, &reqErr) {
+						switch reqErr.Name {
+						case "frontend.outline", "frontend.targets":
+							fallback = true
+						default:
+							return nil, err
+						}
+					}
+					// buildkit v0.8 vendored in Docker 20.10 does not support typed errors
+					if strings.Contains(err.Error(), "unsupported request frontend.outline") || strings.Contains(err.Error(), "unsupported request frontend.targets") {
+						fallback = true
+					}
+
+					if fallback {
+						req.FrontendOpt["build-arg:BUILDKIT_SYNTAX"] = printFallbackImage
+						res2, err2 := c.Solve(ctx, req)
+						if err2 != nil {
+							return nil, err
+						}
+						res = res2
+					} else {
+						return nil, err
+					}
+				}
+				if opt.PrintFunc != nil {
+					printRes = res.Metadata
+				}
+
+				results.Set(resultKey(dp.driverIndex, k), res)
+				return res, nil
+			}
+
+			var rr *client.SolveResponse
+			if resultHandleFunc != nil {
+				var resultHandle *ResultHandle
+				resultHandle, rr, err = NewResultHandle(ctx, cc, so, "buildx", buildFunc, ch)
+				resultHandleFunc(dp.driverIndex, resultHandle)
+			} else {
+				rr, err = c.Build(ctx, so, "buildx", buildFunc, ch)
+			}
 			if err != nil {
 				return err
 			}

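Note: the behavioral piece of this hunk is the print-function fallback. It no longer loops with isFallback/origErr state; buildFunc detects an unsupported frontend.outline/frontend.targets subrequest and retries the solve exactly once with BUILDKIT_SYNTAX pinned to printFallbackImage. A condensed sketch of that retry, assuming req.FrontendOpt is non-nil as in the caller above; the helper name solveWithFallback is illustrative and not part of buildx:

package fallback

import (
	"context"
	"strings"

	gateway "github.com/moby/buildkit/frontend/gateway/client"
	"github.com/moby/buildkit/solver/errdefs"
	"github.com/pkg/errors"
)

const printFallbackImage = "docker/dockerfile:1.5.2-labs@sha256:f2e91734a84c0922ff47aa4098ab775f1dfa932430d2888dd5cad5251fafdac4"

// solveWithFallback retries a print-function solve once with a pinned
// frontend image when the in-use frontend rejects the subrequest.
func solveWithFallback(ctx context.Context, c gateway.Client, req gateway.SolveRequest) (*gateway.Result, error) {
	res, err := c.Solve(ctx, req)
	if err == nil {
		return res, nil
	}

	fallback := false
	var reqErr *errdefs.UnsupportedSubrequestError
	if errors.As(err, &reqErr) {
		switch reqErr.Name {
		case "frontend.outline", "frontend.targets":
			fallback = true
		default:
			return nil, err
		}
	}
	// older buildkit (v0.8 vendored in Docker 20.10) returns untyped errors
	if strings.Contains(err.Error(), "unsupported request frontend.outline") ||
		strings.Contains(err.Error(), "unsupported request frontend.targets") {
		fallback = true
	}
	if !fallback {
		return nil, err
	}

	// Retry once with a frontend that understands the subrequest, and
	// surface the original error if the retry also fails.
	req.FrontendOpt["build-arg:BUILDKIT_SYNTAX"] = printFallbackImage
	res2, err2 := c.Solve(ctx, req)
	if err2 != nil {
		return nil, err
	}
	return res2, nil
}
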
@@ -21,10 +21,10 @@ type Container struct {
 	initStarted   atomic.Bool
 	container     gateway.Container
 	releaseCh     chan struct{}
-	resultCtx     *ResultContext
+	resultCtx     *ResultHandle
 }
 
-func NewContainer(ctx context.Context, resultCtx *ResultContext, cfg *controllerapi.InvokeConfig) (*Container, error) {
+func NewContainer(ctx context.Context, resultCtx *ResultHandle, cfg *controllerapi.InvokeConfig) (*Container, error) {
 	mainCtx := ctx
 
 	ctrCh := make(chan *Container)

@@ -112,7 +112,7 @@ func (c *Container) Exec(ctx context.Context, cfg *controllerapi.InvokeConfig, s
 	return err
 }
 
-func exec(ctx context.Context, resultCtx *ResultContext, cfg *controllerapi.InvokeConfig, ctr gateway.Container, stdin io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) error {
+func exec(ctx context.Context, resultCtx *ResultHandle, cfg *controllerapi.InvokeConfig, ctr gateway.Container, stdin io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) error {
 	processCfg, err := resultCtx.getProcessConfig(cfg, stdin, stdout, stderr)
 	if err != nil {
 		return err
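Note: the container-debugging path is the main consumer of the renamed type, so a usage sketch may help: given the *build.ResultHandle returned alongside a build error, open an interactive shell in the failed build's context. This assumes the exported NewContainer shown above, a Cancel method on Container, and Exec stream arguments matching the exec() signature in this file:

package debugsketch

import (
	"context"
	"io"
	"os"

	"github.com/docker/buildx/build"
	controllerapi "github.com/docker/buildx/controller/pb"
)

// invokeShell starts a container from the (possibly failed) build result and
// attaches the current terminal to a shell inside it.
func invokeShell(ctx context.Context, res *build.ResultHandle) error {
	cfg := &controllerapi.InvokeConfig{
		Entrypoint: []string{"/bin/sh"},
		Tty:        true,
	}
	ctr, err := build.NewContainer(ctx, res, cfg)
	if err != nil {
		return err
	}
	defer ctr.Cancel() // release the container when the shell exits

	return ctr.Exec(ctx, cfg, io.NopCloser(os.Stdin), os.Stdout, os.Stderr)
}
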

build/result.go (351 lines changed)

@@ -6,8 +6,6 @@ import (
 	"encoding/json"
 	"io"
 	"sync"
-	"sync/atomic"
-	"time"
 
 	controllerapi "github.com/docker/buildx/controller/pb"
 	"github.com/moby/buildkit/client"
@@ -22,14 +20,187 @@ import (
 	"golang.org/x/sync/errgroup"
 )
 
-func NewResultContext(ctx context.Context, c *client.Client, solveOpt client.SolveOpt, res *gateway.Result) (*ResultContext, error) {
-	def, err := getDefinition(ctx, res)
-	if err != nil {
-		return nil, err
-	}
-	return getResultAt(ctx, c, solveOpt, def, nil)
-}
+// NewResultHandle makes a call to client.Build, additionally returning an
+// opaque ResultHandle alongside the standard response and error.
+//
+// This ResultHandle can be used to execute additional build steps in the same
+// context as the build occurred, which can allow easy debugging of build
+// failures and successes.
+//
+// If the returned ResultHandle is not nil, the caller must call Done() on it.
+func NewResultHandle(ctx context.Context, cc *client.Client, opt client.SolveOpt, product string, buildFunc gateway.BuildFunc, ch chan *client.SolveStatus) (*ResultHandle, *client.SolveResponse, error) {
+	// Create a new context to wrap the original, and cancel it when the
+	// caller-provided context is cancelled.
+	//
+	// We derive the context from the background context so that we can forbid
+	// cancellation of the build request after <-done is closed (which we do
+	// before returning the ResultHandle).
+	baseCtx := ctx
+	ctx, cancel := context.WithCancelCause(context.Background())
+	done := make(chan struct{})
+	go func() {
+		select {
+		case <-baseCtx.Done():
+			cancel(baseCtx.Err())
+		case <-done:
+			// Once done is closed, we've recorded a ResultHandle, so we
+			// shouldn't allow cancelling the underlying build request anymore.
+		}
+	}()
+
+	// Create a new channel to forward status messages to the original.
+	//
+	// We do this so that we can discard status messages after the main portion
+	// of the build is complete. This is necessary for the solve error case,
+	// where the original gateway is kept open until the ResultHandle is
+	// closed - we don't want progress messages from operations in that
+	// ResultHandle to display after this function exits.
+	//
+	// Additionally, callers should wait for the progress channel to be closed.
+	// If we keep the session open and never close the progress channel, the
+	// caller will likely hang.
+	baseCh := ch
+	ch = make(chan *client.SolveStatus)
+	go func() {
+		for {
+			s, ok := <-ch
+			if !ok {
+				return
+			}
+			select {
+			case <-baseCh:
+				// base channel is closed, discard status messages
+			default:
+				baseCh <- s
+			}
+		}
+	}()
+	defer close(baseCh)
+
+	var resp *client.SolveResponse
+	var respErr error
+	var respHandle *ResultHandle
+
+	go func() {
+		defer cancel(context.Canceled) // ensure no dangling processes
+
+		var res *gateway.Result
+		var err error
+		resp, err = cc.Build(ctx, opt, product, func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+			var err error
+			res, err = buildFunc(ctx, c)
+
+			if res != nil && err == nil {
+				// Force evaluation of the build result (otherwise, we likely
+				// won't get a solve error)
+				def, err2 := getDefinition(ctx, res)
+				if err2 != nil {
+					return nil, err2
+				}
+				res, err = evalDefinition(ctx, c, def)
+			}
+
+			if err != nil {
+				// Scenario 1: we failed to evaluate a node somewhere in the
+				// build graph.
+				//
+				// In this case, we construct a ResultHandle from this
+				// original Build session, and return it alongside the original
+				// build error. We then need to keep the gateway session open
+				// until the caller explicitly closes the ResultHandle.
+
+				var se *errdefs.SolveError
+				if errors.As(err, &se) {
+					respHandle = &ResultHandle{
+						done:     make(chan struct{}),
+						solveErr: se,
+						gwClient: c,
+						gwCtx:    ctx,
+					}
+					respErr = se
+					close(done)
+
+					// Block until the caller closes the ResultHandle.
+					select {
+					case <-respHandle.done:
+					case <-ctx.Done():
+					}
+				}
+			}
+			return res, err
+		}, ch)
+		if respHandle != nil {
+			return
+		}
+		if err != nil {
+			// Something unexpected failed during the build, we didn't succeed,
+			// but we also didn't make it far enough to create a ResultHandle.
+			respErr = err
+			close(done)
+			return
+		}
+
+		// Scenario 2: we successfully built the image with no errors.
+		//
+		// In this case, the original gateway session has now been closed
+		// since the Build has been completed. So, we need to create a new
+		// gateway session to populate the ResultHandle. To do this, we
+		// need to re-evaluate the target result, in this new session. This
+		// should be instantaneous since the result should be cached.
+
+		def, err := getDefinition(ctx, res)
+		if err != nil {
+			respErr = err
+			close(done)
+			return
+		}
+
+		// NOTE: ideally this second connection should be lazily opened
+		opt := opt
+		opt.Ref = ""
+		opt.Exports = nil
+		opt.CacheExports = nil
+		_, respErr = cc.Build(ctx, opt, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+			res, err := evalDefinition(ctx, c, def)
+			if err != nil {
+				// This should probably not happen, since we've previously
+				// successfully evaluated the same result with no issues.
+				return nil, errors.Wrap(err, "inconsistent solve result")
+			}
+			respHandle = &ResultHandle{
+				done:     make(chan struct{}),
+				res:      res,
+				gwClient: c,
+				gwCtx:    ctx,
+			}
+			close(done)
+
+			// Block until the caller closes the ResultHandle.
+			select {
+			case <-respHandle.done:
+			case <-ctx.Done():
+			}
+			return nil, ctx.Err()
+		}, nil)
+		if respHandle != nil {
+			return
+		}
+		close(done)
+	}()
+
+	// Block until the other thread signals that it's completed the build.
+	select {
+	case <-done:
+	case <-baseCtx.Done():
+		if respErr == nil {
+			respErr = baseCtx.Err()
+		}
+	}
+	return respHandle, resp, respErr
+}
 
+// getDefinition converts a gateway result into a collection of definitions for
+// each ref in the result.
 func getDefinition(ctx context.Context, res *gateway.Result) (*result.Result[*pb.Definition], error) {
 	return result.ConvertResult(res, func(ref gateway.Reference) (*pb.Definition, error) {
 		st, err := ref.ToState()
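Note: taken together, the contract is to call NewResultHandle in place of client.Build, treat any non-nil handle as something that must be Done()'d, and keep draining the status channel until NewResultHandle closes it (note the defer close(baseCh) above). A minimal caller sketch, assuming a reachable buildkitd address and a trivial frontend request; a real caller would populate SolveOpt and the frontend options:

package main

import (
	"context"
	"log"

	"github.com/docker/buildx/build"
	"github.com/moby/buildkit/client"
	gateway "github.com/moby/buildkit/frontend/gateway/client"
)

func main() {
	ctx := context.Background()

	// Assumption: buildkitd is reachable at this address.
	cc, err := client.New(ctx, "unix:///run/buildkit/buildkitd.sock")
	if err != nil {
		log.Fatal(err)
	}

	buildFunc := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
		// A trivial frontend request; a real caller forwards Dockerfile options.
		return c.Solve(ctx, gateway.SolveRequest{Frontend: "dockerfile.v0"})
	}

	// NewResultHandle closes this channel itself, so the caller only drains it.
	ch := make(chan *client.SolveStatus)
	go func() {
		for range ch {
			// discard progress updates in this sketch
		}
	}()

	handle, resp, err := build.NewResultHandle(ctx, cc, client.SolveOpt{}, "buildx", buildFunc, ch)
	if handle != nil {
		defer handle.Done() // required whenever the handle is non-nil
	}
	if err != nil {
		log.Printf("build failed; the handle, if any, still allows debugging: %v", err)
		return
	}
	log.Printf("solve response: %v", resp.ExporterResponse)
}
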
@@ -44,136 +215,58 @@ func getDefinition(ctx context.Context, res *gateway.Result) (*result.Result[*pb
 	})
 }
 
-func getResultAt(ctx context.Context, c *client.Client, solveOpt client.SolveOpt, targets *result.Result[*pb.Definition], statusChan chan *client.SolveStatus) (*ResultContext, error) {
-	ctx, cancel := context.WithCancel(ctx)
-	defer cancel()
-
-	// forward SolveStatus
-	done := new(atomic.Bool)
-	defer done.Store(true)
-	ch := make(chan *client.SolveStatus)
-	go func() {
-		for {
-			s := <-ch
-			if s == nil {
-				return
-			}
-			if done.Load() {
-				// Do not forward if the function returned because statusChan is possibly closed
-				continue
-			}
-			select {
-			case statusChan <- s:
-			case <-ctx.Done():
-			}
-		}
-	}()
-
-	// get result
-	resultCtxCh := make(chan *ResultContext)
-	errCh := make(chan error)
-	go func() {
-		solveOpt := solveOpt
-		solveOpt.Ref = ""
-		buildDoneCh := make(chan struct{})
-		_, err := c.Build(context.Background(), solveOpt, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
-			doneErr := errors.Errorf("done")
-			ctx, cancel := context.WithCancelCause(ctx)
-			defer cancel(doneErr)
-
-			// force evaluation of all targets in parallel
-			results := make(map[*pb.Definition]*gateway.Result)
-			resultsMu := sync.Mutex{}
-			eg, egCtx := errgroup.WithContext(ctx)
-			targets.EachRef(func(def *pb.Definition) error {
-				eg.Go(func() error {
-					res2, err := c.Solve(egCtx, gateway.SolveRequest{
-						Evaluate:   true,
-						Definition: def,
-					})
-					if err != nil {
-						return err
-					}
-					resultsMu.Lock()
-					results[def] = res2
-					resultsMu.Unlock()
-					return nil
-				})
-				return nil
-			})
-			resultCtx := ResultContext{}
-			if err := eg.Wait(); err != nil {
-				var se *errdefs.SolveError
-				if errors.As(err, &se) {
-					resultCtx.solveErr = se
-				} else {
-					return nil, err
-				}
-			}
-			res2, _ := result.ConvertResult(targets, func(def *pb.Definition) (gateway.Reference, error) {
-				if res, ok := results[def]; ok {
-					return res.Ref, nil
-				}
-				return nil, nil
-			})
-
-			// Record the client and ctx as well so that containers can be created from the SolveError.
-			resultCtx.res = res2
-			resultCtx.gwClient = c
-			resultCtx.gwCtx = ctx
-			resultCtx.gwDone = func() {
-				cancel(doneErr)
-				// wait for Build() completion(or timeout) to ensure the Build's finalizing and avoiding an error "context canceled"
-				select {
-				case <-buildDoneCh:
-				case <-time.After(5 * time.Second):
-				}
-			}
-			select {
-			case resultCtxCh <- &resultCtx:
-			case <-ctx.Done():
-				return nil, ctx.Err()
-			}
-
-			// wait for cleanup or cancel
-			<-ctx.Done()
-			if context.Cause(ctx) != doneErr { // doneErr is not an error.
-				return nil, ctx.Err()
-			}
-			return nil, nil
-		}, ch)
-		close(buildDoneCh)
-		if err != nil {
-			errCh <- err
-		}
-	}()
-
-	select {
-	case resultCtx := <-resultCtxCh:
-		return resultCtx, nil
-	case err := <-errCh:
-		return nil, err
-	case <-ctx.Done():
-		return nil, ctx.Err()
-	}
-}
+// evalDefinition performs the reverse of getDefinition, converting a
+// collection of definitions into a gateway result.
+func evalDefinition(ctx context.Context, c gateway.Client, defs *result.Result[*pb.Definition]) (*gateway.Result, error) {
+	// force evaluation of all targets in parallel
+	results := make(map[*pb.Definition]*gateway.Result)
+	resultsMu := sync.Mutex{}
+	eg, egCtx := errgroup.WithContext(ctx)
+	defs.EachRef(func(def *pb.Definition) error {
+		eg.Go(func() error {
+			res, err := c.Solve(egCtx, gateway.SolveRequest{
+				Evaluate:   true,
+				Definition: def,
+			})
+			if err != nil {
+				return err
+			}
+			resultsMu.Lock()
+			results[def] = res
+			resultsMu.Unlock()
+			return nil
+		})
+		return nil
+	})
+	if err := eg.Wait(); err != nil {
+		return nil, err
+	}
+	res, _ := result.ConvertResult(defs, func(def *pb.Definition) (gateway.Reference, error) {
+		if res, ok := results[def]; ok {
+			return res.Ref, nil
+		}
+		return nil, nil
+	})
+	return res, nil
+}
 
-// ResultContext is a build result with the client that built it.
-type ResultContext struct {
+// ResultHandle is a build result with the client that built it.
+type ResultHandle struct {
 	res      *gateway.Result
 	solveErr *errdefs.SolveError
 
-	gwClient   gateway.Client
-	gwCtx      context.Context
-	gwDone     func()
-	gwDoneOnce sync.Once
+	done     chan struct{}
+	doneOnce sync.Once
+
+	gwClient gateway.Client
+	gwCtx    context.Context
 
 	cleanups   []func()
 	cleanupsMu sync.Mutex
 }
 
-func (r *ResultContext) Done() {
-	r.gwDoneOnce.Do(func() {
+func (r *ResultHandle) Done() {
+	r.doneOnce.Do(func() {
 		r.cleanupsMu.Lock()
 		cleanups := r.cleanups
 		r.cleanups = nil
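Note: the essential mechanism shared by getDefinition and evalDefinition is the round trip between lazy references and marshaled LLB: a Solve with Evaluate set to true forces the graph to actually run, which is what makes failures surface as *errdefs.SolveError inside the gateway session. A single-definition sketch of that call, with an illustrative helper name:

package evalsketch

import (
	"context"

	gateway "github.com/moby/buildkit/frontend/gateway/client"
	"github.com/moby/buildkit/solver/pb"
)

// forceEval runs one marshaled definition to completion instead of returning
// a lazy reference, so solve errors surface immediately.
func forceEval(ctx context.Context, c gateway.Client, def *pb.Definition) (*gateway.Result, error) {
	return c.Solve(ctx, gateway.SolveRequest{
		Evaluate:   true,
		Definition: def,
	})
}
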
@@ -181,22 +274,24 @@ func (r *ResultContext) Done() {
 		for _, f := range cleanups {
 			f()
 		}
-		r.gwDone()
+
+		close(r.done)
+		<-r.gwCtx.Done()
 	})
 }
 
-func (r *ResultContext) registerCleanup(f func()) {
+func (r *ResultHandle) registerCleanup(f func()) {
 	r.cleanupsMu.Lock()
 	r.cleanups = append(r.cleanups, f)
 	r.cleanupsMu.Unlock()
 }
 
-func (r *ResultContext) build(buildFunc gateway.BuildFunc) (err error) {
+func (r *ResultHandle) build(buildFunc gateway.BuildFunc) (err error) {
 	_, err = buildFunc(r.gwCtx, r.gwClient)
 	return err
 }
 
-func (r *ResultContext) getContainerConfig(ctx context.Context, c gateway.Client, cfg *controllerapi.InvokeConfig) (containerCfg gateway.NewContainerRequest, _ error) {
+func (r *ResultHandle) getContainerConfig(ctx context.Context, c gateway.Client, cfg *controllerapi.InvokeConfig) (containerCfg gateway.NewContainerRequest, _ error) {
 	if r.res != nil && r.solveErr == nil {
 		logrus.Debugf("creating container from successful build")
 		ccfg, err := containerConfigFromResult(ctx, r.res, c, *cfg)
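Note: the new Done() shape is a close-then-wait handshake. Closing done releases whichever background goroutine is parked inside NewResultHandle, and waiting on gwCtx.Done() guarantees the gateway session has fully shut down before Done() returns. Reduced to its skeleton (field and type names mirror the diff; this is a sketch, not the full type):

package donesketch

import (
	"context"
	"sync"
)

type handle struct {
	done     chan struct{}
	doneOnce sync.Once
	gwCtx    context.Context
}

// Done signals the background build goroutine to exit, then blocks until the
// gateway context is cancelled, i.e. until the session is really gone.
func (h *handle) Done() {
	h.doneOnce.Do(func() {
		close(h.done)
		<-h.gwCtx.Done()
	})
}
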
@@ -215,7 +310,7 @@ func (r *ResultContext) getContainerConfig(ctx context.Context, c gateway.Client
 	return containerCfg, nil
 }
 
-func (r *ResultContext) getProcessConfig(cfg *controllerapi.InvokeConfig, stdin io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) (_ gateway.StartRequest, err error) {
+func (r *ResultHandle) getProcessConfig(cfg *controllerapi.InvokeConfig, stdin io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) (_ gateway.StartRequest, err error) {
 	processCfg := newStartRequest(stdin, stdout, stderr)
 	if r.res != nil && r.solveErr == nil {
 		logrus.Debugf("creating container from successful build")

@@ -304,7 +304,10 @@ func getImageID(resp map[string]string) string {
 }
 
 func runBasicBuild(ctx context.Context, dockerCli command.Cli, opts *controllerapi.BuildOptions, options buildOptions, printer *progress.Printer) (*client.SolveResponse, error) {
-	resp, _, err := cbuild.RunBuild(ctx, dockerCli, *opts, os.Stdin, printer, false)
+	resp, res, err := cbuild.RunBuild(ctx, dockerCli, *opts, os.Stdin, printer, false)
+	if res != nil {
+		res.Done()
+	}
 	return resp, err
 }
 

@@ -33,10 +33,10 @@ const defaultTargetName = "default"
 
 // RunBuild runs the specified build and returns the result.
 //
-// NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultContext,
+// NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultHandle,
 // this function returns it in addition to the error (i.e. it does "return nil, res, err"). The caller can
 // inspect the result and debug the cause of that error.
-func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.BuildOptions, inStream io.Reader, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultContext, error) {
+func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.BuildOptions, inStream io.Reader, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultHandle, error) {
 	if in.NoCache && len(in.NoCacheFilter) > 0 {
 		return nil, nil, errors.Errorf("--no-cache and --no-cache-filter cannot currently be used together")
 	}

@@ -176,7 +176,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
 	resp, res, err := buildTargets(ctx, dockerCli, b.NodeGroup, nodes, map[string]build.Options{defaultTargetName: opts}, progress, generateResult)
 	err = wrapBuildError(err, false)
 	if err != nil {
-		// NOTE: buildTargets can return *build.ResultContext even on error.
+		// NOTE: buildTargets can return *build.ResultHandle even on error.
 		return nil, res, err
 	}
 	return resp, res, nil
@@ -184,17 +184,17 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
 
 // buildTargets runs the specified build and returns the result.
 //
-// NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultContext,
+// NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultHandle,
 // this function returns it in addition to the error (i.e. it does "return nil, res, err"). The caller can
 // inspect the result and debug the cause of that error.
-func buildTargets(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, nodes []builder.Node, opts map[string]build.Options, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultContext, error) {
-	var res *build.ResultContext
+func buildTargets(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, nodes []builder.Node, opts map[string]build.Options, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultHandle, error) {
+	var res *build.ResultHandle
 	var resp map[string]*client.SolveResponse
 	var err error
 	if generateResult {
 		var mu sync.Mutex
 		var idx int
-		resp, err = build.BuildWithResultHandler(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress, func(driverIndex int, gotRes *build.ResultContext) {
+		resp, err = build.BuildWithResultHandler(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress, func(driverIndex int, gotRes *build.ResultHandle) {
 			mu.Lock()
 			defer mu.Unlock()
 			if res == nil || driverIndex < idx {
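Note: one subtlety in buildTargets is that, with multiple drivers, the handler callback can fire more than once, so the controller retains only the handle reported by the lowest driver index (the mu/idx bookkeeping above). The selection logic in isolation, with illustrative names:

package selectsketch

import (
	"sync"

	"github.com/docker/buildx/build"
)

// lowestHandle keeps the *build.ResultHandle reported by the lowest driver
// index, mirroring the callback passed to BuildWithResultHandler above.
type lowestHandle struct {
	mu  sync.Mutex
	idx int
	res *build.ResultHandle
}

func (l *lowestHandle) record(driverIndex int, h *build.ResultHandle) {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.res == nil || driverIndex < l.idx {
		l.idx, l.res = driverIndex, h
	}
}
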
@@ -29,7 +29,7 @@ func NewLocalBuildxController(ctx context.Context, dockerCli command.Cli, logger
 type buildConfig struct {
 	// TODO: these two structs should be merged
 	// Discussion: https://github.com/docker/buildx/pull/1640#discussion_r1113279719
-	resultCtx    *build.ResultContext
+	resultCtx    *build.ResultHandle
 	buildOptions *controllerapi.BuildOptions
 }
 

@@ -49,7 +49,7 @@ func (b *localController) Build(ctx context.Context, options controllerapi.Build
 	defer b.buildOnGoing.Store(false)
 
 	resp, res, buildErr := cbuild.RunBuild(ctx, b.dockerCli, options, in, progress, true)
-	// NOTE: RunBuild can return *build.ResultContext even on error.
+	// NOTE: RunBuild can return *build.ResultHandle even on error.
 	if res != nil {
 		b.buildConfig = buildConfig{
 			resultCtx: res,

@@ -98,7 +98,7 @@ func (m *Manager) DeleteProcess(id string) error {
 // When a container isn't available (i.e. first time invoking or the container has exited) or cfg.Rollback is set,
 // this method will start a new container and run the process in it. Otherwise, this method starts a new process in the
 // existing container.
-func (m *Manager) StartProcess(pid string, resultCtx *build.ResultContext, cfg *pb.InvokeConfig) (*Process, error) {
+func (m *Manager) StartProcess(pid string, resultCtx *build.ResultHandle, cfg *pb.InvokeConfig) (*Process, error) {
 	// Get the target result to invoke a container from
 	var ctr *build.Container
 	if a := m.container.Load(); a != nil {

@@ -148,7 +148,7 @@ func serveCmd(dockerCli command.Cli) *cobra.Command {
 	}()
 
 	// prepare server
-	b := NewServer(func(ctx context.Context, options *controllerapi.BuildOptions, stdin io.Reader, progress progress.Writer) (*client.SolveResponse, *build.ResultContext, error) {
+	b := NewServer(func(ctx context.Context, options *controllerapi.BuildOptions, stdin io.Reader, progress progress.Writer) (*client.SolveResponse, *build.ResultHandle, error) {
 		return cbuild.RunBuild(ctx, dockerCli, *options, stdin, progress, true)
 	})
 	defer b.Close()

@@ -19,7 +19,7 @@ import (
 	"golang.org/x/sync/errgroup"
 )
 
-type BuildFunc func(ctx context.Context, options *pb.BuildOptions, stdin io.Reader, progress progress.Writer) (resp *client.SolveResponse, res *build.ResultContext, err error)
+type BuildFunc func(ctx context.Context, options *pb.BuildOptions, stdin io.Reader, progress progress.Writer) (resp *client.SolveResponse, res *build.ResultHandle, err error)
 
 func NewServer(buildFunc BuildFunc) *Server {
 	return &Server{

@@ -40,7 +40,7 @@ type session struct {
 	buildOptions *pb.BuildOptions
 	inputPipe    *io.PipeWriter
 
-	result *build.ResultContext
+	result *build.ResultHandle
 
 	processes *processes.Manager
 }

@@ -205,7 +205,7 @@ func (m *Server) Build(ctx context.Context, req *pb.BuildRequest) (*pb.BuildResp
 	resp, res, buildErr := m.buildFunc(ctx, req.Options, inR, pw)
 	m.sessionMu.Lock()
 	if s, ok := m.session[ref]; ok {
-		// NOTE: buildFunc can return *build.ResultContext even on error (e.g. when it's implemented using (github.com/docker/buildx/controller/build).RunBuild).
+		// NOTE: buildFunc can return *build.ResultHandle even on error (e.g. when it's implemented using (github.com/docker/buildx/controller/build).RunBuild).
 		if res != nil {
 			s.result = res
 			s.cancelBuild = cancel