mirror of https://github.com/docker/buildx.git
build: rename ResultContext to ResultHandle
Signed-off-by: Justin Chadwell <me@jedevc.com>
parent 8d822fb06c
commit cd1648192e
@@ -667,7 +667,7 @@ func Build(ctx context.Context, nodes []builder.Node, opt map[string]Options, do
     return BuildWithResultHandler(ctx, nodes, opt, docker, configDir, w, nil)
 }

-func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[string]Options, docker *dockerutil.Client, configDir string, w progress.Writer, resultHandleFunc func(driverIndex int, rCtx *ResultContext)) (resp map[string]*client.SolveResponse, err error) {
+func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[string]Options, docker *dockerutil.Client, configDir string, w progress.Writer, resultHandleFunc func(driverIndex int, rCtx *ResultHandle)) (resp map[string]*client.SolveResponse, err error) {
     if len(nodes) == 0 {
         return nil, errors.Errorf("driver required for build")
     }
@@ -934,9 +934,9 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
     }
     var rr *client.SolveResponse
     if resultHandleFunc != nil {
-        var resultCtx *ResultContext
-        resultCtx, rr, err = NewResultContext(ctx, cc, so, "buildx", buildFunc, ch)
-        resultHandleFunc(dp.driverIndex, resultCtx)
+        var resultHandle *ResultHandle
+        resultHandle, rr, err = NewResultHandle(ctx, cc, so, "buildx", buildFunc, ch)
+        resultHandleFunc(dp.driverIndex, resultHandle)
     } else {
         rr, err = c.Build(ctx, so, "buildx", buildFunc, ch)
     }
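Illustrative sketch (not part of this commit): one way a caller might consume the renamed callback API above, keeping the first non-nil handle for later debugging and leaving its release to the receiver via Done(). The package and function names are hypothetical, the surrounding setup (nodes, options, docker client, config dir, progress writer) is assumed to exist, and the import paths follow my reading of the buildx module layout.

// Hypothetical example, not from this commit.
package example // hypothetical package name

import (
	"context"
	"sync"

	"github.com/docker/buildx/build"
	"github.com/docker/buildx/builder"
	"github.com/docker/buildx/util/dockerutil"
	"github.com/docker/buildx/util/progress"
	"github.com/moby/buildkit/client"
)

func buildWithHandle(ctx context.Context, nodes []builder.Node, opts map[string]build.Options,
	docker *dockerutil.Client, configDir string, pw progress.Writer,
) (map[string]*client.SolveResponse, *build.ResultHandle, error) {
	var mu sync.Mutex
	var handle *build.ResultHandle
	resp, err := build.BuildWithResultHandler(ctx, nodes, opts, docker, configDir, pw,
		func(driverIndex int, h *build.ResultHandle) {
			// The callback may fire once per driver; keep the first usable handle.
			mu.Lock()
			defer mu.Unlock()
			if handle == nil && h != nil {
				handle = h
			}
		})
	// Whoever receives handle must call handle.Done() once finished with it.
	return resp, handle, err
}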
@@ -21,10 +21,10 @@ type Container struct {
     initStarted atomic.Bool
     container gateway.Container
     releaseCh chan struct{}
-    resultCtx *ResultContext
+    resultCtx *ResultHandle
 }

-func NewContainer(ctx context.Context, resultCtx *ResultContext, cfg *controllerapi.InvokeConfig) (*Container, error) {
+func NewContainer(ctx context.Context, resultCtx *ResultHandle, cfg *controllerapi.InvokeConfig) (*Container, error) {
     mainCtx := ctx

     ctrCh := make(chan *Container)
@@ -112,7 +112,7 @@ func (c *Container) Exec(ctx context.Context, cfg *controllerapi.InvokeConfig, s
     return err
 }

-func exec(ctx context.Context, resultCtx *ResultContext, cfg *controllerapi.InvokeConfig, ctr gateway.Container, stdin io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) error {
+func exec(ctx context.Context, resultCtx *ResultHandle, cfg *controllerapi.InvokeConfig, ctr gateway.Container, stdin io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) error {
     processCfg, err := resultCtx.getProcessConfig(cfg, stdin, stdout, stderr)
     if err != nil {
         return err
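Illustrative sketch (not part of this commit): how a debug shell might be opened against the container that a ResultHandle backs, using the NewContainer and Exec entry points shown above. The handle and InvokeConfig are assumed to be produced elsewhere; the helper name is hypothetical.

// Hypothetical example, not from this commit.
package example // hypothetical package name

import (
	"context"
	"os"

	"github.com/docker/buildx/build"
	controllerapi "github.com/docker/buildx/controller/pb"
)

func debugShell(ctx context.Context, handle *build.ResultHandle, cfg *controllerapi.InvokeConfig) error {
	// NewContainer (re)creates a container from the handle's result or solve error.
	ctr, err := build.NewContainer(ctx, handle, cfg)
	if err != nil {
		return err
	}
	// Exec runs the configured process in that container, wired to our stdio.
	return ctr.Exec(ctx, cfg, os.Stdin, os.Stdout, os.Stderr)
}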
@@ -20,21 +20,21 @@ import (
     "golang.org/x/sync/errgroup"
 )

-// NewResultContext wraps a call to client.Build, additionally returning a
-// ResultContext alongside the standard response and error.
+// NewResultHandle wraps a call to client.Build, additionally returning a
+// ResultHandle alongside the standard response and error.
 //
-// This ResultContext can be used to execute additional build steps in the same
+// This ResultHandle can be used to execute additional build steps in the same
 // context as the build occurred, which can allow easy debugging of build
 // failures and successes.
 //
-// If the returned ResultContext is not nil, the caller must call Done() on it.
-func NewResultContext(ctx context.Context, cc *client.Client, opt client.SolveOpt, product string, buildFunc gateway.BuildFunc, ch chan *client.SolveStatus) (*ResultContext, *client.SolveResponse, error) {
+// If the returned ResultHandle is not nil, the caller must call Done() on it.
+func NewResultHandle(ctx context.Context, cc *client.Client, opt client.SolveOpt, product string, buildFunc gateway.BuildFunc, ch chan *client.SolveStatus) (*ResultHandle, *client.SolveResponse, error) {
     // Create a new context to wrap the original, and cancel it when the
     // caller-provided context is cancelled.
     //
     // We derive the context from the background context so that we can forbid
     // cancellation of the build request after <-done is closed (which we do
-    // before returning the ResultContext).
+    // before returning the ResultHandle).
     baseCtx := ctx
     ctx, cancel := context.WithCancelCause(context.Background())
     done := make(chan struct{})
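Illustrative sketch (not part of this commit): the Done() contract spelled out in the doc comment above, as a caller might honor it. The BuildKit client, solve options, build function, and status channel are assumed to be prepared already; the function name is hypothetical.

// Hypothetical example, not from this commit.
package example // hypothetical package name

import (
	"context"

	"github.com/docker/buildx/build"
	"github.com/moby/buildkit/client"
	gateway "github.com/moby/buildkit/frontend/gateway/client"
)

func solveWithHandle(ctx context.Context, cc *client.Client, opt client.SolveOpt,
	buildFunc gateway.BuildFunc, ch chan *client.SolveStatus,
) (*client.SolveResponse, error) {
	handle, resp, err := build.NewResultHandle(ctx, cc, opt, "buildx", buildFunc, ch)
	if handle != nil {
		// A non-nil handle keeps a gateway session open (on success or failure)
		// until it is released, so always call Done() when finished with it.
		defer handle.Done()
	}
	// ... inspect or debug the build through handle before Done() runs ...
	return resp, err
}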
@@ -43,7 +43,7 @@ func NewResultContext(ctx context.Context, cc *client.Client, opt client.SolveOp
         case <-baseCtx.Done():
             cancel(baseCtx.Err())
         case <-done:
-            // Once done is closed, we've recorded a ResultContext, so we
+            // Once done is closed, we've recorded a ResultHandle, so we
             // shouldn't allow cancelling the underlying build request anymore.
         }
     }()
@@ -52,9 +52,9 @@ func NewResultContext(ctx context.Context, cc *client.Client, opt client.SolveOp
     //
     // We do this so that we can discard status messages after the main portion
     // of the build is complete. This is necessary for the solve error case,
-    // where the original gateway is kept open until the ResultContext is
+    // where the original gateway is kept open until the ResultHandle is
     // closed - we don't want progress messages from operations in that
-    // ResultContext to display after this function exits.
+    // ResultHandle to display after this function exits.
     //
     // Additionally, callers should wait for the progress channel to be closed.
     // If we keep the session open and never close the progress channel, the
@@ -77,9 +77,9 @@ func NewResultContext(ctx context.Context, cc *client.Client, opt client.SolveOp
     }()
     defer close(baseCh)

-    var resCtx *ResultContext
     var resp *client.SolveResponse
     var respErr error
+    var respHandle *ResultHandle

     go func() {
         defer cancel(context.Canceled) // ensure no dangling processes
@@ -104,14 +104,14 @@ func NewResultContext(ctx context.Context, cc *client.Client, opt client.SolveOp
                 // Scenario 1: we failed to evaluate a node somewhere in the
                 // build graph.
                 //
-                // In this case, we construct a ResultContext from this
+                // In this case, we construct a ResultHandle from this
                 // original Build session, and return it alongside the original
                 // build error. We then need to keep the gateway session open
-                // until the caller explicitly closes the ResultContext.
+                // until the caller explicitly closes the ResultHandle.

                 var se *errdefs.SolveError
                 if errors.As(err, &se) {
-                    resCtx = &ResultContext{
+                    respHandle = &ResultHandle{
                         done: make(chan struct{}),
                         solveErr: se,
                         gwClient: c,
@@ -120,21 +120,21 @@ func NewResultContext(ctx context.Context, cc *client.Client, opt client.SolveOp
                     respErr = se
                     close(done)

-                    // Block until the caller closes the ResultContext.
+                    // Block until the caller closes the ResultHandle.
                     select {
-                    case <-resCtx.done:
+                    case <-respHandle.done:
                     case <-ctx.Done():
                     }
                 }
             }
             return res, err
         }, ch)
-        if resCtx != nil {
+        if respHandle != nil {
             return
         }
         if err != nil {
             // Something unexpected failed during the build, we didn't succeed,
-            // but we also didn't make it far enough to create a ResultContext.
+            // but we also didn't make it far enough to create a ResultHandle.
             respErr = err
             close(done)
             return
@@ -144,7 +144,7 @@ func NewResultContext(ctx context.Context, cc *client.Client, opt client.SolveOp
         //
         // In this case, the original gateway session has now been closed
         // since the Build has been completed. So, we need to create a new
-        // gateway session to populate the ResultContext. To do this, we
+        // gateway session to populate the ResultHandle. To do this, we
         // need to re-evaluate the target result, in this new session. This
         // should be instantaneous since the result should be cached.

@@ -165,7 +165,7 @@ func NewResultContext(ctx context.Context, cc *client.Client, opt client.SolveOp
                 // successfully evaluated the same result with no issues.
                 return nil, errors.Wrap(err, "inconsistent solve result")
             }
-            resCtx = &ResultContext{
+            respHandle = &ResultHandle{
                 done: make(chan struct{}),
                 res: res,
                 gwClient: c,
@@ -173,14 +173,14 @@ func NewResultContext(ctx context.Context, cc *client.Client, opt client.SolveOp
             }
             close(done)

-            // Block until the caller closes the ResultContext.
+            // Block until the caller closes the ResultHandle.
             select {
-            case <-resCtx.done:
+            case <-respHandle.done:
             case <-ctx.Done():
             }
             return nil, ctx.Err()
         }, nil)
-        if resCtx != nil {
+        if respHandle != nil {
             return
         }
         close(done)
@@ -194,7 +194,7 @@ func NewResultContext(ctx context.Context, cc *client.Client, opt client.SolveOp
             respErr = baseCtx.Err()
         }
     }
-    return resCtx, resp, respErr
+    return respHandle, resp, respErr
 }

 // getDefinition converts a gateway result into a collection of definitions for
@@ -248,8 +248,8 @@ func evalDefinition(ctx context.Context, c gateway.Client, defs *result.Result[*
     return res, nil
 }

-// ResultContext is a build result with the client that built it.
-type ResultContext struct {
+// ResultHandle is a build result with the client that built it.
+type ResultHandle struct {
     res *gateway.Result
     solveErr *errdefs.SolveError

@@ -263,7 +263,7 @@ type ResultContext struct {
     cleanupsMu sync.Mutex
 }

-func (r *ResultContext) Done() {
+func (r *ResultHandle) Done() {
     r.doneOnce.Do(func() {
         r.cleanupsMu.Lock()
         cleanups := r.cleanups
@@ -278,18 +278,18 @@ func (r *ResultContext) Done() {
     })
 }

-func (r *ResultContext) registerCleanup(f func()) {
+func (r *ResultHandle) registerCleanup(f func()) {
     r.cleanupsMu.Lock()
     r.cleanups = append(r.cleanups, f)
     r.cleanupsMu.Unlock()
 }

-func (r *ResultContext) build(buildFunc gateway.BuildFunc) (err error) {
+func (r *ResultHandle) build(buildFunc gateway.BuildFunc) (err error) {
     _, err = buildFunc(r.gwCtx, r.gwClient)
     return err
 }

-func (r *ResultContext) getContainerConfig(ctx context.Context, c gateway.Client, cfg *controllerapi.InvokeConfig) (containerCfg gateway.NewContainerRequest, _ error) {
+func (r *ResultHandle) getContainerConfig(ctx context.Context, c gateway.Client, cfg *controllerapi.InvokeConfig) (containerCfg gateway.NewContainerRequest, _ error) {
     if r.res != nil && r.solveErr == nil {
         logrus.Debugf("creating container from successful build")
         ccfg, err := containerConfigFromResult(ctx, r.res, c, *cfg)
@@ -308,7 +308,7 @@ func (r *ResultContext) getContainerConfig(ctx context.Context, c gateway.Client
     return containerCfg, nil
 }

-func (r *ResultContext) getProcessConfig(cfg *controllerapi.InvokeConfig, stdin io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) (_ gateway.StartRequest, err error) {
+func (r *ResultHandle) getProcessConfig(cfg *controllerapi.InvokeConfig, stdin io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) (_ gateway.StartRequest, err error) {
     processCfg := newStartRequest(stdin, stdout, stderr)
     if r.res != nil && r.solveErr == nil {
         logrus.Debugf("creating container from successful build")
@@ -33,10 +33,10 @@ const defaultTargetName = "default"

 // RunBuild runs the specified build and returns the result.
 //
-// NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultContext,
+// NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultHandle,
 // this function returns it in addition to the error (i.e. it does "return nil, res, err"). The caller can
 // inspect the result and debug the cause of that error.
-func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.BuildOptions, inStream io.Reader, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultContext, error) {
+func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.BuildOptions, inStream io.Reader, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultHandle, error) {
     if in.NoCache && len(in.NoCacheFilter) > 0 {
         return nil, nil, errors.Errorf("--no-cache and --no-cache-filter cannot currently be used together")
     }
@@ -176,7 +176,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
     resp, res, err := buildTargets(ctx, dockerCli, b.NodeGroup, nodes, map[string]build.Options{defaultTargetName: opts}, progress, generateResult)
     err = wrapBuildError(err, false)
     if err != nil {
-        // NOTE: buildTargets can return *build.ResultContext even on error.
+        // NOTE: buildTargets can return *build.ResultHandle even on error.
         return nil, res, err
     }
     return resp, res, nil
@@ -184,17 +184,17 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build

 // buildTargets runs the specified build and returns the result.
 //
-// NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultContext,
+// NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultHandle,
 // this function returns it in addition to the error (i.e. it does "return nil, res, err"). The caller can
 // inspect the result and debug the cause of that error.
-func buildTargets(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, nodes []builder.Node, opts map[string]build.Options, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultContext, error) {
-    var res *build.ResultContext
+func buildTargets(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, nodes []builder.Node, opts map[string]build.Options, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultHandle, error) {
+    var res *build.ResultHandle
     var resp map[string]*client.SolveResponse
     var err error
     if generateResult {
         var mu sync.Mutex
         var idx int
-        resp, err = build.BuildWithResultHandler(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress, func(driverIndex int, gotRes *build.ResultContext) {
+        resp, err = build.BuildWithResultHandler(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress, func(driverIndex int, gotRes *build.ResultHandle) {
             mu.Lock()
             defer mu.Unlock()
             if res == nil || driverIndex < idx {
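The last hunk above is cut off inside the callback. A hedged sketch of how a callback like this could complete, recording the handle from the lowest driver index under the mutex, is shown below; it is a plausible continuation, not the commit's own code, and mu, idx, and res are the surrounding function's variables visible in the hunk.

        // Hypothetical continuation sketch, not taken from this commit.
        func(driverIndex int, gotRes *build.ResultHandle) {
            mu.Lock()
            defer mu.Unlock()
            if res == nil || driverIndex < idx {
                // Prefer the handle reported by the lowest driver index so the
                // chosen result is deterministic across concurrent drivers.
                idx, res = driverIndex, gotRes
            }
        })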
@@ -29,7 +29,7 @@ func NewLocalBuildxController(ctx context.Context, dockerCli command.Cli, logger
 type buildConfig struct {
     // TODO: these two structs should be merged
     // Discussion: https://github.com/docker/buildx/pull/1640#discussion_r1113279719
-    resultCtx *build.ResultContext
+    resultCtx *build.ResultHandle
     buildOptions *controllerapi.BuildOptions
 }

@@ -49,7 +49,7 @@ func (b *localController) Build(ctx context.Context, options controllerapi.Build
     defer b.buildOnGoing.Store(false)

     resp, res, buildErr := cbuild.RunBuild(ctx, b.dockerCli, options, in, progress, true)
-    // NOTE: RunBuild can return *build.ResultContext even on error.
+    // NOTE: RunBuild can return *build.ResultHandle even on error.
     if res != nil {
         b.buildConfig = buildConfig{
             resultCtx: res,
@@ -98,7 +98,7 @@ func (m *Manager) DeleteProcess(id string) error {
 // When a container isn't available (i.e. first time invoking or the container has exited) or cfg.Rollback is set,
 // this method will start a new container and run the process in it. Otherwise, this method starts a new process in the
 // existing container.
-func (m *Manager) StartProcess(pid string, resultCtx *build.ResultContext, cfg *pb.InvokeConfig) (*Process, error) {
+func (m *Manager) StartProcess(pid string, resultCtx *build.ResultHandle, cfg *pb.InvokeConfig) (*Process, error) {
     // Get the target result to invoke a container from
     var ctr *build.Container
     if a := m.container.Load(); a != nil {
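Illustrative sketch (not part of this commit): calling StartProcess against the handle from the last build, relying on the behavior described in the doc comment above (reuse the running container when possible, otherwise boot a fresh one). The pid and InvokeConfig are assumed to be chosen by the caller; the function name is hypothetical.

// Hypothetical example, not from this commit.
package example // hypothetical package name

import (
	"github.com/docker/buildx/build"
	pb "github.com/docker/buildx/controller/pb"
	"github.com/docker/buildx/controller/processes"
)

func invoke(m *processes.Manager, pid string, handle *build.ResultHandle, cfg *pb.InvokeConfig) error {
	// StartProcess reuses the existing container when it can, or creates one
	// from the handle (e.g. on the first invocation or after a rollback).
	proc, err := m.StartProcess(pid, handle, cfg)
	if err != nil {
		return err
	}
	_ = proc // a monitor would typically wait on proc or forward signals to it
	return nil
}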
@@ -148,7 +148,7 @@ func serveCmd(dockerCli command.Cli) *cobra.Command {
     }()

     // prepare server
-    b := NewServer(func(ctx context.Context, options *controllerapi.BuildOptions, stdin io.Reader, progress progress.Writer) (*client.SolveResponse, *build.ResultContext, error) {
+    b := NewServer(func(ctx context.Context, options *controllerapi.BuildOptions, stdin io.Reader, progress progress.Writer) (*client.SolveResponse, *build.ResultHandle, error) {
         return cbuild.RunBuild(ctx, dockerCli, *options, stdin, progress, true)
     })
     defer b.Close()
@@ -19,7 +19,7 @@ import (
     "golang.org/x/sync/errgroup"
 )

-type BuildFunc func(ctx context.Context, options *pb.BuildOptions, stdin io.Reader, progress progress.Writer) (resp *client.SolveResponse, res *build.ResultContext, err error)
+type BuildFunc func(ctx context.Context, options *pb.BuildOptions, stdin io.Reader, progress progress.Writer) (resp *client.SolveResponse, res *build.ResultHandle, err error)

 func NewServer(buildFunc BuildFunc) *Server {
     return &Server{
@@ -40,7 +40,7 @@ type session struct {
     buildOptions *pb.BuildOptions
     inputPipe *io.PipeWriter

-    result *build.ResultContext
+    result *build.ResultHandle

     processes *processes.Manager
 }
@@ -205,7 +205,7 @@ func (m *Server) Build(ctx context.Context, req *pb.BuildRequest) (*pb.BuildResp
     resp, res, buildErr := m.buildFunc(ctx, req.Options, inR, pw)
     m.sessionMu.Lock()
     if s, ok := m.session[ref]; ok {
-        // NOTE: buildFunc can return *build.ResultContext even on error (e.g. when it's implemented using (github.com/docker/buildx/controller/build).RunBuild).
+        // NOTE: buildFunc can return *build.ResultHandle even on error (e.g. when it's implemented using (github.com/docker/buildx/controller/build).RunBuild).
         if res != nil {
             s.result = res
             s.cancelBuild = cancel