controller: refactor progress api

Refactor the progress printer creation to the caller side of the
controller api. Then, instead of passing around status channels (and
progressMode strings), we can simply pass around the higher-level
progress.Writer interface (sketched below).

This has a couple of benefits:
- A simplified interface to the controller
- Allows us to extract warnings out of the controller, so that they can
  be displayed correctly from the client side.
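
A minimal sketch of that interface's surface, for reference (the method
set is inferred from the pb writer added in this diff; the no-op
implementation and package name here are hypothetical):

package progressexample

import (
	"github.com/docker/buildx/util/progress"
	"github.com/moby/buildkit/client"
	"github.com/opencontainers/go-digest"
)

// discardWriter is a hypothetical no-op implementation, showing the full
// method set a progress.Writer needs to provide.
type discardWriter struct{}

func (discardWriter) Write(*client.SolveStatus)                         {}
func (discardWriter) ValidateLogSource(digest.Digest, interface{}) bool { return true }
func (discardWriter) ClearLogSource(interface{})                        {}

// compile-time assertion that discardWriter satisfies progress.Writer
var _ progress.Writer = discardWriter{}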

Some extra work is required to make sure that we can pass a
progress.Printer into the debug monitor. If we want to keep it
persistent, then we need a way to temporarily suspend its output;
otherwise, it will continue printing while the monitor is prompting the
user for input and forwarding output from debug containers.

To handle this, we add two methods to the printer, `Pause` and
`Unpause`. `Pause` acts similarly to `Wait`, closing the printer and
cleanly shutting down the display; however, the printer does not
terminate, and can later be resumed by a call to `Unpause`. This gives
the caller a neater interface than having to reconstruct a printer every
time we want to produce progress output.
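
For example, the monitor in this change brackets its interactive session
roughly as follows (condensed from the RunMonitor hunk below; `progress`
is the *progress.Printer passed in by the caller):

if err := progress.Pause(); err != nil {
	return err
}
defer progress.Unpause()

// ... the monitor owns the terminal here: prompts, debug-container output ...

progress.Unpause()                              // hand the display back for a nested build
ref, _, err := c.Build(ctx, *bo, nil, progress) // TODO: support stdin, hold build ref
progress.Pause()                                // reclaim the terminal for the monitor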

Signed-off-by: Justin Chadwell <me@jedevc.com>
Justin Chadwell 2023-04-21 11:17:43 +01:00
parent 0c1fd31226
commit e826141af4
11 changed files with 190 additions and 122 deletions

View File

@@ -1,6 +1,7 @@
package commands
import (
"bytes"
"context"
"encoding/base64"
"encoding/csv"
@@ -15,6 +16,7 @@ import (
"github.com/containerd/console"
"github.com/docker/buildx/build"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/controller"
cbuild "github.com/docker/buildx/controller/build"
"github.com/docker/buildx/controller/control"
@@ -35,8 +37,11 @@ import (
"github.com/docker/docker/pkg/ioutils"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/exporter/containerimage/exptypes"
"github.com/moby/buildkit/solver/errdefs"
"github.com/moby/buildkit/util/appcontext"
"github.com/moby/buildkit/util/grpcerrors"
"github.com/moby/buildkit/util/progress/progressui"
"github.com/morikuni/aec"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
@@ -202,18 +207,44 @@ func runBuild(dockerCli command.Cli, options buildOptions) (err error) {
}
}
contextPathHash := options.contextPath
if absContextPath, err := filepath.Abs(contextPathHash); err == nil {
contextPathHash = absContextPath
}
b, err := builder.New(dockerCli,
builder.WithName(options.builder),
builder.WithContextPathHash(contextPathHash),
)
if err != nil {
return err
}
ctx2, cancel := context.WithCancel(context.TODO())
defer cancel()
progressMode, err := options.toProgress()
if err != nil {
return err
}
printer, err := progress.NewPrinter(ctx2, os.Stderr, os.Stderr, progressMode, progressui.WithDesc(
fmt.Sprintf("building with %q instance using %s driver", b.Name, b.Driver),
fmt.Sprintf("%s:%s", b.Driver, b.Name),
))
if err != nil {
return err
}
var resp *client.SolveResponse
var retErr error
if isExperimental() {
resp, retErr = runControllerBuild(ctx, dockerCli, options, progressMode)
resp, retErr = runControllerBuild(ctx, dockerCli, options, printer)
} else {
resp, retErr = runBasicBuild(ctx, dockerCli, options, progressMode)
resp, retErr = runBasicBuild(ctx, dockerCli, options, printer)
}
if err := printer.Wait(); retErr == nil {
retErr = err
}
printWarnings(os.Stderr, printer.Warnings(), progressMode)
if retErr != nil {
return retErr
}
@@ -232,17 +263,17 @@ func runBuild(dockerCli command.Cli, options buildOptions) (err error) {
return nil
}
func runBasicBuild(ctx context.Context, dockerCli command.Cli, options buildOptions, progressMode string) (*client.SolveResponse, error) {
func runBasicBuild(ctx context.Context, dockerCli command.Cli, options buildOptions, printer *progress.Printer) (*client.SolveResponse, error) {
opts, err := options.toControllerOptions()
if err != nil {
return nil, err
}
resp, _, err := cbuild.RunBuild(ctx, dockerCli, *opts, os.Stdin, progressMode, nil, false)
resp, _, err := cbuild.RunBuild(ctx, dockerCli, *opts, os.Stdin, printer, false)
return resp, err
}
func runControllerBuild(ctx context.Context, dockerCli command.Cli, options buildOptions, progressMode string) (*client.SolveResponse, error) {
func runControllerBuild(ctx context.Context, dockerCli command.Cli, options buildOptions, printer *progress.Printer) (*client.SolveResponse, error) {
if options.invoke != nil && (options.dockerfileName == "-" || options.contextPath == "-") {
// stdin must be usable for monitor
return nil, errors.Errorf("Dockerfile or context from stdin is not supported with invoke")
@@ -284,7 +315,7 @@ func runControllerBuild(ctx context.Context, dockerCli command.Cli, options buil
return nil
})
ref, resp, err = c.Build(ctx, *opts, pr, os.Stdout, os.Stderr, progressMode)
ref, resp, err = c.Build(ctx, *opts, pr, printer)
if err != nil {
var be *controllererrors.BuildError
if errors.As(err, &be) {
@@ -318,7 +349,7 @@ func runControllerBuild(ctx context.Context, dockerCli command.Cli, options buil
}
return nil, errors.Errorf("failed to configure terminal: %v", err)
}
err = monitor.RunMonitor(ctx, ref, opts, options.invoke.InvokeConfig, c, progressMode, pr2, os.Stdout, os.Stderr)
err = monitor.RunMonitor(ctx, ref, opts, options.invoke.InvokeConfig, c, pr2, os.Stdout, os.Stderr, printer)
con.Reset()
if err := pw2.Close(); err != nil {
logrus.Debug("failed to close monitor stdin pipe reader")
@@ -869,3 +900,43 @@ func resolvePaths(options *controllerapi.BuildOptions) (_ *controllerapi.BuildOp
return options, nil
}
func printWarnings(w io.Writer, warnings []client.VertexWarning, mode string) {
if len(warnings) == 0 || mode == progress.PrinterModeQuiet {
return
}
fmt.Fprintf(w, "\n ")
sb := &bytes.Buffer{}
if len(warnings) == 1 {
fmt.Fprintf(sb, "1 warning found")
} else {
fmt.Fprintf(sb, "%d warnings found", len(warnings))
}
if logrus.GetLevel() < logrus.DebugLevel {
fmt.Fprintf(sb, " (use --debug to expand)")
}
fmt.Fprintf(sb, ":\n")
fmt.Fprint(w, aec.Apply(sb.String(), aec.YellowF))
for _, warn := range warnings {
fmt.Fprintf(w, " - %s\n", warn.Short)
if logrus.GetLevel() < logrus.DebugLevel {
continue
}
for _, d := range warn.Detail {
fmt.Fprintf(w, "%s\n", d)
}
if warn.URL != "" {
fmt.Fprintf(w, "More info: %s\n", warn.URL)
}
if warn.SourceInfo != nil && warn.Range != nil {
src := errdefs.Source{
Info: warn.SourceInfo,
Ranges: warn.Range,
}
src.Print(w)
}
fmt.Fprintf(w, "\n")
}
}

View File

@@ -10,6 +10,7 @@ import (
"github.com/docker/buildx/controller/control"
controllerapi "github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/monitor"
"github.com/docker/buildx/util/progress"
"github.com/docker/cli/cli/command"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -18,7 +19,7 @@ import (
func debugShellCmd(dockerCli command.Cli) *cobra.Command {
var options control.ControlOptions
var progress string
var progressMode string
cmd := &cobra.Command{
Use: "debug-shell",
@@ -38,9 +39,15 @@ func debugShellCmd(dockerCli command.Cli) *cobra.Command {
if err := con.SetRaw(); err != nil {
return errors.Errorf("failed to configure terminal: %v", err)
}
printer, err := progress.NewPrinter(context.TODO(), os.Stderr, os.Stderr, progressMode)
if err != nil {
return err
}
err = monitor.RunMonitor(ctx, "", nil, controllerapi.InvokeConfig{
Tty: true,
}, c, progress, os.Stdin, os.Stdout, os.Stderr)
}, c, os.Stdin, os.Stdout, os.Stderr, printer)
con.Reset()
return err
},
@@ -51,7 +58,7 @@ func debugShellCmd(dockerCli command.Cli) *cobra.Command {
flags.StringVar(&options.Root, "root", "", "Specify root directory of server to connect [experimental]")
flags.BoolVar(&options.Detach, "detach", runtime.GOOS == "linux", "Detach buildx server (supported only on linux) [experimental]")
flags.StringVar(&options.ServerConfig, "server-config", "", "Specify buildx server config file (used only when launching new server) [experimental]")
flags.StringVar(&progress, "progress", "auto", `Set type of progress output ("auto", "plain", "tty"). Use plain to show container output`)
flags.StringVar(&progressMode, "progress", "auto", `Set type of progress output ("auto", "plain", "tty"). Use plain to show container output`)
return cmd
}

View File

@@ -1,12 +1,10 @@
package build
import (
"bytes"
"context"
"encoding/base64"
"encoding/csv"
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
@@ -30,12 +28,8 @@ import (
"github.com/docker/go-units"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/session/auth/authprovider"
"github.com/moby/buildkit/solver/errdefs"
"github.com/moby/buildkit/util/grpcerrors"
"github.com/moby/buildkit/util/progress/progressui"
"github.com/morikuni/aec"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
)
@@ -46,7 +40,7 @@ const defaultTargetName = "default"
// NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultContext,
// this function returns it in addition to the error (i.e. it does "return nil, res, err"). The caller can
// inspect the result and debug the cause of that error.
func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.BuildOptions, inStream io.Reader, progressMode string, statusChan chan *client.SolveStatus, generateResult bool) (*client.SolveResponse, *build.ResultContext, error) {
func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.BuildOptions, inStream io.Reader, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultContext, error) {
if in.NoCache && len(in.NoCacheFilter) > 0 {
return nil, nil, errors.Errorf("--no-cache and --no-cache-filter cannot currently be used together")
}
@@ -164,6 +158,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
contextPathHash = in.ContextPath
}
// TODO: this should not be loaded this side of the controller api
b, err := builder.New(dockerCli,
builder.WithName(in.Builder),
builder.WithContextPathHash(contextPathHash),
@@ -179,7 +174,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
return nil, nil, err
}
resp, res, err := buildTargets(ctx, dockerCli, b.NodeGroup, nodes, map[string]build.Options{defaultTargetName: opts}, progressMode, in.MetadataFile, statusChan, generateResult)
resp, res, err := buildTargets(ctx, dockerCli, b.NodeGroup, nodes, map[string]build.Options{defaultTargetName: opts}, progress, in.MetadataFile, generateResult)
err = wrapBuildError(err, false)
if err != nil {
// NOTE: buildTargets can return *build.ResultContext even on error.
@@ -193,24 +188,14 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
// NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultContext,
// this function returns it in addition to the error (i.e. it does "return nil, res, err"). The caller can
// inspect the result and debug the cause of that error.
func buildTargets(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, nodes []builder.Node, opts map[string]build.Options, progressMode string, metadataFile string, statusChan chan *client.SolveStatus, generateResult bool) (*client.SolveResponse, *build.ResultContext, error) {
ctx2, cancel := context.WithCancel(context.TODO())
defer cancel()
printer, err := progress.NewPrinter(ctx2, os.Stderr, os.Stderr, progressMode, progressui.WithDesc(
fmt.Sprintf("building with %q instance using %s driver", ng.Name, ng.Driver),
fmt.Sprintf("%s:%s", ng.Driver, ng.Name),
))
if err != nil {
return nil, nil, err
}
func buildTargets(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, nodes []builder.Node, opts map[string]build.Options, progress progress.Writer, metadataFile string, generateResult bool) (*client.SolveResponse, *build.ResultContext, error) {
var res *build.ResultContext
var resp map[string]*client.SolveResponse
var err error
if generateResult {
var mu sync.Mutex
var idx int
resp, err = build.BuildWithResultHandler(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress.Tee(printer, statusChan), func(driverIndex int, gotRes *build.ResultContext) {
resp, err = build.BuildWithResultHandler(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress, func(driverIndex int, gotRes *build.ResultContext) {
mu.Lock()
defer mu.Unlock()
if res == nil || driverIndex < idx {
@@ -218,11 +203,7 @@ func buildTargets(ctx context.Context, dockerCli command.Cli, ng *store.NodeGrou
}
})
} else {
resp, err = build.Build(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress.Tee(printer, statusChan))
}
err1 := printer.Wait()
if err == nil {
err = err1
resp, err = build.Build(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress)
}
if err != nil {
return nil, res, err
@@ -234,8 +215,6 @@ func buildTargets(ctx context.Context, dockerCli command.Cli, ng *store.NodeGrou
}
}
printWarnings(os.Stderr, printer.Warnings(), progressMode)
for k := range resp {
if opts[k].PrintFunc != nil {
if err := printResult(opts[k].PrintFunc, resp[k].ExporterResponse); err != nil {
@@ -247,46 +226,6 @@ func buildTargets(ctx context.Context, dockerCli command.Cli, ng *store.NodeGrou
return resp[defaultTargetName], res, err
}
func printWarnings(w io.Writer, warnings []client.VertexWarning, mode string) {
if len(warnings) == 0 || mode == progress.PrinterModeQuiet {
return
}
fmt.Fprintf(w, "\n ")
sb := &bytes.Buffer{}
if len(warnings) == 1 {
fmt.Fprintf(sb, "1 warning found")
} else {
fmt.Fprintf(sb, "%d warnings found", len(warnings))
}
if logrus.GetLevel() < logrus.DebugLevel {
fmt.Fprintf(sb, " (use --debug to expand)")
}
fmt.Fprintf(sb, ":\n")
fmt.Fprint(w, aec.Apply(sb.String(), aec.YellowF))
for _, warn := range warnings {
fmt.Fprintf(w, " - %s\n", warn.Short)
if logrus.GetLevel() < logrus.DebugLevel {
continue
}
for _, d := range warn.Detail {
fmt.Fprintf(w, "%s\n", d)
}
if warn.URL != "" {
fmt.Fprintf(w, "More info: %s\n", warn.URL)
}
if warn.SourceInfo != nil && warn.Range != nil {
src := errdefs.Source{
Info: warn.SourceInfo,
Ranges: warn.Range,
}
src.Print(w)
}
fmt.Fprintf(w, "\n")
}
}
func parsePrintFunc(str string) (*build.PrintFunc, error) {
if str == "" {
return nil, nil

View File

@@ -4,13 +4,13 @@ import (
"context"
"io"
"github.com/containerd/console"
controllerapi "github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/util/progress"
"github.com/moby/buildkit/client"
)
type BuildxController interface {
Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, w io.Writer, out console.File, progressMode string) (ref string, resp *client.SolveResponse, err error)
Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (ref string, resp *client.SolveResponse, err error)
// Invoke starts an IO session into the specified process.
// If pid doesn't match any running processes, it starts a new process with the specified config.
// If there is no container running or InvokeConfig.Rollback is specified, the process will start in a newly created container.

View File

@@ -5,7 +5,6 @@ import (
"io"
"sync/atomic"
"github.com/containerd/console"
"github.com/docker/buildx/build"
cbuild "github.com/docker/buildx/controller/build"
"github.com/docker/buildx/controller/control"
@@ -13,6 +12,7 @@ import (
controllerapi "github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/controller/processes"
"github.com/docker/buildx/util/ioset"
"github.com/docker/buildx/util/progress"
"github.com/docker/cli/cli/command"
"github.com/moby/buildkit/client"
"github.com/pkg/errors"
@@ -42,13 +42,13 @@ type localController struct {
buildOnGoing atomic.Bool
}
func (b *localController) Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, w io.Writer, out console.File, progressMode string) (string, *client.SolveResponse, error) {
func (b *localController) Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, *client.SolveResponse, error) {
if !b.buildOnGoing.CompareAndSwap(false, true) {
return "", nil, errors.New("build ongoing")
}
defer b.buildOnGoing.Store(false)
resp, res, buildErr := cbuild.RunBuild(ctx, b.dockerCli, options, in, progressMode, nil, true)
resp, res, buildErr := cbuild.RunBuild(ctx, b.dockerCli, options, in, progress, true)
// NOTE: RunBuild can return *build.ResultContext even on error.
if res != nil {
b.buildConfig = buildConfig{

View File

@@ -1,10 +1,30 @@
package pb
import (
"github.com/docker/buildx/util/progress"
control "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/client"
"github.com/opencontainers/go-digest"
)
type writer struct {
ch chan<- *StatusResponse
}
func NewProgressWriter(ch chan<- *StatusResponse) progress.Writer {
return &writer{ch: ch}
}
func (w *writer) Write(status *client.SolveStatus) {
w.ch <- ToControlStatus(status)
}
func (w *writer) ValidateLogSource(digest.Digest, interface{}) bool {
return true
}
func (w *writer) ClearLogSource(interface{}) {}
func ToControlStatus(s *client.SolveStatus) *StatusResponse {
resp := StatusResponse{}
for _, v := range s.Vertexes {

View File

@@ -6,7 +6,6 @@ import (
"sync"
"time"
"github.com/containerd/console"
"github.com/containerd/containerd/defaults"
"github.com/containerd/containerd/pkg/dialer"
"github.com/docker/buildx/controller/pb"
@@ -114,14 +113,9 @@ func (c *Client) Inspect(ctx context.Context, ref string) (*pb.InspectResponse,
return c.client().Inspect(ctx, &pb.InspectRequest{Ref: ref})
}
func (c *Client) Build(ctx context.Context, options pb.BuildOptions, in io.ReadCloser, w io.Writer, out console.File, progressMode string) (string, *client.SolveResponse, error) {
func (c *Client) Build(ctx context.Context, options pb.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, *client.SolveResponse, error) {
ref := identity.NewID()
pw, err := progress.NewPrinter(context.TODO(), w, out, progressMode)
if err != nil {
return "", nil, err
}
statusChan := make(chan *client.SolveStatus)
statusDone := make(chan struct{})
eg, egCtx := errgroup.WithContext(ctx)
var resp *client.SolveResponse
eg.Go(func() error {
@@ -131,17 +125,12 @@ func (c *Client) Build(ctx context.Context, options pb.BuildOptions, in io.ReadC
return err
})
eg.Go(func() error {
defer close(statusDone)
for s := range statusChan {
st := s
pw.Write(st)
progress.Write(st)
}
return nil
})
eg.Go(func() error {
<-statusDone
return pw.Wait()
})
return ref, resp, eg.Wait()
}

View File

@@ -21,6 +21,7 @@ import (
"github.com/docker/buildx/controller/control"
controllerapi "github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/progress"
"github.com/docker/buildx/version"
"github.com/docker/cli/cli/command"
"github.com/moby/buildkit/client"
@@ -142,8 +143,8 @@ func serveCmd(dockerCli command.Cli) *cobra.Command {
}()
// prepare server
b := NewServer(func(ctx context.Context, options *controllerapi.BuildOptions, stdin io.Reader, statusChan chan *client.SolveStatus) (*client.SolveResponse, *build.ResultContext, error) {
return cbuild.RunBuild(ctx, dockerCli, *options, stdin, "quiet", statusChan, true)
b := NewServer(func(ctx context.Context, options *controllerapi.BuildOptions, stdin io.Reader, progress progress.Writer) (*client.SolveResponse, *build.ResultContext, error) {
return cbuild.RunBuild(ctx, dockerCli, *options, stdin, progress, true)
})
defer b.Close()

View File

@@ -12,13 +12,14 @@ import (
"github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/controller/processes"
"github.com/docker/buildx/util/ioset"
"github.com/docker/buildx/util/progress"
"github.com/docker/buildx/version"
"github.com/moby/buildkit/client"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
)
type BuildFunc func(ctx context.Context, options *pb.BuildOptions, stdin io.Reader, statusChan chan *client.SolveStatus) (resp *client.SolveResponse, res *build.ResultContext, err error)
type BuildFunc func(ctx context.Context, options *pb.BuildOptions, stdin io.Reader, progress progress.Writer) (resp *client.SolveResponse, res *build.ResultContext, err error)
func NewServer(buildFunc BuildFunc) *Server {
return &Server{
@@ -34,7 +35,7 @@ type Server struct {
type session struct {
buildOnGoing atomic.Bool
statusChan chan *client.SolveStatus
statusChan chan *pb.StatusResponse
cancelBuild func()
buildOptions *pb.BuildOptions
inputPipe *io.PipeWriter
@@ -176,8 +177,9 @@ func (m *Server) Build(ctx context.Context, req *pb.BuildRequest) (*pb.BuildResp
s = &session{}
s.buildOnGoing.Store(true)
}
s.processes = processes.NewManager()
statusChan := make(chan *client.SolveStatus)
statusChan := make(chan *pb.StatusResponse)
s.statusChan = statusChan
inR, inW := io.Pipe()
defer inR.Close()
@@ -195,10 +197,12 @@ func (m *Server) Build(ctx context.Context, req *pb.BuildRequest) (*pb.BuildResp
m.sessionMu.Unlock()
}()
pw := pb.NewProgressWriter(statusChan)
// Build the specified request
ctx, cancel := context.WithCancel(ctx)
defer cancel()
resp, res, buildErr := m.buildFunc(ctx, req.Options, inR, statusChan)
resp, res, buildErr := m.buildFunc(ctx, req.Options, inR, pw)
m.sessionMu.Lock()
if s, ok := m.session[ref]; ok {
// NOTE: buildFunc can return *build.ResultContext even on error (e.g. when it's implemented using (github.com/docker/buildx/controller/build).RunBuild).
@@ -236,7 +240,7 @@ func (m *Server) Status(req *pb.StatusRequest, stream pb.Controller_StatusServer
}
// Wait and get status channel prepared by Build()
var statusChan <-chan *client.SolveStatus
var statusChan <-chan *pb.StatusResponse
for {
// TODO: timeout?
m.sessionMu.Lock()
@@ -255,7 +259,7 @@ func (m *Server) Status(req *pb.StatusRequest, stream pb.Controller_StatusServer
if ss == nil {
break
}
if err := stream.Send(pb.ToControlStatus(ss)); err != nil {
if err := stream.Send(ss); err != nil {
return err
}
}

View File

@@ -15,6 +15,7 @@ import (
controllererrors "github.com/docker/buildx/controller/errdefs"
controllerapi "github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/util/ioset"
"github.com/docker/buildx/util/progress"
"github.com/moby/buildkit/identity"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -36,12 +37,18 @@ Available commands are:
`
// RunMonitor provides an interactive session for running and managing containers via specified IO.
func RunMonitor(ctx context.Context, curRef string, options *controllerapi.BuildOptions, invokeConfig controllerapi.InvokeConfig, c control.BuildxController, progressMode string, stdin io.ReadCloser, stdout io.WriteCloser, stderr console.File) error {
func RunMonitor(ctx context.Context, curRef string, options *controllerapi.BuildOptions, invokeConfig controllerapi.InvokeConfig, c control.BuildxController, stdin io.ReadCloser, stdout io.WriteCloser, stderr console.File, progress *progress.Printer) error {
defer func() {
if err := c.Disconnect(ctx, curRef); err != nil {
logrus.Warnf("disconnect error: %v", err)
}
}()
if err := progress.Pause(); err != nil {
return err
}
defer progress.Unpause()
monitorIn, monitorOut := ioset.Pipe()
defer func() {
monitorIn.Close()
@@ -145,7 +152,9 @@ func RunMonitor(ctx context.Context, curRef string, options *controllerapi.Build
}
}
var resultUpdated bool
ref, _, err := c.Build(ctx, *bo, nil, stdout, stderr, progressMode) // TODO: support stdin, hold build ref
progress.Unpause()
ref, _, err := c.Build(ctx, *bo, nil, progress) // TODO: support stdin, hold build ref
progress.Pause()
if err != nil {
var be *controllererrors.BuildError
if errors.As(err, &be) {

View File

@@ -23,8 +23,12 @@
)
type Printer struct {
status chan *client.SolveStatus
done <-chan struct{}
status chan *client.SolveStatus
ready chan struct{}
done chan struct{}
paused chan struct{}
err error
warnings []client.VertexWarning
logMu sync.Mutex
@@ -37,6 +41,16 @@ func (p *Printer) Wait() error {
return p.err
}
func (p *Printer) Pause() error {
p.paused = make(chan struct{})
return p.Wait()
}
func (p *Printer) Unpause() {
close(p.paused)
<-p.ready
}
func (p *Printer) Write(s *client.SolveStatus) {
p.status <- s
}
@@ -71,15 +85,6 @@ func (p *Printer) ClearLogSource(v interface{}) {
}
func NewPrinter(ctx context.Context, w io.Writer, out console.File, mode string, solveStatusOpt ...progressui.DisplaySolveStatusOpt) (*Printer, error) {
statusCh := make(chan *client.SolveStatus)
doneCh := make(chan struct{})
pw := &Printer{
status: statusCh,
done: doneCh,
logSourceMap: map[digest.Digest]interface{}{},
}
if v := os.Getenv("BUILDKIT_PROGRESS"); v != "" && mode == PrinterModeAuto {
mode = v
}
@@ -98,12 +103,35 @@ func NewPrinter(ctx context.Context, w io.Writer, out console.File, mode string,
}
}
pw := &Printer{
ready: make(chan struct{}),
}
go func() {
resumeLogs := logutil.Pause(logrus.StandardLogger())
// not using shared context to not disrupt display but let it finish reporting errors
pw.warnings, pw.err = progressui.DisplaySolveStatus(ctx, c, w, statusCh, solveStatusOpt...)
resumeLogs()
close(doneCh)
for {
pw.status = make(chan *client.SolveStatus)
pw.done = make(chan struct{})
pw.logMu.Lock()
pw.logSourceMap = map[digest.Digest]interface{}{}
pw.logMu.Unlock()
close(pw.ready)
resumeLogs := logutil.Pause(logrus.StandardLogger())
// not using shared context to not disrupt display but let it finish reporting errors
pw.warnings, pw.err = progressui.DisplaySolveStatus(ctx, c, w, pw.status, solveStatusOpt...)
resumeLogs()
close(pw.done)
if pw.paused == nil {
break
}
pw.ready = make(chan struct{})
<-pw.paused
pw.paused = nil
}
}()
<-pw.ready
return pw, nil
}
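
Taken together, the intended caller-side lifecycle looks roughly like
this (a sketch assembled from the hunks above; error handling elided):

printer, _ := progress.NewPrinter(ctx, os.Stderr, os.Stderr, progress.PrinterModeAuto)

printer.Write(status) // forward *client.SolveStatus updates to the display

_ = printer.Pause() // display shuts down cleanly, but the printer survives
// ... prompt the user, forward debug-container output ...
printer.Unpause() // the loop above spins up a fresh display

_ = printer.Wait()             // final, terminating shutdown
warnings := printer.Warnings() // extracted on the client side, e.g. for printWarnings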