mirror of https://github.com/docker/buildx.git

commit e7a53fb829 (parent c0fd64f4f8)

lint: enable forbidigo context rules

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
@@ -56,6 +56,10 @@ linters-settings:
             desc: The io/ioutil package has been deprecated.
   forbidigo:
     forbid:
+      - '^context\.WithCancel(# use context\.WithCancelCause instead)?$'
+      - '^context\.WithDeadline(# use context\.WithDeadlineCause instead)?$'
+      - '^context\.WithTimeout(# use context\.WithTimeoutCause instead)?$'
+      - '^ctx\.Err(# use context\.Cause instead)?$'
       - '^fmt\.Errorf(# use errors\.Errorf instead)?$'
       - '^platforms\.DefaultString(# use platforms\.Format(platforms\.DefaultSpec()) instead\.)?$'
   gosec:
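The rules above steer code from the plain context constructors to their cause-aware variants from recent Go releases. A minimal sketch of the pattern the rest of this commit applies, assuming Go 1.21+ and the github.com/pkg/errors package already used by buildx (the example itself is illustrative, not part of the diff):

```go
package main

import (
	"context"
	"fmt"

	"github.com/pkg/errors"
)

func main() {
	// Cause-aware cancellation: the error passed to cancel() is recorded
	// in the context and can be retrieved later with context.Cause.
	ctx, cancel := context.WithCancelCause(context.Background())
	cancel(errors.WithStack(context.Canceled))

	fmt.Println(ctx.Err())          // context canceled
	fmt.Println(context.Cause(ctx)) // same message, but carrying the WithStack-wrapped cause
}
```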
@@ -16,7 +16,7 @@ import (
 type Container struct {
 	cancelOnce      sync.Once
-	containerCancel func()
+	containerCancel func(error)
 	isUnavailable   atomic.Bool
 	initStarted     atomic.Bool
 	container       gateway.Container
@@ -31,18 +31,18 @@ func NewContainer(ctx context.Context, resultCtx *ResultHandle, cfg *controllera
 	errCh := make(chan error)
 	go func() {
 		err := resultCtx.build(func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
-			ctx, cancel := context.WithCancel(ctx)
+			ctx, cancel := context.WithCancelCause(ctx)
 			go func() {
 				<-mainCtx.Done()
-				cancel()
+				cancel(errors.WithStack(context.Canceled))
 			}()

 			containerCfg, err := resultCtx.getContainerConfig(cfg)
 			if err != nil {
 				return nil, err
 			}
-			containerCtx, containerCancel := context.WithCancel(ctx)
-			defer containerCancel()
+			containerCtx, containerCancel := context.WithCancelCause(ctx)
+			defer containerCancel(errors.WithStack(context.Canceled))
 			bkContainer, err := c.NewContainer(containerCtx, containerCfg)
 			if err != nil {
 				return nil, err
@@ -83,7 +83,7 @@ func (c *Container) Cancel() {
 	c.markUnavailable()
 	c.cancelOnce.Do(func() {
 		if c.containerCancel != nil {
-			c.containerCancel()
+			c.containerCancel(errors.WithStack(context.Canceled))
 		}
 		close(c.releaseCh)
 	})
@@ -82,7 +82,7 @@ func NewResultHandle(ctx context.Context, cc *client.Client, opt client.SolveOpt
 	var respHandle *ResultHandle

 	go func() {
-		defer cancel(context.Canceled) // ensure no dangling processes
+		defer func() { cancel(errors.WithStack(context.Canceled)) }() // ensure no dangling processes

 		var res *gateway.Result
 		var err error
@@ -181,7 +181,7 @@ func NewResultHandle(ctx context.Context, cc *client.Client, opt client.SolveOpt
 			case <-respHandle.done:
 			case <-ctx.Done():
 			}
-			return nil, ctx.Err()
+			return nil, context.Cause(ctx)
 		}, nil)
 		if respHandle != nil {
 			return
@@ -522,8 +522,9 @@ func Create(ctx context.Context, txn *store.Txn, dockerCli command.Cli, opts Cre
 		return nil, err
 	}

-	timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
-	defer cancel()
+	cancelCtx, cancel := context.WithCancelCause(ctx)
+	timeoutCtx, _ := context.WithTimeoutCause(cancelCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded))
+	defer func() { cancel(errors.WithStack(context.Canceled)) }()

 	nodes, err := b.LoadNodes(timeoutCtx, WithData())
 	if err != nil {
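Call sites that previously used context.WithTimeout now wrap the parent in WithCancelCause and layer WithTimeoutCause on top, discarding the timeout's own cancel func; cancelling the parent with a cause is enough to release the child timer. A small sketch of that shape under the same assumptions (doWork and the durations are illustrative):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/pkg/errors"
)

// doWork is illustrative: it blocks until the context is done and
// reports the cancellation cause rather than the bare ctx.Err().
func doWork(ctx context.Context) error {
	select {
	case <-time.After(50 * time.Millisecond):
		return nil
	case <-ctx.Done():
		return context.Cause(ctx)
	}
}

func main() {
	// The parent carries an explicit cancel cause; the timeout layer
	// contributes its own DeadlineExceeded cause when it fires first.
	// Its cancel func is discarded because cancelling the parent
	// (deferred below) also releases the child timer.
	cancelCtx, cancel := context.WithCancelCause(context.Background())
	timeoutCtx, _ := context.WithTimeoutCause(cancelCtx, 20*time.Millisecond, errors.WithStack(context.DeadlineExceeded))
	defer func() { cancel(errors.WithStack(context.Canceled)) }()

	fmt.Println(doWork(timeoutCtx)) // prints the DeadlineExceeded cause
}
```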
@@ -108,8 +108,8 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
 		return err
 	}

-	ctx2, cancel := context.WithCancel(context.TODO())
-	defer cancel()
+	ctx2, cancel := context.WithCancelCause(context.TODO())
+	defer cancel(errors.WithStack(context.Canceled))

 	var nodes []builder.Node
 	var progressConsoleDesc, progressTextDesc string
@@ -325,8 +325,8 @@ func runBuild(ctx context.Context, dockerCli command.Cli, options buildOptions)
 	}
 	attributes := buildMetricAttributes(dockerCli, driverType, &options)

-	ctx2, cancel := context.WithCancel(context.TODO())
-	defer cancel()
+	ctx2, cancel := context.WithCancelCause(context.TODO())
+	defer func() { cancel(errors.WithStack(context.Canceled)) }()
 	progressMode, err := options.toDisplayMode()
 	if err != nil {
 		return err
@@ -173,8 +173,8 @@ func runCreate(ctx context.Context, dockerCli command.Cli, in createOptions, arg
 	// new resolver cause need new auth
 	r = imagetools.New(imageopt)

-	ctx2, cancel := context.WithCancel(context.TODO())
-	defer cancel()
+	ctx2, cancel := context.WithCancelCause(context.TODO())
+	defer func() { cancel(errors.WithStack(context.Canceled)) }()
 	printer, err := progress.NewPrinter(ctx2, os.Stderr, progressui.DisplayMode(in.progress))
 	if err != nil {
 		return err
@@ -17,6 +17,7 @@ import (
 	"github.com/docker/cli/cli/command"
 	"github.com/docker/cli/cli/debug"
 	"github.com/docker/go-units"
+	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 )

@@ -34,8 +35,9 @@ func runInspect(ctx context.Context, dockerCli command.Cli, in inspectOptions) e
 		return err
 	}

-	timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
-	defer cancel()
+	timeoutCtx, cancel := context.WithCancelCause(ctx)
+	timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded))
+	defer func() { cancel(errors.WithStack(context.Canceled)) }()

 	nodes, err := b.LoadNodes(timeoutCtx, builder.WithData())
 	if in.bootstrap {
@@ -18,6 +18,7 @@ import (
 	"github.com/docker/cli/cli"
 	"github.com/docker/cli/cli/command"
 	"github.com/docker/cli/cli/command/formatter"
+	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 	"golang.org/x/sync/errgroup"
 )
@@ -57,8 +58,9 @@ func runLs(ctx context.Context, dockerCli command.Cli, in lsOptions) error {
 		return err
 	}

-	timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
-	defer cancel()
+	timeoutCtx, cancel := context.WithCancelCause(ctx)
+	timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded))
+	defer func() { cancel(errors.WithStack(context.Canceled)) }()

 	eg, _ := errgroup.WithContext(timeoutCtx)
 	for _, b := range builders {
@@ -150,8 +150,9 @@ func rmAllInactive(ctx context.Context, txn *store.Txn, dockerCli command.Cli, i
 		return err
 	}

-	timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
-	defer cancel()
+	timeoutCtx, cancel := context.WithCancelCause(ctx)
+	timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded))
+	defer func() { cancel(errors.WithStack(context.Canceled)) }()

 	eg, _ := errgroup.WithContext(timeoutCtx)
 	for _, b := range builders {
@@ -109,7 +109,7 @@ func (b *localController) Invoke(ctx context.Context, sessionID string, pid stri

 	// Attach containerIn to this process
 	ioCancelledCh := make(chan struct{})
-	proc.ForwardIO(&ioset.In{Stdin: ioIn, Stdout: ioOut, Stderr: ioErr}, func() { close(ioCancelledCh) })
+	proc.ForwardIO(&ioset.In{Stdin: ioIn, Stdout: ioOut, Stderr: ioErr}, func(error) { close(ioCancelledCh) })

 	select {
 	case <-ioCancelledCh:
@@ -117,7 +117,7 @@ func (b *localController) Invoke(ctx context.Context, sessionID string, pid stri
 	case err := <-proc.Done():
 		return err
 	case <-ctx.Done():
-		return ctx.Err()
+		return context.Cause(ctx)
 	}
 }

@@ -18,16 +18,16 @@ type Process struct {
 	invokeConfig  *pb.InvokeConfig
 	errCh         chan error
 	processCancel func()
-	serveIOCancel func()
+	serveIOCancel func(error)
 }

 // ForwardIO forwards process's io to the specified reader/writer.
 // Optionally specify ioCancelCallback which will be called when
 // the process closes the specified IO. This will be useful for additional cleanup.
-func (p *Process) ForwardIO(in *ioset.In, ioCancelCallback func()) {
+func (p *Process) ForwardIO(in *ioset.In, ioCancelCallback func(error)) {
 	p.inEnd.SetIn(in)
 	if f := p.serveIOCancel; f != nil {
-		f()
+		f(errors.WithStack(context.Canceled))
 	}
 	p.serveIOCancel = ioCancelCallback
 }
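The IO-cancel callbacks change from func() to func(error) so the cancellation cause can reach cleanup code. A toy sketch of that shape (the Forwarder type and its fields are illustrative, not the buildx API):

```go
package main

import (
	"context"
	"fmt"

	"github.com/pkg/errors"
)

// Forwarder is a stand-in for a type holding a cause-aware cancel
// callback, mirroring the func(error) signature used above.
type Forwarder struct {
	cancel func(error)
}

// Attach replaces the current callback, cancelling the previous one
// with an explicit cause first.
func (f *Forwarder) Attach(cb func(error)) {
	if old := f.cancel; old != nil {
		old(errors.WithStack(context.Canceled))
	}
	f.cancel = cb
}

func main() {
	fw := &Forwarder{}
	fw.Attach(func(err error) { fmt.Println("first cancelled:", err) })
	fw.Attach(func(err error) { fmt.Println("second cancelled:", err) })
	// Output: first cancelled: context canceled
}
```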
@@ -124,9 +124,16 @@ func (m *Manager) StartProcess(pid string, resultCtx *build.ResultHandle, cfg *p
 	f.SetOut(&out)

 	// Register process
-	ctx, cancel := context.WithCancel(context.TODO())
+	ctx, cancel := context.WithCancelCause(context.TODO())
 	var cancelOnce sync.Once
-	processCancelFunc := func() { cancelOnce.Do(func() { cancel(); f.Close(); in.Close(); out.Close() }) }
+	processCancelFunc := func() {
+		cancelOnce.Do(func() {
+			cancel(errors.WithStack(context.Canceled))
+			f.Close()
+			in.Close()
+			out.Close()
+		})
+	}
 	p := &Process{
 		inEnd:        f,
 		invokeConfig: cfg,
@@ -62,9 +62,10 @@ func NewRemoteBuildxController(ctx context.Context, dockerCli command.Cli, opts
 	serverRoot := filepath.Join(rootDir, "shared")

 	// connect to buildx server if it is already running
-	ctx2, cancel := context.WithTimeout(ctx, 1*time.Second)
+	ctx2, cancel := context.WithCancelCause(ctx)
+	ctx2, _ = context.WithTimeoutCause(ctx2, 1*time.Second, errors.WithStack(context.DeadlineExceeded))
 	c, err := newBuildxClientAndCheck(ctx2, filepath.Join(serverRoot, defaultSocketFilename))
-	cancel()
+	cancel(errors.WithStack(context.Canceled))
 	if err != nil {
 		if !errors.Is(err, context.DeadlineExceeded) {
 			return nil, errors.Wrap(err, "cannot connect to the buildx server")
@@ -90,9 +91,10 @@ func NewRemoteBuildxController(ctx context.Context, dockerCli command.Cli, opts
 		go wait()

 		// wait for buildx server to be ready
-		ctx2, cancel = context.WithTimeout(ctx, 10*time.Second)
+		ctx2, cancel = context.WithCancelCause(ctx)
+		ctx2, _ = context.WithTimeoutCause(ctx2, 10*time.Second, errors.WithStack(context.DeadlineExceeded))
 		c, err = newBuildxClientAndCheck(ctx2, filepath.Join(serverRoot, defaultSocketFilename))
-		cancel()
+		cancel(errors.WithStack(context.Canceled))
 		if err != nil {
 			return errors.Wrap(err, "cannot connect to the buildx server")
 		}
@@ -344,7 +344,7 @@ func receive(ctx context.Context, stream msgStream) (*pb.Message, error) {
 	case err := <-errCh:
 		return nil, err
 	case <-ctx.Done():
-		return nil, ctx.Err()
+		return nil, context.Cause(ctx)
 	}
 }

@@ -37,7 +37,7 @@ type Server struct {
 type session struct {
 	buildOnGoing atomic.Bool
 	statusChan   chan *pb.StatusResponse
-	cancelBuild  func()
+	cancelBuild  func(error)
 	buildOptions *pb.BuildOptions
 	inputPipe    *io.PipeWriter

@@ -109,7 +109,7 @@ func (m *Server) Disconnect(ctx context.Context, req *pb.DisconnectRequest) (res
 	m.sessionMu.Lock()
 	if s, ok := m.session[sessionID]; ok {
 		if s.cancelBuild != nil {
-			s.cancelBuild()
+			s.cancelBuild(errors.WithStack(context.Canceled))
 		}
 		s.cancelRunningProcesses()
 		if s.result != nil {
@@ -127,7 +127,7 @@ func (m *Server) Close() error {
 	for k := range m.session {
 		if s, ok := m.session[k]; ok {
 			if s.cancelBuild != nil {
-				s.cancelBuild()
+				s.cancelBuild(errors.WithStack(context.Canceled))
 			}
 			s.cancelRunningProcesses()
 		}
@@ -199,8 +199,8 @@ func (m *Server) Build(ctx context.Context, req *pb.BuildRequest) (*pb.BuildResp
 	pw := pb.NewProgressWriter(statusChan)

 	// Build the specified request
-	ctx, cancel := context.WithCancel(ctx)
-	defer cancel()
+	ctx, cancel := context.WithCancelCause(ctx)
+	defer func() { cancel(errors.WithStack(context.Canceled)) }()
 	resp, res, _, buildErr := m.buildFunc(ctx, req.Options, inR, pw)
 	m.sessionMu.Lock()
 	if s, ok := m.session[sessionID]; ok {
@@ -341,7 +341,7 @@ func (m *Server) Input(stream pb.Controller_InputServer) (err error) {
 		select {
 		case msg = <-msgCh:
 		case <-ctx.Done():
-			return errors.Wrap(ctx.Err(), "canceled")
+			return context.Cause(ctx)
 		}
 		if msg == nil {
 			return nil
@@ -370,9 +370,9 @@ func (m *Server) Invoke(srv pb.Controller_InvokeServer) error {
 	initDoneCh := make(chan *processes.Process)
 	initErrCh := make(chan error)
 	eg, egCtx := errgroup.WithContext(context.TODO())
-	srvIOCtx, srvIOCancel := context.WithCancel(egCtx)
+	srvIOCtx, srvIOCancel := context.WithCancelCause(egCtx)
 	eg.Go(func() error {
-		defer srvIOCancel()
+		defer srvIOCancel(errors.WithStack(context.Canceled))
 		return serveIO(srvIOCtx, srv, func(initMessage *pb.InitMessage) (retErr error) {
 			defer func() {
 				if retErr != nil {
@@ -418,7 +418,7 @@ func (m *Server) Invoke(srv pb.Controller_InvokeServer) error {
 		})
 	})
 	eg.Go(func() (rErr error) {
-		defer srvIOCancel()
+		defer srvIOCancel(errors.WithStack(context.Canceled))
 		// Wait for init done
 		var proc *processes.Process
 		select {
@@ -212,7 +212,7 @@ func (d *Driver) wait(ctx context.Context, l progress.SubLogger) error {
 		}
 		select {
 		case <-ctx.Done():
-			return ctx.Err()
+			return context.Cause(ctx)
 		case <-time.After(time.Duration(try*120) * time.Millisecond):
 			try++
 			continue
@@ -112,7 +112,7 @@ func (d *Driver) wait(ctx context.Context) error {
 	for {
 		select {
 		case <-ctx.Done():
-			return ctx.Err()
+			return context.Cause(ctx)
 		case <-timeoutChan:
 			return err
 		case <-ticker.C:
@@ -47,8 +47,9 @@ func (d *Driver) Bootstrap(ctx context.Context, l progress.Logger) error {
 		return err
 	}
 	return progress.Wrap("[internal] waiting for connection", l, func(_ progress.SubLogger) error {
-		ctx, cancel := context.WithTimeout(ctx, 20*time.Second)
-		defer cancel()
+		cancelCtx, cancel := context.WithCancelCause(ctx)
+		ctx, _ := context.WithTimeoutCause(cancelCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded))
+		defer func() { cancel(errors.WithStack(context.Canceled)) }()
 		return c.Wait(ctx)
 	})
 }
@@ -326,7 +326,7 @@ func (m *monitor) invoke(ctx context.Context, pid string, cfg *controllerapi.Inv
 	if m.AttachedSessionID() == "" {
 		return nil
 	}
-	invokeCtx, invokeCancel := context.WithCancel(ctx)
+	invokeCtx, invokeCancel := context.WithCancelCause(ctx)

 	containerIn, containerOut := ioset.Pipe()
 	m.invokeIO.SetOut(&containerOut)
@@ -336,7 +336,7 @@ func (m *monitor) invoke(ctx context.Context, pid string, cfg *controllerapi.Inv
 		cancelOnce.Do(func() {
 			containerIn.Close()
 			m.invokeIO.SetOut(nil)
-			invokeCancel()
+			invokeCancel(errors.WithStack(context.Canceled))
 		})
 		<-waitInvokeDoneCh
 	}
@@ -41,7 +41,6 @@ func (c *Client) LoadImage(ctx context.Context, name string, status progress.Wri
 	pr, pw := io.Pipe()
 	done := make(chan struct{})

-	ctx, cancel := context.WithCancel(ctx)
 	var w *waitingWriter
 	w = &waitingWriter{
 		PipeWriter: pw,
@@ -67,8 +66,7 @@ func (c *Client) LoadImage(ctx context.Context, name string, status progress.Wri
 				handleErr(err)
 			}
 		},
-		done:   done,
-		cancel: cancel,
+		done: done,
 	}
 	return w, func() {
 		pr.Close()
@@ -101,12 +99,11 @@ func (c *Client) features(ctx context.Context, name string) map[Feature]bool {

 type waitingWriter struct {
 	*io.PipeWriter
-	f      func()
-	once   sync.Once
-	mu     sync.Mutex
-	err    error
-	done   chan struct{}
-	cancel func()
+	f    func()
+	once sync.Once
+	mu   sync.Mutex
+	err  error
+	done chan struct{}
 }

 func (w *waitingWriter) Write(dt []byte) (int, error) {
@@ -7,6 +7,7 @@ import (
 	"net/http"
 	"testing"

+	"github.com/pkg/errors"
 	"github.com/stretchr/testify/require"
 )

@@ -25,7 +26,7 @@ func WithAccessToken(token string) GitServeOpt {
 func GitServeHTTP(c *Git, t testing.TB, opts ...GitServeOpt) (url string) {
 	t.Helper()
 	gitUpdateServerInfo(c, t)
-	ctx, cancel := context.WithCancel(context.TODO())
+	ctx, cancel := context.WithCancelCause(context.TODO())

 	gs := &gitServe{}
 	for _, opt := range opts {
@@ -38,7 +39,7 @@ func GitServeHTTP(c *Git, t testing.TB, opts ...GitServeOpt) (url string) {
 	name := "test.git"
 	dir, err := c.GitDir()
 	if err != nil {
-		cancel()
+		cancel(err)
 	}

 	var addr string
@@ -84,7 +85,7 @@ func GitServeHTTP(c *Git, t testing.TB, opts ...GitServeOpt) (url string) {
 	<-ready

 	t.Cleanup(func() {
-		cancel()
+		cancel(errors.Errorf("cleanup"))
 		<-done
 	})
 	return fmt.Sprintf("http://%s/%s", addr, name)
@@ -61,7 +61,7 @@ func (m *Map) Get(ctx context.Context, keys ...string) (map[string]interface{},
 		m.mu.Unlock()
 		select {
 		case <-ctx.Done():
-			return nil, ctx.Err()
+			return nil, context.Cause(ctx)
 		case <-ch:
 			m.mu.Lock()
 		}
@@ -34,7 +34,7 @@ func TestTimeout(t *testing.T) {

 	m.Set("foo", "bar")

-	ctx, cancel := context.WithTimeout(context.TODO(), 100*time.Millisecond)
+	ctx, cancel := context.WithTimeoutCause(context.TODO(), 100*time.Millisecond, nil)
 	defer cancel()

 	_, err := m.Get(ctx, "bar")
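In the test above, WithTimeoutCause is called with a nil cause; in that case context.Cause falls back to the ordinary context.DeadlineExceeded once the timer fires. A standalone illustration (not part of the diff):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// nil cause: once the deadline passes, Cause(ctx) reports the
	// default context.DeadlineExceeded error.
	ctx, cancel := context.WithTimeoutCause(context.TODO(), 10*time.Millisecond, nil)
	defer cancel()

	<-ctx.Done()
	fmt.Println(context.Cause(ctx))                    // context deadline exceeded
	fmt.Println(ctx.Err() == context.DeadlineExceeded) // true
}
```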