mirror of https://github.com/docker/buildx.git
Merge pull request #2798 from tonistiigi/linter-updates
Improve linter checks
commit 26bbddb5d6
@@ -5,22 +5,45 @@ run:
 linters:
   enable:
-    - gofmt
-    - govet
+    - bodyclose
     - depguard
+    - forbidigo
+    - gocritic
+    - gofmt
     - goimports
+    - gosec
+    - gosimple
+    - govet
     - ineffassign
+    - makezero
     - misspell
-    - unused
+    - noctx
+    - nolintlint
     - revive
     - staticcheck
     - typecheck
-    - nolintlint
-    - gosec
-    - forbidigo
+    - unused
+    - whitespace
   disable-all: true
 
 linters-settings:
+  gocritic:
+    disabled-checks:
+      - "ifElseChain"
+      - "assignOp"
+      - "appendAssign"
+      - "singleCaseSwitch"
+      - "exitAfterDefer" # FIXME
+  importas:
+    alias:
+      # Enforce alias to prevent it accidentally being used instead of
+      # buildkit errdefs package (or vice-versa).
+      - pkg: "github.com/containerd/errdefs"
+        alias: "cerrdefs"
+      - pkg: "github.com/opencontainers/image-spec/specs-go/v1"
+        alias: "ocispecs"
+      - pkg: "github.com/opencontainers/go-digest"
+        alias: "digest"
   govet:
     enable:
       - nilness
 
@@ -43,6 +66,10 @@ linters-settings:
       desc: The io/ioutil package has been deprecated.
   forbidigo:
     forbid:
+      - '^context\.WithCancel(# use context\.WithCancelCause instead)?$'
+      - '^context\.WithDeadline(# use context\.WithDeadline instead)?$'
+      - '^context\.WithTimeout(# use context\.WithTimeoutCause instead)?$'
+      - '^ctx\.Err(# use context\.Cause instead)?$'
      - '^fmt\.Errorf(# use errors\.Errorf instead)?$'
      - '^platforms\.DefaultString(# use platforms\.Format(platforms\.DefaultSpec()) instead\.)?$'
   gosec:
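
The four new `context` rules steer the codebase toward Go's cause-aware cancellation APIs (`context.WithCancelCause` and `context.Cause` since Go 1.20; `WithTimeoutCause` and `WithDeadlineCause` since Go 1.21). A minimal standalone sketch of the pattern the linter enforces, not code from this repo:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

var errShutdown = errors.New("server shutting down")

func main() {
	ctx, cancel := context.WithCancelCause(context.Background())

	// Cancel with a descriptive cause instead of a bare cancel().
	cancel(errShutdown)

	fmt.Println(ctx.Err())          // context canceled
	fmt.Println(context.Cause(ctx)) // server shutting down
}
```
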
@@ -179,7 +179,6 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
 			c.Targets = append(c.Targets, t)
 		}
 		c.Groups = append(c.Groups, g)
-
 	}
 
 	return &c, nil
@@ -170,7 +170,6 @@ func indexOfFunc() function.Function {
 				}
 			}
 			return cty.NilVal, errors.New("item not found")
-
 		},
 	})
 }
@@ -16,7 +16,7 @@ import (
 
 type Container struct {
 	cancelOnce      sync.Once
-	containerCancel func()
+	containerCancel func(error)
 	isUnavailable   atomic.Bool
 	initStarted     atomic.Bool
 	container       gateway.Container
@@ -31,18 +31,18 @@ func NewContainer(ctx context.Context, resultCtx *ResultHandle, cfg *controllera
 	errCh := make(chan error)
 	go func() {
 		err := resultCtx.build(func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
-			ctx, cancel := context.WithCancel(ctx)
+			ctx, cancel := context.WithCancelCause(ctx)
 			go func() {
 				<-mainCtx.Done()
-				cancel()
+				cancel(errors.WithStack(context.Canceled))
 			}()
 
 			containerCfg, err := resultCtx.getContainerConfig(cfg)
 			if err != nil {
 				return nil, err
 			}
-			containerCtx, containerCancel := context.WithCancel(ctx)
-			defer containerCancel()
+			containerCtx, containerCancel := context.WithCancelCause(ctx)
+			defer containerCancel(errors.WithStack(context.Canceled))
 			bkContainer, err := c.NewContainer(containerCtx, containerCfg)
 			if err != nil {
 				return nil, err
@@ -83,7 +83,7 @@ func (c *Container) Cancel() {
 	c.markUnavailable()
 	c.cancelOnce.Do(func() {
 		if c.containerCancel != nil {
-			c.containerCancel()
+			c.containerCancel(errors.WithStack(context.Canceled))
 		}
 		close(c.releaseCh)
 	})
@@ -82,7 +82,7 @@ func NewResultHandle(ctx context.Context, cc *client.Client, opt client.SolveOpt
 	var respHandle *ResultHandle
 
 	go func() {
-		defer cancel(context.Canceled) // ensure no dangling processes
+		defer func() { cancel(errors.WithStack(context.Canceled)) }() // ensure no dangling processes
 
 		var res *gateway.Result
 		var err error
@@ -181,7 +181,7 @@ func NewResultHandle(ctx context.Context, cc *client.Client, opt client.SolveOpt
 		case <-respHandle.done:
 		case <-ctx.Done():
 		}
-		return nil, ctx.Err()
+		return nil, context.Cause(ctx)
 	}, nil)
 	if respHandle != nil {
 		return
@@ -288,7 +288,15 @@ func GetBuilders(dockerCli command.Cli, txn *store.Txn) ([]*Builder, error) {
 		return nil, err
 	}
 
-	builders := make([]*Builder, len(storeng))
+	contexts, err := dockerCli.ContextStore().List()
+	if err != nil {
+		return nil, err
+	}
+	sort.Slice(contexts, func(i, j int) bool {
+		return contexts[i].Name < contexts[j].Name
+	})
+
+	builders := make([]*Builder, len(storeng), len(storeng)+len(contexts))
 	seen := make(map[string]struct{})
 	for i, ng := range storeng {
 		b, err := New(dockerCli,
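
Note the two-value vs three-value `make`: the length still covers the indexed writes for store entries, while the extra capacity reserves room for the context builders appended later, so the appends never reallocate. An illustrative sketch with hypothetical string data:

```go
package main

import "fmt"

func main() {
	store := []string{"builder1", "builder2"}
	contexts := []string{"ctxA", "ctxB", "ctxC"}

	// Length covers the indexed writes below; the extra capacity
	// covers the appends that follow, so no reallocation happens.
	builders := make([]string, len(store), len(store)+len(contexts))
	for i, s := range store {
		builders[i] = s
	}
	for _, c := range contexts {
		builders = append(builders, c)
	}
	fmt.Println(builders, len(builders), cap(builders))
}
```
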
@@ -303,14 +311,6 @@ func GetBuilders(dockerCli command.Cli, txn *store.Txn) ([]*Builder, error) {
 		seen[b.NodeGroup.Name] = struct{}{}
 	}
 
-	contexts, err := dockerCli.ContextStore().List()
-	if err != nil {
-		return nil, err
-	}
-	sort.Slice(contexts, func(i, j int) bool {
-		return contexts[i].Name < contexts[j].Name
-	})
-
 	for _, c := range contexts {
 		// if a context has the same name as an instance from the store, do not
 		// add it to the builders list. An instance from the store takes
@@ -522,8 +522,9 @@ func Create(ctx context.Context, txn *store.Txn, dockerCli command.Cli, opts Cre
 		return nil, err
 	}
 
-	timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
-	defer cancel()
+	cancelCtx, cancel := context.WithCancelCause(ctx)
+	timeoutCtx, _ := context.WithTimeoutCause(cancelCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded))
+	defer func() { cancel(errors.WithStack(context.Canceled)) }()
 
 	nodes, err := b.LoadNodes(timeoutCtx, WithData())
 	if err != nil {
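
This is the recurring replacement for `context.WithTimeout` throughout the commit: a `WithCancelCause` parent keeps a cancel the caller can defer, while `WithTimeoutCause` attaches a stack-wrapped `context.DeadlineExceeded` as the cause on expiry. The timeout context's own cancel is discarded because cancelling the parent tears it down too (which `go vet`'s lostcancel check would normally flag). A condensed sketch of the same shape, assuming the repo's `github.com/pkg/errors` dependency:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/pkg/errors"
)

func main() {
	// Parent carries the cancel; the timeout context's own cancel is
	// discarded since cancelling the parent releases both.
	cancelCtx, cancel := context.WithCancelCause(context.Background())
	timeoutCtx, _ := context.WithTimeoutCause(cancelCtx, 50*time.Millisecond,
		errors.WithStack(context.DeadlineExceeded))
	defer func() { cancel(errors.WithStack(context.Canceled)) }()

	<-timeoutCtx.Done()
	// Reports the wrapped DeadlineExceeded cause, not just "context canceled".
	fmt.Println(context.Cause(timeoutCtx))
}
```
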
@@ -108,8 +108,8 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
 		return err
 	}
 
-	ctx2, cancel := context.WithCancel(context.TODO())
-	defer cancel()
+	ctx2, cancel := context.WithCancelCause(context.TODO())
+	defer cancel(errors.WithStack(context.Canceled))
 
 	var nodes []builder.Node
 	var progressConsoleDesc, progressTextDesc string
@@ -325,8 +325,8 @@ func runBuild(ctx context.Context, dockerCli command.Cli, options buildOptions)
 	}
 	attributes := buildMetricAttributes(dockerCli, driverType, &options)
 
-	ctx2, cancel := context.WithCancel(context.TODO())
-	defer cancel()
+	ctx2, cancel := context.WithCancelCause(context.TODO())
+	defer func() { cancel(errors.WithStack(context.Canceled)) }()
 	progressMode, err := options.toDisplayMode()
 	if err != nil {
 		return err
@@ -885,7 +885,6 @@ func printWarnings(w io.Writer, warnings []client.VertexWarning, mode progressui
 			src.Print(w)
 		}
 		fmt.Fprintf(w, "\n")
-
 	}
 }
 
@@ -42,7 +42,7 @@ func runCreate(ctx context.Context, dockerCli command.Cli, in createOptions, arg
 		return errors.Errorf("can't push with no tags specified, please set --tag or --dry-run")
 	}
 
-	fileArgs := make([]string, len(in.files))
+	fileArgs := make([]string, len(in.files), len(in.files)+len(args))
 	for i, f := range in.files {
 		dt, err := os.ReadFile(f)
 		if err != nil {
@@ -173,8 +173,8 @@ func runCreate(ctx context.Context, dockerCli command.Cli, in createOptions, arg
 	// new resolver cause need new auth
 	r = imagetools.New(imageopt)
 
-	ctx2, cancel := context.WithCancel(context.TODO())
-	defer cancel()
+	ctx2, cancel := context.WithCancelCause(context.TODO())
+	defer func() { cancel(errors.WithStack(context.Canceled)) }()
 	printer, err := progress.NewPrinter(ctx2, os.Stderr, progressui.DisplayMode(in.progress))
 	if err != nil {
 		return err
@@ -17,6 +17,7 @@ import (
 	"github.com/docker/cli/cli/command"
 	"github.com/docker/cli/cli/debug"
 	"github.com/docker/go-units"
+	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 )
 
@@ -34,8 +35,9 @@ func runInspect(ctx context.Context, dockerCli command.Cli, in inspectOptions) e
 		return err
 	}
 
-	timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
-	defer cancel()
+	timeoutCtx, cancel := context.WithCancelCause(ctx)
+	timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded))
+	defer func() { cancel(errors.WithStack(context.Canceled)) }()
 
 	nodes, err := b.LoadNodes(timeoutCtx, builder.WithData())
 	if in.bootstrap {
@@ -18,6 +18,7 @@ import (
 	"github.com/docker/cli/cli"
 	"github.com/docker/cli/cli/command"
 	"github.com/docker/cli/cli/command/formatter"
+	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 	"golang.org/x/sync/errgroup"
 )
@@ -57,8 +58,9 @@ func runLs(ctx context.Context, dockerCli command.Cli, in lsOptions) error {
 		return err
 	}
 
-	timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
-	defer cancel()
+	timeoutCtx, cancel := context.WithCancelCause(ctx)
+	timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded))
+	defer func() { cancel(errors.WithStack(context.Canceled)) }()
 
 	eg, _ := errgroup.WithContext(timeoutCtx)
 	for _, b := range builders {
@@ -319,7 +321,7 @@ func (tp truncatedPlatforms) String() string {
 		if tpf, ok := tp.res[mpf]; ok {
 			seen[mpf] = struct{}{}
 			if len(tpf) == 1 {
-				out = append(out, fmt.Sprintf("%s", tpf[0]))
+				out = append(out, tpf[0])
 				count++
 			} else {
 				hasPreferredPlatform := false
@@ -347,7 +349,7 @@ func (tp truncatedPlatforms) String() string {
 			continue
 		}
 		if len(tp.res[mpf]) == 1 {
-			out = append(out, fmt.Sprintf("%s", tp.res[mpf][0]))
+			out = append(out, tp.res[mpf][0])
 			count++
 		} else {
 			hasPreferredPlatform := false
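
Both hunks drop a redundant `fmt.Sprintf("%s", x)` on a value that is already a string, the kind of no-op formatting the newly enabled gosimple/gocritic checks flag. A trivial before/after sketch:

```go
package main

import "fmt"

func main() {
	platform := "linux/amd64"

	out := []string{}
	// Before: a pointless round-trip through the formatter.
	out = append(out, fmt.Sprintf("%s", platform)) //nolint:gosimple // shown for comparison
	// After: the value is already a string.
	out = append(out, platform)

	fmt.Println(out)
}
```
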
@@ -150,8 +150,9 @@ func rmAllInactive(ctx context.Context, txn *store.Txn, dockerCli command.Cli, i
 		return err
 	}
 
-	timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
-	defer cancel()
+	timeoutCtx, cancel := context.WithCancelCause(ctx)
+	timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded))
+	defer func() { cancel(errors.WithStack(context.Canceled)) }()
 
 	eg, _ := errgroup.WithContext(timeoutCtx)
 	for _, b := range builders {
@@ -46,7 +46,6 @@ func runUse(dockerCli command.Cli, in useOptions) error {
 				return errors.Errorf("run `docker context use %s` to switch to context %s", in.builder, in.builder)
 			}
 		}
-
 	}
 	return errors.Wrapf(err, "failed to find instance %q", in.builder)
 }
@@ -109,7 +109,7 @@ func (b *localController) Invoke(ctx context.Context, sessionID string, pid stri
 
 	// Attach containerIn to this process
 	ioCancelledCh := make(chan struct{})
-	proc.ForwardIO(&ioset.In{Stdin: ioIn, Stdout: ioOut, Stderr: ioErr}, func() { close(ioCancelledCh) })
+	proc.ForwardIO(&ioset.In{Stdin: ioIn, Stdout: ioOut, Stderr: ioErr}, func(error) { close(ioCancelledCh) })
 
 	select {
 	case <-ioCancelledCh:
@@ -117,7 +117,7 @@ func (b *localController) Invoke(ctx context.Context, sessionID string, pid stri
 	case err := <-proc.Done():
 		return err
 	case <-ctx.Done():
-		return ctx.Err()
+		return context.Cause(ctx)
 	}
 }
 
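
Swapping `ctx.Err()` for `context.Cause(ctx)` in these select branches is a safe drop-in: when no explicit cause was recorded, `Cause` returns the same `context.Canceled` or `context.DeadlineExceeded` that `Err` would. A sketch of the shape used here (`errBuildAborted` is a hypothetical cause, not a buildx name):

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

var errBuildAborted = errors.New("build aborted by user")

// wait returns the recorded cancellation cause rather than the
// generic ctx.Err() when the context wins the select.
func wait(ctx context.Context, done <-chan error) error {
	select {
	case err := <-done:
		return err
	case <-ctx.Done():
		return context.Cause(ctx)
	}
}

func main() {
	ctx, cancel := context.WithCancelCause(context.Background())
	cancel(errBuildAborted)
	fmt.Println(wait(ctx, make(chan error))) // build aborted by user
}
```
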
@@ -153,7 +153,6 @@ func ResolveOptionPaths(options *BuildOptions) (_ *BuildOptions, err error) {
 			}
 		}
 		ps = append(ps, p)
-
 	}
 	s.Paths = ps
 	ssh = append(ssh, s)
@@ -22,9 +22,7 @@ func (w *writer) Write(status *client.SolveStatus) {
 	w.ch <- ToControlStatus(status)
 }
 
-func (w *writer) WriteBuildRef(target string, ref string) {
-	return
-}
+func (w *writer) WriteBuildRef(target string, ref string) {}
 
 func (w *writer) ValidateLogSource(digest.Digest, interface{}) bool {
 	return true
@@ -18,16 +18,16 @@ type Process struct {
 	invokeConfig  *pb.InvokeConfig
 	errCh         chan error
 	processCancel func()
-	serveIOCancel func()
+	serveIOCancel func(error)
 }
 
 // ForwardIO forwards process's io to the specified reader/writer.
 // Optionally specify ioCancelCallback which will be called when
 // the process closes the specified IO. This will be useful for additional cleanup.
-func (p *Process) ForwardIO(in *ioset.In, ioCancelCallback func()) {
+func (p *Process) ForwardIO(in *ioset.In, ioCancelCallback func(error)) {
 	p.inEnd.SetIn(in)
 	if f := p.serveIOCancel; f != nil {
-		f()
+		f(errors.WithStack(context.Canceled))
 	}
 	p.serveIOCancel = ioCancelCallback
 }
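
Widening the callback type from `func()` to `func(error)` mirrors `context.CancelCauseFunc`, so the IO teardown path can say why it fired. A toy sketch of the hand-off (names hypothetical, not the buildx types):

```go
package main

import (
	"context"
	"fmt"
)

// cancelCallback mirrors context.CancelCauseFunc so the cancellation
// reason can travel with the cancel signal.
type cancelCallback func(error)

type proc struct {
	onIOCancel cancelCallback
}

// forwardIO replaces the IO hook; any previously registered callback
// is cancelled first, with an explicit cause.
func (p *proc) forwardIO(cb cancelCallback) {
	if f := p.onIOCancel; f != nil {
		f(context.Canceled)
	}
	p.onIOCancel = cb
}

func main() {
	p := &proc{}
	p.forwardIO(func(err error) { fmt.Println("old io cancelled:", err) })
	p.forwardIO(func(err error) { fmt.Println("not called in this run:", err) })
	// Output: old io cancelled: context canceled
}
```
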
@@ -124,9 +124,16 @@ func (m *Manager) StartProcess(pid string, resultCtx *build.ResultHandle, cfg *p
 	f.SetOut(&out)
 
 	// Register process
-	ctx, cancel := context.WithCancel(context.TODO())
+	ctx, cancel := context.WithCancelCause(context.TODO())
 	var cancelOnce sync.Once
-	processCancelFunc := func() { cancelOnce.Do(func() { cancel(); f.Close(); in.Close(); out.Close() }) }
+	processCancelFunc := func() {
+		cancelOnce.Do(func() {
+			cancel(errors.WithStack(context.Canceled))
+			f.Close()
+			in.Close()
+			out.Close()
+		})
+	}
 	p := &Process{
 		inEnd:        f,
 		invokeConfig: cfg,
@@ -62,9 +62,10 @@ func NewRemoteBuildxController(ctx context.Context, dockerCli command.Cli, opts
 	serverRoot := filepath.Join(rootDir, "shared")
 
 	// connect to buildx server if it is already running
-	ctx2, cancel := context.WithTimeout(ctx, 1*time.Second)
+	ctx2, cancel := context.WithCancelCause(ctx)
+	ctx2, _ = context.WithTimeoutCause(ctx2, 1*time.Second, errors.WithStack(context.DeadlineExceeded))
 	c, err := newBuildxClientAndCheck(ctx2, filepath.Join(serverRoot, defaultSocketFilename))
-	cancel()
+	cancel(errors.WithStack(context.Canceled))
 	if err != nil {
 		if !errors.Is(err, context.DeadlineExceeded) {
 			return nil, errors.Wrap(err, "cannot connect to the buildx server")
@@ -90,9 +91,10 @@ func NewRemoteBuildxController(ctx context.Context, dockerCli command.Cli, opts
 		go wait()
 
 		// wait for buildx server to be ready
-		ctx2, cancel = context.WithTimeout(ctx, 10*time.Second)
+		ctx2, cancel = context.WithCancelCause(ctx)
+		ctx2, _ = context.WithTimeoutCause(ctx2, 10*time.Second, errors.WithStack(context.DeadlineExceeded))
 		c, err = newBuildxClientAndCheck(ctx2, filepath.Join(serverRoot, defaultSocketFilename))
-		cancel()
+		cancel(errors.WithStack(context.Canceled))
 		if err != nil {
 			return errors.Wrap(err, "cannot connect to the buildx server")
 		}
@@ -302,7 +302,6 @@ func attachIO(ctx context.Context, stream msgStream, initMessage *pb.InitMessage
 			out = cfg.stderr
 		default:
 			return errors.Errorf("unsupported fd %d", file.Fd)
-
 		}
 		if out == nil {
 			logrus.Warnf("attachIO: no writer for fd %d", file.Fd)
@@ -345,7 +344,7 @@ func receive(ctx context.Context, stream msgStream) (*pb.Message, error) {
 	case err := <-errCh:
 		return nil, err
 	case <-ctx.Done():
-		return nil, ctx.Err()
+		return nil, context.Cause(ctx)
 	}
 }
 
@@ -37,7 +37,7 @@ type Server struct {
 type session struct {
 	buildOnGoing atomic.Bool
 	statusChan   chan *pb.StatusResponse
-	cancelBuild  func()
+	cancelBuild  func(error)
 	buildOptions *pb.BuildOptions
 	inputPipe    *io.PipeWriter
 
@@ -109,7 +109,7 @@ func (m *Server) Disconnect(ctx context.Context, req *pb.DisconnectRequest) (res
 	m.sessionMu.Lock()
 	if s, ok := m.session[sessionID]; ok {
 		if s.cancelBuild != nil {
-			s.cancelBuild()
+			s.cancelBuild(errors.WithStack(context.Canceled))
 		}
 		s.cancelRunningProcesses()
 		if s.result != nil {
@@ -127,7 +127,7 @@ func (m *Server) Close() error {
 	for k := range m.session {
 		if s, ok := m.session[k]; ok {
 			if s.cancelBuild != nil {
-				s.cancelBuild()
+				s.cancelBuild(errors.WithStack(context.Canceled))
 			}
 			s.cancelRunningProcesses()
 		}
@@ -199,8 +199,8 @@ func (m *Server) Build(ctx context.Context, req *pb.BuildRequest) (*pb.BuildResp
 	pw := pb.NewProgressWriter(statusChan)
 
 	// Build the specified request
-	ctx, cancel := context.WithCancel(ctx)
-	defer cancel()
+	ctx, cancel := context.WithCancelCause(ctx)
+	defer func() { cancel(errors.WithStack(context.Canceled)) }()
 	resp, res, _, buildErr := m.buildFunc(ctx, req.Options, inR, pw)
 	m.sessionMu.Lock()
 	if s, ok := m.session[sessionID]; ok {
@@ -341,7 +341,7 @@ func (m *Server) Input(stream pb.Controller_InputServer) (err error) {
 		select {
 		case msg = <-msgCh:
 		case <-ctx.Done():
-			return errors.Wrap(ctx.Err(), "canceled")
+			return context.Cause(ctx)
 		}
 		if msg == nil {
 			return nil
@@ -370,9 +370,9 @@ func (m *Server) Invoke(srv pb.Controller_InvokeServer) error {
 	initDoneCh := make(chan *processes.Process)
 	initErrCh := make(chan error)
 	eg, egCtx := errgroup.WithContext(context.TODO())
-	srvIOCtx, srvIOCancel := context.WithCancel(egCtx)
+	srvIOCtx, srvIOCancel := context.WithCancelCause(egCtx)
 	eg.Go(func() error {
-		defer srvIOCancel()
+		defer srvIOCancel(errors.WithStack(context.Canceled))
 		return serveIO(srvIOCtx, srv, func(initMessage *pb.InitMessage) (retErr error) {
 			defer func() {
 				if retErr != nil {
@@ -418,7 +418,7 @@ func (m *Server) Invoke(srv pb.Controller_InvokeServer) error {
 		})
 	})
 	eg.Go(func() (rErr error) {
-		defer srvIOCancel()
+		defer srvIOCancel(errors.WithStack(context.Canceled))
 		// Wait for init done
 		var proc *processes.Process
 		select {
@@ -177,7 +177,6 @@ func (d *Driver) create(ctx context.Context, l progress.SubLogger) error {
 				break
 			}
 		}
-
 	}
 	_, err := d.DockerAPI.ContainerCreate(ctx, cfg, hc, &network.NetworkingConfig{}, nil, d.Name)
 	if err != nil && !errdefs.IsConflict(err) {
@@ -213,7 +212,7 @@ func (d *Driver) wait(ctx context.Context, l progress.SubLogger) error {
 		}
 		select {
 		case <-ctx.Done():
-			return ctx.Err()
+			return context.Cause(ctx)
 		case <-time.After(time.Duration(try*120) * time.Millisecond):
 			try++
 			continue
@@ -176,11 +176,6 @@ func resolveBuildKitVersion(ver string) (string, error) {
 	if err != nil {
 		return "", err
 	}
-	//if _, errs := c.Validate(mobyVersion); len(errs) > 0 {
-	//	for _, err := range errs {
-	//		fmt.Printf("%s: %v\n", m.MobyVersionConstraint, err)
-	//	}
-	//}
 	if !c.Check(mobyVersion) {
 		continue
 	}
@@ -112,7 +112,7 @@ func (d *Driver) wait(ctx context.Context) error {
 	for {
 		select {
 		case <-ctx.Done():
-			return ctx.Err()
+			return context.Cause(ctx)
 		case <-timeoutChan:
 			return err
 		case <-ticker.C:
@@ -47,8 +47,9 @@ func (d *Driver) Bootstrap(ctx context.Context, l progress.Logger) error {
 		return err
 	}
 	return progress.Wrap("[internal] waiting for connection", l, func(_ progress.SubLogger) error {
-		ctx, cancel := context.WithTimeout(ctx, 20*time.Second)
-		defer cancel()
+		cancelCtx, cancel := context.WithCancelCause(ctx)
+		ctx, _ := context.WithTimeoutCause(cancelCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded))
+		defer func() { cancel(errors.WithStack(context.Canceled)) }()
 		return c.Wait(ctx)
 	})
 }
@@ -326,7 +326,7 @@ func (m *monitor) invoke(ctx context.Context, pid string, cfg *controllerapi.Inv
 	if m.AttachedSessionID() == "" {
 		return nil
 	}
-	invokeCtx, invokeCancel := context.WithCancel(ctx)
+	invokeCtx, invokeCancel := context.WithCancelCause(ctx)
 
 	containerIn, containerOut := ioset.Pipe()
 	m.invokeIO.SetOut(&containerOut)
@@ -336,7 +336,7 @@ func (m *monitor) invoke(ctx context.Context, pid string, cfg *controllerapi.Inv
 		cancelOnce.Do(func() {
 			containerIn.Close()
 			m.invokeIO.SetOut(nil)
-			invokeCancel()
+			invokeCancel(errors.WithStack(context.Canceled))
 		})
 		<-waitInvokeDoneCh
 	}
@@ -978,7 +978,6 @@ func testBakeMultiPlatform(t *testing.T, sb integration.Sandbox) {
 		require.NotNil(t, img)
 		img = imgs.Find("linux/arm64")
 		require.NotNil(t, img)
-
 	} else {
 		require.Error(t, err, string(out))
 		require.Contains(t, string(out), "Multi-platform build is not supported")
@@ -1468,7 +1467,7 @@ target "third" {
 	fstest.CreateFile("docker-bake.hcl", bakefile, 0600),
 	)
 
-	dockerfilePathFirst := filepath.Join("Dockerfile")
+	dockerfilePathFirst := "Dockerfile"
 	dockerfilePathSecond := filepath.Join("subdir", "Dockerfile")
 	dockerfilePathThird := filepath.Join("subdir", "subsubdir", "Dockerfile")
 
@@ -649,7 +649,6 @@ func testBuildMultiPlatform(t *testing.T, sb integration.Sandbox) {
 		require.NotNil(t, img)
 		img = imgs.Find("linux/arm64")
 		require.NotNil(t, img)
-
 	} else {
 		require.Error(t, err, string(out))
 		require.Contains(t, string(out), "Multi-platform build is not supported")
@@ -138,7 +138,6 @@ func ParseAnnotations(inp []string) (map[exptypes.AnnotationKey]string, error) {
 			}
 			annotations[ak] = v
 		}
-
 	}
 	return annotations, nil
 }
@@ -41,7 +41,6 @@ func (c *Client) LoadImage(ctx context.Context, name string, status progress.Wri
 	pr, pw := io.Pipe()
 	done := make(chan struct{})
 
-	ctx, cancel := context.WithCancel(ctx)
 	var w *waitingWriter
 	w = &waitingWriter{
 		PipeWriter: pw,
@@ -67,8 +66,7 @@ func (c *Client) LoadImage(ctx context.Context, name string, status progress.Wri
 				handleErr(err)
 			}
 		},
-		done:   done,
-		cancel: cancel,
+		done: done,
 	}
 	return w, func() {
 		pr.Close()
@@ -101,12 +99,11 @@ func (c *Client) features(ctx context.Context, name string) map[Feature]bool {
 
 type waitingWriter struct {
 	*io.PipeWriter
-	f      func()
-	once   sync.Once
-	mu     sync.Mutex
-	err    error
-	done   chan struct{}
-	cancel func()
+	f    func()
+	once sync.Once
+	mu   sync.Mutex
+	err  error
+	done chan struct{}
 }
 
 func (w *waitingWriter) Write(dt []byte) (int, error) {
@@ -55,7 +55,7 @@ func fromReader(l progress.SubLogger, rc io.ReadCloser) error {
 			Started: &now,
 		}
 	}
-	timeDelta := time.Now().Sub(st.Timestamp)
+	timeDelta := time.Since(st.Timestamp)
 	if timeDelta < minTimeDelta {
 		continue
 	}
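
`time.Since(t)` is shorthand for `time.Now().Sub(t)`; gosimple's S1012 check prefers the former. For example:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now().Add(-150 * time.Millisecond)
	// Equivalent results, but the linter-preferred spelling is time.Since.
	d1 := time.Now().Sub(start) //nolint:gosimple // shown for comparison
	d2 := time.Since(start)
	fmt.Println(d1.Round(time.Millisecond), d2.Round(time.Millisecond))
}
```
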
@@ -7,6 +7,7 @@ import (
 	"net/http"
 	"testing"
 
+	"github.com/pkg/errors"
 	"github.com/stretchr/testify/require"
 )
 
@@ -25,7 +26,7 @@ func WithAccessToken(token string) GitServeOpt {
 func GitServeHTTP(c *Git, t testing.TB, opts ...GitServeOpt) (url string) {
 	t.Helper()
 	gitUpdateServerInfo(c, t)
-	ctx, cancel := context.WithCancel(context.TODO())
+	ctx, cancel := context.WithCancelCause(context.TODO())
 
 	gs := &gitServe{}
 	for _, opt := range opts {
@@ -38,7 +39,7 @@ func GitServeHTTP(c *Git, t testing.TB, opts ...GitServeOpt) (url string) {
 	name := "test.git"
 	dir, err := c.GitDir()
 	if err != nil {
-		cancel()
+		cancel(err)
 	}
 
 	var addr string
@@ -84,7 +85,7 @@ func GitServeHTTP(c *Git, t testing.TB, opts ...GitServeOpt) (url string) {
 	<-ready
 
 	t.Cleanup(func() {
-		cancel()
+		cancel(errors.Errorf("cleanup"))
 		<-done
 	})
 	return fmt.Sprintf("http://%s/%s", addr, name)
@@ -47,7 +47,6 @@ func (f mockFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.Rea
 		reader := io.NopCloser(strings.NewReader(desc.Annotations["test_content"]))
 		return reader, nil
 	}
-
 }
 
 func (r mockResolver) Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error) {
@@ -61,7 +61,7 @@ func (m *Map) Get(ctx context.Context, keys ...string) (map[string]interface{},
 		m.mu.Unlock()
 		select {
 		case <-ctx.Done():
-			return nil, ctx.Err()
+			return nil, context.Cause(ctx)
 		case <-ch:
 			m.mu.Lock()
 		}
@@ -34,7 +34,7 @@ func TestTimeout(t *testing.T) {
 
 	m.Set("foo", "bar")
 
-	ctx, cancel := context.WithTimeout(context.TODO(), 100*time.Millisecond)
+	ctx, cancel := context.WithTimeoutCause(context.TODO(), 100*time.Millisecond, nil)
 	defer cancel()
 
 	_, err := m.Get(ctx, "bar")