mirror of https://github.com/docker/buildx.git

Merge pull request #2798 from tonistiigi/linter-updates

Improve linter checks

commit 26bbddb5d6
@@ -5,22 +5,45 @@ run:
 
 linters:
   enable:
-    - gofmt
-    - govet
+    - bodyclose
     - depguard
+    - forbidigo
+    - gocritic
+    - gofmt
     - goimports
+    - gosec
+    - gosimple
+    - govet
     - ineffassign
+    - makezero
     - misspell
-    - unused
+    - noctx
+    - nolintlint
     - revive
     - staticcheck
     - typecheck
-    - nolintlint
-    - gosec
-    - forbidigo
+    - unused
+    - whitespace
   disable-all: true
 
 linters-settings:
+  gocritic:
+    disabled-checks:
+      - "ifElseChain"
+      - "assignOp"
+      - "appendAssign"
+      - "singleCaseSwitch"
+      - "exitAfterDefer" # FIXME
+  importas:
+    alias:
+      # Enforce alias to prevent it accidentally being used instead of
+      # buildkit errdefs package (or vice-versa).
+      - pkg: "github.com/containerd/errdefs"
+        alias: "cerrdefs"
+      - pkg: "github.com/opencontainers/image-spec/specs-go/v1"
+        alias: "ocispecs"
+      - pkg: "github.com/opencontainers/go-digest"
+        alias: "digest"
   govet:
     enable:
       - nilness
@@ -43,6 +66,10 @@ linters-settings:
             desc: The io/ioutil package has been deprecated.
   forbidigo:
     forbid:
+      - '^context\.WithCancel(# use context\.WithCancelCause instead)?$'
+      - '^context\.WithDeadline(# use context\.WithDeadline instead)?$'
+      - '^context\.WithTimeout(# use context\.WithTimeoutCause instead)?$'
+      - '^ctx\.Err(# use context\.Cause instead)?$'
       - '^fmt\.Errorf(# use errors\.Errorf instead)?$'
       - '^platforms\.DefaultString(# use platforms\.Format(platforms\.DefaultSpec()) instead\.)?$'
   gosec:
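Note: the forbidigo rules added above ban the plain context helpers (context.WithCancel, context.WithTimeout, context.WithDeadline, ctx.Err) in favour of the Cause-aware variants. Below is a minimal standalone sketch of the pattern the rules push toward; it is illustrative only and not code from this commit.

// Sketch: cancel with an explicit cause and read it back with context.Cause.
package main

import (
    "context"
    "errors"
    "fmt"
    "time"
)

func main() {
    // context.WithCancelCause replaces context.WithCancel.
    ctx, cancel := context.WithCancelCause(context.Background())

    go func() {
        time.Sleep(10 * time.Millisecond)
        // The cause records *why* the context was cancelled.
        cancel(errors.New("user pressed Ctrl+C"))
    }()

    <-ctx.Done()
    fmt.Println(ctx.Err())          // always "context canceled"
    fmt.Println(context.Cause(ctx)) // "user pressed Ctrl+C"
}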
@@ -179,7 +179,6 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
             c.Targets = append(c.Targets, t)
         }
         c.Groups = append(c.Groups, g)
-
     }
 
     return &c, nil
@@ -170,7 +170,6 @@ func indexOfFunc() function.Function {
                 }
             }
             return cty.NilVal, errors.New("item not found")
-
         },
     })
 }
@@ -16,7 +16,7 @@ import (
 
 type Container struct {
     cancelOnce sync.Once
-    containerCancel func()
+    containerCancel func(error)
     isUnavailable atomic.Bool
     initStarted atomic.Bool
     container gateway.Container
@@ -31,18 +31,18 @@ func NewContainer(ctx context.Context, resultCtx *ResultHandle, cfg *controllera
     errCh := make(chan error)
     go func() {
         err := resultCtx.build(func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
-            ctx, cancel := context.WithCancel(ctx)
+            ctx, cancel := context.WithCancelCause(ctx)
             go func() {
                 <-mainCtx.Done()
-                cancel()
+                cancel(errors.WithStack(context.Canceled))
             }()
 
             containerCfg, err := resultCtx.getContainerConfig(cfg)
             if err != nil {
                 return nil, err
             }
-            containerCtx, containerCancel := context.WithCancel(ctx)
-            defer containerCancel()
+            containerCtx, containerCancel := context.WithCancelCause(ctx)
+            defer containerCancel(errors.WithStack(context.Canceled))
             bkContainer, err := c.NewContainer(containerCtx, containerCfg)
             if err != nil {
                 return nil, err
@@ -83,7 +83,7 @@ func (c *Container) Cancel() {
     c.markUnavailable()
     c.cancelOnce.Do(func() {
         if c.containerCancel != nil {
-            c.containerCancel()
+            c.containerCancel(errors.WithStack(context.Canceled))
         }
         close(c.releaseCh)
     })
@@ -82,7 +82,7 @@ func NewResultHandle(ctx context.Context, cc *client.Client, opt client.SolveOpt
     var respHandle *ResultHandle
 
     go func() {
-        defer cancel(context.Canceled) // ensure no dangling processes
+        defer func() { cancel(errors.WithStack(context.Canceled)) }() // ensure no dangling processes
 
         var res *gateway.Result
         var err error
@@ -181,7 +181,7 @@ func NewResultHandle(ctx context.Context, cc *client.Client, opt client.SolveOpt
             case <-respHandle.done:
             case <-ctx.Done():
             }
-            return nil, ctx.Err()
+            return nil, context.Cause(ctx)
         }, nil)
         if respHandle != nil {
             return
@@ -288,7 +288,15 @@ func GetBuilders(dockerCli command.Cli, txn *store.Txn) ([]*Builder, error) {
         return nil, err
     }
 
-    builders := make([]*Builder, len(storeng))
+    contexts, err := dockerCli.ContextStore().List()
+    if err != nil {
+        return nil, err
+    }
+    sort.Slice(contexts, func(i, j int) bool {
+        return contexts[i].Name < contexts[j].Name
+    })
+
+    builders := make([]*Builder, len(storeng), len(storeng)+len(contexts))
     seen := make(map[string]struct{})
     for i, ng := range storeng {
         b, err := New(dockerCli,
@@ -303,14 +311,6 @@ func GetBuilders(dockerCli command.Cli, txn *store.Txn) ([]*Builder, error) {
         seen[b.NodeGroup.Name] = struct{}{}
     }
 
-    contexts, err := dockerCli.ContextStore().List()
-    if err != nil {
-        return nil, err
-    }
-    sort.Slice(contexts, func(i, j int) bool {
-        return contexts[i].Name < contexts[j].Name
-    })
-
     for _, c := range contexts {
         // if a context has the same name as an instance from the store, do not
         // add it to the builders list. An instance from the store takes
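Note: the builders slice above is now sized len(storeng) with spare capacity for the docker contexts appended afterwards. Below is a small self-contained sketch of that allocation pattern; the names are hypothetical and not taken from the repository.

// Sketch: indexed writes fill the length portion, appends use the reserved capacity.
package main

import "fmt"

func merge(fromStore, fromContexts []string) []string {
    out := make([]string, len(fromStore), len(fromStore)+len(fromContexts))
    for i, s := range fromStore {
        out[i] = s // overwrite every element of the length portion
    }
    for _, c := range fromContexts {
        out = append(out, c) // no reallocation: capacity was reserved up front
    }
    return out
}

func main() {
    fmt.Println(merge([]string{"a", "b"}, []string{"c"}))
}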
@@ -522,8 +522,9 @@ func Create(ctx context.Context, txn *store.Txn, dockerCli command.Cli, opts Cre
         return nil, err
     }
 
-    timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
-    defer cancel()
+    cancelCtx, cancel := context.WithCancelCause(ctx)
+    timeoutCtx, _ := context.WithTimeoutCause(cancelCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded))
+    defer func() { cancel(errors.WithStack(context.Canceled)) }()
 
     nodes, err := b.LoadNodes(timeoutCtx, WithData())
     if err != nil {
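Note: several hunks in this commit replace context.WithTimeout with a cancellable parent plus context.WithTimeoutCause, as above. Below is a rough sketch of that layering; the function and variable names are assumptions for illustration, not buildx API.

// Sketch: a cancellable parent carries the deferred cancel; the timeout layer
// gets an explicit cause so context.Cause reports a stack-carrying DeadlineExceeded.
package main

import (
    "context"
    "fmt"
    "time"

    "github.com/pkg/errors"
)

func loadWithTimeout(parent context.Context) error {
    cancelCtx, cancel := context.WithCancelCause(parent)
    // The timeout's own cancel is discarded; cancelling the parent below
    // also stops the child timer.
    timeoutCtx, _ := context.WithTimeoutCause(cancelCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded))
    defer func() { cancel(errors.WithStack(context.Canceled)) }()

    select {
    case <-timeoutCtx.Done():
        return context.Cause(timeoutCtx) // the recorded cause, not just "context deadline exceeded"
    case <-time.After(10 * time.Millisecond): // stand-in for the bounded operation
        return nil
    }
}

func main() {
    fmt.Println(loadWithTimeout(context.Background()))
}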
@@ -108,8 +108,8 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
         return err
     }
 
-    ctx2, cancel := context.WithCancel(context.TODO())
-    defer cancel()
+    ctx2, cancel := context.WithCancelCause(context.TODO())
+    defer cancel(errors.WithStack(context.Canceled))
 
     var nodes []builder.Node
     var progressConsoleDesc, progressTextDesc string
@@ -325,8 +325,8 @@ func runBuild(ctx context.Context, dockerCli command.Cli, options buildOptions)
     }
     attributes := buildMetricAttributes(dockerCli, driverType, &options)
 
-    ctx2, cancel := context.WithCancel(context.TODO())
-    defer cancel()
+    ctx2, cancel := context.WithCancelCause(context.TODO())
+    defer func() { cancel(errors.WithStack(context.Canceled)) }()
     progressMode, err := options.toDisplayMode()
     if err != nil {
         return err
@@ -885,7 +885,6 @@ func printWarnings(w io.Writer, warnings []client.VertexWarning, mode progressui
             src.Print(w)
         }
         fmt.Fprintf(w, "\n")
-
     }
 }
 
@@ -42,7 +42,7 @@ func runCreate(ctx context.Context, dockerCli command.Cli, in createOptions, arg
         return errors.Errorf("can't push with no tags specified, please set --tag or --dry-run")
     }
 
-    fileArgs := make([]string, len(in.files))
+    fileArgs := make([]string, len(in.files), len(in.files)+len(args))
     for i, f := range in.files {
         dt, err := os.ReadFile(f)
         if err != nil {
@@ -173,8 +173,8 @@ func runCreate(ctx context.Context, dockerCli command.Cli, in createOptions, arg
     // new resolver cause need new auth
     r = imagetools.New(imageopt)
 
-    ctx2, cancel := context.WithCancel(context.TODO())
-    defer cancel()
+    ctx2, cancel := context.WithCancelCause(context.TODO())
+    defer func() { cancel(errors.WithStack(context.Canceled)) }()
     printer, err := progress.NewPrinter(ctx2, os.Stderr, progressui.DisplayMode(in.progress))
     if err != nil {
         return err
@@ -17,6 +17,7 @@ import (
     "github.com/docker/cli/cli/command"
     "github.com/docker/cli/cli/debug"
     "github.com/docker/go-units"
+    "github.com/pkg/errors"
     "github.com/spf13/cobra"
 )
 
@@ -34,8 +35,9 @@ func runInspect(ctx context.Context, dockerCli command.Cli, in inspectOptions) e
         return err
     }
 
-    timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
-    defer cancel()
+    timeoutCtx, cancel := context.WithCancelCause(ctx)
+    timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded))
+    defer func() { cancel(errors.WithStack(context.Canceled)) }()
 
     nodes, err := b.LoadNodes(timeoutCtx, builder.WithData())
     if in.bootstrap {
@@ -18,6 +18,7 @@ import (
     "github.com/docker/cli/cli"
     "github.com/docker/cli/cli/command"
     "github.com/docker/cli/cli/command/formatter"
+    "github.com/pkg/errors"
     "github.com/spf13/cobra"
     "golang.org/x/sync/errgroup"
 )
@@ -57,8 +58,9 @@ func runLs(ctx context.Context, dockerCli command.Cli, in lsOptions) error {
         return err
     }
 
-    timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
-    defer cancel()
+    timeoutCtx, cancel := context.WithCancelCause(ctx)
+    timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded))
+    defer func() { cancel(errors.WithStack(context.Canceled)) }()
 
     eg, _ := errgroup.WithContext(timeoutCtx)
     for _, b := range builders {
@@ -319,7 +321,7 @@ func (tp truncatedPlatforms) String() string {
         if tpf, ok := tp.res[mpf]; ok {
             seen[mpf] = struct{}{}
             if len(tpf) == 1 {
-                out = append(out, fmt.Sprintf("%s", tpf[0]))
+                out = append(out, tpf[0])
                 count++
             } else {
                 hasPreferredPlatform := false
@@ -347,7 +349,7 @@ func (tp truncatedPlatforms) String() string {
             continue
         }
         if len(tp.res[mpf]) == 1 {
-            out = append(out, fmt.Sprintf("%s", tp.res[mpf][0]))
+            out = append(out, tp.res[mpf][0])
             count++
         } else {
             hasPreferredPlatform := false
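Note: the two changes above drop fmt.Sprintf("%s", v) where v is already a string, which gocritic flags as redundant formatting. A tiny standalone illustration (not code from the repository):

package main

import "fmt"

func main() {
    v := "linux/amd64"
    a := fmt.Sprintf("%s", v) // flagged: formatting a value that is already a string
    b := v                    // equivalent and allocation-free
    fmt.Println(a == b)       // true
}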
@@ -150,8 +150,9 @@ func rmAllInactive(ctx context.Context, txn *store.Txn, dockerCli command.Cli, i
         return err
     }
 
-    timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
-    defer cancel()
+    timeoutCtx, cancel := context.WithCancelCause(ctx)
+    timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded))
+    defer func() { cancel(errors.WithStack(context.Canceled)) }()
 
     eg, _ := errgroup.WithContext(timeoutCtx)
     for _, b := range builders {
@@ -46,7 +46,6 @@ func runUse(dockerCli command.Cli, in useOptions) error {
                 return errors.Errorf("run `docker context use %s` to switch to context %s", in.builder, in.builder)
             }
         }
-
         }
         return errors.Wrapf(err, "failed to find instance %q", in.builder)
     }
@@ -109,7 +109,7 @@ func (b *localController) Invoke(ctx context.Context, sessionID string, pid stri
 
     // Attach containerIn to this process
     ioCancelledCh := make(chan struct{})
-    proc.ForwardIO(&ioset.In{Stdin: ioIn, Stdout: ioOut, Stderr: ioErr}, func() { close(ioCancelledCh) })
+    proc.ForwardIO(&ioset.In{Stdin: ioIn, Stdout: ioOut, Stderr: ioErr}, func(error) { close(ioCancelledCh) })
 
     select {
     case <-ioCancelledCh:
@@ -117,7 +117,7 @@ func (b *localController) Invoke(ctx context.Context, sessionID string, pid stri
     case err := <-proc.Done():
         return err
     case <-ctx.Done():
-        return ctx.Err()
+        return context.Cause(ctx)
     }
 }
 
@@ -153,7 +153,6 @@ func ResolveOptionPaths(options *BuildOptions) (_ *BuildOptions, err error) {
                 }
             }
             ps = append(ps, p)
-
         }
         s.Paths = ps
         ssh = append(ssh, s)
@@ -22,9 +22,7 @@ func (w *writer) Write(status *client.SolveStatus) {
     w.ch <- ToControlStatus(status)
 }
 
-func (w *writer) WriteBuildRef(target string, ref string) {
-    return
-}
+func (w *writer) WriteBuildRef(target string, ref string) {}
 
 func (w *writer) ValidateLogSource(digest.Digest, interface{}) bool {
     return true
@@ -18,16 +18,16 @@ type Process struct {
     invokeConfig *pb.InvokeConfig
     errCh chan error
     processCancel func()
-    serveIOCancel func()
+    serveIOCancel func(error)
 }
 
 // ForwardIO forwards process's io to the specified reader/writer.
 // Optionally specify ioCancelCallback which will be called when
 // the process closes the specified IO. This will be useful for additional cleanup.
-func (p *Process) ForwardIO(in *ioset.In, ioCancelCallback func()) {
+func (p *Process) ForwardIO(in *ioset.In, ioCancelCallback func(error)) {
     p.inEnd.SetIn(in)
     if f := p.serveIOCancel; f != nil {
-        f()
+        f(errors.WithStack(context.Canceled))
     }
     p.serveIOCancel = ioCancelCallback
 }
@@ -124,9 +124,16 @@ func (m *Manager) StartProcess(pid string, resultCtx *build.ResultHandle, cfg *p
     f.SetOut(&out)
 
     // Register process
-    ctx, cancel := context.WithCancel(context.TODO())
+    ctx, cancel := context.WithCancelCause(context.TODO())
     var cancelOnce sync.Once
-    processCancelFunc := func() { cancelOnce.Do(func() { cancel(); f.Close(); in.Close(); out.Close() }) }
+    processCancelFunc := func() {
+        cancelOnce.Do(func() {
+            cancel(errors.WithStack(context.Canceled))
+            f.Close()
+            in.Close()
+            out.Close()
+        })
+    }
     p := &Process{
         inEnd: f,
         invokeConfig: cfg,
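Note: the StartProcess change above turns a one-line closure into a sync.Once-guarded cleanup, so cancellation (now with a cause) and the Close calls run at most once however many times teardown is triggered. Below is a simplified sketch of that pattern with hypothetical resources, not code from the repository.

package main

import (
    "context"
    "fmt"
    "sync"

    "github.com/pkg/errors"
)

type closer struct{ name string }

func (c *closer) Close() error { fmt.Println("closed", c.name); return nil }

func main() {
    ctx, cancel := context.WithCancelCause(context.TODO())
    in, out := &closer{"in"}, &closer{"out"}

    var once sync.Once
    cleanup := func() {
        once.Do(func() {
            cancel(errors.WithStack(context.Canceled))
            in.Close()
            out.Close()
        })
    }

    cleanup()
    cleanup() // second call is a no-op thanks to sync.Once
    fmt.Println(context.Cause(ctx))
}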
@@ -62,9 +62,10 @@ func NewRemoteBuildxController(ctx context.Context, dockerCli command.Cli, opts
     serverRoot := filepath.Join(rootDir, "shared")
 
     // connect to buildx server if it is already running
-    ctx2, cancel := context.WithTimeout(ctx, 1*time.Second)
+    ctx2, cancel := context.WithCancelCause(ctx)
+    ctx2, _ = context.WithTimeoutCause(ctx2, 1*time.Second, errors.WithStack(context.DeadlineExceeded))
     c, err := newBuildxClientAndCheck(ctx2, filepath.Join(serverRoot, defaultSocketFilename))
-    cancel()
+    cancel(errors.WithStack(context.Canceled))
     if err != nil {
         if !errors.Is(err, context.DeadlineExceeded) {
             return nil, errors.Wrap(err, "cannot connect to the buildx server")
@@ -90,9 +91,10 @@ func NewRemoteBuildxController(ctx context.Context, dockerCli command.Cli, opts
     go wait()
 
     // wait for buildx server to be ready
-    ctx2, cancel = context.WithTimeout(ctx, 10*time.Second)
+    ctx2, cancel = context.WithCancelCause(ctx)
+    ctx2, _ = context.WithTimeoutCause(ctx2, 10*time.Second, errors.WithStack(context.DeadlineExceeded))
     c, err = newBuildxClientAndCheck(ctx2, filepath.Join(serverRoot, defaultSocketFilename))
-    cancel()
+    cancel(errors.WithStack(context.Canceled))
     if err != nil {
         return errors.Wrap(err, "cannot connect to the buildx server")
     }
@@ -302,7 +302,6 @@ func attachIO(ctx context.Context, stream msgStream, initMessage *pb.InitMessage
                 out = cfg.stderr
             default:
                 return errors.Errorf("unsupported fd %d", file.Fd)
-
             }
             if out == nil {
                 logrus.Warnf("attachIO: no writer for fd %d", file.Fd)
@@ -345,7 +344,7 @@ func receive(ctx context.Context, stream msgStream) (*pb.Message, error) {
     case err := <-errCh:
         return nil, err
     case <-ctx.Done():
-        return nil, ctx.Err()
+        return nil, context.Cause(ctx)
     }
 }
 
@@ -37,7 +37,7 @@ type Server struct {
 type session struct {
     buildOnGoing atomic.Bool
     statusChan chan *pb.StatusResponse
-    cancelBuild func()
+    cancelBuild func(error)
     buildOptions *pb.BuildOptions
     inputPipe *io.PipeWriter
 
@@ -109,7 +109,7 @@ func (m *Server) Disconnect(ctx context.Context, req *pb.DisconnectRequest) (res
     m.sessionMu.Lock()
     if s, ok := m.session[sessionID]; ok {
         if s.cancelBuild != nil {
-            s.cancelBuild()
+            s.cancelBuild(errors.WithStack(context.Canceled))
         }
         s.cancelRunningProcesses()
         if s.result != nil {
@@ -127,7 +127,7 @@ func (m *Server) Close() error {
     for k := range m.session {
         if s, ok := m.session[k]; ok {
             if s.cancelBuild != nil {
-                s.cancelBuild()
+                s.cancelBuild(errors.WithStack(context.Canceled))
             }
             s.cancelRunningProcesses()
         }
@@ -199,8 +199,8 @@ func (m *Server) Build(ctx context.Context, req *pb.BuildRequest) (*pb.BuildResp
     pw := pb.NewProgressWriter(statusChan)
 
     // Build the specified request
-    ctx, cancel := context.WithCancel(ctx)
-    defer cancel()
+    ctx, cancel := context.WithCancelCause(ctx)
+    defer func() { cancel(errors.WithStack(context.Canceled)) }()
     resp, res, _, buildErr := m.buildFunc(ctx, req.Options, inR, pw)
     m.sessionMu.Lock()
     if s, ok := m.session[sessionID]; ok {
@@ -341,7 +341,7 @@ func (m *Server) Input(stream pb.Controller_InputServer) (err error) {
         select {
         case msg = <-msgCh:
         case <-ctx.Done():
-            return errors.Wrap(ctx.Err(), "canceled")
+            return context.Cause(ctx)
         }
         if msg == nil {
             return nil
@@ -370,9 +370,9 @@ func (m *Server) Invoke(srv pb.Controller_InvokeServer) error {
     initDoneCh := make(chan *processes.Process)
     initErrCh := make(chan error)
     eg, egCtx := errgroup.WithContext(context.TODO())
-    srvIOCtx, srvIOCancel := context.WithCancel(egCtx)
+    srvIOCtx, srvIOCancel := context.WithCancelCause(egCtx)
     eg.Go(func() error {
-        defer srvIOCancel()
+        defer srvIOCancel(errors.WithStack(context.Canceled))
         return serveIO(srvIOCtx, srv, func(initMessage *pb.InitMessage) (retErr error) {
             defer func() {
                 if retErr != nil {
@@ -418,7 +418,7 @@ func (m *Server) Invoke(srv pb.Controller_InvokeServer) error {
         })
     })
     eg.Go(func() (rErr error) {
-        defer srvIOCancel()
+        defer srvIOCancel(errors.WithStack(context.Canceled))
         // Wait for init done
         var proc *processes.Process
         select {
@@ -177,7 +177,6 @@ func (d *Driver) create(ctx context.Context, l progress.SubLogger) error {
                 break
             }
         }
-
     }
     _, err := d.DockerAPI.ContainerCreate(ctx, cfg, hc, &network.NetworkingConfig{}, nil, d.Name)
     if err != nil && !errdefs.IsConflict(err) {
@@ -213,7 +212,7 @@ func (d *Driver) wait(ctx context.Context, l progress.SubLogger) error {
         }
         select {
         case <-ctx.Done():
-            return ctx.Err()
+            return context.Cause(ctx)
         case <-time.After(time.Duration(try*120) * time.Millisecond):
             try++
             continue
@@ -176,11 +176,6 @@ func resolveBuildKitVersion(ver string) (string, error) {
         if err != nil {
             return "", err
         }
-        //if _, errs := c.Validate(mobyVersion); len(errs) > 0 {
-        //  for _, err := range errs {
-        //      fmt.Printf("%s: %v\n", m.MobyVersionConstraint, err)
-        //  }
-        //}
         if !c.Check(mobyVersion) {
             continue
         }
@@ -112,7 +112,7 @@ func (d *Driver) wait(ctx context.Context) error {
     for {
         select {
         case <-ctx.Done():
-            return ctx.Err()
+            return context.Cause(ctx)
         case <-timeoutChan:
             return err
         case <-ticker.C:
@@ -47,8 +47,9 @@ func (d *Driver) Bootstrap(ctx context.Context, l progress.Logger) error {
         return err
     }
     return progress.Wrap("[internal] waiting for connection", l, func(_ progress.SubLogger) error {
-        ctx, cancel := context.WithTimeout(ctx, 20*time.Second)
-        defer cancel()
+        cancelCtx, cancel := context.WithCancelCause(ctx)
+        ctx, _ := context.WithTimeoutCause(cancelCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded))
+        defer func() { cancel(errors.WithStack(context.Canceled)) }()
         return c.Wait(ctx)
     })
 }
@@ -326,7 +326,7 @@ func (m *monitor) invoke(ctx context.Context, pid string, cfg *controllerapi.Inv
     if m.AttachedSessionID() == "" {
         return nil
     }
-    invokeCtx, invokeCancel := context.WithCancel(ctx)
+    invokeCtx, invokeCancel := context.WithCancelCause(ctx)
 
     containerIn, containerOut := ioset.Pipe()
     m.invokeIO.SetOut(&containerOut)
@@ -336,7 +336,7 @@
         cancelOnce.Do(func() {
             containerIn.Close()
             m.invokeIO.SetOut(nil)
-            invokeCancel()
+            invokeCancel(errors.WithStack(context.Canceled))
         })
         <-waitInvokeDoneCh
     }
@@ -978,7 +978,6 @@ func testBakeMultiPlatform(t *testing.T, sb integration.Sandbox) {
         require.NotNil(t, img)
         img = imgs.Find("linux/arm64")
         require.NotNil(t, img)
-
     } else {
         require.Error(t, err, string(out))
         require.Contains(t, string(out), "Multi-platform build is not supported")
@@ -1468,7 +1467,7 @@ target "third" {
         fstest.CreateFile("docker-bake.hcl", bakefile, 0600),
     )
 
-    dockerfilePathFirst := filepath.Join("Dockerfile")
+    dockerfilePathFirst := "Dockerfile"
     dockerfilePathSecond := filepath.Join("subdir", "Dockerfile")
     dockerfilePathThird := filepath.Join("subdir", "subsubdir", "Dockerfile")
 
@@ -649,7 +649,6 @@ func testBuildMultiPlatform(t *testing.T, sb integration.Sandbox) {
         require.NotNil(t, img)
         img = imgs.Find("linux/arm64")
         require.NotNil(t, img)
-
     } else {
         require.Error(t, err, string(out))
         require.Contains(t, string(out), "Multi-platform build is not supported")
@@ -138,7 +138,6 @@ func ParseAnnotations(inp []string) (map[exptypes.AnnotationKey]string, error) {
             }
             annotations[ak] = v
         }
-
     }
     return annotations, nil
 }
@@ -41,7 +41,6 @@ func (c *Client) LoadImage(ctx context.Context, name string, status progress.Wri
     pr, pw := io.Pipe()
     done := make(chan struct{})
 
-    ctx, cancel := context.WithCancel(ctx)
     var w *waitingWriter
     w = &waitingWriter{
         PipeWriter: pw,
@@ -68,7 +67,6 @@
             }
         },
         done: done,
-        cancel: cancel,
     }
     return w, func() {
         pr.Close()
@@ -106,7 +104,6 @@ type waitingWriter struct {
     mu sync.Mutex
     err error
     done chan struct{}
-    cancel func()
 }
 
 func (w *waitingWriter) Write(dt []byte) (int, error) {
@@ -55,7 +55,7 @@ func fromReader(l progress.SubLogger, rc io.ReadCloser) error {
                 Started: &now,
             }
         }
-        timeDelta := time.Now().Sub(st.Timestamp)
+        timeDelta := time.Since(st.Timestamp)
         if timeDelta < minTimeDelta {
             continue
         }
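Note: the change above is the gosimple S1012 rewrite; time.Since(t) is defined as time.Now().Sub(t). A quick standalone illustration, not code from the repository:

package main

import (
    "fmt"
    "time"
)

func main() {
    start := time.Now().Add(-150 * time.Millisecond)
    d1 := time.Now().Sub(start) // flagged by gosimple (S1012)
    d2 := time.Since(start)     // preferred spelling
    fmt.Println(d1.Round(time.Millisecond), d2.Round(time.Millisecond))
}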
@@ -7,6 +7,7 @@ import (
     "net/http"
     "testing"
 
+    "github.com/pkg/errors"
     "github.com/stretchr/testify/require"
 )
 
@@ -25,7 +26,7 @@ func WithAccessToken(token string) GitServeOpt {
 func GitServeHTTP(c *Git, t testing.TB, opts ...GitServeOpt) (url string) {
     t.Helper()
     gitUpdateServerInfo(c, t)
-    ctx, cancel := context.WithCancel(context.TODO())
+    ctx, cancel := context.WithCancelCause(context.TODO())
 
     gs := &gitServe{}
     for _, opt := range opts {
@@ -38,7 +39,7 @@ func GitServeHTTP(c *Git, t testing.TB, opts ...GitServeOpt) (url string) {
     name := "test.git"
     dir, err := c.GitDir()
     if err != nil {
-        cancel()
+        cancel(err)
     }
 
     var addr string
@@ -84,7 +85,7 @@ func GitServeHTTP(c *Git, t testing.TB, opts ...GitServeOpt) (url string) {
     <-ready
 
     t.Cleanup(func() {
-        cancel()
+        cancel(errors.Errorf("cleanup"))
         <-done
     })
     return fmt.Sprintf("http://%s/%s", addr, name)
@@ -47,7 +47,6 @@ func (f mockFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.Rea
         reader := io.NopCloser(strings.NewReader(desc.Annotations["test_content"]))
         return reader, nil
     }
-
 }
 
 func (r mockResolver) Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error) {
@@ -61,7 +61,7 @@ func (m *Map) Get(ctx context.Context, keys ...string) (map[string]interface{},
         m.mu.Unlock()
         select {
         case <-ctx.Done():
-            return nil, ctx.Err()
+            return nil, context.Cause(ctx)
         case <-ch:
             m.mu.Lock()
         }
@@ -34,7 +34,7 @@ func TestTimeout(t *testing.T) {
 
     m.Set("foo", "bar")
 
-    ctx, cancel := context.WithTimeout(context.TODO(), 100*time.Millisecond)
+    ctx, cancel := context.WithTimeoutCause(context.TODO(), 100*time.Millisecond, nil)
     defer cancel()
 
     _, err := m.Get(ctx, "bar")