From 347417ee123fa9673faa11347d0f079c49810aa1 Mon Sep 17 00:00:00 2001 From: Justin Chadwell Date: Mon, 9 Jan 2023 16:46:40 +0000 Subject: [PATCH 1/3] build: use copy for BuildWithResultHandler loop vars Signed-off-by: Justin Chadwell --- build/build.go | 227 ++++++++++++++++++++++++------------------------- 1 file changed, 112 insertions(+), 115 deletions(-) diff --git a/build/build.go b/build/build.go index 14193879..81025b03 100644 --- a/build/build.go +++ b/build/build.go @@ -1072,7 +1072,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s }) for i, dp := range dps { - so := *dp.so + i, dp, so := i, dp, *dp.so if multiDriver { for i, e := range so.Exports { switch e.Type { @@ -1101,144 +1101,141 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s } } - func(i int, dp driverPair, so client.SolveOpt) { - pw := progress.WithPrefix(w, k, multiTarget) + pw := progress.WithPrefix(w, k, multiTarget) - c := clients[dp.driverIndex] - eg.Go(func() error { - pw = progress.ResetTime(pw) - defer wg.Done() + c := clients[dp.driverIndex] + eg.Go(func() error { + pw = progress.ResetTime(pw) + defer wg.Done() - if err := waitContextDeps(ctx, dp.driverIndex, results, &so); err != nil { + if err := waitContextDeps(ctx, dp.driverIndex, results, &so); err != nil { + return err + } + + frontendInputs := make(map[string]*pb.Definition) + for key, st := range so.FrontendInputs { + def, err := st.Marshal(ctx) + if err != nil { return err } + frontendInputs[key] = def.ToPB() + } - frontendInputs := make(map[string]*pb.Definition) - for key, st := range so.FrontendInputs { - def, err := st.Marshal(ctx) - if err != nil { - return err - } - frontendInputs[key] = def.ToPB() - } + req := gateway.SolveRequest{ + Frontend: so.Frontend, + FrontendInputs: frontendInputs, + FrontendOpt: make(map[string]string), + } + for k, v := range so.FrontendAttrs { + req.FrontendOpt[k] = v + } + so.Frontend = "" + so.FrontendInputs = nil - req := gateway.SolveRequest{ - Frontend: so.Frontend, - FrontendInputs: frontendInputs, - FrontendOpt: make(map[string]string), - } - for k, v := range so.FrontendAttrs { - req.FrontendOpt[k] = v - } - so.Frontend = "" - so.FrontendInputs = nil + ch, done := progress.NewChannel(pw) + defer func() { <-done }() - ch, done := progress.NewChannel(pw) - defer func() { <-done }() - - cc := c - var printRes map[string][]byte - rr, err := c.Build(ctx, so, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { - var isFallback bool - var origErr error - for { - if opt.PrintFunc != nil { - if _, ok := req.FrontendOpt["frontend.caps"]; !ok { - req.FrontendOpt["frontend.caps"] = "moby.buildkit.frontend.subrequests+forward" - } else { - req.FrontendOpt["frontend.caps"] += ",moby.buildkit.frontend.subrequests+forward" - } - req.FrontendOpt["requestid"] = "frontend." 
+ opt.PrintFunc.Name - if isFallback { - req.FrontendOpt["build-arg:BUILDKIT_SYNTAX"] = printFallbackImage - } + cc := c + var printRes map[string][]byte + rr, err := c.Build(ctx, so, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + var isFallback bool + var origErr error + for { + if opt.PrintFunc != nil { + if _, ok := req.FrontendOpt["frontend.caps"]; !ok { + req.FrontendOpt["frontend.caps"] = "moby.buildkit.frontend.subrequests+forward" + } else { + req.FrontendOpt["frontend.caps"] += ",moby.buildkit.frontend.subrequests+forward" } - res, err := c.Solve(ctx, req) - if err != nil { - if origErr != nil { - return nil, err - } - var reqErr *errdefs.UnsupportedSubrequestError - if !isFallback { - if errors.As(err, &reqErr) { - switch reqErr.Name { - case "frontend.outline", "frontend.targets": - isFallback = true - origErr = err - continue - } - return nil, err - } - // buildkit v0.8 vendored in Docker 20.10 does not support typed errors - if strings.Contains(err.Error(), "unsupported request frontend.outline") || strings.Contains(err.Error(), "unsupported request frontend.targets") { + req.FrontendOpt["requestid"] = "frontend." + opt.PrintFunc.Name + if isFallback { + req.FrontendOpt["build-arg:BUILDKIT_SYNTAX"] = printFallbackImage + } + } + res, err := c.Solve(ctx, req) + if err != nil { + if origErr != nil { + return nil, err + } + var reqErr *errdefs.UnsupportedSubrequestError + if !isFallback { + if errors.As(err, &reqErr) { + switch reqErr.Name { + case "frontend.outline", "frontend.targets": isFallback = true origErr = err continue } + return nil, err + } + // buildkit v0.8 vendored in Docker 20.10 does not support typed errors + if strings.Contains(err.Error(), "unsupported request frontend.outline") || strings.Contains(err.Error(), "unsupported request frontend.targets") { + isFallback = true + origErr = err + continue } - return nil, err } - if opt.PrintFunc != nil { - printRes = res.Metadata - } - results.Set(resultKey(dp.driverIndex, k), res) - if resultHandleFunc != nil { - resultHandleFunc(dp.driverIndex, &ResultContext{cc, res}) - } - return res, nil + return nil, err } - }, ch) - if err != nil { - return err + if opt.PrintFunc != nil { + printRes = res.Metadata + } + results.Set(resultKey(dp.driverIndex, k), res) + if resultHandleFunc != nil { + resultHandleFunc(dp.driverIndex, &ResultContext{cc, res}) + } + return res, nil } - res[i] = rr + }, ch) + if err != nil { + return err + } + res[i] = rr - if rr.ExporterResponse == nil { - rr.ExporterResponse = map[string]string{} - } - for k, v := range printRes { - rr.ExporterResponse[k] = string(v) - } + if rr.ExporterResponse == nil { + rr.ExporterResponse = map[string]string{} + } + for k, v := range printRes { + rr.ExporterResponse[k] = string(v) + } - node := nodes[dp.driverIndex].Driver - if node.IsMobyDriver() { - for _, e := range so.Exports { - if e.Type == "moby" && e.Attrs["push"] != "" { - if ok, _ := strconv.ParseBool(e.Attrs["push"]); ok { - pushNames = e.Attrs["name"] - if pushNames == "" { - return errors.Errorf("tag is needed when pushing to registry") - } - pw := progress.ResetTime(pw) - pushList := strings.Split(pushNames, ",") - for _, name := range pushList { - if err := progress.Wrap(fmt.Sprintf("pushing %s with docker", name), pw.Write, func(l progress.SubLogger) error { - return pushWithMoby(ctx, node, name, l) - }); err != nil { - return err - } - } - remoteDigest, err := remoteDigestWithMoby(ctx, node, pushList[0]) - if err == nil && remoteDigest != "" { - // old 
daemons might not have containerimage.config.digest set - // in response so use containerimage.digest value for it if available - if _, ok := rr.ExporterResponse[exptypes.ExporterImageConfigDigestKey]; !ok { - if v, ok := rr.ExporterResponse[exptypes.ExporterImageDigestKey]; ok { - rr.ExporterResponse[exptypes.ExporterImageConfigDigestKey] = v - } - } - rr.ExporterResponse[exptypes.ExporterImageDigestKey] = remoteDigest - } else if err != nil { + node := nodes[dp.driverIndex].Driver + if node.IsMobyDriver() { + for _, e := range so.Exports { + if e.Type == "moby" && e.Attrs["push"] != "" { + if ok, _ := strconv.ParseBool(e.Attrs["push"]); ok { + pushNames = e.Attrs["name"] + if pushNames == "" { + return errors.Errorf("tag is needed when pushing to registry") + } + pw := progress.ResetTime(pw) + pushList := strings.Split(pushNames, ",") + for _, name := range pushList { + if err := progress.Wrap(fmt.Sprintf("pushing %s with docker", name), pw.Write, func(l progress.SubLogger) error { + return pushWithMoby(ctx, node, name, l) + }); err != nil { return err } } + remoteDigest, err := remoteDigestWithMoby(ctx, node, pushList[0]) + if err == nil && remoteDigest != "" { + // old daemons might not have containerimage.config.digest set + // in response so use containerimage.digest value for it if available + if _, ok := rr.ExporterResponse[exptypes.ExporterImageConfigDigestKey]; !ok { + if v, ok := rr.ExporterResponse[exptypes.ExporterImageDigestKey]; ok { + rr.ExporterResponse[exptypes.ExporterImageConfigDigestKey] = v + } + } + rr.ExporterResponse[exptypes.ExporterImageDigestKey] = remoteDigest + } else if err != nil { + return err + } } } } - return nil - }) - - }(i, dp, so) + } + return nil + }) } return nil From 1180d919f57a05fbeaac30790ffed9b620a8f8f3 Mon Sep 17 00:00:00 2001 From: Justin Chadwell Date: Mon, 9 Jan 2023 16:48:29 +0000 Subject: [PATCH 2/3] build: reorder error group funcs Signed-off-by: Justin Chadwell --- build/build.go | 234 ++++++++++++++++++++++++------------------------- 1 file changed, 117 insertions(+), 117 deletions(-) diff --git a/build/build.go b/build/build.go index 81025b03..b4c0d246 100644 --- a/build/build.go +++ b/build/build.go @@ -954,123 +954,6 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s var pushNames string var insecurePush bool - eg.Go(func() (err error) { - defer func() { - if span != nil { - tracing.FinishWithError(span, err) - } - }() - pw := progress.WithPrefix(w, "default", false) - wg.Wait() - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - respMu.Lock() - resp[k] = res[0] - respMu.Unlock() - if len(res) == 1 { - dgst := res[0].ExporterResponse[exptypes.ExporterImageDigestKey] - if v, ok := res[0].ExporterResponse[exptypes.ExporterImageConfigDigestKey]; ok { - dgst = v - } - if opt.ImageIDFile != "" { - return os.WriteFile(opt.ImageIDFile, []byte(dgst), 0644) - } - return nil - } - - if pushNames != "" { - progress.Write(pw, fmt.Sprintf("merging manifest list %s", pushNames), func() error { - descs := make([]specs.Descriptor, 0, len(res)) - - for _, r := range res { - s, ok := r.ExporterResponse[exptypes.ExporterImageDigestKey] - if ok { - descs = append(descs, specs.Descriptor{ - Digest: digest.Digest(s), - MediaType: images.MediaTypeDockerSchema2ManifestList, - Size: -1, - }) - } - } - if len(descs) > 0 { - var imageopt imagetools.Opt - for _, dp := range dps { - imageopt = nodes[dp.driverIndex].ImageOpt - break - } - names := strings.Split(pushNames, ",") - - if insecurePush { - 
insecureTrue := true - httpTrue := true - nn, err := reference.ParseNormalizedNamed(names[0]) - if err != nil { - return err - } - imageopt.RegistryConfig = map[string]resolver.RegistryConfig{ - reference.Domain(nn): { - Insecure: &insecureTrue, - PlainHTTP: &httpTrue, - }, - } - } - - itpull := imagetools.New(imageopt) - - ref, err := reference.ParseNormalizedNamed(names[0]) - if err != nil { - return err - } - ref = reference.TagNameOnly(ref) - - srcs := make([]*imagetools.Source, len(descs)) - for i, desc := range descs { - srcs[i] = &imagetools.Source{ - Desc: desc, - Ref: ref, - } - } - - dt, desc, err := itpull.Combine(ctx, srcs) - if err != nil { - return err - } - if opt.ImageIDFile != "" { - if err := os.WriteFile(opt.ImageIDFile, []byte(desc.Digest), 0644); err != nil { - return err - } - } - - itpush := imagetools.New(imageopt) - - for _, n := range names { - nn, err := reference.ParseNormalizedNamed(n) - if err != nil { - return err - } - if err := itpush.Push(ctx, nn, desc, dt); err != nil { - return err - } - } - - respMu.Lock() - resp[k] = &client.SolveResponse{ - ExporterResponse: map[string]string{ - "containerimage.digest": desc.Digest.String(), - }, - } - respMu.Unlock() - } - return nil - }) - } - return nil - }) - for i, dp := range dps { i, dp, so := i, dp, *dp.so if multiDriver { @@ -1238,6 +1121,123 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s }) } + eg.Go(func() (err error) { + defer func() { + if span != nil { + tracing.FinishWithError(span, err) + } + }() + pw := progress.WithPrefix(w, "default", false) + wg.Wait() + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + respMu.Lock() + resp[k] = res[0] + respMu.Unlock() + if len(res) == 1 { + dgst := res[0].ExporterResponse[exptypes.ExporterImageDigestKey] + if v, ok := res[0].ExporterResponse[exptypes.ExporterImageConfigDigestKey]; ok { + dgst = v + } + if opt.ImageIDFile != "" { + return os.WriteFile(opt.ImageIDFile, []byte(dgst), 0644) + } + return nil + } + + if pushNames != "" { + progress.Write(pw, fmt.Sprintf("merging manifest list %s", pushNames), func() error { + descs := make([]specs.Descriptor, 0, len(res)) + + for _, r := range res { + s, ok := r.ExporterResponse[exptypes.ExporterImageDigestKey] + if ok { + descs = append(descs, specs.Descriptor{ + Digest: digest.Digest(s), + MediaType: images.MediaTypeDockerSchema2ManifestList, + Size: -1, + }) + } + } + if len(descs) > 0 { + var imageopt imagetools.Opt + for _, dp := range dps { + imageopt = nodes[dp.driverIndex].ImageOpt + break + } + names := strings.Split(pushNames, ",") + + if insecurePush { + insecureTrue := true + httpTrue := true + nn, err := reference.ParseNormalizedNamed(names[0]) + if err != nil { + return err + } + imageopt.RegistryConfig = map[string]resolver.RegistryConfig{ + reference.Domain(nn): { + Insecure: &insecureTrue, + PlainHTTP: &httpTrue, + }, + } + } + + itpull := imagetools.New(imageopt) + + ref, err := reference.ParseNormalizedNamed(names[0]) + if err != nil { + return err + } + ref = reference.TagNameOnly(ref) + + srcs := make([]*imagetools.Source, len(descs)) + for i, desc := range descs { + srcs[i] = &imagetools.Source{ + Desc: desc, + Ref: ref, + } + } + + dt, desc, err := itpull.Combine(ctx, srcs) + if err != nil { + return err + } + if opt.ImageIDFile != "" { + if err := os.WriteFile(opt.ImageIDFile, []byte(desc.Digest), 0644); err != nil { + return err + } + } + + itpush := imagetools.New(imageopt) + + for _, n := range names { + nn, err := 
reference.ParseNormalizedNamed(n)
+ if err != nil {
+ return err
+ }
+ if err := itpush.Push(ctx, nn, desc, dt); err != nil {
+ return err
+ }
+ }
+
+ respMu.Lock()
+ resp[k] = &client.SolveResponse{
+ ExporterResponse: map[string]string{
+ "containerimage.digest": desc.Digest.String(),
+ },
+ }
+ respMu.Unlock()
+ }
+ return nil
+ })
+ }
+ return nil
+ })
+
 return nil
 }(k)
 if err != nil {

From 8b7aa1a168c303521cfb6613c52480cd71f1ef20 Mon Sep 17 00:00:00 2001
From: Justin Chadwell
Date: Tue, 10 Jan 2023 11:02:26 +0000
Subject: [PATCH 3/3] build: create error group per opt

Using the errgroup synchronization primitive, we can avoid needing to
create a separate wait group.

This allows us to sidestep the issue where the wait group could be
completed while the build invocation functions had not yet terminated:
if one of those functions terminated with an error, it was possible to
hit a race condition where the result handling code would begin
executing despite the error.

The refactor to a separate error group, which more elegantly handles
function returns and errors, ensures that we can't encounter this
issue.

Signed-off-by: Justin Chadwell
---
 build/build.go | 15 ++++++---------
 1 file changed, 6 insertions(+), 9 deletions(-)

diff --git a/build/build.go b/build/build.go
index b4c0d246..f91d8008 100644
--- a/build/build.go
+++ b/build/build.go
@@ -946,10 +946,10 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 if multiTarget {
 span, ctx = tracing.StartSpan(ctx, k)
 }
+ baseCtx := ctx

 res := make([]*client.SolveResponse, len(dps))
- wg := &sync.WaitGroup{}
- wg.Add(len(dps))
+ eg2, ctx := errgroup.WithContext(ctx)

 var pushNames string
 var insecurePush bool
@@ -987,9 +987,8 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 pw := progress.WithPrefix(w, k, multiTarget)

 c := clients[dp.driverIndex]
- eg.Go(func() error {
+ eg2.Go(func() error {
 pw = progress.ResetTime(pw)
- defer wg.Done()

 if err := waitContextDeps(ctx, dp.driverIndex, results, &so); err != nil {
 return err
@@ -1122,17 +1121,15 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 }

 eg.Go(func() (err error) {
+ ctx := baseCtx
 defer func() {
 if span != nil {
 tracing.FinishWithError(span, err)
 }
 }()
 pw := progress.WithPrefix(w, "default", false)
- wg.Wait()
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
+ if err := eg2.Wait(); err != nil {
+ return err
+ }

 respMu.Lock()
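
A note on the loop-variable copies in patch 1: at the time of this series, Go
reused a single variable across all iterations of a range loop, so goroutines
launched from the loop body observed whatever values the variables held when
they finally ran, usually the last iteration's. The shadowing assignment
`i, dp, so := i, dp, *dp.so` gives each iteration its own copies, replacing the
older idiom of passing the values through an immediately invoked closure. Below
is a minimal standalone sketch of the pitfall and the fix; it is illustrative
only, not buildx code, and Go 1.22 later made range variables per-iteration by
default.

package main

import (
	"fmt"
	"sync"
)

func main() {
	items := []string{"a", "b", "c"}
	var wg sync.WaitGroup

	// Pitfall (pre-Go 1.22): every goroutine captures the same "it"
	// variable, so they typically all print the final element.
	for _, it := range items {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println("shared:", it)
		}()
	}
	wg.Wait()

	// The fix patch 1 uses: shadow the loop variable with a
	// per-iteration copy before any goroutine captures it,
	// equivalent to `i, dp, so := i, dp, *dp.so` in the diff.
	for _, it := range items {
		it := it // fresh copy for this iteration
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println("copy:", it)
		}()
	}
	wg.Wait()
}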
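
And on patch 3: a sync.WaitGroup only signals that its counter reached zero, so
the collector goroutine had to check ctx.Done() separately and could start
handling results in the window between a build function's deferred wg.Done()
and its error actually being recorded. errgroup.Group.Wait blocks until every
registered function has returned and surfaces the first error, closing that
window. A minimal sketch of the pattern (illustrative only, not the buildx
code; the failing-goroutine setup is invented for the example):

package main

import (
	"context"
	"errors"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	eg, ctx := errgroup.WithContext(context.Background())

	for i := 0; i < 3; i++ {
		i := i // per-iteration copy, as in patch 1
		eg.Go(func() error {
			if i == 1 {
				// Returning an error cancels ctx for the siblings.
				return errors.New("solve failed")
			}
			<-ctx.Done() // siblings observe the cancellation
			return nil
		})
	}

	// Wait returns only once every function has returned, and it
	// yields the first error, so the result handling below can never
	// start while a build goroutine is still in the middle of failing.
	if err := eg.Wait(); err != nil {
		fmt.Println("skipping result handling:", err)
		return
	}
	fmt.Println("all builds succeeded; merge results here")
}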