mirror of https://github.com/docker/buildx.git
Merge pull request #2551 from crazy-max/metadata-warnings-2
build: opt to set progress warnings in response
This commit is contained in:
commit
d8c9ebde1f
|
@ -84,6 +84,8 @@ jobs:
|
|||
endpoint: tcp://localhost:1234
|
||||
- driver: docker-container
|
||||
metadata-provenance: max
|
||||
- driver: docker-container
|
||||
metadata-warnings: true
|
||||
exclude:
|
||||
- driver: docker
|
||||
multi-node: mnode-true
|
||||
|
@ -134,6 +136,9 @@ jobs:
|
|||
if [ -n "${{ matrix.metadata-provenance }}" ]; then
|
||||
echo "BUILDX_METADATA_PROVENANCE=${{ matrix.metadata-provenance }}" >> $GITHUB_ENV
|
||||
fi
|
||||
if [ -n "${{ matrix.metadata-warnings }}" ]; then
|
||||
echo "BUILDX_METADATA_WARNINGS=${{ matrix.metadata-warnings }}" >> $GITHUB_ENV
|
||||
fi
|
||||
-
|
||||
name: Install k3s
|
||||
if: matrix.driver == 'kubernetes'
|
||||
|
|
|
@ -22,6 +22,7 @@ import (
|
|||
"github.com/docker/buildx/util/progress"
|
||||
"github.com/docker/buildx/util/tracing"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/moby/buildkit/client"
|
||||
"github.com/moby/buildkit/identity"
|
||||
"github.com/moby/buildkit/util/progress/progressui"
|
||||
"github.com/pkg/errors"
|
||||
|
@ -130,15 +131,30 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
|
|||
return err
|
||||
}
|
||||
|
||||
var resp map[string]*client.SolveResponse
|
||||
|
||||
defer func() {
|
||||
if printer != nil {
|
||||
err1 := printer.Wait()
|
||||
if err == nil {
|
||||
err = err1
|
||||
}
|
||||
if err == nil && progressMode != progressui.QuietMode && progressMode != progressui.RawJSONMode {
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if progressMode != progressui.QuietMode && progressMode != progressui.RawJSONMode {
|
||||
desktop.PrintBuildDetails(os.Stderr, printer.BuildRefs(), term)
|
||||
}
|
||||
if resp != nil && len(in.metadataFile) > 0 {
|
||||
dt := make(map[string]interface{})
|
||||
for t, r := range resp {
|
||||
dt[t] = decodeExporterResponse(r.ExporterResponse)
|
||||
}
|
||||
if warnings := printer.Warnings(); len(warnings) > 0 && confutil.MetadataWarningsEnabled() {
|
||||
dt["buildx.build.warnings"] = warnings
|
||||
}
|
||||
err = writeMetadataFile(in.metadataFile, dt)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
|
@ -229,22 +245,12 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
|
|||
return err
|
||||
}
|
||||
|
||||
resp, err := build.Build(ctx, nodes, bo, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), printer)
|
||||
resp, err = build.Build(ctx, nodes, bo, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), printer)
|
||||
if err != nil {
|
||||
return wrapBuildError(err, true)
|
||||
}
|
||||
|
||||
if len(in.metadataFile) > 0 {
|
||||
dt := make(map[string]interface{})
|
||||
for t, r := range resp {
|
||||
dt[t] = decodeExporterResponse(r.ExporterResponse)
|
||||
}
|
||||
if err := writeMetadataFile(in.metadataFile, dt); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
return
|
||||
}
|
||||
|
||||
func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
||||
|
|
|
@ -374,7 +374,11 @@ func runBuild(ctx context.Context, dockerCli command.Cli, options buildOptions)
|
|||
return err
|
||||
}
|
||||
} else if options.metadataFile != "" {
|
||||
if err := writeMetadataFile(options.metadataFile, decodeExporterResponse(resp.ExporterResponse)); err != nil {
|
||||
dt := decodeExporterResponse(resp.ExporterResponse)
|
||||
if warnings := printer.Warnings(); len(warnings) > 0 && confutil.MetadataWarningsEnabled() {
|
||||
dt["buildx.build.warnings"] = warnings
|
||||
}
|
||||
if err := writeMetadataFile(options.metadataFile, dt); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
|
|
@ -119,6 +119,7 @@ $ cat metadata.json
|
|||
|
||||
```json
|
||||
{
|
||||
"buildx.build.warnings": {},
|
||||
"db": {
|
||||
"buildx.build.provenance": {},
|
||||
"buildx.build.ref": "mybuilder/mybuilder0/0fjb6ubs52xx3vygf6fgdl611",
|
||||
|
@ -161,6 +162,12 @@ $ cat metadata.json
|
|||
> * `max` sets full provenance.
|
||||
> * `disabled`, `false` or `0` does not set any provenance.
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> Build warnings (`buildx.build.warnings`) are not included by default. Set the
|
||||
> `BUILDX_METADATA_WARNINGS` environment variable to `1` or `true` to
|
||||
> include them.
|
||||
|
||||
### <a name="no-cache"></a> Don't use cache when building the image (--no-cache)
|
||||
|
||||
Same as `build --no-cache`. Don't use cache when building the image.
|
||||
|
|
|
@ -330,6 +330,7 @@ $ cat metadata.json
|
|||
{
|
||||
"buildx.build.provenance": {},
|
||||
"buildx.build.ref": "mybuilder/mybuilder0/0fjb6ubs52xx3vygf6fgdl611",
|
||||
"buildx.build.warnings": {},
|
||||
"containerimage.config.digest": "sha256:2937f66a9722f7f4a2df583de2f8cb97fc9196059a410e7f00072fc918930e66",
|
||||
"containerimage.descriptor": {
|
||||
"annotations": {
|
||||
|
@ -353,6 +354,12 @@ $ cat metadata.json
|
|||
> * `max` sets full provenance.
|
||||
> * `disabled`, `false` or `0` does not set any provenance.
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> Build warnings (`buildx.build.warnings`) are not included by default. Set the
|
||||
> `BUILDX_METADATA_WARNINGS` environment variable to `1` or `true` to
|
||||
> include them.
|
||||
|
||||
### <a name="no-cache-filter"></a> Ignore build cache for specific stages (--no-cache-filter)
|
||||
|
||||
The `--no-cache-filter` lets you specify one or more stages of a multi-stage
|
||||
|
|
1
go.mod
1
go.mod
|
@ -27,6 +27,7 @@ require (
|
|||
github.com/hashicorp/go-cty-funcs v0.0.0-20230405223818-a090f58aa992
|
||||
github.com/hashicorp/hcl/v2 v2.20.1
|
||||
github.com/in-toto/in-toto-golang v0.5.0
|
||||
github.com/mitchellh/hashstructure/v2 v2.0.2
|
||||
github.com/moby/buildkit v0.14.1
|
||||
github.com/moby/sys/mountinfo v0.7.1
|
||||
github.com/moby/sys/signal v0.7.0
|
||||
|
|
2
go.sum
2
go.sum
|
@ -295,6 +295,8 @@ github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WT
|
|||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 h1:DpOJ2HYzCv8LZP15IdmG+YdwD2luVPHITV96TkirNBM=
|
||||
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
|
||||
github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4=
|
||||
github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE=
|
||||
github.com/mitchellh/mapstructure v0.0.0-20150613213606-2caf8efc9366/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
|
|
|
@ -109,21 +109,21 @@ buildxCmd inspect --bootstrap --builder="${builderName}"
|
|||
|
||||
# create dockerfile
|
||||
cat > "${dockerfile}" <<EOL
|
||||
FROM busybox as build
|
||||
fRom busybox as build
|
||||
ARG TARGETPLATFORM
|
||||
ARG BUILDPLATFORM
|
||||
RUN echo "I am running on \$BUILDPLATFORM, building for \$TARGETPLATFORM" > /log
|
||||
|
||||
FROM busybox AS log
|
||||
FROM busybox As log
|
||||
COPY --from=build /log /log
|
||||
RUN cat /log
|
||||
RUN uname -a
|
||||
|
||||
FROM busybox AS hello
|
||||
FROm busybox AS hello
|
||||
RUN echo hello > /hello
|
||||
|
||||
FROM scratch
|
||||
COPY --from=log /log /log
|
||||
CoPY --from=log /log /log
|
||||
COPY --from=hello /hello /hello
|
||||
EOL
|
||||
|
||||
|
|
144
tests/bake.go
144
tests/bake.go
|
@ -9,6 +9,7 @@ import (
|
|||
|
||||
"github.com/containerd/continuity/fs/fstest"
|
||||
"github.com/docker/buildx/util/gitutil"
|
||||
"github.com/moby/buildkit/client"
|
||||
"github.com/moby/buildkit/identity"
|
||||
provenancetypes "github.com/moby/buildkit/solver/llbsolver/provenance/types"
|
||||
"github.com/moby/buildkit/util/contentutil"
|
||||
|
@ -42,7 +43,9 @@ var bakeTests = []func(t *testing.T, sb integration.Sandbox){
|
|||
testBakeEmpty,
|
||||
testBakeShmSize,
|
||||
testBakeUlimits,
|
||||
testBakeMetadata,
|
||||
testBakeMetadataProvenance,
|
||||
testBakeMetadataWarnings,
|
||||
testBakeMetadataWarningsDedup,
|
||||
testBakeMultiExporters,
|
||||
testBakeLoadPush,
|
||||
}
|
||||
|
@ -633,19 +636,22 @@ target "default" {
|
|||
require.Contains(t, string(dt), `1024`)
|
||||
}
|
||||
|
||||
func testBakeMetadata(t *testing.T, sb integration.Sandbox) {
|
||||
func testBakeMetadataProvenance(t *testing.T, sb integration.Sandbox) {
|
||||
t.Run("default", func(t *testing.T) {
|
||||
bakeMetadataProvenance(t, sb, "")
|
||||
})
|
||||
t.Run("max", func(t *testing.T) {
|
||||
bakeMetadata(t, sb, "max")
|
||||
bakeMetadataProvenance(t, sb, "max")
|
||||
})
|
||||
t.Run("min", func(t *testing.T) {
|
||||
bakeMetadata(t, sb, "min")
|
||||
bakeMetadataProvenance(t, sb, "min")
|
||||
})
|
||||
t.Run("disabled", func(t *testing.T) {
|
||||
bakeMetadata(t, sb, "disabled")
|
||||
bakeMetadataProvenance(t, sb, "disabled")
|
||||
})
|
||||
}
|
||||
|
||||
func bakeMetadata(t *testing.T, sb integration.Sandbox, metadataMode string) {
|
||||
func bakeMetadataProvenance(t *testing.T, sb integration.Sandbox, metadataMode string) {
|
||||
dockerfile := []byte(`
|
||||
FROM scratch
|
||||
COPY foo /foo
|
||||
|
@ -676,7 +682,7 @@ target "default" {
|
|||
withEnv("BUILDX_METADATA_PROVENANCE="+metadataMode),
|
||||
)
|
||||
out, err := cmd.CombinedOutput()
|
||||
require.NoError(t, err, out)
|
||||
require.NoError(t, err, string(out))
|
||||
|
||||
dt, err := os.ReadFile(filepath.Join(dirDest, "md.json"))
|
||||
require.NoError(t, err)
|
||||
|
@ -706,6 +712,130 @@ target "default" {
|
|||
require.Equal(t, provenancetypes.BuildKitBuildType, prv.BuildType)
|
||||
}
|
||||
|
||||
func testBakeMetadataWarnings(t *testing.T, sb integration.Sandbox) {
|
||||
t.Run("default", func(t *testing.T) {
|
||||
bakeMetadataWarnings(t, sb, "")
|
||||
})
|
||||
t.Run("true", func(t *testing.T) {
|
||||
bakeMetadataWarnings(t, sb, "true")
|
||||
})
|
||||
t.Run("false", func(t *testing.T) {
|
||||
bakeMetadataWarnings(t, sb, "false")
|
||||
})
|
||||
}
|
||||
|
||||
func bakeMetadataWarnings(t *testing.T, sb integration.Sandbox, mode string) {
|
||||
dockerfile := []byte(`
|
||||
frOM busybox as base
|
||||
cOpy Dockerfile .
|
||||
from scratch
|
||||
COPy --from=base \
|
||||
/Dockerfile \
|
||||
/
|
||||
`)
|
||||
bakefile := []byte(`
|
||||
target "default" {
|
||||
}
|
||||
`)
|
||||
dir := tmpdir(
|
||||
t,
|
||||
fstest.CreateFile("docker-bake.hcl", bakefile, 0600),
|
||||
fstest.CreateFile("Dockerfile", dockerfile, 0600),
|
||||
)
|
||||
|
||||
dirDest := t.TempDir()
|
||||
|
||||
cmd := buildxCmd(
|
||||
sb,
|
||||
withDir(dir),
|
||||
withArgs("bake", "--metadata-file", filepath.Join(dirDest, "md.json"), "--set", "*.output=type=cacheonly"),
|
||||
withEnv("BUILDX_METADATA_WARNINGS="+mode),
|
||||
)
|
||||
out, err := cmd.CombinedOutput()
|
||||
require.NoError(t, err, string(out))
|
||||
|
||||
dt, err := os.ReadFile(filepath.Join(dirDest, "md.json"))
|
||||
require.NoError(t, err)
|
||||
|
||||
type mdT struct {
|
||||
BuildWarnings []client.VertexWarning `json:"buildx.build.warnings"`
|
||||
Default struct {
|
||||
BuildRef string `json:"buildx.build.ref"`
|
||||
} `json:"default"`
|
||||
}
|
||||
var md mdT
|
||||
err = json.Unmarshal(dt, &md)
|
||||
require.NoError(t, err, string(dt))
|
||||
|
||||
require.NotEmpty(t, md.Default.BuildRef, string(dt))
|
||||
if mode == "" || mode == "false" {
|
||||
require.Empty(t, md.BuildWarnings, string(dt))
|
||||
return
|
||||
}
|
||||
|
||||
skipNoCompatBuildKit(t, sb, ">= 0.14.0-0", "lint")
|
||||
require.Len(t, md.BuildWarnings, 3, string(dt))
|
||||
}
|
||||
|
||||
func testBakeMetadataWarningsDedup(t *testing.T, sb integration.Sandbox) {
|
||||
dockerfile := []byte(`
|
||||
frOM busybox as base
|
||||
cOpy Dockerfile .
|
||||
from scratch
|
||||
COPy --from=base \
|
||||
/Dockerfile \
|
||||
/
|
||||
`)
|
||||
bakefile := []byte(`
|
||||
group "default" {
|
||||
targets = ["base", "def"]
|
||||
}
|
||||
target "base" {
|
||||
target = "base"
|
||||
}
|
||||
target "def" {
|
||||
}
|
||||
`)
|
||||
dir := tmpdir(
|
||||
t,
|
||||
fstest.CreateFile("docker-bake.hcl", bakefile, 0600),
|
||||
fstest.CreateFile("Dockerfile", dockerfile, 0600),
|
||||
)
|
||||
|
||||
dirDest := t.TempDir()
|
||||
|
||||
cmd := buildxCmd(
|
||||
sb,
|
||||
withDir(dir),
|
||||
withArgs("bake", "--metadata-file", filepath.Join(dirDest, "md.json"), "--set", "*.output=type=cacheonly"),
|
||||
withEnv("BUILDX_METADATA_WARNINGS=true"),
|
||||
)
|
||||
out, err := cmd.CombinedOutput()
|
||||
require.NoError(t, err, string(out))
|
||||
|
||||
dt, err := os.ReadFile(filepath.Join(dirDest, "md.json"))
|
||||
require.NoError(t, err)
|
||||
|
||||
type mdT struct {
|
||||
BuildWarnings []client.VertexWarning `json:"buildx.build.warnings"`
|
||||
Base struct {
|
||||
BuildRef string `json:"buildx.build.ref"`
|
||||
} `json:"base"`
|
||||
Def struct {
|
||||
BuildRef string `json:"buildx.build.ref"`
|
||||
} `json:"def"`
|
||||
}
|
||||
var md mdT
|
||||
err = json.Unmarshal(dt, &md)
|
||||
require.NoError(t, err, string(dt))
|
||||
|
||||
require.NotEmpty(t, md.Base.BuildRef, string(dt))
|
||||
require.NotEmpty(t, md.Def.BuildRef, string(dt))
|
||||
|
||||
skipNoCompatBuildKit(t, sb, ">= 0.14.0-0", "lint")
|
||||
require.Len(t, md.BuildWarnings, 3, string(dt))
|
||||
}
|
||||
|
||||
func testBakeMultiExporters(t *testing.T, sb integration.Sandbox) {
|
||||
if !isDockerContainerWorker(sb) {
|
||||
t.Skip("only testing with docker-container worker")
|
||||
|
|
|
@ -16,6 +16,7 @@ import (
|
|||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/containerd/continuity/fs/fstest"
|
||||
"github.com/creack/pty"
|
||||
"github.com/moby/buildkit/client"
|
||||
"github.com/moby/buildkit/frontend/subrequests/lint"
|
||||
"github.com/moby/buildkit/frontend/subrequests/outline"
|
||||
"github.com/moby/buildkit/frontend/subrequests/targets"
|
||||
|
@ -59,7 +60,8 @@ var buildTests = []func(t *testing.T, sb integration.Sandbox){
|
|||
testBuildNetworkModeBridge,
|
||||
testBuildShmSize,
|
||||
testBuildUlimit,
|
||||
testBuildMetadata,
|
||||
testBuildMetadataProvenance,
|
||||
testBuildMetadataWarnings,
|
||||
testBuildMultiExporters,
|
||||
testBuildLoadPush,
|
||||
testBuildSecret,
|
||||
|
@ -560,19 +562,22 @@ COPY --from=build /ulimit /
|
|||
require.Contains(t, string(dt), `1024`)
|
||||
}
|
||||
|
||||
func testBuildMetadata(t *testing.T, sb integration.Sandbox) {
|
||||
func testBuildMetadataProvenance(t *testing.T, sb integration.Sandbox) {
|
||||
t.Run("default", func(t *testing.T) {
|
||||
buildMetadataProvenance(t, sb, "")
|
||||
})
|
||||
t.Run("max", func(t *testing.T) {
|
||||
buildMetadata(t, sb, "max")
|
||||
buildMetadataProvenance(t, sb, "max")
|
||||
})
|
||||
t.Run("min", func(t *testing.T) {
|
||||
buildMetadata(t, sb, "min")
|
||||
buildMetadataProvenance(t, sb, "min")
|
||||
})
|
||||
t.Run("disabled", func(t *testing.T) {
|
||||
buildMetadata(t, sb, "disabled")
|
||||
buildMetadataProvenance(t, sb, "disabled")
|
||||
})
|
||||
}
|
||||
|
||||
func buildMetadata(t *testing.T, sb integration.Sandbox, metadataMode string) {
|
||||
func buildMetadataProvenance(t *testing.T, sb integration.Sandbox, metadataMode string) {
|
||||
dir := createTestProject(t)
|
||||
dirDest := t.TempDir()
|
||||
|
||||
|
@ -616,6 +621,61 @@ func buildMetadata(t *testing.T, sb integration.Sandbox, metadataMode string) {
|
|||
require.Equal(t, provenancetypes.BuildKitBuildType, prv.BuildType)
|
||||
}
|
||||
|
||||
func testBuildMetadataWarnings(t *testing.T, sb integration.Sandbox) {
|
||||
t.Run("default", func(t *testing.T) {
|
||||
buildMetadataWarnings(t, sb, "")
|
||||
})
|
||||
t.Run("true", func(t *testing.T) {
|
||||
buildMetadataWarnings(t, sb, "true")
|
||||
})
|
||||
t.Run("false", func(t *testing.T) {
|
||||
buildMetadataWarnings(t, sb, "false")
|
||||
})
|
||||
}
|
||||
|
||||
func buildMetadataWarnings(t *testing.T, sb integration.Sandbox, mode string) {
|
||||
dockerfile := []byte(`
|
||||
frOM busybox as base
|
||||
cOpy Dockerfile .
|
||||
from scratch
|
||||
COPy --from=base \
|
||||
/Dockerfile \
|
||||
/
|
||||
`)
|
||||
dir := tmpdir(
|
||||
t,
|
||||
fstest.CreateFile("Dockerfile", dockerfile, 0600),
|
||||
)
|
||||
|
||||
cmd := buildxCmd(
|
||||
sb,
|
||||
withArgs("build", "--metadata-file", filepath.Join(dir, "md.json"), dir),
|
||||
withEnv("BUILDX_METADATA_WARNINGS="+mode),
|
||||
)
|
||||
out, err := cmd.CombinedOutput()
|
||||
require.NoError(t, err, string(out))
|
||||
|
||||
dt, err := os.ReadFile(filepath.Join(dir, "md.json"))
|
||||
require.NoError(t, err)
|
||||
|
||||
type mdT struct {
|
||||
BuildRef string `json:"buildx.build.ref"`
|
||||
BuildWarnings []client.VertexWarning `json:"buildx.build.warnings"`
|
||||
}
|
||||
var md mdT
|
||||
err = json.Unmarshal(dt, &md)
|
||||
require.NoError(t, err, string(dt))
|
||||
|
||||
require.NotEmpty(t, md.BuildRef, string(dt))
|
||||
if mode == "" || mode == "false" {
|
||||
require.Empty(t, md.BuildWarnings, string(dt))
|
||||
return
|
||||
}
|
||||
|
||||
skipNoCompatBuildKit(t, sb, ">= 0.14.0-0", "lint")
|
||||
require.Len(t, md.BuildWarnings, 3, string(dt))
|
||||
}
|
||||
|
||||
func testBuildMultiExporters(t *testing.T, sb integration.Sandbox) {
|
||||
if !isDockerContainerWorker(sb) {
|
||||
t.Skip("only testing with docker-container worker")
|
||||
|
|
|
@ -39,3 +39,12 @@ func ParseMetadataProvenance(inp string) MetadataProvenanceMode {
|
|||
}
|
||||
return MetadataProvenanceModeMin
|
||||
}
|
||||
|
||||
// MetadataWarningsEnabled returns whether metadata warnings are enabled from
|
||||
// BUILDX_METADATA_WARNINGS environment variable (default false)
|
||||
func MetadataWarningsEnabled() bool {
|
||||
if ok, err := strconv.ParseBool(os.Getenv("BUILDX_METADATA_WARNINGS")); err == nil {
|
||||
return ok
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
|
||||
"github.com/containerd/console"
|
||||
"github.com/docker/buildx/util/logutil"
|
||||
"github.com/mitchellh/hashstructure/v2"
|
||||
"github.com/moby/buildkit/client"
|
||||
"github.com/moby/buildkit/util/progress/progressui"
|
||||
"github.com/opencontainers/go-digest"
|
||||
|
@ -58,7 +59,7 @@ func (p *Printer) Write(s *client.SolveStatus) {
|
|||
}
|
||||
|
||||
func (p *Printer) Warnings() []client.VertexWarning {
|
||||
return p.warnings
|
||||
return dedupWarnings(p.warnings)
|
||||
}
|
||||
|
||||
func (p *Printer) ValidateLogSource(dgst digest.Digest, v interface{}) bool {
|
||||
|
@ -184,3 +185,26 @@ func WithOnClose(onclose func()) PrinterOpt {
|
|||
opt.onclose = onclose
|
||||
}
|
||||
}
|
||||
|
||||
func dedupWarnings(inp []client.VertexWarning) []client.VertexWarning {
|
||||
m := make(map[uint64]client.VertexWarning)
|
||||
for _, w := range inp {
|
||||
wcp := w
|
||||
wcp.Vertex = ""
|
||||
if wcp.SourceInfo != nil {
|
||||
wcp.SourceInfo.Definition = nil
|
||||
}
|
||||
h, err := hashstructure.Hash(wcp, hashstructure.FormatV2, nil)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if _, ok := m[h]; !ok {
|
||||
m[h] = w
|
||||
}
|
||||
}
|
||||
res := make([]client.VertexWarning, 0, len(m))
|
||||
for _, w := range m {
|
||||
res = append(res, w)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
|
|
@ -0,0 +1,21 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2016 Mitchell Hashimoto
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
|
@ -0,0 +1,76 @@
|
|||
# hashstructure [![GoDoc](https://godoc.org/github.com/mitchellh/hashstructure?status.svg)](https://godoc.org/github.com/mitchellh/hashstructure)
|
||||
|
||||
hashstructure is a Go library for creating a unique hash value
|
||||
for arbitrary values in Go.
|
||||
|
||||
This can be used to key values in a hash (for use in a map, set, etc.)
|
||||
that are complex. The most common use case is comparing two values without
|
||||
sending data across the network, caching values locally (de-dup), and so on.
|
||||
|
||||
## Features
|
||||
|
||||
* Hash any arbitrary Go value, including complex types.
|
||||
|
||||
* Tag a struct field to ignore it and not affect the hash value.
|
||||
|
||||
* Tag a slice type struct field to treat it as a set where ordering
|
||||
doesn't affect the hash code but the field itself is still taken into
|
||||
account to create the hash value.
|
||||
|
||||
* Optionally, specify a custom hash function to optimize for speed, collision
|
||||
avoidance for your data set, etc.
|
||||
|
||||
* Optionally, hash the output of `.String()` on structs that implement fmt.Stringer,
|
||||
allowing effective hashing of time.Time
|
||||
|
||||
* Optionally, override the hashing process by implementing `Hashable`.
|
||||
|
||||
## Installation
|
||||
|
||||
Standard `go get`:
|
||||
|
||||
```
|
||||
$ go get github.com/mitchellh/hashstructure/v2
|
||||
```
|
||||
|
||||
**Note on v2:** It is highly recommended you use the "v2" release since this
|
||||
fixes some significant hash collisions issues from v1. In practice, we used
|
||||
v1 for many years in real projects at HashiCorp and never had issues, but it
|
||||
is highly dependent on the shape of the data you're hashing and how you use
|
||||
those hashes.
|
||||
|
||||
When using v2+, you can still generate weaker v1 hashes by using the
|
||||
`FormatV1` format when calling `Hash`.
|
||||
|
||||
## Usage & Example
|
||||
|
||||
For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/hashstructure).
|
||||
|
||||
A quick code example is shown below:
|
||||
|
||||
```go
|
||||
type ComplexStruct struct {
|
||||
Name string
|
||||
Age uint
|
||||
Metadata map[string]interface{}
|
||||
}
|
||||
|
||||
v := ComplexStruct{
|
||||
Name: "mitchellh",
|
||||
Age: 64,
|
||||
Metadata: map[string]interface{}{
|
||||
"car": true,
|
||||
"location": "California",
|
||||
"siblings": []string{"Bob", "John"},
|
||||
},
|
||||
}
|
||||
|
||||
hash, err := hashstructure.Hash(v, hashstructure.FormatV2, nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
fmt.Printf("%d", hash)
|
||||
// Output:
|
||||
// 2307517237273902113
|
||||
```
|
|
@ -0,0 +1,22 @@
|
|||
package hashstructure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// ErrNotStringer is returned when there's an error with hash:"string"
|
||||
type ErrNotStringer struct {
|
||||
Field string
|
||||
}
|
||||
|
||||
// Error implements error for ErrNotStringer
|
||||
func (ens *ErrNotStringer) Error() string {
|
||||
return fmt.Sprintf("hashstructure: %s has hash:\"string\" set, but does not implement fmt.Stringer", ens.Field)
|
||||
}
|
||||
|
||||
// ErrFormat is returned when an invalid format is given to the Hash function.
|
||||
type ErrFormat struct{}
|
||||
|
||||
func (*ErrFormat) Error() string {
|
||||
return "format must be one of the defined Format values in the hashstructure library"
|
||||
}
|
|
@ -0,0 +1,482 @@
|
|||
package hashstructure
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"hash"
|
||||
"hash/fnv"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
// HashOptions are options that are available for hashing.
|
||||
type HashOptions struct {
|
||||
// Hasher is the hash function to use. If this isn't set, it will
|
||||
// default to FNV.
|
||||
Hasher hash.Hash64
|
||||
|
||||
// TagName is the struct tag to look at when hashing the structure.
|
||||
// By default this is "hash".
|
||||
TagName string
|
||||
|
||||
// ZeroNil is flag determining if nil pointer should be treated equal
|
||||
// to a zero value of pointed type. By default this is false.
|
||||
ZeroNil bool
|
||||
|
||||
// IgnoreZeroValue is determining if zero value fields should be
|
||||
// ignored for hash calculation.
|
||||
IgnoreZeroValue bool
|
||||
|
||||
// SlicesAsSets assumes that a `set` tag is always present for slices.
|
||||
// Default is false (in which case the tag is used instead)
|
||||
SlicesAsSets bool
|
||||
|
||||
// UseStringer will attempt to use fmt.Stringer always. If the struct
|
||||
// doesn't implement fmt.Stringer, it'll fall back to trying usual tricks.
|
||||
// If this is true, and the "string" tag is also set, the tag takes
|
||||
// precedence (meaning that if the type doesn't implement fmt.Stringer, we
|
||||
// panic)
|
||||
UseStringer bool
|
||||
}
|
||||
|
||||
// Format specifies the hashing process used. Different formats typically
|
||||
// generate different hashes for the same value and have different properties.
|
||||
type Format uint
|
||||
|
||||
const (
|
||||
// To disallow the zero value
|
||||
formatInvalid Format = iota
|
||||
|
||||
// FormatV1 is the format used in v1.x of this library. This has the
|
||||
// downsides noted in issue #18 but allows simultaneous v1/v2 usage.
|
||||
FormatV1
|
||||
|
||||
// FormatV2 is the current recommended format and fixes the issues
|
||||
// noted in FormatV1.
|
||||
FormatV2
|
||||
|
||||
formatMax // so we can easily find the end
|
||||
)
|
||||
|
||||
// Hash returns the hash value of an arbitrary value.
|
||||
//
|
||||
// If opts is nil, then default options will be used. See HashOptions
|
||||
// for the default values. The same *HashOptions value cannot be used
|
||||
// concurrently. None of the values within a *HashOptions struct are
|
||||
// safe to read/write while hashing is being done.
|
||||
//
|
||||
// The "format" is required and must be one of the format values defined
|
||||
// by this library. You should probably just use "FormatV2". This allows
|
||||
// generated hashes uses alternate logic to maintain compatibility with
|
||||
// older versions.
|
||||
//
|
||||
// Notes on the value:
|
||||
//
|
||||
// * Unexported fields on structs are ignored and do not affect the
|
||||
// hash value.
|
||||
//
|
||||
// * Adding an exported field to a struct with the zero value will change
|
||||
// the hash value.
|
||||
//
|
||||
// For structs, the hashing can be controlled using tags. For example:
|
||||
//
|
||||
// struct {
|
||||
// Name string
|
||||
// UUID string `hash:"ignore"`
|
||||
// }
|
||||
//
|
||||
// The available tag values are:
|
||||
//
|
||||
// * "ignore" or "-" - The field will be ignored and not affect the hash code.
|
||||
//
|
||||
// * "set" - The field will be treated as a set, where ordering doesn't
|
||||
// affect the hash code. This only works for slices.
|
||||
//
|
||||
// * "string" - The field will be hashed as a string, only works when the
|
||||
// field implements fmt.Stringer
|
||||
//
|
||||
func Hash(v interface{}, format Format, opts *HashOptions) (uint64, error) {
|
||||
// Validate our format
|
||||
if format <= formatInvalid || format >= formatMax {
|
||||
return 0, &ErrFormat{}
|
||||
}
|
||||
|
||||
// Create default options
|
||||
if opts == nil {
|
||||
opts = &HashOptions{}
|
||||
}
|
||||
if opts.Hasher == nil {
|
||||
opts.Hasher = fnv.New64()
|
||||
}
|
||||
if opts.TagName == "" {
|
||||
opts.TagName = "hash"
|
||||
}
|
||||
|
||||
// Reset the hash
|
||||
opts.Hasher.Reset()
|
||||
|
||||
// Create our walker and walk the structure
|
||||
w := &walker{
|
||||
format: format,
|
||||
h: opts.Hasher,
|
||||
tag: opts.TagName,
|
||||
zeronil: opts.ZeroNil,
|
||||
ignorezerovalue: opts.IgnoreZeroValue,
|
||||
sets: opts.SlicesAsSets,
|
||||
stringer: opts.UseStringer,
|
||||
}
|
||||
return w.visit(reflect.ValueOf(v), nil)
|
||||
}
|
||||
|
||||
type walker struct {
|
||||
format Format
|
||||
h hash.Hash64
|
||||
tag string
|
||||
zeronil bool
|
||||
ignorezerovalue bool
|
||||
sets bool
|
||||
stringer bool
|
||||
}
|
||||
|
||||
type visitOpts struct {
|
||||
// Flags are a bitmask of flags to affect behavior of this visit
|
||||
Flags visitFlag
|
||||
|
||||
// Information about the struct containing this field
|
||||
Struct interface{}
|
||||
StructField string
|
||||
}
|
||||
|
||||
var timeType = reflect.TypeOf(time.Time{})
|
||||
|
||||
func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) {
|
||||
t := reflect.TypeOf(0)
|
||||
|
||||
// Loop since these can be wrapped in multiple layers of pointers
|
||||
// and interfaces.
|
||||
for {
|
||||
// If we have an interface, dereference it. We have to do this up
|
||||
// here because it might be a nil in there and the check below must
|
||||
// catch that.
|
||||
if v.Kind() == reflect.Interface {
|
||||
v = v.Elem()
|
||||
continue
|
||||
}
|
||||
|
||||
if v.Kind() == reflect.Ptr {
|
||||
if w.zeronil {
|
||||
t = v.Type().Elem()
|
||||
}
|
||||
v = reflect.Indirect(v)
|
||||
continue
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
// If it is nil, treat it like a zero.
|
||||
if !v.IsValid() {
|
||||
v = reflect.Zero(t)
|
||||
}
|
||||
|
||||
// Binary writing can use raw ints, we have to convert to
|
||||
// a sized-int, we'll choose the largest...
|
||||
switch v.Kind() {
|
||||
case reflect.Int:
|
||||
v = reflect.ValueOf(int64(v.Int()))
|
||||
case reflect.Uint:
|
||||
v = reflect.ValueOf(uint64(v.Uint()))
|
||||
case reflect.Bool:
|
||||
var tmp int8
|
||||
if v.Bool() {
|
||||
tmp = 1
|
||||
}
|
||||
v = reflect.ValueOf(tmp)
|
||||
}
|
||||
|
||||
k := v.Kind()
|
||||
|
||||
// We can shortcut numeric values by directly binary writing them
|
||||
if k >= reflect.Int && k <= reflect.Complex64 {
|
||||
// A direct hash calculation
|
||||
w.h.Reset()
|
||||
err := binary.Write(w.h, binary.LittleEndian, v.Interface())
|
||||
return w.h.Sum64(), err
|
||||
}
|
||||
|
||||
switch v.Type() {
|
||||
case timeType:
|
||||
w.h.Reset()
|
||||
b, err := v.Interface().(time.Time).MarshalBinary()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
err = binary.Write(w.h, binary.LittleEndian, b)
|
||||
return w.h.Sum64(), err
|
||||
}
|
||||
|
||||
switch k {
|
||||
case reflect.Array:
|
||||
var h uint64
|
||||
l := v.Len()
|
||||
for i := 0; i < l; i++ {
|
||||
current, err := w.visit(v.Index(i), nil)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
h = hashUpdateOrdered(w.h, h, current)
|
||||
}
|
||||
|
||||
return h, nil
|
||||
|
||||
case reflect.Map:
|
||||
var includeMap IncludableMap
|
||||
if opts != nil && opts.Struct != nil {
|
||||
if v, ok := opts.Struct.(IncludableMap); ok {
|
||||
includeMap = v
|
||||
}
|
||||
}
|
||||
|
||||
// Build the hash for the map. We do this by XOR-ing all the key
|
||||
// and value hashes. This makes it deterministic despite ordering.
|
||||
var h uint64
|
||||
for _, k := range v.MapKeys() {
|
||||
v := v.MapIndex(k)
|
||||
if includeMap != nil {
|
||||
incl, err := includeMap.HashIncludeMap(
|
||||
opts.StructField, k.Interface(), v.Interface())
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if !incl {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
kh, err := w.visit(k, nil)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
vh, err := w.visit(v, nil)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
fieldHash := hashUpdateOrdered(w.h, kh, vh)
|
||||
h = hashUpdateUnordered(h, fieldHash)
|
||||
}
|
||||
|
||||
if w.format != FormatV1 {
|
||||
// Important: read the docs for hashFinishUnordered
|
||||
h = hashFinishUnordered(w.h, h)
|
||||
}
|
||||
|
||||
return h, nil
|
||||
|
||||
case reflect.Struct:
|
||||
parent := v.Interface()
|
||||
var include Includable
|
||||
if impl, ok := parent.(Includable); ok {
|
||||
include = impl
|
||||
}
|
||||
|
||||
if impl, ok := parent.(Hashable); ok {
|
||||
return impl.Hash()
|
||||
}
|
||||
|
||||
// If we can address this value, check if the pointer value
|
||||
// implements our interfaces and use that if so.
|
||||
if v.CanAddr() {
|
||||
vptr := v.Addr()
|
||||
parentptr := vptr.Interface()
|
||||
if impl, ok := parentptr.(Includable); ok {
|
||||
include = impl
|
||||
}
|
||||
|
||||
if impl, ok := parentptr.(Hashable); ok {
|
||||
return impl.Hash()
|
||||
}
|
||||
}
|
||||
|
||||
t := v.Type()
|
||||
h, err := w.visit(reflect.ValueOf(t.Name()), nil)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
l := v.NumField()
|
||||
for i := 0; i < l; i++ {
|
||||
if innerV := v.Field(i); v.CanSet() || t.Field(i).Name != "_" {
|
||||
var f visitFlag
|
||||
fieldType := t.Field(i)
|
||||
if fieldType.PkgPath != "" {
|
||||
// Unexported
|
||||
continue
|
||||
}
|
||||
|
||||
tag := fieldType.Tag.Get(w.tag)
|
||||
if tag == "ignore" || tag == "-" {
|
||||
// Ignore this field
|
||||
continue
|
||||
}
|
||||
|
||||
if w.ignorezerovalue {
|
||||
if innerV.IsZero() {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// if string is set, use the string value
|
||||
if tag == "string" || w.stringer {
|
||||
if impl, ok := innerV.Interface().(fmt.Stringer); ok {
|
||||
innerV = reflect.ValueOf(impl.String())
|
||||
} else if tag == "string" {
|
||||
// We only show this error if the tag explicitly
|
||||
// requests a stringer.
|
||||
return 0, &ErrNotStringer{
|
||||
Field: v.Type().Field(i).Name,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check if we implement includable and check it
|
||||
if include != nil {
|
||||
incl, err := include.HashInclude(fieldType.Name, innerV)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if !incl {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
switch tag {
|
||||
case "set":
|
||||
f |= visitFlagSet
|
||||
}
|
||||
|
||||
kh, err := w.visit(reflect.ValueOf(fieldType.Name), nil)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
vh, err := w.visit(innerV, &visitOpts{
|
||||
Flags: f,
|
||||
Struct: parent,
|
||||
StructField: fieldType.Name,
|
||||
})
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
fieldHash := hashUpdateOrdered(w.h, kh, vh)
|
||||
h = hashUpdateUnordered(h, fieldHash)
|
||||
}
|
||||
|
||||
if w.format != FormatV1 {
|
||||
// Important: read the docs for hashFinishUnordered
|
||||
h = hashFinishUnordered(w.h, h)
|
||||
}
|
||||
}
|
||||
|
||||
return h, nil
|
||||
|
||||
case reflect.Slice:
|
||||
// We have two behaviors here. If it isn't a set, then we just
|
||||
// visit all the elements. If it is a set, then we do a deterministic
|
||||
// hash code.
|
||||
var h uint64
|
||||
var set bool
|
||||
if opts != nil {
|
||||
set = (opts.Flags & visitFlagSet) != 0
|
||||
}
|
||||
l := v.Len()
|
||||
for i := 0; i < l; i++ {
|
||||
current, err := w.visit(v.Index(i), nil)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if set || w.sets {
|
||||
h = hashUpdateUnordered(h, current)
|
||||
} else {
|
||||
h = hashUpdateOrdered(w.h, h, current)
|
||||
}
|
||||
}
|
||||
|
||||
if set && w.format != FormatV1 {
|
||||
// Important: read the docs for hashFinishUnordered
|
||||
h = hashFinishUnordered(w.h, h)
|
||||
}
|
||||
|
||||
return h, nil
|
||||
|
||||
case reflect.String:
|
||||
// Directly hash
|
||||
w.h.Reset()
|
||||
_, err := w.h.Write([]byte(v.String()))
|
||||
return w.h.Sum64(), err
|
||||
|
||||
default:
|
||||
return 0, fmt.Errorf("unknown kind to hash: %s", k)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func hashUpdateOrdered(h hash.Hash64, a, b uint64) uint64 {
|
||||
// For ordered updates, use a real hash function
|
||||
h.Reset()
|
||||
|
||||
// We just panic if the binary writes fail because we are writing
|
||||
// an int64 which should never be fail-able.
|
||||
e1 := binary.Write(h, binary.LittleEndian, a)
|
||||
e2 := binary.Write(h, binary.LittleEndian, b)
|
||||
if e1 != nil {
|
||||
panic(e1)
|
||||
}
|
||||
if e2 != nil {
|
||||
panic(e2)
|
||||
}
|
||||
|
||||
return h.Sum64()
|
||||
}
|
||||
|
||||
// hashUpdateUnordered combines two hash values without regard to order:
// XOR is commutative and associative, so folding a group of hashes this
// way produces the same result for any iteration order. See
// hashFinishUnordered for why the folded result must be hardened before
// being mixed again.
func hashUpdateUnordered(a, b uint64) uint64 {
	return b ^ a
}
|
||||
|
||||
// After mixing a group of unique hashes with hashUpdateUnordered, it's always
|
||||
// necessary to call hashFinishUnordered. Why? Because hashUpdateUnordered
|
||||
// is a simple XOR, and calling hashUpdateUnordered on hashes produced by
|
||||
// hashUpdateUnordered can effectively cancel out a previous change to the hash
|
||||
// result if the same hash value appears later on. For example, consider:
|
||||
//
|
||||
// hashUpdateUnordered(hashUpdateUnordered("A", "B"), hashUpdateUnordered("A", "C")) =
|
||||
// H("A") ^ H("B")) ^ (H("A") ^ H("C")) =
|
||||
// (H("A") ^ H("A")) ^ (H("B") ^ H(C)) =
|
||||
// H(B) ^ H(C) =
|
||||
// hashUpdateUnordered(hashUpdateUnordered("Z", "B"), hashUpdateUnordered("Z", "C"))
|
||||
//
|
||||
// hashFinishUnordered "hardens" the result, so that encountering partially
|
||||
// overlapping input data later on in a different context won't cancel out.
|
||||
func hashFinishUnordered(h hash.Hash64, a uint64) uint64 {
|
||||
h.Reset()
|
||||
|
||||
// We just panic if the writes fail
|
||||
e1 := binary.Write(h, binary.LittleEndian, a)
|
||||
if e1 != nil {
|
||||
panic(e1)
|
||||
}
|
||||
|
||||
return h.Sum64()
|
||||
}
|
||||
|
||||
// visitFlag is used as a bitmask for affecting visit behavior
type visitFlag uint

const (
	// visitFlagInvalid is the zero value: no special visit behavior.
	visitFlagInvalid visitFlag = iota
	// visitFlagSet requests set semantics for a slice: elements are hashed
	// order-independently (see the reflect.Slice case in visit).
	// NOTE(review): iota is 1 at this spec, so `iota << 1` evaluates to 2;
	// `1 << iota` may have been intended, but the value is still a distinct
	// nonzero bit, so the mask works as used — confirm before changing.
	visitFlagSet = iota << 1
)
|
|
@ -0,0 +1,22 @@
|
|||
package hashstructure

// Includable is an interface that can optionally be implemented by
// a struct. It will be called for each field in the struct to check whether
// it should be included in the hash.
type Includable interface {
	// HashInclude reports whether the field named field, holding value v,
	// should contribute to the hash. A non-nil error aborts hashing.
	HashInclude(field string, v interface{}) (bool, error)
}

// IncludableMap is an interface that can optionally be implemented by
// a struct. It will be called when a map-type field is found to ask the
// struct if the map item should be included in the hash.
type IncludableMap interface {
	// HashIncludeMap reports whether the map entry (k, v) of the map field
	// named field should contribute to the hash. A non-nil error aborts
	// hashing.
	HashIncludeMap(field string, k, v interface{}) (bool, error)
}

// Hashable is an interface that can optionally be implemented by a struct
// to override the hash value. This value will override the hash value for
// the entire struct. Entries in the struct will not be hashed.
type Hashable interface {
	// Hash returns the hash value to use for the whole struct, replacing
	// the field-by-field computation entirely.
	Hash() (uint64, error)
}
|
|
@ -511,6 +511,9 @@ github.com/miekg/pkcs11
|
|||
# github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7
|
||||
## explicit
|
||||
github.com/mitchellh/go-wordwrap
|
||||
# github.com/mitchellh/hashstructure/v2 v2.0.2
|
||||
## explicit; go 1.14
|
||||
github.com/mitchellh/hashstructure/v2
|
||||
# github.com/mitchellh/mapstructure v1.5.0
|
||||
## explicit; go 1.14
|
||||
# github.com/moby/buildkit v0.14.1
|
||||
|
|
Loading…
Reference in New Issue