mirror of https://github.com/docker/buildx.git
bake: initial set of composable bake attributes
This allows using either the csv syntax or object syntax to specify certain attributes. This applies to the following fields: - output - cache-from - cache-to - secret - ssh There are still some remaining fields to translate. Specifically ulimits, annotations, and attest. Signed-off-by: Jonathan A. Sternberg <jonathan.sternberg@docker.com>
This commit is contained in:
parent
5937ba0e00
commit
4070cb4d22
323
bake/bake.go
323
bake/bake.go
|
@ -2,6 +2,7 @@ package bake
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"encoding"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
|
@ -496,7 +497,9 @@ func (c Config) loadLinks(name string, t *Target, m map[string]*Target, o map[st
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
t2.Outputs = []string{"type=cacheonly"}
|
t2.Outputs = []*buildflags.ExportEntry{
|
||||||
|
{Type: "cacheonly"},
|
||||||
|
}
|
||||||
t2.linked = true
|
t2.linked = true
|
||||||
m[target] = t2
|
m[target] = t2
|
||||||
}
|
}
|
||||||
|
@ -695,30 +698,30 @@ type Target struct {
|
||||||
// Inherits is the only field that cannot be overridden with --set
|
// Inherits is the only field that cannot be overridden with --set
|
||||||
Inherits []string `json:"inherits,omitempty" hcl:"inherits,optional" cty:"inherits"`
|
Inherits []string `json:"inherits,omitempty" hcl:"inherits,optional" cty:"inherits"`
|
||||||
|
|
||||||
Annotations []string `json:"annotations,omitempty" hcl:"annotations,optional" cty:"annotations"`
|
Annotations []string `json:"annotations,omitempty" hcl:"annotations,optional" cty:"annotations"`
|
||||||
Attest []string `json:"attest,omitempty" hcl:"attest,optional" cty:"attest"`
|
Attest []string `json:"attest,omitempty" hcl:"attest,optional" cty:"attest"`
|
||||||
Context *string `json:"context,omitempty" hcl:"context,optional" cty:"context"`
|
Context *string `json:"context,omitempty" hcl:"context,optional" cty:"context"`
|
||||||
Contexts map[string]string `json:"contexts,omitempty" hcl:"contexts,optional" cty:"contexts"`
|
Contexts map[string]string `json:"contexts,omitempty" hcl:"contexts,optional" cty:"contexts"`
|
||||||
Dockerfile *string `json:"dockerfile,omitempty" hcl:"dockerfile,optional" cty:"dockerfile"`
|
Dockerfile *string `json:"dockerfile,omitempty" hcl:"dockerfile,optional" cty:"dockerfile"`
|
||||||
DockerfileInline *string `json:"dockerfile-inline,omitempty" hcl:"dockerfile-inline,optional" cty:"dockerfile-inline"`
|
DockerfileInline *string `json:"dockerfile-inline,omitempty" hcl:"dockerfile-inline,optional" cty:"dockerfile-inline"`
|
||||||
Args map[string]*string `json:"args,omitempty" hcl:"args,optional" cty:"args"`
|
Args map[string]*string `json:"args,omitempty" hcl:"args,optional" cty:"args"`
|
||||||
Labels map[string]*string `json:"labels,omitempty" hcl:"labels,optional" cty:"labels"`
|
Labels map[string]*string `json:"labels,omitempty" hcl:"labels,optional" cty:"labels"`
|
||||||
Tags []string `json:"tags,omitempty" hcl:"tags,optional" cty:"tags"`
|
Tags []string `json:"tags,omitempty" hcl:"tags,optional" cty:"tags"`
|
||||||
CacheFrom []string `json:"cache-from,omitempty" hcl:"cache-from,optional" cty:"cache-from"`
|
CacheFrom []*buildflags.CacheOptionsEntry `json:"cache-from,omitempty" hcl:"cache-from,optional" cty:"cache-from"`
|
||||||
CacheTo []string `json:"cache-to,omitempty" hcl:"cache-to,optional" cty:"cache-to"`
|
CacheTo []*buildflags.CacheOptionsEntry `json:"cache-to,omitempty" hcl:"cache-to,optional" cty:"cache-to"`
|
||||||
Target *string `json:"target,omitempty" hcl:"target,optional" cty:"target"`
|
Target *string `json:"target,omitempty" hcl:"target,optional" cty:"target"`
|
||||||
Secrets []string `json:"secret,omitempty" hcl:"secret,optional" cty:"secret"`
|
Secrets []*buildflags.Secret `json:"secret,omitempty" hcl:"secret,optional" cty:"secret"`
|
||||||
SSH []string `json:"ssh,omitempty" hcl:"ssh,optional" cty:"ssh"`
|
SSH []*buildflags.SSH `json:"ssh,omitempty" hcl:"ssh,optional" cty:"ssh"`
|
||||||
Platforms []string `json:"platforms,omitempty" hcl:"platforms,optional" cty:"platforms"`
|
Platforms []string `json:"platforms,omitempty" hcl:"platforms,optional" cty:"platforms"`
|
||||||
Outputs []string `json:"output,omitempty" hcl:"output,optional" cty:"output"`
|
Outputs []*buildflags.ExportEntry `json:"output,omitempty" hcl:"output,optional" cty:"output"`
|
||||||
Pull *bool `json:"pull,omitempty" hcl:"pull,optional" cty:"pull"`
|
Pull *bool `json:"pull,omitempty" hcl:"pull,optional" cty:"pull"`
|
||||||
NoCache *bool `json:"no-cache,omitempty" hcl:"no-cache,optional" cty:"no-cache"`
|
NoCache *bool `json:"no-cache,omitempty" hcl:"no-cache,optional" cty:"no-cache"`
|
||||||
NetworkMode *string `json:"network,omitempty" hcl:"network,optional" cty:"network"`
|
NetworkMode *string `json:"network,omitempty" hcl:"network,optional" cty:"network"`
|
||||||
NoCacheFilter []string `json:"no-cache-filter,omitempty" hcl:"no-cache-filter,optional" cty:"no-cache-filter"`
|
NoCacheFilter []string `json:"no-cache-filter,omitempty" hcl:"no-cache-filter,optional" cty:"no-cache-filter"`
|
||||||
ShmSize *string `json:"shm-size,omitempty" hcl:"shm-size,optional"`
|
ShmSize *string `json:"shm-size,omitempty" hcl:"shm-size,optional"`
|
||||||
Ulimits []string `json:"ulimits,omitempty" hcl:"ulimits,optional"`
|
Ulimits []string `json:"ulimits,omitempty" hcl:"ulimits,optional"`
|
||||||
Call *string `json:"call,omitempty" hcl:"call,optional" cty:"call"`
|
Call *string `json:"call,omitempty" hcl:"call,optional" cty:"call"`
|
||||||
Entitlements []string `json:"entitlements,omitempty" hcl:"entitlements,optional" cty:"entitlements"`
|
Entitlements []string `json:"entitlements,omitempty" hcl:"entitlements,optional" cty:"entitlements"`
|
||||||
// IMPORTANT: if you add more fields here, do not forget to update newOverrides/AddOverrides and docs/bake-reference.md.
|
// IMPORTANT: if you add more fields here, do not forget to update newOverrides/AddOverrides and docs/bake-reference.md.
|
||||||
|
|
||||||
// linked is a private field to mark a target used as a linked one
|
// linked is a private field to mark a target used as a linked one
|
||||||
|
@ -733,23 +736,23 @@ var (
|
||||||
)
|
)
|
||||||
|
|
||||||
func (t *Target) normalize() {
|
func (t *Target) normalize() {
|
||||||
t.Annotations = removeDupes(t.Annotations)
|
t.Annotations = removeDupesStr(t.Annotations)
|
||||||
t.Attest = removeAttestDupes(t.Attest)
|
t.Attest = removeAttestDupes(t.Attest)
|
||||||
t.Tags = removeDupes(t.Tags)
|
t.Tags = removeDupesStr(t.Tags)
|
||||||
t.Secrets = removeDupes(t.Secrets)
|
t.Secrets = removeDupes(t.Secrets)
|
||||||
t.SSH = removeDupes(t.SSH)
|
t.SSH = removeDupes(t.SSH)
|
||||||
t.Platforms = removeDupes(t.Platforms)
|
t.Platforms = removeDupesStr(t.Platforms)
|
||||||
t.CacheFrom = removeDupes(t.CacheFrom)
|
t.CacheFrom = removeDupes(t.CacheFrom)
|
||||||
t.CacheTo = removeDupes(t.CacheTo)
|
t.CacheTo = removeDupes(t.CacheTo)
|
||||||
t.Outputs = removeDupes(t.Outputs)
|
t.Outputs = removeDupes(t.Outputs)
|
||||||
t.NoCacheFilter = removeDupes(t.NoCacheFilter)
|
t.NoCacheFilter = removeDupesStr(t.NoCacheFilter)
|
||||||
t.Ulimits = removeDupes(t.Ulimits)
|
t.Ulimits = removeDupesStr(t.Ulimits)
|
||||||
|
|
||||||
if t.NetworkMode != nil && *t.NetworkMode == "host" {
|
if t.NetworkMode != nil && *t.NetworkMode == "host" {
|
||||||
t.Entitlements = append(t.Entitlements, "network.host")
|
t.Entitlements = append(t.Entitlements, "network.host")
|
||||||
}
|
}
|
||||||
|
|
||||||
t.Entitlements = removeDupes(t.Entitlements)
|
t.Entitlements = removeDupesStr(t.Entitlements)
|
||||||
|
|
||||||
for k, v := range t.Contexts {
|
for k, v := range t.Contexts {
|
||||||
if v == "" {
|
if v == "" {
|
||||||
|
@ -892,21 +895,41 @@ func (t *Target) AddOverrides(overrides map[string]Override) error {
|
||||||
case "tags":
|
case "tags":
|
||||||
t.Tags = o.ArrValue
|
t.Tags = o.ArrValue
|
||||||
case "cache-from":
|
case "cache-from":
|
||||||
t.CacheFrom = o.ArrValue
|
cacheFrom, err := parseCacheArrValues(o.ArrValue)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
t.CacheFrom = cacheFrom
|
||||||
case "cache-to":
|
case "cache-to":
|
||||||
t.CacheTo = o.ArrValue
|
cacheTo, err := parseCacheArrValues(o.ArrValue)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
t.CacheTo = cacheTo
|
||||||
case "target":
|
case "target":
|
||||||
t.Target = &value
|
t.Target = &value
|
||||||
case "call":
|
case "call":
|
||||||
t.Call = &value
|
t.Call = &value
|
||||||
case "secrets":
|
case "secrets":
|
||||||
t.Secrets = o.ArrValue
|
secrets, err := parseArrValue[buildflags.Secret](o.ArrValue)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "invalid value for outputs")
|
||||||
|
}
|
||||||
|
t.Secrets = secrets
|
||||||
case "ssh":
|
case "ssh":
|
||||||
t.SSH = o.ArrValue
|
ssh, err := parseArrValue[buildflags.SSH](o.ArrValue)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "invalid value for outputs")
|
||||||
|
}
|
||||||
|
t.SSH = ssh
|
||||||
case "platform":
|
case "platform":
|
||||||
t.Platforms = o.ArrValue
|
t.Platforms = o.ArrValue
|
||||||
case "output":
|
case "output":
|
||||||
t.Outputs = o.ArrValue
|
outputs, err := parseArrValue[buildflags.ExportEntry](o.ArrValue)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "invalid value for outputs")
|
||||||
|
}
|
||||||
|
t.Outputs = outputs
|
||||||
case "entitlements":
|
case "entitlements":
|
||||||
t.Entitlements = append(t.Entitlements, o.ArrValue...)
|
t.Entitlements = append(t.Entitlements, o.ArrValue...)
|
||||||
case "annotations":
|
case "annotations":
|
||||||
|
@ -1311,23 +1334,27 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
|
||||||
}
|
}
|
||||||
bo.Platforms = platforms
|
bo.Platforms = platforms
|
||||||
|
|
||||||
secrets, err := buildflags.ParseSecretSpecs(t.Secrets)
|
secrets := make([]*controllerapi.Secret, len(t.Secrets))
|
||||||
if err != nil {
|
for i, s := range t.Secrets {
|
||||||
return nil, err
|
secrets[i] = s.ToPB()
|
||||||
}
|
}
|
||||||
|
|
||||||
secretAttachment, err := controllerapi.CreateSecrets(secrets)
|
secretAttachment, err := controllerapi.CreateSecrets(secrets)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
bo.Session = append(bo.Session, secretAttachment)
|
bo.Session = append(bo.Session, secretAttachment)
|
||||||
|
|
||||||
sshSpecs, err := buildflags.ParseSSHSpecs(t.SSH)
|
var sshSpecs []*controllerapi.SSH
|
||||||
if err != nil {
|
if len(t.SSH) > 0 {
|
||||||
return nil, err
|
sshSpecs := make([]*controllerapi.SSH, len(t.SSH))
|
||||||
}
|
for i, s := range t.SSH {
|
||||||
if len(sshSpecs) == 0 && (buildflags.IsGitSSH(bi.ContextPath) || (inp != nil && buildflags.IsGitSSH(inp.URL))) {
|
sshSpecs[i] = s.ToPB()
|
||||||
sshSpecs = append(sshSpecs, &controllerapi.SSH{ID: "default"})
|
}
|
||||||
|
} else if buildflags.IsGitSSH(bi.ContextPath) || (inp != nil && buildflags.IsGitSSH(inp.URL)) {
|
||||||
|
sshSpecs = []*controllerapi.SSH{{ID: "default"}}
|
||||||
}
|
}
|
||||||
|
|
||||||
sshAttachment, err := controllerapi.CreateSSH(sshSpecs)
|
sshAttachment, err := controllerapi.CreateSSH(sshSpecs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -1344,22 +1371,23 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
cacheImports, err := buildflags.ParseCacheEntry(t.CacheFrom)
|
cacheImports := make([]*controllerapi.CacheOptionsEntry, len(t.CacheFrom))
|
||||||
if err != nil {
|
for i, ci := range t.CacheFrom {
|
||||||
return nil, err
|
cacheImports[i] = ci.ToPB()
|
||||||
}
|
}
|
||||||
bo.CacheFrom = controllerapi.CreateCaches(cacheImports)
|
bo.CacheFrom = controllerapi.CreateCaches(cacheImports)
|
||||||
|
|
||||||
cacheExports, err := buildflags.ParseCacheEntry(t.CacheTo)
|
cacheExports := make([]*controllerapi.CacheOptionsEntry, len(t.CacheTo))
|
||||||
if err != nil {
|
for i, ce := range t.CacheTo {
|
||||||
return nil, err
|
cacheExports[i] = ce.ToPB()
|
||||||
}
|
}
|
||||||
bo.CacheTo = controllerapi.CreateCaches(cacheExports)
|
bo.CacheTo = controllerapi.CreateCaches(cacheExports)
|
||||||
|
|
||||||
outputs, err := buildflags.ParseExports(t.Outputs)
|
outputs := make([]*controllerapi.ExportEntry, len(t.Outputs))
|
||||||
if err != nil {
|
for i, output := range t.Outputs {
|
||||||
return nil, err
|
outputs[i] = output.ToPB()
|
||||||
}
|
}
|
||||||
|
|
||||||
bo.Exports, err = controllerapi.CreateExports(outputs)
|
bo.Exports, err = controllerapi.CreateExports(outputs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -1405,7 +1433,35 @@ func defaultTarget() *Target {
|
||||||
return &Target{}
|
return &Target{}
|
||||||
}
|
}
|
||||||
|
|
||||||
func removeDupes(s []string) []string {
|
type comparable[E any] interface {
|
||||||
|
Equal(other E) bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func removeDupes[E comparable[E]](s []E) []E {
|
||||||
|
// Move backwards through the slice.
|
||||||
|
// For each element, any elements after the current element are unique.
|
||||||
|
// If we find our current element conflicts with an existing element,
|
||||||
|
// then we swap the offender with the end of the slice and chop it off.
|
||||||
|
|
||||||
|
// Start at the second to last element.
|
||||||
|
// The last element is always unique.
|
||||||
|
for i := len(s) - 2; i >= 0; i-- {
|
||||||
|
elem := s[i]
|
||||||
|
// Check for duplicates after our current element.
|
||||||
|
for j := i + 1; j < len(s); j++ {
|
||||||
|
if elem.Equal(s[j]) {
|
||||||
|
// Found a duplicate, exchange the
|
||||||
|
// duplicate with the last element.
|
||||||
|
s[j], s[len(s)-1] = s[len(s)-1], s[j]
|
||||||
|
s = s[:len(s)-1]
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
func removeDupesStr(s []string) []string {
|
||||||
i := 0
|
i := 0
|
||||||
seen := make(map[string]struct{}, len(s))
|
seen := make(map[string]struct{}, len(s))
|
||||||
for _, v := range s {
|
for _, v := range s {
|
||||||
|
@ -1442,86 +1498,76 @@ func removeAttestDupes(s []string) []string {
|
||||||
return res
|
return res
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseOutput(str string) map[string]string {
|
func setPushOverride(outputs []*buildflags.ExportEntry, push bool) []*buildflags.ExportEntry {
|
||||||
fields, err := csvvalue.Fields(str, nil)
|
if !push {
|
||||||
if err != nil {
|
// Disable push for any relevant export types
|
||||||
return nil
|
for i := 0; i < len(outputs); {
|
||||||
}
|
output := outputs[i]
|
||||||
res := map[string]string{}
|
switch output.Type {
|
||||||
for _, field := range fields {
|
case "registry":
|
||||||
parts := strings.SplitN(field, "=", 2)
|
// Filter out registry output type
|
||||||
if len(parts) == 2 {
|
outputs[i], outputs[len(outputs)-1] = outputs[len(outputs)-1], outputs[i]
|
||||||
res[parts[0]] = parts[1]
|
outputs = outputs[:len(outputs)-1]
|
||||||
|
continue
|
||||||
|
case "image":
|
||||||
|
// Override push attribute
|
||||||
|
output.Attrs["push"] = "false"
|
||||||
|
}
|
||||||
|
i++
|
||||||
}
|
}
|
||||||
|
return outputs
|
||||||
}
|
}
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseOutputType(str string) string {
|
// Force push to be enabled
|
||||||
if out := parseOutput(str); out != nil {
|
|
||||||
if v, ok := out["type"]; ok {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func setPushOverride(outputs []string, push bool) []string {
|
|
||||||
var out []string
|
|
||||||
setPush := true
|
setPush := true
|
||||||
for _, output := range outputs {
|
for _, output := range outputs {
|
||||||
typ := parseOutputType(output)
|
if output.Type != "docker" {
|
||||||
if typ == "image" || typ == "registry" {
|
// If there is an output type that is not docker, don't set "push"
|
||||||
// no need to set push if image or registry types already defined
|
|
||||||
setPush = false
|
setPush = false
|
||||||
if typ == "registry" {
|
}
|
||||||
if !push {
|
|
||||||
// don't set registry output if "push" is false
|
// Set push attribute for image
|
||||||
continue
|
if output.Type == "image" {
|
||||||
}
|
output.Attrs["push"] = "true"
|
||||||
// no need to set "push" attribute to true for registry
|
|
||||||
out = append(out, output)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
out = append(out, output+",push="+strconv.FormatBool(push))
|
|
||||||
} else {
|
|
||||||
if typ != "docker" {
|
|
||||||
// if there is any output that is not docker, don't set "push"
|
|
||||||
setPush = false
|
|
||||||
}
|
|
||||||
out = append(out, output)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if push && setPush {
|
|
||||||
out = append(out, "type=image,push=true")
|
if setPush {
|
||||||
|
// No existing output that pushes so add one
|
||||||
|
outputs = append(outputs, &buildflags.ExportEntry{
|
||||||
|
Type: "image",
|
||||||
|
Attrs: map[string]string{
|
||||||
|
"push": "true",
|
||||||
|
},
|
||||||
|
})
|
||||||
}
|
}
|
||||||
return out
|
return outputs
|
||||||
}
|
}
|
||||||
|
|
||||||
func setLoadOverride(outputs []string, load bool) []string {
|
func setLoadOverride(outputs []*buildflags.ExportEntry, load bool) []*buildflags.ExportEntry {
|
||||||
if !load {
|
if !load {
|
||||||
return outputs
|
return outputs
|
||||||
}
|
}
|
||||||
setLoad := true
|
|
||||||
for _, output := range outputs {
|
for _, output := range outputs {
|
||||||
if typ := parseOutputType(output); typ == "docker" {
|
switch output.Type {
|
||||||
if v := parseOutput(output); v != nil {
|
case "docker":
|
||||||
// dest set means we want to output as tar so don't set load
|
// if dest is not set, we can reuse this entry and do not need to set load
|
||||||
if _, ok := v["dest"]; !ok {
|
if output.Destination == "" {
|
||||||
setLoad = false
|
return outputs
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
} else if typ != "image" && typ != "registry" && typ != "oci" {
|
case "image", "registry", "oci":
|
||||||
|
// Ignore
|
||||||
|
default:
|
||||||
// if there is any output that is not an image, registry
|
// if there is any output that is not an image, registry
|
||||||
// or oci, don't set "load" similar to push override
|
// or oci, don't set "load" similar to push override
|
||||||
setLoad = false
|
return outputs
|
||||||
break
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if setLoad {
|
|
||||||
outputs = append(outputs, "type=docker")
|
outputs = append(outputs, &buildflags.ExportEntry{
|
||||||
}
|
Type: "docker",
|
||||||
|
})
|
||||||
return outputs
|
return outputs
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1560,3 +1606,50 @@ func toNamedContexts(m map[string]string) map[string]build.NamedContext {
|
||||||
}
|
}
|
||||||
return m2
|
return m2
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type arrValue[B any] interface {
|
||||||
|
encoding.TextUnmarshaler
|
||||||
|
*B
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseArrValue[T any, PT arrValue[T]](s []string) ([]*T, error) {
|
||||||
|
outputs := make([]*T, 0, len(s))
|
||||||
|
for _, text := range s {
|
||||||
|
output := new(T)
|
||||||
|
if err := PT(output).UnmarshalText([]byte(text)); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
outputs = append(outputs, output)
|
||||||
|
}
|
||||||
|
return outputs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseCacheArrValues(s []string) ([]*buildflags.CacheOptionsEntry, error) {
|
||||||
|
outs := make([]*buildflags.CacheOptionsEntry, 0, len(s))
|
||||||
|
for _, in := range s {
|
||||||
|
if !strings.Contains(in, "=") {
|
||||||
|
// This is ref only format. Each field in the CSV is its own entry.
|
||||||
|
fields, err := csvvalue.Fields(in, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, field := range fields {
|
||||||
|
out := buildflags.CacheOptionsEntry{}
|
||||||
|
if err := out.UnmarshalText([]byte(field)); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
outs = append(outs, &out)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Normal entry.
|
||||||
|
out := buildflags.CacheOptionsEntry{}
|
||||||
|
if err := out.UnmarshalText([]byte(in)); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
outs = append(outs, &out)
|
||||||
|
}
|
||||||
|
return outs, nil
|
||||||
|
}
|
||||||
|
|
|
@ -2,6 +2,7 @@ package bake
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"sort"
|
"sort"
|
||||||
|
@ -228,7 +229,7 @@ func TestPushOverride(t *testing.T) {
|
||||||
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.push=true"}, nil)
|
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.push=true"}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 1, len(m["app"].Outputs))
|
require.Equal(t, 1, len(m["app"].Outputs))
|
||||||
require.Equal(t, "type=image,push=true", m["app"].Outputs[0])
|
require.Equal(t, "type=image,push=true", m["app"].Outputs[0].String())
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("type image", func(t *testing.T) {
|
t.Run("type image", func(t *testing.T) {
|
||||||
|
@ -242,7 +243,7 @@ func TestPushOverride(t *testing.T) {
|
||||||
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.push=true"}, nil)
|
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.push=true"}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 1, len(m["app"].Outputs))
|
require.Equal(t, 1, len(m["app"].Outputs))
|
||||||
require.Equal(t, "type=image,compression=zstd,push=true", m["app"].Outputs[0])
|
require.Equal(t, "type=image,compression=zstd,push=true", m["app"].Outputs[0].String())
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("type image push false", func(t *testing.T) {
|
t.Run("type image push false", func(t *testing.T) {
|
||||||
|
@ -256,7 +257,7 @@ func TestPushOverride(t *testing.T) {
|
||||||
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.push=false"}, nil)
|
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.push=false"}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 1, len(m["app"].Outputs))
|
require.Equal(t, 1, len(m["app"].Outputs))
|
||||||
require.Equal(t, "type=image,compression=zstd,push=false", m["app"].Outputs[0])
|
require.Equal(t, "type=image,compression=zstd,push=false", m["app"].Outputs[0].String())
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("type registry", func(t *testing.T) {
|
t.Run("type registry", func(t *testing.T) {
|
||||||
|
@ -270,7 +271,7 @@ func TestPushOverride(t *testing.T) {
|
||||||
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.push=true"}, nil)
|
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.push=true"}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 1, len(m["app"].Outputs))
|
require.Equal(t, 1, len(m["app"].Outputs))
|
||||||
require.Equal(t, "type=registry", m["app"].Outputs[0])
|
require.Equal(t, "type=registry", m["app"].Outputs[0].String())
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("type registry push false", func(t *testing.T) {
|
t.Run("type registry push false", func(t *testing.T) {
|
||||||
|
@ -300,9 +301,9 @@ func TestPushOverride(t *testing.T) {
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 2, len(m))
|
require.Equal(t, 2, len(m))
|
||||||
require.Equal(t, 1, len(m["foo"].Outputs))
|
require.Equal(t, 1, len(m["foo"].Outputs))
|
||||||
require.Equal(t, []string{"type=local,dest=out"}, m["foo"].Outputs)
|
require.Equal(t, []string{"type=local,dest=out"}, stringify(m["foo"].Outputs))
|
||||||
require.Equal(t, 1, len(m["bar"].Outputs))
|
require.Equal(t, 1, len(m["bar"].Outputs))
|
||||||
require.Equal(t, []string{"type=image,push=true"}, m["bar"].Outputs)
|
require.Equal(t, []string{"type=image,push=true"}, stringify(m["bar"].Outputs))
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -317,7 +318,7 @@ func TestLoadOverride(t *testing.T) {
|
||||||
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil)
|
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 1, len(m["app"].Outputs))
|
require.Equal(t, 1, len(m["app"].Outputs))
|
||||||
require.Equal(t, "type=docker", m["app"].Outputs[0])
|
require.Equal(t, "type=docker", m["app"].Outputs[0].String())
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("type docker", func(t *testing.T) {
|
t.Run("type docker", func(t *testing.T) {
|
||||||
|
@ -331,7 +332,7 @@ func TestLoadOverride(t *testing.T) {
|
||||||
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil)
|
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 1, len(m["app"].Outputs))
|
require.Equal(t, 1, len(m["app"].Outputs))
|
||||||
require.Equal(t, []string{"type=docker"}, m["app"].Outputs)
|
require.Equal(t, []string{"type=docker"}, stringify(m["app"].Outputs))
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("type image", func(t *testing.T) {
|
t.Run("type image", func(t *testing.T) {
|
||||||
|
@ -345,7 +346,7 @@ func TestLoadOverride(t *testing.T) {
|
||||||
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil)
|
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 2, len(m["app"].Outputs))
|
require.Equal(t, 2, len(m["app"].Outputs))
|
||||||
require.Equal(t, []string{"type=image", "type=docker"}, m["app"].Outputs)
|
require.Equal(t, []string{"type=docker", "type=image"}, stringify(m["app"].Outputs))
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("type image load false", func(t *testing.T) {
|
t.Run("type image load false", func(t *testing.T) {
|
||||||
|
@ -359,7 +360,7 @@ func TestLoadOverride(t *testing.T) {
|
||||||
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=false"}, nil)
|
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=false"}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 1, len(m["app"].Outputs))
|
require.Equal(t, 1, len(m["app"].Outputs))
|
||||||
require.Equal(t, []string{"type=image"}, m["app"].Outputs)
|
require.Equal(t, []string{"type=image"}, stringify(m["app"].Outputs))
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("type registry", func(t *testing.T) {
|
t.Run("type registry", func(t *testing.T) {
|
||||||
|
@ -373,7 +374,7 @@ func TestLoadOverride(t *testing.T) {
|
||||||
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil)
|
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 2, len(m["app"].Outputs))
|
require.Equal(t, 2, len(m["app"].Outputs))
|
||||||
require.Equal(t, []string{"type=registry", "type=docker"}, m["app"].Outputs)
|
require.Equal(t, []string{"type=docker", "type=registry"}, stringify(m["app"].Outputs))
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("type oci", func(t *testing.T) {
|
t.Run("type oci", func(t *testing.T) {
|
||||||
|
@ -387,7 +388,7 @@ func TestLoadOverride(t *testing.T) {
|
||||||
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil)
|
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 2, len(m["app"].Outputs))
|
require.Equal(t, 2, len(m["app"].Outputs))
|
||||||
require.Equal(t, []string{"type=oci,dest=out", "type=docker"}, m["app"].Outputs)
|
require.Equal(t, []string{"type=docker", "type=oci,dest=out"}, stringify(m["app"].Outputs))
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("type docker with dest", func(t *testing.T) {
|
t.Run("type docker with dest", func(t *testing.T) {
|
||||||
|
@ -401,7 +402,7 @@ func TestLoadOverride(t *testing.T) {
|
||||||
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil)
|
m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 2, len(m["app"].Outputs))
|
require.Equal(t, 2, len(m["app"].Outputs))
|
||||||
require.Equal(t, []string{"type=docker,dest=out", "type=docker"}, m["app"].Outputs)
|
require.Equal(t, []string{"type=docker", "type=docker,dest=out"}, stringify(m["app"].Outputs))
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("type local and empty target", func(t *testing.T) {
|
t.Run("type local and empty target", func(t *testing.T) {
|
||||||
|
@ -418,9 +419,9 @@ func TestLoadOverride(t *testing.T) {
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 2, len(m))
|
require.Equal(t, 2, len(m))
|
||||||
require.Equal(t, 1, len(m["foo"].Outputs))
|
require.Equal(t, 1, len(m["foo"].Outputs))
|
||||||
require.Equal(t, []string{"type=local,dest=out"}, m["foo"].Outputs)
|
require.Equal(t, []string{"type=local,dest=out"}, stringify(m["foo"].Outputs))
|
||||||
require.Equal(t, 1, len(m["bar"].Outputs))
|
require.Equal(t, 1, len(m["bar"].Outputs))
|
||||||
require.Equal(t, []string{"type=docker"}, m["bar"].Outputs)
|
require.Equal(t, []string{"type=docker"}, stringify(m["bar"].Outputs))
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -440,12 +441,10 @@ func TestLoadAndPushOverride(t *testing.T) {
|
||||||
require.Equal(t, 2, len(m))
|
require.Equal(t, 2, len(m))
|
||||||
|
|
||||||
require.Equal(t, 1, len(m["foo"].Outputs))
|
require.Equal(t, 1, len(m["foo"].Outputs))
|
||||||
sort.Strings(m["foo"].Outputs)
|
require.Equal(t, []string{"type=local,dest=out"}, stringify(m["foo"].Outputs))
|
||||||
require.Equal(t, []string{"type=local,dest=out"}, m["foo"].Outputs)
|
|
||||||
|
|
||||||
require.Equal(t, 2, len(m["bar"].Outputs))
|
require.Equal(t, 2, len(m["bar"].Outputs))
|
||||||
sort.Strings(m["bar"].Outputs)
|
require.Equal(t, []string{"type=docker", "type=image,push=true"}, stringify(m["bar"].Outputs))
|
||||||
require.Equal(t, []string{"type=docker", "type=image,push=true"}, m["bar"].Outputs)
|
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("type registry", func(t *testing.T) {
|
t.Run("type registry", func(t *testing.T) {
|
||||||
|
@ -461,8 +460,7 @@ func TestLoadAndPushOverride(t *testing.T) {
|
||||||
require.Equal(t, 1, len(m))
|
require.Equal(t, 1, len(m))
|
||||||
|
|
||||||
require.Equal(t, 2, len(m["foo"].Outputs))
|
require.Equal(t, 2, len(m["foo"].Outputs))
|
||||||
sort.Strings(m["foo"].Outputs)
|
require.Equal(t, []string{"type=docker", "type=registry"}, stringify(m["foo"].Outputs))
|
||||||
require.Equal(t, []string{"type=docker", "type=registry"}, m["foo"].Outputs)
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -674,7 +672,7 @@ func TestOverrideMerge(t *testing.T) {
|
||||||
|
|
||||||
require.Equal(t, []string{"linux/arm", "linux/ppc64le"}, m["app"].Platforms)
|
require.Equal(t, []string{"linux/arm", "linux/ppc64le"}, m["app"].Platforms)
|
||||||
require.Equal(t, 1, len(m["app"].Outputs))
|
require.Equal(t, 1, len(m["app"].Outputs))
|
||||||
require.Equal(t, "type=registry", m["app"].Outputs[0])
|
require.Equal(t, "type=registry", m["app"].Outputs[0].String())
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestReadContexts(t *testing.T) {
|
func TestReadContexts(t *testing.T) {
|
||||||
|
@ -840,7 +838,7 @@ func TestReadContextFromTargetChain(t *testing.T) {
|
||||||
mid, ok := m["mid"]
|
mid, ok := m["mid"]
|
||||||
require.True(t, ok)
|
require.True(t, ok)
|
||||||
require.Equal(t, 1, len(mid.Outputs))
|
require.Equal(t, 1, len(mid.Outputs))
|
||||||
require.Equal(t, "type=cacheonly", mid.Outputs[0])
|
require.Equal(t, "type=cacheonly", mid.Outputs[0].String())
|
||||||
require.Equal(t, 1, len(mid.Contexts))
|
require.Equal(t, 1, len(mid.Contexts))
|
||||||
|
|
||||||
base, ok := m["base"]
|
base, ok := m["base"]
|
||||||
|
@ -924,7 +922,8 @@ func TestReadTargetsDefault(t *testing.T) {
|
||||||
Data: []byte(`
|
Data: []byte(`
|
||||||
target "default" {
|
target "default" {
|
||||||
dockerfile = "test"
|
dockerfile = "test"
|
||||||
}`)}
|
}`),
|
||||||
|
}
|
||||||
|
|
||||||
m, g, err := ReadTargets(ctx, []File{f}, []string{"default"}, nil, nil)
|
m, g, err := ReadTargets(ctx, []File{f}, []string{"default"}, nil, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -942,7 +941,8 @@ func TestReadTargetsSpecified(t *testing.T) {
|
||||||
Data: []byte(`
|
Data: []byte(`
|
||||||
target "image" {
|
target "image" {
|
||||||
dockerfile = "test"
|
dockerfile = "test"
|
||||||
}`)}
|
}`),
|
||||||
|
}
|
||||||
|
|
||||||
_, _, err := ReadTargets(ctx, []File{f}, []string{"default"}, nil, nil)
|
_, _, err := ReadTargets(ctx, []File{f}, []string{"default"}, nil, nil)
|
||||||
require.Error(t, err)
|
require.Error(t, err)
|
||||||
|
@ -967,7 +967,8 @@ group "foo" {
|
||||||
}
|
}
|
||||||
target "image" {
|
target "image" {
|
||||||
dockerfile = "test"
|
dockerfile = "test"
|
||||||
}`)}
|
}`),
|
||||||
|
}
|
||||||
|
|
||||||
m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil)
|
m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -993,7 +994,8 @@ target "foo" {
|
||||||
}
|
}
|
||||||
target "image" {
|
target "image" {
|
||||||
dockerfile = "test"
|
dockerfile = "test"
|
||||||
}`)}
|
}`),
|
||||||
|
}
|
||||||
|
|
||||||
m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil)
|
m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -1036,7 +1038,8 @@ target "image-release" {
|
||||||
inherits = ["image"]
|
inherits = ["image"]
|
||||||
output = ["type=image,push=true"]
|
output = ["type=image,push=true"]
|
||||||
tags = ["user/app:latest"]
|
tags = ["user/app:latest"]
|
||||||
}`)}
|
}`),
|
||||||
|
}
|
||||||
|
|
||||||
fyml := File{
|
fyml := File{
|
||||||
Name: "docker-compose.yml",
|
Name: "docker-compose.yml",
|
||||||
|
@ -1060,7 +1063,8 @@ services:
|
||||||
args:
|
args:
|
||||||
CT_ECR: foo
|
CT_ECR: foo
|
||||||
CT_TAG: bar
|
CT_TAG: bar
|
||||||
image: ct-fake-aws:bar`)}
|
image: ct-fake-aws:bar`),
|
||||||
|
}
|
||||||
|
|
||||||
fjson := File{
|
fjson := File{
|
||||||
Name: "docker-bake.json",
|
Name: "docker-bake.json",
|
||||||
|
@ -1081,7 +1085,8 @@ services:
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}`)}
|
}`),
|
||||||
|
}
|
||||||
|
|
||||||
m, g, err := ReadTargets(ctx, []File{fhcl}, []string{"default"}, nil, nil)
|
m, g, err := ReadTargets(ctx, []File{fhcl}, []string{"default"}, nil, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -1089,7 +1094,7 @@ services:
|
||||||
require.Equal(t, []string{"image"}, g["default"].Targets)
|
require.Equal(t, []string{"image"}, g["default"].Targets)
|
||||||
require.Equal(t, 1, len(m))
|
require.Equal(t, 1, len(m))
|
||||||
require.Equal(t, 1, len(m["image"].Outputs))
|
require.Equal(t, 1, len(m["image"].Outputs))
|
||||||
require.Equal(t, "type=docker", m["image"].Outputs[0])
|
require.Equal(t, "type=docker", m["image"].Outputs[0].String())
|
||||||
|
|
||||||
m, g, err = ReadTargets(ctx, []File{fhcl}, []string{"image-release"}, nil, nil)
|
m, g, err = ReadTargets(ctx, []File{fhcl}, []string{"image-release"}, nil, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -1097,7 +1102,7 @@ services:
|
||||||
require.Equal(t, []string{"image-release"}, g["default"].Targets)
|
require.Equal(t, []string{"image-release"}, g["default"].Targets)
|
||||||
require.Equal(t, 1, len(m))
|
require.Equal(t, 1, len(m))
|
||||||
require.Equal(t, 1, len(m["image-release"].Outputs))
|
require.Equal(t, 1, len(m["image-release"].Outputs))
|
||||||
require.Equal(t, "type=image,push=true", m["image-release"].Outputs[0])
|
require.Equal(t, "type=image,push=true", m["image-release"].Outputs[0].String())
|
||||||
|
|
||||||
m, g, err = ReadTargets(ctx, []File{fhcl}, []string{"image", "image-release"}, nil, nil)
|
m, g, err = ReadTargets(ctx, []File{fhcl}, []string{"image", "image-release"}, nil, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -1106,7 +1111,7 @@ services:
|
||||||
require.Equal(t, 2, len(m))
|
require.Equal(t, 2, len(m))
|
||||||
require.Equal(t, ".", *m["image"].Context)
|
require.Equal(t, ".", *m["image"].Context)
|
||||||
require.Equal(t, 1, len(m["image-release"].Outputs))
|
require.Equal(t, 1, len(m["image-release"].Outputs))
|
||||||
require.Equal(t, "type=image,push=true", m["image-release"].Outputs[0])
|
require.Equal(t, "type=image,push=true", m["image-release"].Outputs[0].String())
|
||||||
|
|
||||||
m, g, err = ReadTargets(ctx, []File{fyml, fhcl}, []string{"default"}, nil, nil)
|
m, g, err = ReadTargets(ctx, []File{fyml, fhcl}, []string{"default"}, nil, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -1166,7 +1171,8 @@ target "foo" {
|
||||||
}
|
}
|
||||||
target "image" {
|
target "image" {
|
||||||
output = ["type=docker"]
|
output = ["type=docker"]
|
||||||
}`)}
|
}`),
|
||||||
|
}
|
||||||
|
|
||||||
m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil)
|
m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -1200,7 +1206,8 @@ target "foo" {
|
||||||
}
|
}
|
||||||
target "image" {
|
target "image" {
|
||||||
output = ["type=docker"]
|
output = ["type=docker"]
|
||||||
}`)}
|
}`),
|
||||||
|
}
|
||||||
|
|
||||||
m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil)
|
m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -1209,7 +1216,7 @@ target "image" {
|
||||||
require.Equal(t, []string{"foo", "image"}, g["foo"].Targets)
|
require.Equal(t, []string{"foo", "image"}, g["foo"].Targets)
|
||||||
require.Equal(t, 2, len(m))
|
require.Equal(t, 2, len(m))
|
||||||
require.Equal(t, "bar", *m["foo"].Dockerfile)
|
require.Equal(t, "bar", *m["foo"].Dockerfile)
|
||||||
require.Equal(t, "type=docker", m["image"].Outputs[0])
|
require.Equal(t, "type=docker", m["image"].Outputs[0].String())
|
||||||
|
|
||||||
m, g, err = ReadTargets(ctx, []File{f}, []string{"foo", "image"}, nil, nil)
|
m, g, err = ReadTargets(ctx, []File{f}, []string{"foo", "image"}, nil, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -1218,7 +1225,7 @@ target "image" {
|
||||||
require.Equal(t, []string{"foo", "image"}, g["foo"].Targets)
|
require.Equal(t, []string{"foo", "image"}, g["foo"].Targets)
|
||||||
require.Equal(t, 2, len(m))
|
require.Equal(t, 2, len(m))
|
||||||
require.Equal(t, "bar", *m["foo"].Dockerfile)
|
require.Equal(t, "bar", *m["foo"].Dockerfile)
|
||||||
require.Equal(t, "type=docker", m["image"].Outputs[0])
|
require.Equal(t, "type=docker", m["image"].Outputs[0].String())
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNestedInherits(t *testing.T) {
|
func TestNestedInherits(t *testing.T) {
|
||||||
|
@ -1247,7 +1254,8 @@ target "c" {
|
||||||
}
|
}
|
||||||
target "d" {
|
target "d" {
|
||||||
inherits = ["b", "c"]
|
inherits = ["b", "c"]
|
||||||
}`)}
|
}`),
|
||||||
|
}
|
||||||
|
|
||||||
cases := []struct {
|
cases := []struct {
|
||||||
name string
|
name string
|
||||||
|
@ -1315,7 +1323,8 @@ group "default" {
|
||||||
"child1",
|
"child1",
|
||||||
"child2"
|
"child2"
|
||||||
]
|
]
|
||||||
}`)}
|
}`),
|
||||||
|
}
|
||||||
|
|
||||||
cases := []struct {
|
cases := []struct {
|
||||||
name string
|
name string
|
||||||
|
@ -1351,9 +1360,9 @@ group "default" {
|
||||||
require.Equal(t, []string{"child1", "child2"}, g["default"].Targets)
|
require.Equal(t, []string{"child1", "child2"}, g["default"].Targets)
|
||||||
require.Equal(t, 2, len(m))
|
require.Equal(t, 2, len(m))
|
||||||
require.Equal(t, tt.wantch1, m["child1"].Args)
|
require.Equal(t, tt.wantch1, m["child1"].Args)
|
||||||
require.Equal(t, []string{"type=docker"}, m["child1"].Outputs)
|
require.Equal(t, []string{"type=docker"}, stringify(m["child1"].Outputs))
|
||||||
require.Equal(t, tt.wantch2, m["child2"].Args)
|
require.Equal(t, tt.wantch2, m["child2"].Args)
|
||||||
require.Equal(t, []string{"type=docker"}, m["child2"].Outputs)
|
require.Equal(t, []string{"type=docker"}, stringify(m["child2"].Outputs))
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1442,7 +1451,8 @@ group "e" {
|
||||||
|
|
||||||
target "f" {
|
target "f" {
|
||||||
context = "./foo"
|
context = "./foo"
|
||||||
}`)}
|
}`),
|
||||||
|
}
|
||||||
|
|
||||||
cases := []struct {
|
cases := []struct {
|
||||||
names []string
|
names []string
|
||||||
|
@ -1721,7 +1731,7 @@ func TestAnnotations(t *testing.T) {
|
||||||
|
|
||||||
require.Equal(t, 1, len(m))
|
require.Equal(t, 1, len(m))
|
||||||
require.Contains(t, m, "app")
|
require.Contains(t, m, "app")
|
||||||
require.Equal(t, "type=image,name=foo", m["app"].Outputs[0])
|
require.Equal(t, "type=image,name=foo", m["app"].Outputs[0].String())
|
||||||
require.Equal(t, "manifest[linux/amd64]:foo=bar", m["app"].Annotations[0])
|
require.Equal(t, "manifest[linux/amd64]:foo=bar", m["app"].Annotations[0])
|
||||||
|
|
||||||
require.Len(t, bo["app"].Exports, 1)
|
require.Len(t, bo["app"].Exports, 1)
|
||||||
|
@ -2008,3 +2018,12 @@ target "app" {
|
||||||
require.Contains(t, err.Error(), "FOO must be greater than 5.")
|
require.Contains(t, err.Error(), "FOO must be greater than 5.")
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func stringify[V fmt.Stringer](values []V) []string {
|
||||||
|
s := make([]string, len(values))
|
||||||
|
for i, v := range values {
|
||||||
|
s[i] = v.String()
|
||||||
|
}
|
||||||
|
sort.Strings(s)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
|
@ -5,13 +5,14 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"sort"
|
"slices"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/compose-spec/compose-go/v2/consts"
|
"github.com/compose-spec/compose-go/v2/consts"
|
||||||
"github.com/compose-spec/compose-go/v2/dotenv"
|
"github.com/compose-spec/compose-go/v2/dotenv"
|
||||||
"github.com/compose-spec/compose-go/v2/loader"
|
"github.com/compose-spec/compose-go/v2/loader"
|
||||||
composetypes "github.com/compose-spec/compose-go/v2/types"
|
composetypes "github.com/compose-spec/compose-go/v2/types"
|
||||||
|
"github.com/docker/buildx/util/buildflags"
|
||||||
dockeropts "github.com/docker/cli/opts"
|
dockeropts "github.com/docker/cli/opts"
|
||||||
"github.com/docker/go-units"
|
"github.com/docker/go-units"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
@ -119,14 +120,16 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var ssh []string
|
var ssh []*buildflags.SSH
|
||||||
for _, bkey := range s.Build.SSH {
|
for _, bkey := range s.Build.SSH {
|
||||||
sshkey := composeToBuildkitSSH(bkey)
|
sshkey := composeToBuildkitSSH(bkey)
|
||||||
ssh = append(ssh, sshkey)
|
ssh = append(ssh, sshkey)
|
||||||
}
|
}
|
||||||
sort.Strings(ssh)
|
slices.SortFunc(ssh, func(a, b *buildflags.SSH) int {
|
||||||
|
return a.Less(b)
|
||||||
|
})
|
||||||
|
|
||||||
var secrets []string
|
var secrets []*buildflags.Secret
|
||||||
for _, bs := range s.Build.Secrets {
|
for _, bs := range s.Build.Secrets {
|
||||||
secret, err := composeToBuildkitSecret(bs, cfg.Secrets[bs.Source])
|
secret, err := composeToBuildkitSecret(bs, cfg.Secrets[bs.Source])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -142,6 +145,16 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
|
||||||
labels[k] = &v
|
labels[k] = &v
|
||||||
}
|
}
|
||||||
|
|
||||||
|
cacheFrom, err := parseCacheArrValues(s.Build.CacheFrom)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
cacheTo, err := parseCacheArrValues(s.Build.CacheTo)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
g.Targets = append(g.Targets, targetName)
|
g.Targets = append(g.Targets, targetName)
|
||||||
t := &Target{
|
t := &Target{
|
||||||
Name: targetName,
|
Name: targetName,
|
||||||
|
@ -158,8 +171,8 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
|
||||||
val, ok := cfg.Environment[val]
|
val, ok := cfg.Environment[val]
|
||||||
return val, ok
|
return val, ok
|
||||||
})),
|
})),
|
||||||
CacheFrom: s.Build.CacheFrom,
|
CacheFrom: cacheFrom,
|
||||||
CacheTo: s.Build.CacheTo,
|
CacheTo: cacheTo,
|
||||||
NetworkMode: networkModeP,
|
NetworkMode: networkModeP,
|
||||||
SSH: ssh,
|
SSH: ssh,
|
||||||
Secrets: secrets,
|
Secrets: secrets,
|
||||||
|
@ -297,8 +310,10 @@ type xbake struct {
|
||||||
// https://github.com/docker/docs/blob/main/content/build/bake/compose-file.md#extension-field-with-x-bake
|
// https://github.com/docker/docs/blob/main/content/build/bake/compose-file.md#extension-field-with-x-bake
|
||||||
}
|
}
|
||||||
|
|
||||||
type stringMap map[string]string
|
type (
|
||||||
type stringArray []string
|
stringMap map[string]string
|
||||||
|
stringArray []string
|
||||||
|
)
|
||||||
|
|
||||||
func (sa *stringArray) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (sa *stringArray) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
var multi []string
|
var multi []string
|
||||||
|
@ -334,23 +349,45 @@ func (t *Target) composeExtTarget(exts map[string]interface{}) error {
|
||||||
t.Tags = dedupSlice(append(t.Tags, xb.Tags...))
|
t.Tags = dedupSlice(append(t.Tags, xb.Tags...))
|
||||||
}
|
}
|
||||||
if len(xb.CacheFrom) > 0 {
|
if len(xb.CacheFrom) > 0 {
|
||||||
t.CacheFrom = dedupSlice(append(t.CacheFrom, xb.CacheFrom...))
|
cacheFrom, err := parseCacheArrValues(xb.CacheFrom)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
t.CacheFrom = removeDupes(append(t.CacheFrom, cacheFrom...))
|
||||||
}
|
}
|
||||||
if len(xb.CacheTo) > 0 {
|
if len(xb.CacheTo) > 0 {
|
||||||
t.CacheTo = dedupSlice(append(t.CacheTo, xb.CacheTo...))
|
cacheTo, err := parseCacheArrValues(xb.CacheTo)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
t.CacheTo = removeDupes(append(t.CacheTo, cacheTo...))
|
||||||
}
|
}
|
||||||
if len(xb.Secrets) > 0 {
|
if len(xb.Secrets) > 0 {
|
||||||
t.Secrets = dedupSlice(append(t.Secrets, xb.Secrets...))
|
secrets, err := parseArrValue[buildflags.Secret](xb.Secrets)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
t.Secrets = removeDupes(append(t.Secrets, secrets...))
|
||||||
}
|
}
|
||||||
if len(xb.SSH) > 0 {
|
if len(xb.SSH) > 0 {
|
||||||
t.SSH = dedupSlice(append(t.SSH, xb.SSH...))
|
ssh, err := parseArrValue[buildflags.SSH](xb.SSH)
|
||||||
sort.Strings(t.SSH)
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
t.SSH = removeDupes(append(t.SSH, ssh...))
|
||||||
|
slices.SortFunc(t.SSH, func(a, b *buildflags.SSH) int {
|
||||||
|
return a.Less(b)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
if len(xb.Platforms) > 0 {
|
if len(xb.Platforms) > 0 {
|
||||||
t.Platforms = dedupSlice(append(t.Platforms, xb.Platforms...))
|
t.Platforms = dedupSlice(append(t.Platforms, xb.Platforms...))
|
||||||
}
|
}
|
||||||
if len(xb.Outputs) > 0 {
|
if len(xb.Outputs) > 0 {
|
||||||
t.Outputs = dedupSlice(append(t.Outputs, xb.Outputs...))
|
outputs, err := parseArrValue[buildflags.ExportEntry](xb.Outputs)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
t.Outputs = removeDupes(append(t.Outputs, outputs...))
|
||||||
}
|
}
|
||||||
if xb.Pull != nil {
|
if xb.Pull != nil {
|
||||||
t.Pull = xb.Pull
|
t.Pull = xb.Pull
|
||||||
|
@ -370,35 +407,30 @@ func (t *Target) composeExtTarget(exts map[string]interface{}) error {
|
||||||
|
|
||||||
// composeToBuildkitSecret converts secret from compose format to buildkit's
|
// composeToBuildkitSecret converts secret from compose format to buildkit's
|
||||||
// csv format.
|
// csv format.
|
||||||
func composeToBuildkitSecret(inp composetypes.ServiceSecretConfig, psecret composetypes.SecretConfig) (string, error) {
|
func composeToBuildkitSecret(inp composetypes.ServiceSecretConfig, psecret composetypes.SecretConfig) (*buildflags.Secret, error) {
|
||||||
if psecret.External {
|
if psecret.External {
|
||||||
return "", errors.Errorf("unsupported external secret %s", psecret.Name)
|
return nil, errors.Errorf("unsupported external secret %s", psecret.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
var bkattrs []string
|
secret := &buildflags.Secret{}
|
||||||
if inp.Source != "" {
|
if inp.Source != "" {
|
||||||
bkattrs = append(bkattrs, "id="+inp.Source)
|
secret.ID = inp.Source
|
||||||
}
|
}
|
||||||
if psecret.File != "" {
|
if psecret.File != "" {
|
||||||
bkattrs = append(bkattrs, "src="+psecret.File)
|
secret.FilePath = psecret.File
|
||||||
}
|
}
|
||||||
if psecret.Environment != "" {
|
if psecret.Environment != "" {
|
||||||
bkattrs = append(bkattrs, "env="+psecret.Environment)
|
secret.Env = psecret.Environment
|
||||||
}
|
}
|
||||||
|
return secret, nil
|
||||||
return strings.Join(bkattrs, ","), nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// composeToBuildkitSSH converts secret from compose format to buildkit's
|
// composeToBuildkitSSH converts secret from compose format to buildkit's
|
||||||
// csv format.
|
// csv format.
|
||||||
func composeToBuildkitSSH(sshKey composetypes.SSHKey) string {
|
func composeToBuildkitSSH(sshKey composetypes.SSHKey) *buildflags.SSH {
|
||||||
var bkattrs []string
|
bkssh := &buildflags.SSH{ID: sshKey.ID}
|
||||||
|
|
||||||
bkattrs = append(bkattrs, sshKey.ID)
|
|
||||||
|
|
||||||
if sshKey.Path != "" {
|
if sshKey.Path != "" {
|
||||||
bkattrs = append(bkattrs, sshKey.Path)
|
bkssh.Paths = []string{sshKey.Path}
|
||||||
}
|
}
|
||||||
|
return bkssh
|
||||||
return strings.Join(bkattrs, "=")
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -12,7 +12,7 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestParseCompose(t *testing.T) {
|
func TestParseCompose(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
db:
|
db:
|
||||||
build: ./db
|
build: ./db
|
||||||
|
@ -74,14 +74,14 @@ secrets:
|
||||||
require.Equal(t, "Dockerfile-alternate", *c.Targets[1].Dockerfile)
|
require.Equal(t, "Dockerfile-alternate", *c.Targets[1].Dockerfile)
|
||||||
require.Equal(t, 1, len(c.Targets[1].Args))
|
require.Equal(t, 1, len(c.Targets[1].Args))
|
||||||
require.Equal(t, ptrstr("123"), c.Targets[1].Args["buildno"])
|
require.Equal(t, ptrstr("123"), c.Targets[1].Args["buildno"])
|
||||||
require.Equal(t, []string{"type=local,src=path/to/cache"}, c.Targets[1].CacheFrom)
|
require.Equal(t, []string{"type=local,src=path/to/cache"}, stringify(c.Targets[1].CacheFrom))
|
||||||
require.Equal(t, []string{"type=local,dest=path/to/cache"}, c.Targets[1].CacheTo)
|
require.Equal(t, []string{"type=local,dest=path/to/cache"}, stringify(c.Targets[1].CacheTo))
|
||||||
require.Equal(t, "none", *c.Targets[1].NetworkMode)
|
require.Equal(t, "none", *c.Targets[1].NetworkMode)
|
||||||
require.Equal(t, []string{"default", "key=path/to/key"}, c.Targets[1].SSH)
|
require.Equal(t, []string{"default", "key=path/to/key"}, stringify(c.Targets[1].SSH))
|
||||||
require.Equal(t, []string{
|
require.Equal(t, []string{
|
||||||
"id=token,env=ENV_TOKEN",
|
|
||||||
"id=aws,src=/root/.aws/credentials",
|
"id=aws,src=/root/.aws/credentials",
|
||||||
}, c.Targets[1].Secrets)
|
"id=token,env=ENV_TOKEN",
|
||||||
|
}, stringify(c.Targets[1].Secrets))
|
||||||
|
|
||||||
require.Equal(t, "webapp2", c.Targets[2].Name)
|
require.Equal(t, "webapp2", c.Targets[2].Name)
|
||||||
require.Equal(t, "dir", *c.Targets[2].Context)
|
require.Equal(t, "dir", *c.Targets[2].Context)
|
||||||
|
@ -89,7 +89,7 @@ secrets:
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNoBuildOutOfTreeService(t *testing.T) {
|
func TestNoBuildOutOfTreeService(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
external:
|
external:
|
||||||
image: "verycooldb:1337"
|
image: "verycooldb:1337"
|
||||||
|
@ -103,7 +103,7 @@ services:
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestParseComposeTarget(t *testing.T) {
|
func TestParseComposeTarget(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
db:
|
db:
|
||||||
build:
|
build:
|
||||||
|
@ -129,7 +129,7 @@ services:
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestComposeBuildWithoutContext(t *testing.T) {
|
func TestComposeBuildWithoutContext(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
db:
|
db:
|
||||||
build:
|
build:
|
||||||
|
@ -153,7 +153,7 @@ services:
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBuildArgEnvCompose(t *testing.T) {
|
func TestBuildArgEnvCompose(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
version: "3.8"
|
version: "3.8"
|
||||||
services:
|
services:
|
||||||
example:
|
example:
|
||||||
|
@ -179,7 +179,7 @@ services:
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestInconsistentComposeFile(t *testing.T) {
|
func TestInconsistentComposeFile(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
webapp:
|
webapp:
|
||||||
entrypoint: echo 1
|
entrypoint: echo 1
|
||||||
|
@ -190,7 +190,7 @@ services:
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAdvancedNetwork(t *testing.T) {
|
func TestAdvancedNetwork(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
db:
|
db:
|
||||||
networks:
|
networks:
|
||||||
|
@ -215,7 +215,7 @@ networks:
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTags(t *testing.T) {
|
func TestTags(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
example:
|
example:
|
||||||
image: example
|
image: example
|
||||||
|
@ -233,7 +233,7 @@ services:
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestDependsOnList(t *testing.T) {
|
func TestDependsOnList(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
version: "3.8"
|
version: "3.8"
|
||||||
|
|
||||||
services:
|
services:
|
||||||
|
@ -269,7 +269,7 @@ networks:
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestComposeExt(t *testing.T) {
|
func TestComposeExt(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
addon:
|
addon:
|
||||||
image: ct-addon:bar
|
image: ct-addon:bar
|
||||||
|
@ -336,23 +336,23 @@ services:
|
||||||
require.Equal(t, map[string]*string{"CT_ECR": ptrstr("foo"), "CT_TAG": ptrstr("bar")}, c.Targets[0].Args)
|
require.Equal(t, map[string]*string{"CT_ECR": ptrstr("foo"), "CT_TAG": ptrstr("bar")}, c.Targets[0].Args)
|
||||||
require.Equal(t, []string{"ct-addon:baz", "ct-addon:foo", "ct-addon:alp"}, c.Targets[0].Tags)
|
require.Equal(t, []string{"ct-addon:baz", "ct-addon:foo", "ct-addon:alp"}, c.Targets[0].Tags)
|
||||||
require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[0].Platforms)
|
require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[0].Platforms)
|
||||||
require.Equal(t, []string{"user/app:cache", "type=local,src=path/to/cache"}, c.Targets[0].CacheFrom)
|
require.Equal(t, []string{"type=local,src=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheFrom))
|
||||||
require.Equal(t, []string{"user/app:cache", "type=local,dest=path/to/cache"}, c.Targets[0].CacheTo)
|
require.Equal(t, []string{"type=local,dest=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheTo))
|
||||||
require.Equal(t, []string{"default", "key=path/to/key", "other=path/to/otherkey"}, c.Targets[0].SSH)
|
require.Equal(t, []string{"default", "key=path/to/key", "other=path/to/otherkey"}, stringify(c.Targets[0].SSH))
|
||||||
require.Equal(t, newBool(true), c.Targets[0].Pull)
|
require.Equal(t, newBool(true), c.Targets[0].Pull)
|
||||||
require.Equal(t, map[string]string{"alpine": "docker-image://alpine:3.13"}, c.Targets[0].Contexts)
|
require.Equal(t, map[string]string{"alpine": "docker-image://alpine:3.13"}, c.Targets[0].Contexts)
|
||||||
require.Equal(t, []string{"ct-fake-aws:bar"}, c.Targets[1].Tags)
|
require.Equal(t, []string{"ct-fake-aws:bar"}, c.Targets[1].Tags)
|
||||||
require.Equal(t, []string{"id=mysecret,src=/local/secret", "id=mysecret2,src=/local/secret2"}, c.Targets[1].Secrets)
|
require.Equal(t, []string{"id=mysecret,src=/local/secret", "id=mysecret2,src=/local/secret2"}, stringify(c.Targets[1].Secrets))
|
||||||
require.Equal(t, []string{"default"}, c.Targets[1].SSH)
|
require.Equal(t, []string{"default"}, stringify(c.Targets[1].SSH))
|
||||||
require.Equal(t, []string{"linux/arm64"}, c.Targets[1].Platforms)
|
require.Equal(t, []string{"linux/arm64"}, c.Targets[1].Platforms)
|
||||||
require.Equal(t, []string{"type=docker"}, c.Targets[1].Outputs)
|
require.Equal(t, []string{"type=docker"}, stringify(c.Targets[1].Outputs))
|
||||||
require.Equal(t, newBool(true), c.Targets[1].NoCache)
|
require.Equal(t, newBool(true), c.Targets[1].NoCache)
|
||||||
require.Equal(t, ptrstr("128MiB"), c.Targets[1].ShmSize)
|
require.Equal(t, ptrstr("128MiB"), c.Targets[1].ShmSize)
|
||||||
require.Equal(t, []string{"nofile=1024:1024"}, c.Targets[1].Ulimits)
|
require.Equal(t, []string{"nofile=1024:1024"}, c.Targets[1].Ulimits)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestComposeExtDedup(t *testing.T) {
|
func TestComposeExtDedup(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
webapp:
|
webapp:
|
||||||
image: app:bar
|
image: app:bar
|
||||||
|
@ -383,9 +383,9 @@ services:
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
require.Equal(t, []string{"ct-addon:foo", "ct-addon:baz"}, c.Targets[0].Tags)
|
require.Equal(t, []string{"ct-addon:foo", "ct-addon:baz"}, c.Targets[0].Tags)
|
||||||
require.Equal(t, []string{"user/app:cache", "type=local,src=path/to/cache"}, c.Targets[0].CacheFrom)
|
require.Equal(t, []string{"type=local,src=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheFrom))
|
||||||
require.Equal(t, []string{"user/app:cache", "type=local,dest=path/to/cache"}, c.Targets[0].CacheTo)
|
require.Equal(t, []string{"type=local,dest=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheTo))
|
||||||
require.Equal(t, []string{"default", "key=path/to/key"}, c.Targets[0].SSH)
|
require.Equal(t, []string{"default", "key=path/to/key"}, stringify(c.Targets[0].SSH))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestEnv(t *testing.T) {
|
func TestEnv(t *testing.T) {
|
||||||
|
@ -396,7 +396,7 @@ func TestEnv(t *testing.T) {
|
||||||
_, err = envf.WriteString("FOO=bsdf -csdf\n")
|
_, err = envf.WriteString("FOO=bsdf -csdf\n")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
scratch:
|
scratch:
|
||||||
build:
|
build:
|
||||||
|
@ -424,7 +424,7 @@ func TestDotEnv(t *testing.T) {
|
||||||
err := os.WriteFile(filepath.Join(tmpdir, ".env"), []byte("FOO=bar"), 0644)
|
err := os.WriteFile(filepath.Join(tmpdir, ".env"), []byte("FOO=bar"), 0644)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
scratch:
|
scratch:
|
||||||
build:
|
build:
|
||||||
|
@ -443,7 +443,7 @@ services:
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPorts(t *testing.T) {
|
func TestPorts(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
foo:
|
foo:
|
||||||
build:
|
build:
|
||||||
|
@ -664,7 +664,7 @@ target "default" {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestComposeNullArgs(t *testing.T) {
|
func TestComposeNullArgs(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
scratch:
|
scratch:
|
||||||
build:
|
build:
|
||||||
|
@ -680,7 +680,7 @@ services:
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestDependsOn(t *testing.T) {
|
func TestDependsOn(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
foo:
|
foo:
|
||||||
build:
|
build:
|
||||||
|
@ -711,7 +711,7 @@ services:
|
||||||
`), 0644)
|
`), 0644)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
include:
|
include:
|
||||||
- compose-foo.yml
|
- compose-foo.yml
|
||||||
|
|
||||||
|
@ -740,7 +740,7 @@ services:
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestDevelop(t *testing.T) {
|
func TestDevelop(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
scratch:
|
scratch:
|
||||||
build:
|
build:
|
||||||
|
@ -759,7 +759,7 @@ services:
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestCgroup(t *testing.T) {
|
func TestCgroup(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
scratch:
|
scratch:
|
||||||
build:
|
build:
|
||||||
|
@ -772,7 +772,7 @@ services:
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestProjectName(t *testing.T) {
|
func TestProjectName(t *testing.T) {
|
||||||
var dt = []byte(`
|
dt := []byte(`
|
||||||
services:
|
services:
|
||||||
scratch:
|
scratch:
|
||||||
build:
|
build:
|
||||||
|
|
|
@ -17,6 +17,7 @@ func TestHCLBasic(t *testing.T) {
|
||||||
target "db" {
|
target "db" {
|
||||||
context = "./db"
|
context = "./db"
|
||||||
tags = ["docker.io/tonistiigi/db"]
|
tags = ["docker.io/tonistiigi/db"]
|
||||||
|
output = ["type=image"]
|
||||||
}
|
}
|
||||||
|
|
||||||
target "webapp" {
|
target "webapp" {
|
||||||
|
@ -25,6 +26,9 @@ func TestHCLBasic(t *testing.T) {
|
||||||
args = {
|
args = {
|
||||||
buildno = "123"
|
buildno = "123"
|
||||||
}
|
}
|
||||||
|
output = [
|
||||||
|
{ type = "image" }
|
||||||
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
target "cross" {
|
target "cross" {
|
||||||
|
@ -597,6 +601,82 @@ func TestHCLAttrsCustomType(t *testing.T) {
|
||||||
require.Equal(t, ptrstr("linux/arm64"), c.Targets[0].Args["v1"])
|
require.Equal(t, ptrstr("linux/arm64"), c.Targets[0].Args["v1"])
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestHCLAttrsCapsuleType(t *testing.T) {
|
||||||
|
dt := []byte(`
|
||||||
|
target "app" {
|
||||||
|
cache-from = [
|
||||||
|
{ type = "registry", ref = "user/app:cache" },
|
||||||
|
{ type = "local", src = "path/to/cache" },
|
||||||
|
]
|
||||||
|
|
||||||
|
cache-to = [
|
||||||
|
{ type = "local", dest = "path/to/cache" },
|
||||||
|
]
|
||||||
|
|
||||||
|
output = [
|
||||||
|
{ type = "oci", dest = "../out.tar" },
|
||||||
|
]
|
||||||
|
|
||||||
|
secret = [
|
||||||
|
{ id = "mysecret", src = "/local/secret" },
|
||||||
|
{ id = "mysecret2", env = "TOKEN" },
|
||||||
|
]
|
||||||
|
|
||||||
|
ssh = [
|
||||||
|
{ id = "default" },
|
||||||
|
{ id = "key", paths = ["path/to/key"] },
|
||||||
|
]
|
||||||
|
}
|
||||||
|
`)
|
||||||
|
|
||||||
|
c, err := ParseFile(dt, "docker-bake.hcl")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(c.Targets))
|
||||||
|
require.Equal(t, []string{"type=oci,dest=../out.tar"}, stringify(c.Targets[0].Outputs))
|
||||||
|
require.Equal(t, []string{"type=local,src=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheFrom))
|
||||||
|
require.Equal(t, []string{"type=local,dest=path/to/cache"}, stringify(c.Targets[0].CacheTo))
|
||||||
|
require.Equal(t, []string{"id=mysecret,src=/local/secret", "id=mysecret2,env=TOKEN"}, stringify(c.Targets[0].Secrets))
|
||||||
|
require.Equal(t, []string{"default", "key=path/to/key"}, stringify(c.Targets[0].SSH))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHCLAttrsCapsuleTypeVars(t *testing.T) {
|
||||||
|
dt := []byte(`
|
||||||
|
target "app" {
|
||||||
|
description = "abc"
|
||||||
|
cache-from = [
|
||||||
|
{ type = "registry", ref = "user/app:cache" },
|
||||||
|
{ type = "local", src = "path/to/cache" },
|
||||||
|
]
|
||||||
|
|
||||||
|
cache-to = [ target.app.cache-from[0] ]
|
||||||
|
|
||||||
|
output = [
|
||||||
|
{ type = "oci", dest = "../out.tar" },
|
||||||
|
]
|
||||||
|
|
||||||
|
secret = [
|
||||||
|
{ id = "mysecret", src = "/local/secret" },
|
||||||
|
]
|
||||||
|
|
||||||
|
ssh = [
|
||||||
|
{ id = "default" },
|
||||||
|
{ id = "key", paths = ["path/to/${target.app.output[0].type}"] },
|
||||||
|
]
|
||||||
|
}
|
||||||
|
`)
|
||||||
|
|
||||||
|
c, err := ParseFile(dt, "docker-bake.hcl")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(c.Targets))
|
||||||
|
require.Equal(t, []string{"type=oci,dest=../out.tar"}, stringify(c.Targets[0].Outputs))
|
||||||
|
require.Equal(t, []string{"type=local,src=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheFrom))
|
||||||
|
require.Equal(t, []string{"user/app:cache"}, stringify(c.Targets[0].CacheTo))
|
||||||
|
require.Equal(t, []string{"id=mysecret,src=/local/secret"}, stringify(c.Targets[0].Secrets))
|
||||||
|
require.Equal(t, []string{"default", "key=path/to/oci"}, stringify(c.Targets[0].SSH))
|
||||||
|
}
|
||||||
|
|
||||||
func TestHCLMultiFileAttrs(t *testing.T) {
|
func TestHCLMultiFileAttrs(t *testing.T) {
|
||||||
dt := []byte(`
|
dt := []byte(`
|
||||||
variable "FOO" {
|
variable "FOO" {
|
||||||
|
|
|
@ -0,0 +1,355 @@
|
||||||
|
Copyright (c) 2014 HashiCorp, Inc.
|
||||||
|
|
||||||
|
Mozilla Public License, version 2.0
|
||||||
|
|
||||||
|
1. Definitions
|
||||||
|
|
||||||
|
1.1. “Contributor”
|
||||||
|
|
||||||
|
means each individual or legal entity that creates, contributes to the
|
||||||
|
creation of, or owns Covered Software.
|
||||||
|
|
||||||
|
1.2. “Contributor Version”
|
||||||
|
|
||||||
|
means the combination of the Contributions of others (if any) used by a
|
||||||
|
Contributor and that particular Contributor’s Contribution.
|
||||||
|
|
||||||
|
1.3. “Contribution”
|
||||||
|
|
||||||
|
means Covered Software of a particular Contributor.
|
||||||
|
|
||||||
|
1.4. “Covered Software”
|
||||||
|
|
||||||
|
means Source Code Form to which the initial Contributor has attached the
|
||||||
|
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||||
|
Modifications of such Source Code Form, in each case including portions
|
||||||
|
thereof.
|
||||||
|
|
||||||
|
1.5. “Incompatible With Secondary Licenses”
|
||||||
|
means
|
||||||
|
|
||||||
|
a. that the initial Contributor has attached the notice described in
|
||||||
|
Exhibit B to the Covered Software; or
|
||||||
|
|
||||||
|
b. that the Covered Software was made available under the terms of version
|
||||||
|
1.1 or earlier of the License, but not also under the terms of a
|
||||||
|
Secondary License.
|
||||||
|
|
||||||
|
1.6. “Executable Form”
|
||||||
|
|
||||||
|
means any form of the work other than Source Code Form.
|
||||||
|
|
||||||
|
1.7. “Larger Work”
|
||||||
|
|
||||||
|
means a work that combines Covered Software with other material, in a separate
|
||||||
|
file or files, that is not Covered Software.
|
||||||
|
|
||||||
|
1.8. “License”
|
||||||
|
|
||||||
|
means this document.
|
||||||
|
|
||||||
|
1.9. “Licensable”
|
||||||
|
|
||||||
|
means having the right to grant, to the maximum extent possible, whether at the
|
||||||
|
time of the initial grant or subsequently, any and all of the rights conveyed by
|
||||||
|
this License.
|
||||||
|
|
||||||
|
1.10. “Modifications”
|
||||||
|
|
||||||
|
means any of the following:
|
||||||
|
|
||||||
|
a. any file in Source Code Form that results from an addition to, deletion
|
||||||
|
from, or modification of the contents of Covered Software; or
|
||||||
|
|
||||||
|
b. any new file in Source Code Form that contains any Covered Software.
|
||||||
|
|
||||||
|
1.11. “Patent Claims” of a Contributor
|
||||||
|
|
||||||
|
means any patent claim(s), including without limitation, method, process,
|
||||||
|
and apparatus claims, in any patent Licensable by such Contributor that
|
||||||
|
would be infringed, but for the grant of the License, by the making,
|
||||||
|
using, selling, offering for sale, having made, import, or transfer of
|
||||||
|
either its Contributions or its Contributor Version.
|
||||||
|
|
||||||
|
1.12. “Secondary License”
|
||||||
|
|
||||||
|
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||||
|
General Public License, Version 2.1, the GNU Affero General Public
|
||||||
|
License, Version 3.0, or any later versions of those licenses.
|
||||||
|
|
||||||
|
1.13. “Source Code Form”
|
||||||
|
|
||||||
|
means the form of the work preferred for making modifications.
|
||||||
|
|
||||||
|
1.14. “You” (or “Your”)
|
||||||
|
|
||||||
|
means an individual or a legal entity exercising rights under this
|
||||||
|
License. For legal entities, “You” includes any entity that controls, is
|
||||||
|
controlled by, or is under common control with You. For purposes of this
|
||||||
|
definition, “control” means (a) the power, direct or indirect, to cause
|
||||||
|
the direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||||
|
outstanding shares or beneficial ownership of such entity.
|
||||||
|
|
||||||
|
|
||||||
|
2. License Grants and Conditions
|
||||||
|
|
||||||
|
2.1. Grants
|
||||||
|
|
||||||
|
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||||
|
non-exclusive license:
|
||||||
|
|
||||||
|
a. under intellectual property rights (other than patent or trademark)
|
||||||
|
Licensable by such Contributor to use, reproduce, make available,
|
||||||
|
modify, display, perform, distribute, and otherwise exploit its
|
||||||
|
Contributions, either on an unmodified basis, with Modifications, or as
|
||||||
|
part of a Larger Work; and
|
||||||
|
|
||||||
|
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||||
|
sale, have made, import, and otherwise transfer either its Contributions
|
||||||
|
or its Contributor Version.
|
||||||
|
|
||||||
|
2.2. Effective Date
|
||||||
|
|
||||||
|
The licenses granted in Section 2.1 with respect to any Contribution become
|
||||||
|
effective for each Contribution on the date the Contributor first distributes
|
||||||
|
such Contribution.
|
||||||
|
|
||||||
|
2.3. Limitations on Grant Scope
|
||||||
|
|
||||||
|
The licenses granted in this Section 2 are the only rights granted under this
|
||||||
|
License. No additional rights or licenses will be implied from the distribution
|
||||||
|
or licensing of Covered Software under this License. Notwithstanding Section
|
||||||
|
2.1(b) above, no patent license is granted by a Contributor:
|
||||||
|
|
||||||
|
a. for any code that a Contributor has removed from Covered Software; or
|
||||||
|
|
||||||
|
b. for infringements caused by: (i) Your and any other third party’s
|
||||||
|
modifications of Covered Software, or (ii) the combination of its
|
||||||
|
Contributions with other software (except as part of its Contributor
|
||||||
|
Version); or
|
||||||
|
|
||||||
|
c. under Patent Claims infringed by Covered Software in the absence of its
|
||||||
|
Contributions.
|
||||||
|
|
||||||
|
This License does not grant any rights in the trademarks, service marks, or
|
||||||
|
logos of any Contributor (except as may be necessary to comply with the
|
||||||
|
notice requirements in Section 3.4).
|
||||||
|
|
||||||
|
2.4. Subsequent Licenses
|
||||||
|
|
||||||
|
No Contributor makes additional grants as a result of Your choice to
|
||||||
|
distribute the Covered Software under a subsequent version of this License
|
||||||
|
(see Section 10.2) or under the terms of a Secondary License (if permitted
|
||||||
|
under the terms of Section 3.3).
|
||||||
|
|
||||||
|
2.5. Representation
|
||||||
|
|
||||||
|
Each Contributor represents that the Contributor believes its Contributions
|
||||||
|
are its original creation(s) or it has sufficient rights to grant the
|
||||||
|
rights to its Contributions conveyed by this License.
|
||||||
|
|
||||||
|
2.6. Fair Use
|
||||||
|
|
||||||
|
This License is not intended to limit any rights You have under applicable
|
||||||
|
copyright doctrines of fair use, fair dealing, or other equivalents.
|
||||||
|
|
||||||
|
2.7. Conditions
|
||||||
|
|
||||||
|
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||||
|
Section 2.1.
|
||||||
|
|
||||||
|
|
||||||
|
3. Responsibilities
|
||||||
|
|
||||||
|
3.1. Distribution of Source Form
|
||||||
|
|
||||||
|
All distribution of Covered Software in Source Code Form, including any
|
||||||
|
Modifications that You create or to which You contribute, must be under the
|
||||||
|
terms of this License. You must inform recipients that the Source Code Form
|
||||||
|
of the Covered Software is governed by the terms of this License, and how
|
||||||
|
they can obtain a copy of this License. You may not attempt to alter or
|
||||||
|
restrict the recipients’ rights in the Source Code Form.
|
||||||
|
|
||||||
|
3.2. Distribution of Executable Form
|
||||||
|
|
||||||
|
If You distribute Covered Software in Executable Form then:
|
||||||
|
|
||||||
|
a. such Covered Software must also be made available in Source Code Form,
|
||||||
|
as described in Section 3.1, and You must inform recipients of the
|
||||||
|
Executable Form how they can obtain a copy of such Source Code Form by
|
||||||
|
reasonable means in a timely manner, at a charge no more than the cost
|
||||||
|
of distribution to the recipient; and
|
||||||
|
|
||||||
|
b. You may distribute such Executable Form under the terms of this License,
|
||||||
|
or sublicense it under different terms, provided that the license for
|
||||||
|
the Executable Form does not attempt to limit or alter the recipients’
|
||||||
|
rights in the Source Code Form under this License.
|
||||||
|
|
||||||
|
3.3. Distribution of a Larger Work
|
||||||
|
|
||||||
|
You may create and distribute a Larger Work under terms of Your choice,
|
||||||
|
provided that You also comply with the requirements of this License for the
|
||||||
|
Covered Software. If the Larger Work is a combination of Covered Software
|
||||||
|
with a work governed by one or more Secondary Licenses, and the Covered
|
||||||
|
Software is not Incompatible With Secondary Licenses, this License permits
|
||||||
|
You to additionally distribute such Covered Software under the terms of
|
||||||
|
such Secondary License(s), so that the recipient of the Larger Work may, at
|
||||||
|
their option, further distribute the Covered Software under the terms of
|
||||||
|
either this License or such Secondary License(s).
|
||||||
|
|
||||||
|
3.4. Notices
|
||||||
|
|
||||||
|
You may not remove or alter the substance of any license notices (including
|
||||||
|
copyright notices, patent notices, disclaimers of warranty, or limitations
|
||||||
|
of liability) contained within the Source Code Form of the Covered
|
||||||
|
Software, except that You may alter any license notices to the extent
|
||||||
|
required to remedy known factual inaccuracies.
|
||||||
|
|
||||||
|
3.5. Application of Additional Terms
|
||||||
|
|
||||||
|
You may choose to offer, and to charge a fee for, warranty, support,
|
||||||
|
indemnity or liability obligations to one or more recipients of Covered
|
||||||
|
Software. However, You may do so only on Your own behalf, and not on behalf
|
||||||
|
of any Contributor. You must make it absolutely clear that any such
|
||||||
|
warranty, support, indemnity, or liability obligation is offered by You
|
||||||
|
alone, and You hereby agree to indemnify every Contributor for any
|
||||||
|
liability incurred by such Contributor as a result of warranty, support,
|
||||||
|
indemnity or liability terms You offer. You may include additional
|
||||||
|
disclaimers of warranty and limitations of liability specific to any
|
||||||
|
jurisdiction.
|
||||||
|
|
||||||
|
4. Inability to Comply Due to Statute or Regulation
|
||||||
|
|
||||||
|
If it is impossible for You to comply with any of the terms of this License
|
||||||
|
with respect to some or all of the Covered Software due to statute, judicial
|
||||||
|
order, or regulation then You must: (a) comply with the terms of this License
|
||||||
|
to the maximum extent possible; and (b) describe the limitations and the code
|
||||||
|
they affect. Such description must be placed in a text file included with all
|
||||||
|
distributions of the Covered Software under this License. Except to the
|
||||||
|
extent prohibited by statute or regulation, such description must be
|
||||||
|
sufficiently detailed for a recipient of ordinary skill to be able to
|
||||||
|
understand it.
|
||||||
|
|
||||||
|
5. Termination
|
||||||
|
|
||||||
|
5.1. The rights granted under this License will terminate automatically if You
|
||||||
|
fail to comply with any of its terms. However, if You become compliant,
|
||||||
|
then the rights granted under this License from a particular Contributor
|
||||||
|
are reinstated (a) provisionally, unless and until such Contributor
|
||||||
|
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
|
||||||
|
if such Contributor fails to notify You of the non-compliance by some
|
||||||
|
reasonable means prior to 60 days after You have come back into compliance.
|
||||||
|
Moreover, Your grants from a particular Contributor are reinstated on an
|
||||||
|
ongoing basis if such Contributor notifies You of the non-compliance by
|
||||||
|
some reasonable means, this is the first time You have received notice of
|
||||||
|
non-compliance with this License from such Contributor, and You become
|
||||||
|
compliant prior to 30 days after Your receipt of the notice.
|
||||||
|
|
||||||
|
5.2. If You initiate litigation against any entity by asserting a patent
|
||||||
|
infringement claim (excluding declaratory judgment actions, counter-claims,
|
||||||
|
and cross-claims) alleging that a Contributor Version directly or
|
||||||
|
indirectly infringes any patent, then the rights granted to You by any and
|
||||||
|
all Contributors for the Covered Software under Section 2.1 of this License
|
||||||
|
shall terminate.
|
||||||
|
|
||||||
|
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
||||||
|
license agreements (excluding distributors and resellers) which have been
|
||||||
|
validly granted by You or Your distributors under this License prior to
|
||||||
|
termination shall survive termination.
|
||||||
|
|
||||||
|
6. Disclaimer of Warranty
|
||||||
|
|
||||||
|
Covered Software is provided under this License on an “as is” basis, without
|
||||||
|
warranty of any kind, either expressed, implied, or statutory, including,
|
||||||
|
without limitation, warranties that the Covered Software is free of defects,
|
||||||
|
merchantable, fit for a particular purpose or non-infringing. The entire
|
||||||
|
risk as to the quality and performance of the Covered Software is with You.
|
||||||
|
Should any Covered Software prove defective in any respect, You (not any
|
||||||
|
Contributor) assume the cost of any necessary servicing, repair, or
|
||||||
|
correction. This disclaimer of warranty constitutes an essential part of this
|
||||||
|
License. No use of any Covered Software is authorized under this License
|
||||||
|
except under this disclaimer.
|
||||||
|
|
||||||
|
7. Limitation of Liability
|
||||||
|
|
||||||
|
Under no circumstances and under no legal theory, whether tort (including
|
||||||
|
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
||||||
|
distributes Covered Software as permitted above, be liable to You for any
|
||||||
|
direct, indirect, special, incidental, or consequential damages of any
|
||||||
|
character including, without limitation, damages for lost profits, loss of
|
||||||
|
goodwill, work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses, even if such party shall have been
|
||||||
|
informed of the possibility of such damages. This limitation of liability
|
||||||
|
shall not apply to liability for death or personal injury resulting from such
|
||||||
|
party’s negligence to the extent applicable law prohibits such limitation.
|
||||||
|
Some jurisdictions do not allow the exclusion or limitation of incidental or
|
||||||
|
consequential damages, so this exclusion and limitation may not apply to You.
|
||||||
|
|
||||||
|
8. Litigation
|
||||||
|
|
||||||
|
Any litigation relating to this License may be brought only in the courts of
|
||||||
|
a jurisdiction where the defendant maintains its principal place of business
|
||||||
|
and such litigation shall be governed by laws of that jurisdiction, without
|
||||||
|
reference to its conflict-of-law provisions. Nothing in this Section shall
|
||||||
|
prevent a party’s ability to bring cross-claims or counter-claims.
|
||||||
|
|
||||||
|
9. Miscellaneous
|
||||||
|
|
||||||
|
This License represents the complete agreement concerning the subject matter
|
||||||
|
hereof. If any provision of this License is held to be unenforceable, such
|
||||||
|
provision shall be reformed only to the extent necessary to make it
|
||||||
|
enforceable. Any law or regulation which provides that the language of a
|
||||||
|
contract shall be construed against the drafter shall not be used to construe
|
||||||
|
this License against a Contributor.
|
||||||
|
|
||||||
|
|
||||||
|
10. Versions of the License
|
||||||
|
|
||||||
|
10.1. New Versions
|
||||||
|
|
||||||
|
Mozilla Foundation is the license steward. Except as provided in Section
|
||||||
|
10.3, no one other than the license steward has the right to modify or
|
||||||
|
publish new versions of this License. Each version will be given a
|
||||||
|
distinguishing version number.
|
||||||
|
|
||||||
|
10.2. Effect of New Versions
|
||||||
|
|
||||||
|
You may distribute the Covered Software under the terms of the version of
|
||||||
|
the License under which You originally received the Covered Software, or
|
||||||
|
under the terms of any subsequent version published by the license
|
||||||
|
steward.
|
||||||
|
|
||||||
|
10.3. Modified Versions
|
||||||
|
|
||||||
|
If you create software not governed by this License, and you want to
|
||||||
|
create a new license for such software, you may create and use a modified
|
||||||
|
version of this License if you rename the license and remove any
|
||||||
|
references to the name of the license steward (except to note that such
|
||||||
|
modified license differs from this License).
|
||||||
|
|
||||||
|
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
|
||||||
|
If You choose to distribute Source Code Form that is Incompatible With
|
||||||
|
Secondary Licenses under the terms of this version of the License, the
|
||||||
|
notice described in Exhibit B of this License must be attached.
|
||||||
|
|
||||||
|
Exhibit A - Source Code Form License Notice
|
||||||
|
|
||||||
|
This Source Code Form is subject to the
|
||||||
|
terms of the Mozilla Public License, v.
|
||||||
|
2.0. If a copy of the MPL was not
|
||||||
|
distributed with this file, You can
|
||||||
|
obtain one at
|
||||||
|
http://mozilla.org/MPL/2.0/.
|
||||||
|
|
||||||
|
If it is not possible or desirable to put the notice in a particular file, then
|
||||||
|
You may include the notice in a location (such as a LICENSE file in a relevant
|
||||||
|
directory) where a recipient would be likely to look for such a notice.
|
||||||
|
|
||||||
|
You may add additional accurate notices of copyright ownership.
|
||||||
|
|
||||||
|
Exhibit B - “Incompatible With Secondary Licenses” Notice
|
||||||
|
|
||||||
|
This Source Code Form is “Incompatible
|
||||||
|
With Secondary Licenses”, as defined by
|
||||||
|
the Mozilla Public License, v. 2.0.
|
|
@ -0,0 +1,348 @@
|
||||||
|
// Copyright (c) HashiCorp, Inc.
|
||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
|
package gohcl
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
|
||||||
|
"github.com/hashicorp/hcl/v2"
|
||||||
|
"github.com/zclconf/go-cty/cty"
|
||||||
|
"github.com/zclconf/go-cty/cty/convert"
|
||||||
|
"github.com/zclconf/go-cty/cty/gocty"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DecodeOptions allows customizing sections of the decoding process.
|
||||||
|
type DecodeOptions struct {
|
||||||
|
ImpliedType func(gv interface{}) (cty.Type, error)
|
||||||
|
Convert func(in cty.Value, want cty.Type) (cty.Value, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o DecodeOptions) DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
|
||||||
|
o = o.withDefaults()
|
||||||
|
|
||||||
|
rv := reflect.ValueOf(val)
|
||||||
|
if rv.Kind() != reflect.Ptr {
|
||||||
|
panic(fmt.Sprintf("target value must be a pointer, not %s", rv.Type().String()))
|
||||||
|
}
|
||||||
|
|
||||||
|
return o.decodeBodyToValue(body, ctx, rv.Elem())
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeBody extracts the configuration within the given body into the given
|
||||||
|
// value. This value must be a non-nil pointer to either a struct or
|
||||||
|
// a map, where in the former case the configuration will be decoded using
|
||||||
|
// struct tags and in the latter case only attributes are allowed and their
|
||||||
|
// values are decoded into the map.
|
||||||
|
//
|
||||||
|
// The given EvalContext is used to resolve any variables or functions in
|
||||||
|
// expressions encountered while decoding. This may be nil to require only
|
||||||
|
// constant values, for simple applications that do not support variables or
|
||||||
|
// functions.
|
||||||
|
//
|
||||||
|
// The returned diagnostics should be inspected with its HasErrors method to
|
||||||
|
// determine if the populated value is valid and complete. If error diagnostics
|
||||||
|
// are returned then the given value may have been partially-populated but
|
||||||
|
// may still be accessed by a careful caller for static analysis and editor
|
||||||
|
// integration use-cases.
|
||||||
|
func DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
|
||||||
|
return DecodeOptions{}.DecodeBody(body, ctx, val)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o DecodeOptions) decodeBodyToValue(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics {
|
||||||
|
et := val.Type()
|
||||||
|
switch et.Kind() {
|
||||||
|
case reflect.Struct:
|
||||||
|
return o.decodeBodyToStruct(body, ctx, val)
|
||||||
|
case reflect.Map:
|
||||||
|
return o.decodeBodyToMap(body, ctx, val)
|
||||||
|
default:
|
||||||
|
panic(fmt.Sprintf("target value must be pointer to struct or map, not %s", et.String()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o DecodeOptions) decodeBodyToStruct(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics {
|
||||||
|
schema, partial := ImpliedBodySchema(val.Interface())
|
||||||
|
|
||||||
|
var content *hcl.BodyContent
|
||||||
|
var leftovers hcl.Body
|
||||||
|
var diags hcl.Diagnostics
|
||||||
|
if partial {
|
||||||
|
content, leftovers, diags = body.PartialContent(schema)
|
||||||
|
} else {
|
||||||
|
content, diags = body.Content(schema)
|
||||||
|
}
|
||||||
|
if content == nil {
|
||||||
|
return diags
|
||||||
|
}
|
||||||
|
|
||||||
|
tags := getFieldTags(val.Type())
|
||||||
|
|
||||||
|
if tags.Body != nil {
|
||||||
|
fieldIdx := *tags.Body
|
||||||
|
field := val.Type().Field(fieldIdx)
|
||||||
|
fieldV := val.Field(fieldIdx)
|
||||||
|
switch {
|
||||||
|
case bodyType.AssignableTo(field.Type):
|
||||||
|
fieldV.Set(reflect.ValueOf(body))
|
||||||
|
|
||||||
|
default:
|
||||||
|
diags = append(diags, o.decodeBodyToValue(body, ctx, fieldV)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if tags.Remain != nil {
|
||||||
|
fieldIdx := *tags.Remain
|
||||||
|
field := val.Type().Field(fieldIdx)
|
||||||
|
fieldV := val.Field(fieldIdx)
|
||||||
|
switch {
|
||||||
|
case bodyType.AssignableTo(field.Type):
|
||||||
|
fieldV.Set(reflect.ValueOf(leftovers))
|
||||||
|
case attrsType.AssignableTo(field.Type):
|
||||||
|
attrs, attrsDiags := leftovers.JustAttributes()
|
||||||
|
if len(attrsDiags) > 0 {
|
||||||
|
diags = append(diags, attrsDiags...)
|
||||||
|
}
|
||||||
|
fieldV.Set(reflect.ValueOf(attrs))
|
||||||
|
default:
|
||||||
|
diags = append(diags, o.decodeBodyToValue(leftovers, ctx, fieldV)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for name, fieldIdx := range tags.Attributes {
|
||||||
|
attr := content.Attributes[name]
|
||||||
|
field := val.Type().Field(fieldIdx)
|
||||||
|
fieldV := val.Field(fieldIdx)
|
||||||
|
|
||||||
|
if attr == nil {
|
||||||
|
if !exprType.AssignableTo(field.Type) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// As a special case, if the target is of type hcl.Expression then
|
||||||
|
// we'll assign an actual expression that evalues to a cty null,
|
||||||
|
// so the caller can deal with it within the cty realm rather
|
||||||
|
// than within the Go realm.
|
||||||
|
synthExpr := hcl.StaticExpr(cty.NullVal(cty.DynamicPseudoType), body.MissingItemRange())
|
||||||
|
fieldV.Set(reflect.ValueOf(synthExpr))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case attrType.AssignableTo(field.Type):
|
||||||
|
fieldV.Set(reflect.ValueOf(attr))
|
||||||
|
case exprType.AssignableTo(field.Type):
|
||||||
|
fieldV.Set(reflect.ValueOf(attr.Expr))
|
||||||
|
default:
|
||||||
|
diags = append(diags, o.DecodeExpression(
|
||||||
|
attr.Expr, ctx, fieldV.Addr().Interface(),
|
||||||
|
)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
blocksByType := content.Blocks.ByType()
|
||||||
|
|
||||||
|
for typeName, fieldIdx := range tags.Blocks {
|
||||||
|
blocks := blocksByType[typeName]
|
||||||
|
field := val.Type().Field(fieldIdx)
|
||||||
|
|
||||||
|
ty := field.Type
|
||||||
|
isSlice := false
|
||||||
|
isPtr := false
|
||||||
|
if ty.Kind() == reflect.Slice {
|
||||||
|
isSlice = true
|
||||||
|
ty = ty.Elem()
|
||||||
|
}
|
||||||
|
if ty.Kind() == reflect.Ptr {
|
||||||
|
isPtr = true
|
||||||
|
ty = ty.Elem()
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(blocks) > 1 && !isSlice {
|
||||||
|
diags = append(diags, &hcl.Diagnostic{
|
||||||
|
Severity: hcl.DiagError,
|
||||||
|
Summary: fmt.Sprintf("Duplicate %s block", typeName),
|
||||||
|
Detail: fmt.Sprintf(
|
||||||
|
"Only one %s block is allowed. Another was defined at %s.",
|
||||||
|
typeName, blocks[0].DefRange.String(),
|
||||||
|
),
|
||||||
|
Subject: &blocks[1].DefRange,
|
||||||
|
})
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(blocks) == 0 {
|
||||||
|
if isSlice || isPtr {
|
||||||
|
if val.Field(fieldIdx).IsNil() {
|
||||||
|
val.Field(fieldIdx).Set(reflect.Zero(field.Type))
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
diags = append(diags, &hcl.Diagnostic{
|
||||||
|
Severity: hcl.DiagError,
|
||||||
|
Summary: fmt.Sprintf("Missing %s block", typeName),
|
||||||
|
Detail: fmt.Sprintf("A %s block is required.", typeName),
|
||||||
|
Subject: body.MissingItemRange().Ptr(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case isSlice:
|
||||||
|
elemType := ty
|
||||||
|
if isPtr {
|
||||||
|
elemType = reflect.PointerTo(ty)
|
||||||
|
}
|
||||||
|
sli := val.Field(fieldIdx)
|
||||||
|
if sli.IsNil() {
|
||||||
|
sli = reflect.MakeSlice(reflect.SliceOf(elemType), len(blocks), len(blocks))
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, block := range blocks {
|
||||||
|
if isPtr {
|
||||||
|
if i >= sli.Len() {
|
||||||
|
sli = reflect.Append(sli, reflect.New(ty))
|
||||||
|
}
|
||||||
|
v := sli.Index(i)
|
||||||
|
if v.IsNil() {
|
||||||
|
v = reflect.New(ty)
|
||||||
|
}
|
||||||
|
diags = append(diags, o.decodeBlockToValue(block, ctx, v.Elem())...)
|
||||||
|
sli.Index(i).Set(v)
|
||||||
|
} else {
|
||||||
|
if i >= sli.Len() {
|
||||||
|
sli = reflect.Append(sli, reflect.Indirect(reflect.New(ty)))
|
||||||
|
}
|
||||||
|
diags = append(diags, o.decodeBlockToValue(block, ctx, sli.Index(i))...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if sli.Len() > len(blocks) {
|
||||||
|
sli.SetLen(len(blocks))
|
||||||
|
}
|
||||||
|
|
||||||
|
val.Field(fieldIdx).Set(sli)
|
||||||
|
|
||||||
|
default:
|
||||||
|
block := blocks[0]
|
||||||
|
if isPtr {
|
||||||
|
v := val.Field(fieldIdx)
|
||||||
|
if v.IsNil() {
|
||||||
|
v = reflect.New(ty)
|
||||||
|
}
|
||||||
|
diags = append(diags, o.decodeBlockToValue(block, ctx, v.Elem())...)
|
||||||
|
val.Field(fieldIdx).Set(v)
|
||||||
|
} else {
|
||||||
|
diags = append(diags, o.decodeBlockToValue(block, ctx, val.Field(fieldIdx))...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return diags
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o DecodeOptions) decodeBodyToMap(body hcl.Body, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics {
|
||||||
|
attrs, diags := body.JustAttributes()
|
||||||
|
if attrs == nil {
|
||||||
|
return diags
|
||||||
|
}
|
||||||
|
|
||||||
|
mv := reflect.MakeMap(v.Type())
|
||||||
|
|
||||||
|
for k, attr := range attrs {
|
||||||
|
switch {
|
||||||
|
case attrType.AssignableTo(v.Type().Elem()):
|
||||||
|
mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr))
|
||||||
|
case exprType.AssignableTo(v.Type().Elem()):
|
||||||
|
mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr.Expr))
|
||||||
|
default:
|
||||||
|
ev := reflect.New(v.Type().Elem())
|
||||||
|
diags = append(diags, o.DecodeExpression(attr.Expr, ctx, ev.Interface())...)
|
||||||
|
mv.SetMapIndex(reflect.ValueOf(k), ev.Elem())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
v.Set(mv)
|
||||||
|
|
||||||
|
return diags
|
||||||
|
}
|
||||||
|
|
||||||
|
// decodeBlockToValue decodes a single HCL block into v. The block body is
// decoded first, and then any block labels are copied positionally into the
// struct fields tagged as labels (as reported by getFieldTags).
func (o DecodeOptions) decodeBlockToValue(block *hcl.Block, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics {
	diags := o.decodeBodyToValue(block.Body, ctx, v)

	if len(block.Labels) > 0 {
		blockTags := getFieldTags(v.Type())
		for li, lv := range block.Labels {
			// The li'th label goes into the li'th label-tagged field.
			lfieldIdx := blockTags.Labels[li].FieldIndex
			v.Field(lfieldIdx).Set(reflect.ValueOf(lv))
		}
	}

	return diags
}
|
||||||
|
|
||||||
|
// DecodeExpression evaluates expr in ctx, converts the result to the cty
// type implied by val (via o.ImpliedType and o.Convert, falling back to the
// defaults from withDefaults), and finally assigns it to val with gocty.
//
// It panics if no cty type can be implied for val, since that is a bug in
// the calling program rather than a problem with the configuration.
// Conversion and assignment failures are instead reported as error
// diagnostics attached to the expression's source range.
func (o DecodeOptions) DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
	o = o.withDefaults()

	srcVal, diags := expr.Value(ctx)

	convTy, err := o.ImpliedType(val)
	if err != nil {
		// Unsuitable target type: caller bug, not a config problem.
		panic(fmt.Sprintf("unsuitable DecodeExpression target: %s", err))
	}

	srcVal, err = o.Convert(srcVal, convTy)
	if err != nil {
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Unsuitable value type",
			Detail:   fmt.Sprintf("Unsuitable value: %s", err.Error()),
			Subject:  expr.StartRange().Ptr(),
			Context:  expr.Range().Ptr(),
		})
		// The value could not be converted, so there is nothing to assign.
		return diags
	}

	err = gocty.FromCtyValue(srcVal, val)
	if err != nil {
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Unsuitable value type",
			Detail:   fmt.Sprintf("Unsuitable value: %s", err.Error()),
			Subject:  expr.StartRange().Ptr(),
			Context:  expr.Range().Ptr(),
		})
	}

	return diags
}
|
||||||
|
|
||||||
|
// DecodeExpression extracts the value of the given expression into the given
// value. This value must be something that gocty is able to decode into,
// since the final decoding is delegated to that package.
//
// The given EvalContext is used to resolve any variables or functions in
// expressions encountered while decoding. This may be nil to require only
// constant values, for simple applications that do not support variables or
// functions.
//
// The returned diagnostics should be inspected with its HasErrors method to
// determine if the populated value is valid and complete. If error diagnostics
// are returned then the given value may have been partially-populated but
// may still be accessed by a careful caller for static analysis and editor
// integration use-cases.
func DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
	// Delegate to the method form with all-default options.
	return DecodeOptions{}.DecodeExpression(expr, ctx, val)
}
|
||||||
|
|
||||||
|
func (o DecodeOptions) withDefaults() DecodeOptions {
|
||||||
|
if o.ImpliedType == nil {
|
||||||
|
o.ImpliedType = gocty.ImpliedType
|
||||||
|
}
|
||||||
|
|
||||||
|
if o.Convert == nil {
|
||||||
|
o.Convert = convert.Convert
|
||||||
|
}
|
||||||
|
return o
|
||||||
|
}
|
|
@ -0,0 +1,806 @@
|
||||||
|
// Copyright (c) HashiCorp, Inc.
|
||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
|
package gohcl
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/davecgh/go-spew/spew"
|
||||||
|
"github.com/hashicorp/hcl/v2"
|
||||||
|
hclJSON "github.com/hashicorp/hcl/v2/json"
|
||||||
|
"github.com/zclconf/go-cty/cty"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestDecodeBody exercises DecodeBody against a table of JSON-encoded bodies,
// covering attributes (required, optional, pointer, expression, remain/body
// capture), single and repeated blocks (value, pointer and slice targets),
// labeled blocks, map targets, and in-place decoding that retains or trims
// pre-populated nested block values.
func TestDecodeBody(t *testing.T) {
	// deepEquals builds a Check func asserting reflect.DeepEqual with other.
	deepEquals := func(other interface{}) func(v interface{}) bool {
		return func(v interface{}) bool {
			return reflect.DeepEqual(v, other)
		}
	}

	type withNameExpression struct {
		Name hcl.Expression `hcl:"name"`
	}

	type withTwoAttributes struct {
		A string `hcl:"a,optional"`
		B string `hcl:"b,optional"`
	}

	type withNestedBlock struct {
		Plain  string             `hcl:"plain,optional"`
		Nested *withTwoAttributes `hcl:"nested,block"`
	}

	type withListofNestedBlocks struct {
		Nested []*withTwoAttributes `hcl:"nested,block"`
	}

	type withListofNestedBlocksNoPointers struct {
		Nested []withTwoAttributes `hcl:"nested,block"`
	}

	tests := []struct {
		Body      map[string]interface{} // JSON-encoded and parsed as the HCL body
		Target    func() interface{}     // produces the decode target (pointer)
		Check     func(v interface{}) bool
		DiagCount int // expected number of diagnostics
	}{
		{
			map[string]interface{}{},
			makeInstantiateType(struct{}{}),
			deepEquals(struct{}{}),
			0,
		},
		{
			map[string]interface{}{},
			makeInstantiateType(struct {
				Name string `hcl:"name"`
			}{}),
			deepEquals(struct {
				Name string `hcl:"name"`
			}{}),
			1, // name is required
		},
		{
			map[string]interface{}{},
			makeInstantiateType(struct {
				Name *string `hcl:"name"`
			}{}),
			deepEquals(struct {
				Name *string `hcl:"name"`
			}{}),
			0,
		}, // name nil
		{
			map[string]interface{}{},
			makeInstantiateType(struct {
				Name string `hcl:"name,optional"`
			}{}),
			deepEquals(struct {
				Name string `hcl:"name,optional"`
			}{}),
			0,
		}, // name optional
		{
			map[string]interface{}{},
			makeInstantiateType(withNameExpression{}),
			func(v interface{}) bool {
				if v == nil {
					return false
				}

				wne, valid := v.(withNameExpression)
				if !valid {
					return false
				}

				if wne.Name == nil {
					return false
				}

				// A missing attribute still yields a non-nil expression
				// that evaluates to a null value.
				nameVal, _ := wne.Name.Value(nil)
				return nameVal.IsNull()
			},
			0,
		},
		{
			map[string]interface{}{
				"name": "Ermintrude",
			},
			makeInstantiateType(withNameExpression{}),
			func(v interface{}) bool {
				if v == nil {
					return false
				}

				wne, valid := v.(withNameExpression)
				if !valid {
					return false
				}

				if wne.Name == nil {
					return false
				}

				nameVal, _ := wne.Name.Value(nil)
				return nameVal.Equals(cty.StringVal("Ermintrude")).True()
			},
			0,
		},
		{
			map[string]interface{}{
				"name": "Ermintrude",
			},
			makeInstantiateType(struct {
				Name string `hcl:"name"`
			}{}),
			deepEquals(struct {
				Name string `hcl:"name"`
			}{"Ermintrude"}),
			0,
		},
		{
			map[string]interface{}{
				"name": "Ermintrude",
				"age":  23,
			},
			makeInstantiateType(struct {
				Name string `hcl:"name"`
			}{}),
			deepEquals(struct {
				Name string `hcl:"name"`
			}{"Ermintrude"}),
			1, // Extraneous "age" property
		},
		{
			map[string]interface{}{
				"name": "Ermintrude",
				"age":  50,
			},
			makeInstantiateType(struct {
				Name  string         `hcl:"name"`
				Attrs hcl.Attributes `hcl:",remain"`
			}{}),
			func(gotI interface{}) bool {
				got := gotI.(struct {
					Name  string         `hcl:"name"`
					Attrs hcl.Attributes `hcl:",remain"`
				})
				return got.Name == "Ermintrude" && len(got.Attrs) == 1 && got.Attrs["age"] != nil
			},
			0,
		},
		{
			map[string]interface{}{
				"name": "Ermintrude",
				"age":  50,
			},
			makeInstantiateType(struct {
				Name   string   `hcl:"name"`
				Remain hcl.Body `hcl:",remain"`
			}{}),
			func(gotI interface{}) bool {
				got := gotI.(struct {
					Name   string   `hcl:"name"`
					Remain hcl.Body `hcl:",remain"`
				})

				attrs, _ := got.Remain.JustAttributes()

				return got.Name == "Ermintrude" && len(attrs) == 1 && attrs["age"] != nil
			},
			0,
		},
		{
			map[string]interface{}{
				"name":   "Ermintrude",
				"living": true,
			},
			makeInstantiateType(struct {
				Name   string               `hcl:"name"`
				Remain map[string]cty.Value `hcl:",remain"`
			}{}),
			deepEquals(struct {
				Name   string               `hcl:"name"`
				Remain map[string]cty.Value `hcl:",remain"`
			}{
				Name: "Ermintrude",
				Remain: map[string]cty.Value{
					"living": cty.True,
				},
			}),
			0,
		},
		{
			map[string]interface{}{
				"name": "Ermintrude",
				"age":  50,
			},
			makeInstantiateType(struct {
				Name   string   `hcl:"name"`
				Body   hcl.Body `hcl:",body"`
				Remain hcl.Body `hcl:",remain"`
			}{}),
			func(gotI interface{}) bool {
				got := gotI.(struct {
					Name   string   `hcl:"name"`
					Body   hcl.Body `hcl:",body"`
					Remain hcl.Body `hcl:",remain"`
				})

				// ",body" captures the whole body, including decoded attrs.
				attrs, _ := got.Body.JustAttributes()

				return got.Name == "Ermintrude" && len(attrs) == 2 &&
					attrs["name"] != nil && attrs["age"] != nil
			},
			0,
		},
		{
			map[string]interface{}{
				"noodle": map[string]interface{}{},
			},
			makeInstantiateType(struct {
				Noodle struct{} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				// Generating no diagnostics is good enough for this one.
				return true
			},
			0,
		},
		{
			map[string]interface{}{
				"noodle": []map[string]interface{}{{}},
			},
			makeInstantiateType(struct {
				Noodle struct{} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				// Generating no diagnostics is good enough for this one.
				return true
			},
			0,
		},
		{
			map[string]interface{}{
				"noodle": []map[string]interface{}{{}, {}},
			},
			makeInstantiateType(struct {
				Noodle struct{} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				// Generating one diagnostic is good enough for this one.
				return true
			},
			1,
		},
		{
			map[string]interface{}{},
			makeInstantiateType(struct {
				Noodle struct{} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				// Generating one diagnostic is good enough for this one.
				return true
			},
			1,
		},
		{
			map[string]interface{}{
				"noodle": []map[string]interface{}{},
			},
			makeInstantiateType(struct {
				Noodle struct{} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				// Generating one diagnostic is good enough for this one.
				return true
			},
			1,
		},
		{
			map[string]interface{}{
				"noodle": map[string]interface{}{},
			},
			makeInstantiateType(struct {
				Noodle *struct{} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				return gotI.(struct {
					Noodle *struct{} `hcl:"noodle,block"`
				}).Noodle != nil
			},
			0,
		},
		{
			map[string]interface{}{
				"noodle": []map[string]interface{}{{}},
			},
			makeInstantiateType(struct {
				Noodle *struct{} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				return gotI.(struct {
					Noodle *struct{} `hcl:"noodle,block"`
				}).Noodle != nil
			},
			0,
		},
		{
			map[string]interface{}{
				"noodle": []map[string]interface{}{},
			},
			makeInstantiateType(struct {
				Noodle *struct{} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				return gotI.(struct {
					Noodle *struct{} `hcl:"noodle,block"`
				}).Noodle == nil
			},
			0,
		},
		{
			map[string]interface{}{
				"noodle": []map[string]interface{}{{}, {}},
			},
			makeInstantiateType(struct {
				Noodle *struct{} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				// Generating one diagnostic is good enough for this one.
				return true
			},
			1,
		},
		{
			map[string]interface{}{
				"noodle": []map[string]interface{}{},
			},
			makeInstantiateType(struct {
				Noodle []struct{} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				noodle := gotI.(struct {
					Noodle []struct{} `hcl:"noodle,block"`
				}).Noodle
				return len(noodle) == 0
			},
			0,
		},
		{
			map[string]interface{}{
				"noodle": []map[string]interface{}{{}},
			},
			makeInstantiateType(struct {
				Noodle []struct{} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				noodle := gotI.(struct {
					Noodle []struct{} `hcl:"noodle,block"`
				}).Noodle
				return len(noodle) == 1
			},
			0,
		},
		{
			map[string]interface{}{
				"noodle": []map[string]interface{}{{}, {}},
			},
			makeInstantiateType(struct {
				Noodle []struct{} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				noodle := gotI.(struct {
					Noodle []struct{} `hcl:"noodle,block"`
				}).Noodle
				return len(noodle) == 2
			},
			0,
		},
		{
			map[string]interface{}{
				"noodle": map[string]interface{}{},
			},
			makeInstantiateType(struct {
				Noodle struct {
					Name string `hcl:"name,label"`
				} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				//nolint:misspell
				// Generating two diagnostics is good enough for this one.
				// (one for the missing noodle block and the other for
				// the JSON serialization detecting the missing level of
				// heirarchy for the label.)
				return true
			},
			2,
		},
		{
			map[string]interface{}{
				"noodle": map[string]interface{}{
					"foo_foo": map[string]interface{}{},
				},
			},
			makeInstantiateType(struct {
				Noodle struct {
					Name string `hcl:"name,label"`
				} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				noodle := gotI.(struct {
					Noodle struct {
						Name string `hcl:"name,label"`
					} `hcl:"noodle,block"`
				}).Noodle
				return noodle.Name == "foo_foo"
			},
			0,
		},
		{
			map[string]interface{}{
				"noodle": map[string]interface{}{
					"foo_foo": map[string]interface{}{},
					"bar_baz": map[string]interface{}{},
				},
			},
			makeInstantiateType(struct {
				Noodle struct {
					Name string `hcl:"name,label"`
				} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				// One diagnostic is enough for this one.
				return true
			},
			1,
		},
		{
			map[string]interface{}{
				"noodle": map[string]interface{}{
					"foo_foo": map[string]interface{}{},
					"bar_baz": map[string]interface{}{},
				},
			},
			makeInstantiateType(struct {
				Noodles []struct {
					Name string `hcl:"name,label"`
				} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				noodles := gotI.(struct {
					Noodles []struct {
						Name string `hcl:"name,label"`
					} `hcl:"noodle,block"`
				}).Noodles
				// JSON object iteration order is not guaranteed, so accept
				// both labels in either order (but not duplicated).
				return len(noodles) == 2 && (noodles[0].Name == "foo_foo" || noodles[0].Name == "bar_baz") && (noodles[1].Name == "foo_foo" || noodles[1].Name == "bar_baz") && noodles[0].Name != noodles[1].Name
			},
			0,
		},
		{
			map[string]interface{}{
				"noodle": map[string]interface{}{
					"foo_foo": map[string]interface{}{
						"type": "rice",
					},
				},
			},
			makeInstantiateType(struct {
				Noodle struct {
					Name string `hcl:"name,label"`
					Type string `hcl:"type"`
				} `hcl:"noodle,block"`
			}{}),
			func(gotI interface{}) bool {
				noodle := gotI.(struct {
					Noodle struct {
						Name string `hcl:"name,label"`
						Type string `hcl:"type"`
					} `hcl:"noodle,block"`
				}).Noodle
				return noodle.Name == "foo_foo" && noodle.Type == "rice"
			},
			0,
		},

		{
			map[string]interface{}{
				"name": "Ermintrude",
				"age":  34,
			},
			makeInstantiateType(map[string]string(nil)),
			deepEquals(map[string]string{
				"name": "Ermintrude",
				"age":  "34",
			}),
			0,
		},
		{
			map[string]interface{}{
				"name": "Ermintrude",
				"age":  89,
			},
			makeInstantiateType(map[string]*hcl.Attribute(nil)),
			func(gotI interface{}) bool {
				got := gotI.(map[string]*hcl.Attribute)
				return len(got) == 2 && got["name"] != nil && got["age"] != nil
			},
			0,
		},
		{
			map[string]interface{}{
				"name": "Ermintrude",
				"age":  13,
			},
			makeInstantiateType(map[string]hcl.Expression(nil)),
			func(gotI interface{}) bool {
				got := gotI.(map[string]hcl.Expression)
				return len(got) == 2 && got["name"] != nil && got["age"] != nil
			},
			0,
		},
		{
			map[string]interface{}{
				"name":   "Ermintrude",
				"living": true,
			},
			makeInstantiateType(map[string]cty.Value(nil)),
			deepEquals(map[string]cty.Value{
				"name":   cty.StringVal("Ermintrude"),
				"living": cty.True,
			}),
			0,
		},
		{
			// Retain "nested" block while decoding
			map[string]interface{}{
				"plain": "foo",
			},
			func() interface{} {
				return &withNestedBlock{
					Plain: "bar",
					Nested: &withTwoAttributes{
						A: "bar",
					},
				}
			},
			func(gotI interface{}) bool {
				foo := gotI.(withNestedBlock)
				return foo.Plain == "foo" && foo.Nested != nil && foo.Nested.A == "bar"
			},
			0,
		},
		{
			// Retain values in "nested" block while decoding
			map[string]interface{}{
				"nested": map[string]interface{}{
					"a": "foo",
				},
			},
			func() interface{} {
				return &withNestedBlock{
					Nested: &withTwoAttributes{
						B: "bar",
					},
				}
			},
			func(gotI interface{}) bool {
				foo := gotI.(withNestedBlock)
				return foo.Nested.A == "foo" && foo.Nested.B == "bar"
			},
			0,
		},
		{
			// Retain values in "nested" block list while decoding
			map[string]interface{}{
				"nested": []map[string]interface{}{
					{
						"a": "foo",
					},
				},
			},
			func() interface{} {
				return &withListofNestedBlocks{
					Nested: []*withTwoAttributes{
						{
							B: "bar",
						},
					},
				}
			},
			func(gotI interface{}) bool {
				n := gotI.(withListofNestedBlocks)
				return n.Nested[0].A == "foo" && n.Nested[0].B == "bar"
			},
			0,
		},
		{
			// Remove additional elements from the list while decoding nested blocks
			map[string]interface{}{
				"nested": []map[string]interface{}{
					{
						"a": "foo",
					},
				},
			},
			func() interface{} {
				return &withListofNestedBlocks{
					Nested: []*withTwoAttributes{
						{
							B: "bar",
						},
						{
							B: "bar",
						},
					},
				}
			},
			func(gotI interface{}) bool {
				n := gotI.(withListofNestedBlocks)
				return len(n.Nested) == 1
			},
			0,
		},
		{
			// Make sure decoding value slices works the same as pointer slices.
			map[string]interface{}{
				"nested": []map[string]interface{}{
					{
						"b": "bar",
					},
					{
						"b": "baz",
					},
				},
			},
			func() interface{} {
				return &withListofNestedBlocksNoPointers{
					Nested: []withTwoAttributes{
						{
							B: "foo",
						},
					},
				}
			},
			func(gotI interface{}) bool {
				n := gotI.(withListofNestedBlocksNoPointers)
				return n.Nested[0].B == "bar" && len(n.Nested) == 2
			},
			0,
		},
	}

	for i, test := range tests {
		// For convenience here we're going to use the JSON parser
		// to process the given body.
		buf, err := json.Marshal(test.Body)
		if err != nil {
			t.Fatalf("error JSON-encoding body for test %d: %s", i, err)
		}

		t.Run(string(buf), func(t *testing.T) {
			file, diags := hclJSON.Parse(buf, "test.json")
			if len(diags) != 0 {
				t.Fatalf("diagnostics while parsing: %s", diags.Error())
			}

			targetVal := reflect.ValueOf(test.Target())

			diags = DecodeBody(file.Body, nil, targetVal.Interface())
			if len(diags) != test.DiagCount {
				t.Errorf("wrong number of diagnostics %d; want %d", len(diags), test.DiagCount)
				for _, diag := range diags {
					t.Logf(" - %s", diag.Error())
				}
			}
			got := targetVal.Elem().Interface()
			if !test.Check(got) {
				t.Errorf("wrong result\ngot: %s", spew.Sdump(got))
			}
		})
	}
}
|
||||||
|
|
||||||
|
// TestDecodeExpression exercises DecodeExpression across native-Go,
// cty.Value and converting targets, plus the null/unknown/non-convertible
// error cases, using fixedExpression to supply each source value.
func TestDecodeExpression(t *testing.T) {
	tests := []struct {
		Value     cty.Value   // value the expression evaluates to
		Target    interface{} // zero value whose type is the decode target
		Want      interface{} // expected decoded result
		DiagCount int         // expected number of diagnostics
	}{
		{
			cty.StringVal("hello"),
			"",
			"hello",
			0,
		},
		{
			cty.StringVal("hello"),
			cty.NilVal,
			cty.StringVal("hello"),
			0,
		},
		{
			cty.NumberIntVal(2),
			"",
			"2",
			0,
		},
		{
			cty.StringVal("true"),
			false,
			true,
			0,
		},
		{
			cty.NullVal(cty.String),
			"",
			"",
			1, // null value is not allowed
		},
		{
			cty.UnknownVal(cty.String),
			"",
			"",
			1, // value must be known
		},
		{
			cty.ListVal([]cty.Value{cty.True}),
			false,
			false,
			1, // bool required
		},
	}

	for i, test := range tests {
		t.Run(fmt.Sprintf("%02d", i), func(t *testing.T) {
			expr := &fixedExpression{test.Value}

			// Decode into a fresh pointer of the target's type.
			targetVal := reflect.New(reflect.TypeOf(test.Target))

			diags := DecodeExpression(expr, nil, targetVal.Interface())
			if len(diags) != test.DiagCount {
				t.Errorf("wrong number of diagnostics %d; want %d", len(diags), test.DiagCount)
				for _, diag := range diags {
					t.Logf(" - %s", diag.Error())
				}
			}
			got := targetVal.Elem().Interface()
			if !reflect.DeepEqual(got, test.Want) {
				t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want)
			}
		})
	}
}
|
||||||
|
|
||||||
|
// fixedExpression is a test double implementing hcl.Expression that always
// evaluates to a fixed value, regardless of the evaluation context.
type fixedExpression struct {
	val cty.Value
}

// Value returns the fixed value with no diagnostics.
func (e *fixedExpression) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
	return e.val, nil
}

// Range returns a zero range; a fixedExpression has no source location.
func (e *fixedExpression) Range() (r hcl.Range) {
	return
}

// StartRange returns a zero range; a fixedExpression has no source location.
func (e *fixedExpression) StartRange() (r hcl.Range) {
	return
}

// Variables reports no variable references.
func (e *fixedExpression) Variables() []hcl.Traversal {
	return nil
}
|
||||||
|
|
||||||
|
// makeInstantiateType returns a factory function that, on each call,
// allocates a fresh zero value of target's dynamic type and returns a
// pointer to it as an interface{}.
func makeInstantiateType(target interface{}) func() interface{} {
	ty := reflect.TypeOf(target)
	return func() interface{} {
		return reflect.New(ty).Interface()
	}
}
|
|
@ -0,0 +1,65 @@
|
||||||
|
// Copyright (c) HashiCorp, Inc.
|
||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
|
// Package gohcl allows decoding HCL configurations into Go data structures.
|
||||||
|
//
|
||||||
|
// It provides a convenient and concise way of describing the schema for
|
||||||
|
// configuration and then accessing the resulting data via native Go
|
||||||
|
// types.
|
||||||
|
//
|
||||||
|
// A struct field tag scheme is used, similar to other decoding and
|
||||||
|
// unmarshalling libraries. The tags are formatted as in the following example:
|
||||||
|
//
|
||||||
|
// ThingType string `hcl:"thing_type,attr"`
|
||||||
|
//
|
||||||
|
// Within each tag there are two comma-separated tokens. The first is the
|
||||||
|
// name of the corresponding construct in configuration, while the second
|
||||||
|
// is a keyword giving the kind of construct expected. The following
|
||||||
|
// kind keywords are supported:
|
||||||
|
//
|
||||||
|
// attr (the default) indicates that the value is to be populated from an attribute
|
||||||
|
// block indicates that the value is to be populated from a block
|
||||||
|
// label indicates that the value is to be populated from a block label
|
||||||
|
// optional is the same as attr, but the field is optional
|
||||||
|
// remain indicates that the value is to be populated from the remaining body after populating other fields
|
||||||
|
//
|
||||||
|
// "attr" fields may either be of type *hcl.Expression, in which case the raw
|
||||||
|
// expression is assigned, or of any type accepted by gocty, in which case
|
||||||
|
// gocty will be used to assign the value to a native Go type.
|
||||||
|
//
|
||||||
|
// "block" fields may be a struct that recursively uses the same tags, or a
|
||||||
|
// slice of such structs, in which case multiple blocks of the corresponding
|
||||||
|
// type are decoded into the slice.
|
||||||
|
//
|
||||||
|
// "body" can be placed on a single field of type hcl.Body to capture
|
||||||
|
// the full hcl.Body that was decoded for a block. This does not allow leftover
|
||||||
|
// values like "remain", so a decoding error will still be returned if leftover
|
||||||
|
// fields are given. If you want to capture the decoding body PLUS leftover
|
||||||
|
// fields, you must specify a "remain" field as well to prevent errors. The
|
||||||
|
// body field and the remain field will both contain the leftover fields.
|
||||||
|
//
|
||||||
|
// "label" fields are considered only in a struct used as the type of a field
|
||||||
|
// marked as "block", and are used sequentially to capture the labels of
|
||||||
|
// the blocks being decoded. In this case, the name token is used only as
|
||||||
|
// an identifier for the label in diagnostic messages.
|
||||||
|
//
|
||||||
|
// "optional" fields behave like "attr" fields, but they are optional
|
||||||
|
// and will not give parsing errors if they are missing.
|
||||||
|
//
|
||||||
|
// "remain" can be placed on a single field that may be either of type
|
||||||
|
// hcl.Body or hcl.Attributes, in which case any remaining body content is
|
||||||
|
// placed into this field for delayed processing. If no "remain" field is
|
||||||
|
// present then any attributes or blocks not matched by another valid tag
|
||||||
|
// will cause an error diagnostic.
|
||||||
|
//
|
||||||
|
// Only a subset of this tagging/typing vocabulary is supported for the
|
||||||
|
// "Encode" family of functions. See the EncodeIntoBody docs for full details
|
||||||
|
// on the constraints there.
|
||||||
|
//
|
||||||
|
// Broadly-speaking this package deals with two types of error. The first is
|
||||||
|
// errors in the configuration itself, which are returned as diagnostics
|
||||||
|
// written with the configuration author as the target audience. The second
|
||||||
|
// is bugs in the calling program, such as invalid struct tags, which are
|
||||||
|
// surfaced via panics since there can be no useful runtime handling of such
|
||||||
|
// errors and they should certainly not be returned to the user as diagnostics.
|
||||||
|
package gohcl
|
|
@ -0,0 +1,192 @@
|
||||||
|
// Copyright (c) HashiCorp, Inc.
|
||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
|
package gohcl
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
|
||||||
|
"github.com/hashicorp/hcl/v2/hclwrite"
|
||||||
|
"github.com/zclconf/go-cty/cty/gocty"
|
||||||
|
)
|
||||||
|
|
||||||
|
// EncodeIntoBody replaces the contents of the given hclwrite Body with
// attributes and blocks derived from the given value, which must be a
// struct value or a pointer to a struct value with the struct tags defined
// in this package.
//
// This function can work only with fully-decoded data. It will ignore any
// fields tagged as "remain", any fields that decode attributes into either
// hcl.Attribute or hcl.Expression values, and any fields that decode blocks
// into hcl.Attributes values. This function does not have enough information
// to complete the decoding of these types.
//
// Any fields tagged as "label" are ignored by this function. Use EncodeAsBlock
// to produce a whole hclwrite.Block including block labels.
//
// As long as a suitable value is given to encode and the destination body
// is non-nil, this function will always complete. It will panic in case of
// any errors in the calling program, such as passing an inappropriate type
// or a nil body.
//
// The layout of the resulting HCL source is derived from the ordering of
// the struct fields, with blank lines around nested blocks of different types.
// Fields representing attributes should usually precede those representing
// blocks so that the attributes can group together in the result. For more
// control, use the hclwrite API directly.
func EncodeIntoBody(val interface{}, dst *hclwrite.Body) {
	rv := reflect.ValueOf(val)
	ty := rv.Type()
	if ty.Kind() == reflect.Ptr {
		// Accept a pointer to struct by dereferencing it first.
		rv = rv.Elem()
		ty = rv.Type()
	}
	if ty.Kind() != reflect.Struct {
		// Anything other than (a pointer to) a struct is a caller bug.
		panic(fmt.Sprintf("value is %s, not struct", ty.Kind()))
	}

	tags := getFieldTags(ty)
	populateBody(rv, ty, tags, dst)
}
|
||||||
|
|
||||||
|
// EncodeAsBlock creates a new hclwrite.Block populated with the data from
|
||||||
|
// the given value, which must be a struct or pointer to struct with the
|
||||||
|
// struct tags defined in this package.
|
||||||
|
//
|
||||||
|
// If the given struct type has fields tagged with "label" tags then they
|
||||||
|
// will be used in order to annotate the created block with labels.
|
||||||
|
//
|
||||||
|
// This function has the same constraints as EncodeIntoBody and will panic
|
||||||
|
// if they are violated.
|
||||||
|
func EncodeAsBlock(val interface{}, blockType string) *hclwrite.Block {
|
||||||
|
rv := reflect.ValueOf(val)
|
||||||
|
ty := rv.Type()
|
||||||
|
if ty.Kind() == reflect.Ptr {
|
||||||
|
rv = rv.Elem()
|
||||||
|
ty = rv.Type()
|
||||||
|
}
|
||||||
|
if ty.Kind() != reflect.Struct {
|
||||||
|
panic(fmt.Sprintf("value is %s, not struct", ty.Kind()))
|
||||||
|
}
|
||||||
|
|
||||||
|
tags := getFieldTags(ty)
|
||||||
|
labels := make([]string, len(tags.Labels))
|
||||||
|
for i, lf := range tags.Labels {
|
||||||
|
lv := rv.Field(lf.FieldIndex)
|
||||||
|
// We just stringify whatever we find. It should always be a string
|
||||||
|
// but if not then we'll still do something reasonable.
|
||||||
|
labels[i] = fmt.Sprintf("%s", lv.Interface())
|
||||||
|
}
|
||||||
|
|
||||||
|
block := hclwrite.NewBlock(blockType, labels)
|
||||||
|
populateBody(rv, ty, tags, block.Body())
|
||||||
|
return block
|
||||||
|
}
|
||||||
|
|
||||||
|
// populateBody clears dst and then writes into it the attributes and nested
// blocks implied by the struct value rv (of type ty, whose hcl tags have
// already been extracted into tags). Fields are emitted in struct declaration
// order; fields holding undecoded HCL data (hcl.Expression, hcl.Attribute,
// hcl.Body, hcl.Attributes) and nil pointer fields are skipped.
func populateBody(rv reflect.Value, ty reflect.Type, tags *fieldTags, dst *hclwrite.Body) {
	// Build a single ordering over both attributes and blocks, sorted by
	// the field index so output follows struct declaration order.
	nameIdxs := make(map[string]int, len(tags.Attributes)+len(tags.Blocks))
	namesOrder := make([]string, 0, len(tags.Attributes)+len(tags.Blocks))
	for n, i := range tags.Attributes {
		nameIdxs[n] = i
		namesOrder = append(namesOrder, n)
	}
	for n, i := range tags.Blocks {
		nameIdxs[n] = i
		namesOrder = append(namesOrder, n)
	}
	sort.SliceStable(namesOrder, func(i, j int) bool {
		ni, nj := namesOrder[i], namesOrder[j]
		return nameIdxs[ni] < nameIdxs[nj]
	})

	dst.Clear()

	// prevWasBlock tracks whether the previous emitted item was a block, so
	// we can separate blocks from attributes with blank lines.
	prevWasBlock := false
	for _, name := range namesOrder {
		fieldIdx := nameIdxs[name]
		field := ty.Field(fieldIdx)
		fieldTy := field.Type
		fieldVal := rv.Field(fieldIdx)

		if fieldTy.Kind() == reflect.Ptr {
			fieldTy = fieldTy.Elem()
			fieldVal = fieldVal.Elem()
		}

		if _, isAttr := tags.Attributes[name]; isAttr {
			if exprType.AssignableTo(fieldTy) || attrType.AssignableTo(fieldTy) {
				continue // ignore undecoded fields
			}
			if !fieldVal.IsValid() {
				continue // ignore (field value is nil pointer)
			}
			if fieldTy.Kind() == reflect.Ptr && fieldVal.IsNil() {
				continue // ignore
			}
			if prevWasBlock {
				dst.AppendNewline()
				prevWasBlock = false
			}

			valTy, err := gocty.ImpliedType(fieldVal.Interface())
			if err != nil {
				panic(fmt.Sprintf("cannot encode %T as HCL expression: %s", fieldVal.Interface(), err))
			}

			val, err := gocty.ToCtyValue(fieldVal.Interface(), valTy)
			if err != nil {
				// This should never happen, since we should always be able
				// to decode into the implied type.
				panic(fmt.Sprintf("failed to encode %T as %#v: %s", fieldVal.Interface(), valTy, err))
			}

			dst.SetAttributeValue(name, val)
		} else { // must be a block, then
			elemTy := fieldTy
			isSeq := false
			if elemTy.Kind() == reflect.Slice || elemTy.Kind() == reflect.Array {
				isSeq = true
				elemTy = elemTy.Elem()
			}

			if bodyType.AssignableTo(elemTy) || attrsType.AssignableTo(elemTy) {
				continue // ignore undecoded fields
			}
			prevWasBlock = false

			if isSeq {
				// One block per element of the slice/array.
				l := fieldVal.Len()
				for i := 0; i < l; i++ {
					elemVal := fieldVal.Index(i)
					if !elemVal.IsValid() {
						continue // ignore (elem value is nil pointer)
					}
					if elemTy.Kind() == reflect.Ptr && elemVal.IsNil() {
						continue // ignore
					}
					block := EncodeAsBlock(elemVal.Interface(), name)
					if !prevWasBlock {
						dst.AppendNewline()
						prevWasBlock = true
					}
					dst.AppendBlock(block)
				}
			} else {
				if !fieldVal.IsValid() {
					continue // ignore (field value is nil pointer)
				}
				if elemTy.Kind() == reflect.Ptr && fieldVal.IsNil() {
					continue // ignore
				}
				block := EncodeAsBlock(fieldVal.Interface(), name)
				if !prevWasBlock {
					dst.AppendNewline()
					prevWasBlock = true
				}
				dst.AppendBlock(block)
			}
		}
	}
}
|
|
@ -0,0 +1,67 @@
|
||||||
|
// Copyright (c) HashiCorp, Inc.
|
||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
|
package gohcl_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/hashicorp/hcl/v2/gohcl"
|
||||||
|
"github.com/hashicorp/hcl/v2/hclwrite"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ExampleEncodeIntoBody shows how struct tags drive encoding: "label" fields
// become block labels, plain tags become attributes, and ",block" fields
// (single, pointer, or slice) become nested blocks.
func ExampleEncodeIntoBody() {
	type Service struct {
		Name string   `hcl:"name,label"`
		Exe  []string `hcl:"executable"`
	}
	type Constraints struct {
		OS   string `hcl:"os"`
		Arch string `hcl:"arch"`
	}
	type App struct {
		Name        string       `hcl:"name"`
		Desc        string       `hcl:"description"`
		Constraints *Constraints `hcl:"constraints,block"`
		Services    []Service    `hcl:"service,block"`
	}

	app := App{
		Name: "awesome-app",
		Desc: "Such an awesome application",
		Constraints: &Constraints{
			OS:   "linux",
			Arch: "amd64",
		},
		Services: []Service{
			{
				Name: "web",
				Exe:  []string{"./web", "--listen=:8080"},
			},
			{
				Name: "worker",
				Exe:  []string{"./worker"},
			},
		},
	}

	f := hclwrite.NewEmptyFile()
	gohcl.EncodeIntoBody(&app, f.Body())
	fmt.Printf("%s", f.Bytes())

	// Output:
	// name = "awesome-app"
	// description = "Such an awesome application"
	//
	// constraints {
	// os = "linux"
	// arch = "amd64"
	// }
	//
	// service "web" {
	// executable = ["./web", "--listen=:8080"]
	// }
	// service "worker" {
	// executable = ["./worker"]
	// }
}
|
|
@ -0,0 +1,185 @@
|
||||||
|
// Copyright (c) HashiCorp, Inc.
|
||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
|
package gohcl
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/hashicorp/hcl/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ImpliedBodySchema produces a hcl.BodySchema derived from the type of the
// given value, which must be a struct value or a pointer to one. If an
// inappropriate value is passed, this function will panic.
//
// The second return argument indicates whether the given struct includes
// a "remain" field, and thus the returned schema is non-exhaustive.
//
// This uses the tags on the fields of the struct to discover how each
// field's value should be expressed within configuration. If an invalid
// mapping is attempted, this function will panic.
func ImpliedBodySchema(val interface{}) (schema *hcl.BodySchema, partial bool) {
	ty := reflect.TypeOf(val)

	if ty.Kind() == reflect.Ptr {
		ty = ty.Elem()
	}

	if ty.Kind() != reflect.Struct {
		panic(fmt.Sprintf("given value must be struct, not %T", val))
	}

	var attrSchemas []hcl.AttributeSchema
	var blockSchemas []hcl.BlockHeaderSchema

	tags := getFieldTags(ty)

	// Attribute schemas, in sorted-by-name order for determinism.
	attrNames := make([]string, 0, len(tags.Attributes))
	for n := range tags.Attributes {
		attrNames = append(attrNames, n)
	}
	sort.Strings(attrNames)
	for _, n := range attrNames {
		idx := tags.Attributes[n]
		optional := tags.Optional[n]
		field := ty.Field(idx)

		var required bool

		switch {
		case field.Type.AssignableTo(exprType):
			//nolint:misspell
			// If we're decoding to hcl.Expression then absense can be
			// indicated via a null value, so we don't specify that
			// the field is required during decoding.
			required = false
		case field.Type.Kind() != reflect.Ptr && !optional:
			required = true
		default:
			required = false
		}

		attrSchemas = append(attrSchemas, hcl.AttributeSchema{
			Name:     n,
			Required: required,
		})
	}

	// Block schemas, also in sorted-by-name order.
	blockNames := make([]string, 0, len(tags.Blocks))
	for n := range tags.Blocks {
		blockNames = append(blockNames, n)
	}
	sort.Strings(blockNames)
	for _, n := range blockNames {
		idx := tags.Blocks[n]
		field := ty.Field(idx)
		fty := field.Type
		// Unwrap slice-of-struct and pointer-to-struct block fields.
		if fty.Kind() == reflect.Slice {
			fty = fty.Elem()
		}
		if fty.Kind() == reflect.Ptr {
			fty = fty.Elem()
		}
		if fty.Kind() != reflect.Struct {
			panic(fmt.Sprintf(
				"hcl 'block' tag kind cannot be applied to %s field %s: struct required", field.Type.String(), field.Name,
			))
		}
		// Label names come from the nested struct's own "label" tags.
		ftags := getFieldTags(fty)
		var labelNames []string
		if len(ftags.Labels) > 0 {
			labelNames = make([]string, len(ftags.Labels))
			for i, l := range ftags.Labels {
				labelNames[i] = l.Name
			}
		}

		blockSchemas = append(blockSchemas, hcl.BlockHeaderSchema{
			Type:       n,
			LabelNames: labelNames,
		})
	}

	partial = tags.Remain != nil
	schema = &hcl.BodySchema{
		Attributes: attrSchemas,
		Blocks:     blockSchemas,
	}
	return schema, partial
}
|
||||||
|
|
||||||
|
type fieldTags struct {
|
||||||
|
Attributes map[string]int
|
||||||
|
Blocks map[string]int
|
||||||
|
Labels []labelField
|
||||||
|
Remain *int
|
||||||
|
Body *int
|
||||||
|
Optional map[string]bool
|
||||||
|
}
|
||||||
|
|
||||||
|
type labelField struct {
|
||||||
|
FieldIndex int
|
||||||
|
Name string
|
||||||
|
}
|
||||||
|
|
||||||
|
func getFieldTags(ty reflect.Type) *fieldTags {
|
||||||
|
ret := &fieldTags{
|
||||||
|
Attributes: map[string]int{},
|
||||||
|
Blocks: map[string]int{},
|
||||||
|
Optional: map[string]bool{},
|
||||||
|
}
|
||||||
|
|
||||||
|
ct := ty.NumField()
|
||||||
|
for i := 0; i < ct; i++ {
|
||||||
|
field := ty.Field(i)
|
||||||
|
tag := field.Tag.Get("hcl")
|
||||||
|
if tag == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
comma := strings.Index(tag, ",")
|
||||||
|
var name, kind string
|
||||||
|
if comma != -1 {
|
||||||
|
name = tag[:comma]
|
||||||
|
kind = tag[comma+1:]
|
||||||
|
} else {
|
||||||
|
name = tag
|
||||||
|
kind = "attr"
|
||||||
|
}
|
||||||
|
|
||||||
|
switch kind {
|
||||||
|
case "attr":
|
||||||
|
ret.Attributes[name] = i
|
||||||
|
case "block":
|
||||||
|
ret.Blocks[name] = i
|
||||||
|
case "label":
|
||||||
|
ret.Labels = append(ret.Labels, labelField{
|
||||||
|
FieldIndex: i,
|
||||||
|
Name: name,
|
||||||
|
})
|
||||||
|
case "remain":
|
||||||
|
if ret.Remain != nil {
|
||||||
|
panic("only one 'remain' tag is permitted")
|
||||||
|
}
|
||||||
|
idx := i // copy, because this loop will continue assigning to i
|
||||||
|
ret.Remain = &idx
|
||||||
|
case "body":
|
||||||
|
if ret.Body != nil {
|
||||||
|
panic("only one 'body' tag is permitted")
|
||||||
|
}
|
||||||
|
idx := i // copy, because this loop will continue assigning to i
|
||||||
|
ret.Body = &idx
|
||||||
|
case "optional":
|
||||||
|
ret.Attributes[name] = i
|
||||||
|
ret.Optional[name] = true
|
||||||
|
default:
|
||||||
|
panic(fmt.Sprintf("invalid hcl field tag kind %q on %s %q", kind, field.Type.String(), field.Name))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return ret
|
||||||
|
}
|
|
@ -0,0 +1,233 @@
|
||||||
|
// Copyright (c) HashiCorp, Inc.
|
||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
|
package gohcl
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/davecgh/go-spew/spew"
|
||||||
|
"github.com/hashicorp/hcl/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestImpliedBodySchema covers the tag-to-schema mapping: plain tags become
// required attributes, pointer or ",optional" tags become optional ones,
// ",block" fields (struct, slice, or pointer) become block headers whose
// label names come from the nested struct's ",label" tags, ",remain" marks
// the schema partial, and hcl.Expression fields are never required.
func TestImpliedBodySchema(t *testing.T) {
	tests := []struct {
		val         interface{}
		wantSchema  *hcl.BodySchema
		wantPartial bool
	}{
		{
			struct{}{},
			&hcl.BodySchema{},
			false,
		},
		{
			struct {
				Ignored bool
			}{},
			&hcl.BodySchema{},
			false,
		},
		{
			struct {
				Attr1 bool `hcl:"attr1"`
				Attr2 bool `hcl:"attr2"`
			}{},
			&hcl.BodySchema{
				Attributes: []hcl.AttributeSchema{
					{
						Name:     "attr1",
						Required: true,
					},
					{
						Name:     "attr2",
						Required: true,
					},
				},
			},
			false,
		},
		{
			struct {
				Attr *bool `hcl:"attr,attr"`
			}{},
			&hcl.BodySchema{
				Attributes: []hcl.AttributeSchema{
					{
						Name:     "attr",
						Required: false,
					},
				},
			},
			false,
		},
		{
			struct {
				Thing struct{} `hcl:"thing,block"`
			}{},
			&hcl.BodySchema{
				Blocks: []hcl.BlockHeaderSchema{
					{
						Type: "thing",
					},
				},
			},
			false,
		},
		{
			struct {
				Thing struct {
					Type string `hcl:"type,label"`
					Name string `hcl:"name,label"`
				} `hcl:"thing,block"`
			}{},
			&hcl.BodySchema{
				Blocks: []hcl.BlockHeaderSchema{
					{
						Type:       "thing",
						LabelNames: []string{"type", "name"},
					},
				},
			},
			false,
		},
		{
			struct {
				Thing []struct {
					Type string `hcl:"type,label"`
					Name string `hcl:"name,label"`
				} `hcl:"thing,block"`
			}{},
			&hcl.BodySchema{
				Blocks: []hcl.BlockHeaderSchema{
					{
						Type:       "thing",
						LabelNames: []string{"type", "name"},
					},
				},
			},
			false,
		},
		{
			struct {
				Thing *struct {
					Type string `hcl:"type,label"`
					Name string `hcl:"name,label"`
				} `hcl:"thing,block"`
			}{},
			&hcl.BodySchema{
				Blocks: []hcl.BlockHeaderSchema{
					{
						Type:       "thing",
						LabelNames: []string{"type", "name"},
					},
				},
			},
			false,
		},
		{
			struct {
				Thing struct {
					Name      string `hcl:"name,label"`
					Something string `hcl:"something"`
				} `hcl:"thing,block"`
			}{},
			&hcl.BodySchema{
				Blocks: []hcl.BlockHeaderSchema{
					{
						Type:       "thing",
						LabelNames: []string{"name"},
					},
				},
			},
			false,
		},
		{
			struct {
				Doodad string `hcl:"doodad"`
				Thing  struct {
					Name string `hcl:"name,label"`
				} `hcl:"thing,block"`
			}{},
			&hcl.BodySchema{
				Attributes: []hcl.AttributeSchema{
					{
						Name:     "doodad",
						Required: true,
					},
				},
				Blocks: []hcl.BlockHeaderSchema{
					{
						Type:       "thing",
						LabelNames: []string{"name"},
					},
				},
			},
			false,
		},
		{
			struct {
				Doodad string `hcl:"doodad"`
				Config string `hcl:",remain"`
			}{},
			&hcl.BodySchema{
				Attributes: []hcl.AttributeSchema{
					{
						Name:     "doodad",
						Required: true,
					},
				},
			},
			true,
		},
		{
			struct {
				Expr hcl.Expression `hcl:"expr"`
			}{},
			&hcl.BodySchema{
				Attributes: []hcl.AttributeSchema{
					{
						Name:     "expr",
						Required: false,
					},
				},
			},
			false,
		},
		{
			struct {
				Meh string `hcl:"meh,optional"`
			}{},
			&hcl.BodySchema{
				Attributes: []hcl.AttributeSchema{
					{
						Name:     "meh",
						Required: false,
					},
				},
			},
			false,
		},
	}

	for _, test := range tests {
		t.Run(fmt.Sprintf("%#v", test.val), func(t *testing.T) {
			schema, partial := ImpliedBodySchema(test.val)
			if !reflect.DeepEqual(schema, test.wantSchema) {
				t.Errorf(
					"wrong schema\ngot: %s\nwant: %s",
					spew.Sdump(schema), spew.Sdump(test.wantSchema),
				)
			}

			if partial != test.wantPartial {
				t.Errorf(
					"wrong partial flag\ngot: %#v\nwant: %#v",
					partial, test.wantPartial,
				)
			}
		})
	}
}
|
|
@ -0,0 +1,19 @@
|
||||||
|
// Copyright (c) HashiCorp, Inc.
|
||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
|
package gohcl
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
|
||||||
|
"github.com/hashicorp/hcl/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
// victimExpr and victimBody exist only so we can obtain the reflect.Type
// of the hcl.Expression and hcl.Body interfaces below.
var victimExpr hcl.Expression
var victimBody hcl.Body

// reflect.Type values for the HCL types that decoding handles specially;
// the encoder and schema builder compare struct field types against these
// to skip or relax fields that hold undecoded HCL data.
var exprType = reflect.TypeOf(&victimExpr).Elem()
var bodyType = reflect.TypeOf(&victimBody).Elem()
var blockType = reflect.TypeOf((*hcl.Block)(nil)) //nolint:unused
var attrType = reflect.TypeOf((*hcl.Attribute)(nil))
var attrsType = reflect.TypeOf(hcl.Attributes(nil))
|
|
@ -10,12 +10,11 @@ import (
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/docker/buildx/bake/hclparser/gohcl"
|
||||||
"github.com/docker/buildx/util/userfunc"
|
"github.com/docker/buildx/util/userfunc"
|
||||||
"github.com/hashicorp/hcl/v2"
|
"github.com/hashicorp/hcl/v2"
|
||||||
"github.com/hashicorp/hcl/v2/gohcl"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/zclconf/go-cty/cty"
|
"github.com/zclconf/go-cty/cty"
|
||||||
"github.com/zclconf/go-cty/cty/gocty"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type Opt struct {
|
type Opt struct {
|
||||||
|
@ -454,8 +453,9 @@ func (p *parser) resolveBlock(block *hcl.Block, target *hcl.BodySchema) (err err
|
||||||
}
|
}
|
||||||
|
|
||||||
// decode!
|
// decode!
|
||||||
diag = gohcl.DecodeBody(body(), ectx, output.Interface())
|
diag = decodeBody(body(), ectx, output.Interface())
|
||||||
if diag.HasErrors() {
|
if diag.HasErrors() {
|
||||||
|
fmt.Printf("unable to decode due to errors: %s\n", diag.Error())
|
||||||
return diag
|
return diag
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -476,11 +476,11 @@ func (p *parser) resolveBlock(block *hcl.Block, target *hcl.BodySchema) (err err
|
||||||
}
|
}
|
||||||
|
|
||||||
// store the result into the evaluation context (so it can be referenced)
|
// store the result into the evaluation context (so it can be referenced)
|
||||||
outputType, err := gocty.ImpliedType(output.Interface())
|
outputType, err := ImpliedType(output.Interface())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
outputValue, err := gocty.ToCtyValue(output.Interface(), outputType)
|
outputValue, err := ToCtyValue(output.Interface(), outputType)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -983,3 +983,8 @@ func key(ks ...any) uint64 {
|
||||||
}
|
}
|
||||||
return hash.Sum64()
|
return hash.Sum64()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// decodeBody decodes an HCL body into val using the vendored gohcl decoder,
// wired to this package's ImpliedType so capsule-aware types are handled.
func decodeBody(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
	dec := gohcl.DecodeOptions{ImpliedType: ImpliedType}
	return dec.DecodeBody(body, ctx, val)
}
|
||||||
|
|
|
@ -0,0 +1,160 @@
|
||||||
|
// MIT License
|
||||||
|
//
|
||||||
|
// Copyright (c) 2017-2018 Martin Atkins
|
||||||
|
//
|
||||||
|
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
// of this software and associated documentation files (the "Software"), to deal
|
||||||
|
// in the Software without restriction, including without limitation the rights
|
||||||
|
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
// copies of the Software, and to permit persons to whom the Software is
|
||||||
|
// furnished to do so, subject to the following conditions:
|
||||||
|
//
|
||||||
|
// The above copyright notice and this permission notice shall be included in all
|
||||||
|
// copies or substantial portions of the Software.
|
||||||
|
//
|
||||||
|
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
// SOFTWARE.
|
||||||
|
|
||||||
|
package hclparser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
|
||||||
|
"github.com/zclconf/go-cty/cty"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ImpliedType takes an arbitrary Go value (as an interface{}) and attempts
|
||||||
|
// to find a suitable cty.Type instance that could be used for a conversion
|
||||||
|
// with ToCtyValue.
|
||||||
|
//
|
||||||
|
// This allows -- for simple situations at least -- types to be defined just
|
||||||
|
// once in Go and the cty types derived from the Go types, but in the process
|
||||||
|
// it makes some assumptions that may be undesirable so applications are
|
||||||
|
// encouraged to build their cty types directly if exacting control is
|
||||||
|
// required.
|
||||||
|
//
|
||||||
|
// Not all Go types can be represented as cty types, so an error may be
|
||||||
|
// returned which is usually considered to be a bug in the calling program.
|
||||||
|
// In particular, ImpliedType will never use capsule types in its returned
|
||||||
|
// type, because it cannot know the capsule types supported by the calling
|
||||||
|
// program.
|
||||||
|
func ImpliedType(gv interface{}) (cty.Type, error) {
|
||||||
|
rt := reflect.TypeOf(gv)
|
||||||
|
var path cty.Path
|
||||||
|
return impliedType(rt, path)
|
||||||
|
}
|
||||||
|
|
||||||
|
// impliedType maps a Go reflect.Type onto a cty.Type, recursing through
// pointers, slices, maps, and structs. Extension types (CapsuleValue
// implementations) are checked first so they take precedence over the
// generic kind-based mapping. path is used only to build error messages
// identifying where in a nested type the failure occurred.
func impliedType(rt reflect.Type, path cty.Path) (cty.Type, error) {
	// Custom capsule handling wins over the default kind switch.
	if ety, err := impliedTypeExt(rt, path); err == nil {
		return ety, nil
	}

	switch rt.Kind() {
	case reflect.Ptr:
		return impliedType(rt.Elem(), path)

	// Primitive types
	case reflect.Bool:
		return cty.Bool, nil
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return cty.Number, nil
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return cty.Number, nil
	case reflect.Float32, reflect.Float64:
		return cty.Number, nil
	case reflect.String:
		return cty.String, nil

	// Collection types
	case reflect.Slice:
		// Shadow path with an index step appended for the element type.
		path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.Number)})
		ety, err := impliedType(rt.Elem(), path)
		if err != nil {
			return cty.NilType, err
		}
		return cty.List(ety), nil
	case reflect.Map:
		if !stringType.AssignableTo(rt.Key()) {
			return cty.NilType, path.NewErrorf("no cty.Type for %s (must have string keys)", rt)
		}
		// Shadow path with an index step appended for the element type.
		path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.String)})
		ety, err := impliedType(rt.Elem(), path)
		if err != nil {
			return cty.NilType, err
		}
		return cty.Map(ety), nil

	// Structural types
	case reflect.Struct:
		return impliedStructType(rt, path)

	default:
		return cty.NilType, path.NewErrorf("no cty.Type for %s", rt)
	}
}
|
||||||
|
|
||||||
|
// impliedStructType maps a Go struct type onto a cty object type, using the
// struct's `cty` field tags as attribute names. It fails if the struct has
// no cty-tagged fields, since an empty mapping would silently drop data.
func impliedStructType(rt reflect.Type, path cty.Path) (cty.Type, error) {
	if valueType.AssignableTo(rt) {
		// Special case: cty.Value represents cty.DynamicPseudoType, for
		// type conformance checking.
		return cty.DynamicPseudoType, nil
	}

	fieldIdxs := structTagIndices(rt)
	if len(fieldIdxs) == 0 {
		return cty.NilType, path.NewErrorf("no cty.Type for %s (no cty field tags)", rt)
	}

	atys := make(map[string]cty.Type, len(fieldIdxs))

	{
		// Temporary extension of path for attributes
		path := append(path, nil)

		for k, fi := range fieldIdxs {
			// Overwrite the last step in place for each attribute rather
			// than re-appending on every iteration.
			path[len(path)-1] = cty.GetAttrStep{Name: k}

			ft := rt.Field(fi).Type
			aty, err := impliedType(ft, path)
			if err != nil {
				return cty.NilType, err
			}

			atys[k] = aty
		}
	}

	return cty.Object(atys), nil
}
|
||||||
|
|
||||||
|
var (
	// valueType is the reflect.Type of cty.Value, special-cased as
	// cty.DynamicPseudoType in impliedStructType.
	valueType = reflect.TypeOf(cty.Value{})
	// stringType is used to require string-keyed maps in impliedType.
	stringType = reflect.TypeOf("")
)
|
||||||
|
|
||||||
|
// structTagIndices interrogates the fields of the given type (which must
|
||||||
|
// be a struct type, or we'll panic) and returns a map from the cty
|
||||||
|
// attribute names declared via struct tags to the indices of the
|
||||||
|
// fields holding those tags.
|
||||||
|
//
|
||||||
|
// This function will panic if two fields within the struct are tagged with
|
||||||
|
// the same cty attribute name.
|
||||||
|
func structTagIndices(st reflect.Type) map[string]int {
|
||||||
|
ct := st.NumField()
|
||||||
|
ret := make(map[string]int, ct)
|
||||||
|
|
||||||
|
for i := 0; i < ct; i++ {
|
||||||
|
field := st.Field(i)
|
||||||
|
attrName := field.Tag.Get("cty")
|
||||||
|
if attrName != "" {
|
||||||
|
ret[attrName] = i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return ret
|
||||||
|
}
|
|
@ -0,0 +1,145 @@
|
||||||
|
package hclparser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/containerd/errdefs"
|
||||||
|
"github.com/zclconf/go-cty/cty"
|
||||||
|
"github.com/zclconf/go-cty/cty/convert"
|
||||||
|
"github.com/zclconf/go-cty/cty/gocty"
|
||||||
|
)
|
||||||
|
|
||||||
|
// CapsuleValue is implemented by Go types that can convert themselves to
// and from cty values, allowing them to be carried through HCL evaluation
// as cty capsule types.
type CapsuleValue interface {
	// FromCtyValue will initialize this value using a cty.Value.
	FromCtyValue(in cty.Value, path cty.Path) error

	// ToCtyValue will convert this capsule value into a native
	// cty.Value. This should not return a capsule type.
	ToCtyValue() cty.Value
}
|
||||||
|
|
||||||
|
// extensionType enumerates the keys passed to CapsuleOps.ExtensionData to
// retrieve extra metadata from a capsule type built by this package.
type extensionType int

const (
	// nativeTypeExtension requests the native (non-capsule) cty.Type that
	// the capsule's zero value converts to via ToCtyValue.
	nativeTypeExtension extensionType = iota
)
|
||||||
|
|
||||||
|
func impliedTypeExt(rt reflect.Type, _ cty.Path) (cty.Type, error) {
|
||||||
|
if rt.AssignableTo(capsuleValueType) {
|
||||||
|
return capsuleValueCapsuleType(rt), nil
|
||||||
|
}
|
||||||
|
return cty.NilType, errdefs.ErrNotImplemented
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
	// capsuleValueType is the reflect.Type of the CapsuleValue interface.
	capsuleValueType = reflect.TypeFor[CapsuleValue]()
	// capsuleValueTypes caches the cty capsule type constructed for each
	// concrete Go type (keyed by reflect.Type), so each is built once.
	capsuleValueTypes sync.Map
)
|
||||||
|
|
||||||
|
// capsuleValueCapsuleType returns the cty capsule type for rt (which is
// expected to be a pointer type implementing CapsuleValue — note the
// rt.Elem() calls below), building and caching it on first use. The capsule
// type converts to/from native cty values via the CapsuleValue methods.
func capsuleValueCapsuleType(rt reflect.Type) cty.Type {
	if val, loaded := capsuleValueTypes.Load(rt); loaded {
		return val.(cty.Type)
	}

	// First time used.
	ety := cty.CapsuleWithOps(rt.Name(), rt.Elem(), &cty.CapsuleOps{
		ConversionTo: func(_ cty.Type) func(cty.Value, cty.Path) (any, error) {
			// Convert a native cty value into a new instance of the Go type.
			return func(in cty.Value, p cty.Path) (any, error) {
				rv := reflect.New(rt.Elem()).Interface()
				if err := rv.(CapsuleValue).FromCtyValue(in, p); err != nil {
					return nil, err
				}
				return rv, nil
			}
		},
		ConversionFrom: func(want cty.Type) func(any, cty.Path) (cty.Value, error) {
			// Convert the Go value back into a native cty value of the
			// requested type.
			return func(in any, _ cty.Path) (cty.Value, error) {
				v := in.(CapsuleValue).ToCtyValue()
				return convert.Convert(v, want)
			}
		},
		ExtensionData: func(key any) any {
			switch key {
			case nativeTypeExtension:
				zero := reflect.Zero(rt).Interface()
				return zero.(CapsuleValue).ToCtyValue().Type()
			default:
				return nil
			}
		},
	})

	// Attempt to store the new type. Use whichever was loaded first in the case of a race condition.
	val, _ := capsuleValueTypes.LoadOrStore(rt, ety)
	return val.(cty.Type)
}
|
||||||
|
|
||||||
|
// ToNativeValue will convert a value to only native cty types which will
|
||||||
|
// remove capsule types if possible.
|
||||||
|
func ToNativeValue(in cty.Value) cty.Value {
|
||||||
|
want := toNativeType(in.Type())
|
||||||
|
if in.Type().Equals(want) {
|
||||||
|
return in
|
||||||
|
} else if out, err := convert.Convert(in, want); err == nil {
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
return cty.NullVal(want)
|
||||||
|
}
|
||||||
|
|
||||||
|
func toNativeType(in cty.Type) cty.Type {
|
||||||
|
if et := in.MapElementType(); et != nil {
|
||||||
|
return cty.Map(toNativeType(*et))
|
||||||
|
}
|
||||||
|
|
||||||
|
if et := in.SetElementType(); et != nil {
|
||||||
|
return cty.Set(toNativeType(*et))
|
||||||
|
}
|
||||||
|
|
||||||
|
if et := in.ListElementType(); et != nil {
|
||||||
|
return cty.List(toNativeType(*et))
|
||||||
|
}
|
||||||
|
|
||||||
|
if in.IsObjectType() {
|
||||||
|
var optional []string
|
||||||
|
inAttrTypes := in.AttributeTypes()
|
||||||
|
outAttrTypes := make(map[string]cty.Type, len(inAttrTypes))
|
||||||
|
for name, typ := range inAttrTypes {
|
||||||
|
outAttrTypes[name] = toNativeType(typ)
|
||||||
|
if in.AttributeOptional(name) {
|
||||||
|
optional = append(optional, name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(optional) == 0 {
|
||||||
|
return cty.Object(outAttrTypes)
|
||||||
|
}
|
||||||
|
return cty.ObjectWithOptionalAttrs(outAttrTypes, optional)
|
||||||
|
}
|
||||||
|
|
||||||
|
if in.IsTupleType() {
|
||||||
|
inTypes := in.TupleElementTypes()
|
||||||
|
outTypes := make([]cty.Type, len(inTypes))
|
||||||
|
for i, typ := range inTypes {
|
||||||
|
outTypes[i] = toNativeType(typ)
|
||||||
|
}
|
||||||
|
return cty.Tuple(outTypes)
|
||||||
|
}
|
||||||
|
|
||||||
|
if in.IsCapsuleType() {
|
||||||
|
if out := in.CapsuleExtensionData(nativeTypeExtension); out != nil {
|
||||||
|
return out.(cty.Type)
|
||||||
|
}
|
||||||
|
return cty.DynamicPseudoType
|
||||||
|
}
|
||||||
|
|
||||||
|
return in
|
||||||
|
}
|
||||||
|
|
||||||
|
func ToCtyValue(val any, ty cty.Type) (cty.Value, error) {
|
||||||
|
out, err := gocty.ToCtyValue(val, ty)
|
||||||
|
if err != nil {
|
||||||
|
return out, err
|
||||||
|
}
|
||||||
|
return ToNativeValue(out), nil
|
||||||
|
}
|
|
@ -45,6 +45,7 @@ func CreateExports(entries []*ExportEntry) ([]client.ExportEntry, error) {
|
||||||
supportDir = !tar
|
supportDir = !tar
|
||||||
case "registry":
|
case "registry":
|
||||||
out.Type = client.ExporterImage
|
out.Type = client.ExporterImage
|
||||||
|
out.Attrs["push"] = "true"
|
||||||
}
|
}
|
||||||
|
|
||||||
if supportDir {
|
if supportDir {
|
||||||
|
|
2
go.mod
2
go.mod
|
@ -15,6 +15,7 @@ require (
|
||||||
github.com/containerd/platforms v0.2.1
|
github.com/containerd/platforms v0.2.1
|
||||||
github.com/containerd/typeurl/v2 v2.2.0
|
github.com/containerd/typeurl/v2 v2.2.0
|
||||||
github.com/creack/pty v1.1.21
|
github.com/creack/pty v1.1.21
|
||||||
|
github.com/davecgh/go-spew v1.1.1
|
||||||
github.com/distribution/reference v0.6.0
|
github.com/distribution/reference v0.6.0
|
||||||
github.com/docker/cli v27.3.1+incompatible
|
github.com/docker/cli v27.3.1+incompatible
|
||||||
github.com/docker/cli-docs-tool v0.8.0
|
github.com/docker/cli-docs-tool v0.8.0
|
||||||
|
@ -87,7 +88,6 @@ require (
|
||||||
github.com/containerd/containerd/api v1.7.19 // indirect
|
github.com/containerd/containerd/api v1.7.19 // indirect
|
||||||
github.com/containerd/ttrpc v1.2.5 // indirect
|
github.com/containerd/ttrpc v1.2.5 // indirect
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect
|
github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect
|
||||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
|
||||||
github.com/docker/distribution v2.8.3+incompatible // indirect
|
github.com/docker/distribution v2.8.3+incompatible // indirect
|
||||||
github.com/docker/docker-credential-helpers v0.8.2 // indirect
|
github.com/docker/docker-credential-helpers v0.8.2 // indirect
|
||||||
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
|
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
|
||||||
|
|
|
@ -2,6 +2,8 @@ package buildflags
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"maps"
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
@ -9,66 +11,154 @@ import (
|
||||||
controllerapi "github.com/docker/buildx/controller/pb"
|
controllerapi "github.com/docker/buildx/controller/pb"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/tonistiigi/go-csvvalue"
|
"github.com/tonistiigi/go-csvvalue"
|
||||||
|
"github.com/zclconf/go-cty/cty"
|
||||||
|
jsoncty "github.com/zclconf/go-cty/cty/json"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type CacheOptionsEntry struct {
|
||||||
|
Type string `json:"type"`
|
||||||
|
Attrs map[string]string `json:"attrs,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *CacheOptionsEntry) Equal(other *CacheOptionsEntry) bool {
|
||||||
|
if e.Type != other.Type {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return maps.Equal(e.Attrs, other.Attrs)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *CacheOptionsEntry) String() string {
|
||||||
|
// Special registry syntax.
|
||||||
|
if e.Type == "registry" && len(e.Attrs) == 1 {
|
||||||
|
if ref, ok := e.Attrs["ref"]; ok {
|
||||||
|
return ref
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var b csvBuilder
|
||||||
|
if e.Type != "" {
|
||||||
|
b.Write("type", e.Type)
|
||||||
|
}
|
||||||
|
if len(e.Attrs) > 0 {
|
||||||
|
b.WriteAttributes(e.Attrs)
|
||||||
|
}
|
||||||
|
return b.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *CacheOptionsEntry) ToPB() *controllerapi.CacheOptionsEntry {
|
||||||
|
return &controllerapi.CacheOptionsEntry{
|
||||||
|
Type: e.Type,
|
||||||
|
Attrs: maps.Clone(e.Attrs),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *CacheOptionsEntry) MarshalJSON() ([]byte, error) {
|
||||||
|
m := maps.Clone(e.Attrs)
|
||||||
|
if m == nil {
|
||||||
|
m = map[string]string{}
|
||||||
|
}
|
||||||
|
m["type"] = e.Type
|
||||||
|
return json.Marshal(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *CacheOptionsEntry) UnmarshalJSON(data []byte) error {
|
||||||
|
var m map[string]string
|
||||||
|
if err := json.Unmarshal(data, &m); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
e.Type = m["type"]
|
||||||
|
delete(m, "type")
|
||||||
|
|
||||||
|
e.Attrs = m
|
||||||
|
return e.validate(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *CacheOptionsEntry) IsActive() bool {
|
||||||
|
// Always active if not gha.
|
||||||
|
if e.Type != "gha" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return e.Attrs["token"] != "" && e.Attrs["url"] != ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *CacheOptionsEntry) UnmarshalText(text []byte) error {
|
||||||
|
in := string(text)
|
||||||
|
fields, err := csvvalue.Fields(in, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(fields) == 1 && !strings.Contains(fields[0], "=") {
|
||||||
|
e.Type = "registry"
|
||||||
|
e.Attrs = map[string]string{"ref": fields[0]}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
e.Type = ""
|
||||||
|
e.Attrs = map[string]string{}
|
||||||
|
|
||||||
|
for _, field := range fields {
|
||||||
|
parts := strings.SplitN(field, "=", 2)
|
||||||
|
if len(parts) != 2 {
|
||||||
|
return errors.Errorf("invalid value %s", field)
|
||||||
|
}
|
||||||
|
key := strings.ToLower(parts[0])
|
||||||
|
value := parts[1]
|
||||||
|
switch key {
|
||||||
|
case "type":
|
||||||
|
e.Type = value
|
||||||
|
default:
|
||||||
|
e.Attrs[key] = value
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if e.Type == "" {
|
||||||
|
return errors.Errorf("type required form> %q", in)
|
||||||
|
}
|
||||||
|
addGithubToken(e)
|
||||||
|
addAwsCredentials(e)
|
||||||
|
return e.validate(text)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *CacheOptionsEntry) validate(gv interface{}) error {
|
||||||
|
if e.Type == "" {
|
||||||
|
var text []byte
|
||||||
|
switch gv := gv.(type) {
|
||||||
|
case []byte:
|
||||||
|
text = gv
|
||||||
|
case string:
|
||||||
|
text = []byte(gv)
|
||||||
|
case cty.Value:
|
||||||
|
text, _ = jsoncty.Marshal(gv, gv.Type())
|
||||||
|
default:
|
||||||
|
text, _ = json.Marshal(gv)
|
||||||
|
}
|
||||||
|
return errors.Errorf("type required form> %q", string(text))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func ParseCacheEntry(in []string) ([]*controllerapi.CacheOptionsEntry, error) {
|
func ParseCacheEntry(in []string) ([]*controllerapi.CacheOptionsEntry, error) {
|
||||||
outs := make([]*controllerapi.CacheOptionsEntry, 0, len(in))
|
outs := make([]*controllerapi.CacheOptionsEntry, 0, len(in))
|
||||||
for _, in := range in {
|
for _, in := range in {
|
||||||
fields, err := csvvalue.Fields(in, nil)
|
var out CacheOptionsEntry
|
||||||
if err != nil {
|
if err := out.UnmarshalText([]byte(in)); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if isRefOnlyFormat(fields) {
|
|
||||||
for _, field := range fields {
|
|
||||||
outs = append(outs, &controllerapi.CacheOptionsEntry{
|
|
||||||
Type: "registry",
|
|
||||||
Attrs: map[string]string{"ref": field},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
out := controllerapi.CacheOptionsEntry{
|
if !out.IsActive() {
|
||||||
Attrs: map[string]string{},
|
// Skip inactive cache entries.
|
||||||
}
|
|
||||||
for _, field := range fields {
|
|
||||||
parts := strings.SplitN(field, "=", 2)
|
|
||||||
if len(parts) != 2 {
|
|
||||||
return nil, errors.Errorf("invalid value %s", field)
|
|
||||||
}
|
|
||||||
key := strings.ToLower(parts[0])
|
|
||||||
value := parts[1]
|
|
||||||
switch key {
|
|
||||||
case "type":
|
|
||||||
out.Type = value
|
|
||||||
default:
|
|
||||||
out.Attrs[key] = value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if out.Type == "" {
|
|
||||||
return nil, errors.Errorf("type required form> %q", in)
|
|
||||||
}
|
|
||||||
if !addGithubToken(&out) {
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
addAwsCredentials(&out)
|
outs = append(outs, out.ToPB())
|
||||||
outs = append(outs, &out)
|
|
||||||
}
|
}
|
||||||
return outs, nil
|
return outs, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func isRefOnlyFormat(in []string) bool {
|
func addGithubToken(ci *CacheOptionsEntry) {
|
||||||
for _, v := range in {
|
|
||||||
if strings.Contains(v, "=") {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func addGithubToken(ci *controllerapi.CacheOptionsEntry) bool {
|
|
||||||
if ci.Type != "gha" {
|
if ci.Type != "gha" {
|
||||||
return true
|
return
|
||||||
}
|
}
|
||||||
if _, ok := ci.Attrs["token"]; !ok {
|
if _, ok := ci.Attrs["token"]; !ok {
|
||||||
if v, ok := os.LookupEnv("ACTIONS_RUNTIME_TOKEN"); ok {
|
if v, ok := os.LookupEnv("ACTIONS_RUNTIME_TOKEN"); ok {
|
||||||
|
@ -80,10 +170,9 @@ func addGithubToken(ci *controllerapi.CacheOptionsEntry) bool {
|
||||||
ci.Attrs["url"] = v
|
ci.Attrs["url"] = v
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return ci.Attrs["token"] != "" && ci.Attrs["url"] != ""
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func addAwsCredentials(ci *controllerapi.CacheOptionsEntry) {
|
func addAwsCredentials(ci *CacheOptionsEntry) {
|
||||||
if ci.Type != "s3" {
|
if ci.Type != "s3" {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,183 @@
|
||||||
|
package buildflags
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/zclconf/go-cty/cty"
|
||||||
|
"github.com/zclconf/go-cty/cty/convert"
|
||||||
|
"github.com/zclconf/go-cty/cty/gocty"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (e *CacheOptionsEntry) FromCtyValue(in cty.Value, p cty.Path) error {
|
||||||
|
conv, err := convert.Convert(in, cty.Map(cty.String))
|
||||||
|
if err == nil {
|
||||||
|
m := conv.AsValueMap()
|
||||||
|
if err := getAndDelete(m, "type", &e.Type); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
e.Attrs = asMap(m)
|
||||||
|
return e.validate(in)
|
||||||
|
}
|
||||||
|
return unmarshalTextFallback(in, e, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *CacheOptionsEntry) ToCtyValue() cty.Value {
|
||||||
|
if e == nil {
|
||||||
|
return cty.NullVal(cty.Map(cty.String))
|
||||||
|
}
|
||||||
|
|
||||||
|
vals := make(map[string]cty.Value, len(e.Attrs)+1)
|
||||||
|
for k, v := range e.Attrs {
|
||||||
|
vals[k] = cty.StringVal(v)
|
||||||
|
}
|
||||||
|
vals["type"] = cty.StringVal(e.Type)
|
||||||
|
return cty.MapVal(vals)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *ExportEntry) FromCtyValue(in cty.Value, p cty.Path) error {
|
||||||
|
conv, err := convert.Convert(in, cty.Map(cty.String))
|
||||||
|
if err == nil {
|
||||||
|
m := conv.AsValueMap()
|
||||||
|
if err := getAndDelete(m, "type", &e.Type); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := getAndDelete(m, "dest", &e.Destination); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
e.Attrs = asMap(m)
|
||||||
|
return e.validate()
|
||||||
|
}
|
||||||
|
return unmarshalTextFallback(in, e, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *ExportEntry) ToCtyValue() cty.Value {
|
||||||
|
if e == nil {
|
||||||
|
return cty.NullVal(cty.Map(cty.String))
|
||||||
|
}
|
||||||
|
|
||||||
|
vals := make(map[string]cty.Value, len(e.Attrs)+2)
|
||||||
|
for k, v := range e.Attrs {
|
||||||
|
vals[k] = cty.StringVal(v)
|
||||||
|
}
|
||||||
|
vals["type"] = cty.StringVal(e.Type)
|
||||||
|
vals["dest"] = cty.StringVal(e.Destination)
|
||||||
|
return cty.MapVal(vals)
|
||||||
|
}
|
||||||
|
|
||||||
|
var secretType = sync.OnceValue(func() cty.Type {
|
||||||
|
return cty.ObjectWithOptionalAttrs(
|
||||||
|
map[string]cty.Type{
|
||||||
|
"id": cty.String,
|
||||||
|
"src": cty.String,
|
||||||
|
"env": cty.String,
|
||||||
|
},
|
||||||
|
[]string{"id", "src", "env"},
|
||||||
|
)
|
||||||
|
})
|
||||||
|
|
||||||
|
func (e *Secret) FromCtyValue(in cty.Value, p cty.Path) (err error) {
|
||||||
|
conv, err := convert.Convert(in, secretType())
|
||||||
|
if err == nil {
|
||||||
|
if id := conv.GetAttr("id"); !id.IsNull() {
|
||||||
|
e.ID = id.AsString()
|
||||||
|
}
|
||||||
|
if src := conv.GetAttr("src"); !src.IsNull() {
|
||||||
|
e.FilePath = src.AsString()
|
||||||
|
}
|
||||||
|
if env := conv.GetAttr("env"); !env.IsNull() {
|
||||||
|
e.Env = env.AsString()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return unmarshalTextFallback(in, e, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Secret) ToCtyValue() cty.Value {
|
||||||
|
if e == nil {
|
||||||
|
return cty.NullVal(secretType())
|
||||||
|
}
|
||||||
|
|
||||||
|
return cty.ObjectVal(map[string]cty.Value{
|
||||||
|
"id": cty.StringVal(e.ID),
|
||||||
|
"src": cty.StringVal(e.FilePath),
|
||||||
|
"env": cty.StringVal(e.Env),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
var sshType = sync.OnceValue(func() cty.Type {
|
||||||
|
return cty.ObjectWithOptionalAttrs(
|
||||||
|
map[string]cty.Type{
|
||||||
|
"id": cty.String,
|
||||||
|
"paths": cty.List(cty.String),
|
||||||
|
},
|
||||||
|
[]string{"id", "paths"},
|
||||||
|
)
|
||||||
|
})
|
||||||
|
|
||||||
|
func (e *SSH) FromCtyValue(in cty.Value, p cty.Path) (err error) {
|
||||||
|
conv, err := convert.Convert(in, sshType())
|
||||||
|
if err == nil {
|
||||||
|
if id := conv.GetAttr("id"); !id.IsNull() {
|
||||||
|
e.ID = id.AsString()
|
||||||
|
}
|
||||||
|
if paths := conv.GetAttr("paths"); !paths.IsNull() {
|
||||||
|
if err := gocty.FromCtyValue(paths, &e.Paths); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return unmarshalTextFallback(in, e, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *SSH) ToCtyValue() cty.Value {
|
||||||
|
if e == nil {
|
||||||
|
return cty.NullVal(sshType())
|
||||||
|
}
|
||||||
|
|
||||||
|
var ctyPaths cty.Value
|
||||||
|
if len(e.Paths) > 0 {
|
||||||
|
paths := make([]cty.Value, len(e.Paths))
|
||||||
|
for i, path := range e.Paths {
|
||||||
|
paths[i] = cty.StringVal(path)
|
||||||
|
}
|
||||||
|
ctyPaths = cty.ListVal(paths)
|
||||||
|
} else {
|
||||||
|
ctyPaths = cty.ListValEmpty(cty.String)
|
||||||
|
}
|
||||||
|
|
||||||
|
return cty.ObjectVal(map[string]cty.Value{
|
||||||
|
"id": cty.StringVal(e.ID),
|
||||||
|
"paths": ctyPaths,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func getAndDelete(m map[string]cty.Value, attr string, gv interface{}) error {
|
||||||
|
if v, ok := m[attr]; ok {
|
||||||
|
delete(m, attr)
|
||||||
|
return gocty.FromCtyValue(v, gv)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func asMap(m map[string]cty.Value) map[string]string {
|
||||||
|
out := make(map[string]string, len(m))
|
||||||
|
for k, v := range m {
|
||||||
|
out[k] = v.AsString()
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func unmarshalTextFallback[V encoding.TextUnmarshaler](in cty.Value, v V, inErr error) (outErr error) {
|
||||||
|
// Attempt to convert this type to a string.
|
||||||
|
conv, err := convert.Convert(in, cty.String)
|
||||||
|
if err != nil {
|
||||||
|
// Cannot convert. Do not attempt to convert and return the original error.
|
||||||
|
return inErr
|
||||||
|
}
|
||||||
|
|
||||||
|
// Conversion was successful. Use UnmarshalText on the string and return any
|
||||||
|
// errors associated with that.
|
||||||
|
return v.UnmarshalText([]byte(conv.AsString()))
|
||||||
|
}
|
|
@ -1,7 +1,10 @@
|
||||||
package buildflags
|
package buildflags
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"maps"
|
||||||
"regexp"
|
"regexp"
|
||||||
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/containerd/platforms"
|
"github.com/containerd/platforms"
|
||||||
|
@ -13,67 +16,131 @@ import (
|
||||||
"github.com/tonistiigi/go-csvvalue"
|
"github.com/tonistiigi/go-csvvalue"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type ExportEntry struct {
|
||||||
|
Type string `json:"type"`
|
||||||
|
Attrs map[string]string `json:"attrs,omitempty"`
|
||||||
|
Destination string `json:"dest,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *ExportEntry) Equal(other *ExportEntry) bool {
|
||||||
|
if e.Type != other.Type || e.Destination != other.Destination {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return maps.Equal(e.Attrs, other.Attrs)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *ExportEntry) String() string {
|
||||||
|
var b csvBuilder
|
||||||
|
if e.Type != "" {
|
||||||
|
b.Write("type", e.Type)
|
||||||
|
}
|
||||||
|
if e.Destination != "" {
|
||||||
|
b.Write("dest", e.Destination)
|
||||||
|
}
|
||||||
|
if len(e.Attrs) > 0 {
|
||||||
|
b.WriteAttributes(e.Attrs)
|
||||||
|
}
|
||||||
|
return b.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *ExportEntry) ToPB() *controllerapi.ExportEntry {
|
||||||
|
return &controllerapi.ExportEntry{
|
||||||
|
Type: e.Type,
|
||||||
|
Attrs: maps.Clone(e.Attrs),
|
||||||
|
Destination: e.Destination,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *ExportEntry) MarshalJSON() ([]byte, error) {
|
||||||
|
m := maps.Clone(e.Attrs)
|
||||||
|
if m == nil {
|
||||||
|
m = map[string]string{}
|
||||||
|
}
|
||||||
|
m["type"] = e.Type
|
||||||
|
if e.Destination != "" {
|
||||||
|
m["dest"] = e.Destination
|
||||||
|
}
|
||||||
|
return json.Marshal(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *ExportEntry) UnmarshalJSON(data []byte) error {
|
||||||
|
var m map[string]string
|
||||||
|
if err := json.Unmarshal(data, &m); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
e.Type = m["type"]
|
||||||
|
delete(m, "type")
|
||||||
|
|
||||||
|
e.Destination = m["dest"]
|
||||||
|
delete(m, "dest")
|
||||||
|
|
||||||
|
e.Attrs = m
|
||||||
|
return e.validate()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *ExportEntry) UnmarshalText(text []byte) error {
|
||||||
|
s := string(text)
|
||||||
|
fields, err := csvvalue.Fields(s, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clear the target entry.
|
||||||
|
e.Type = ""
|
||||||
|
e.Attrs = map[string]string{}
|
||||||
|
e.Destination = ""
|
||||||
|
|
||||||
|
if len(fields) == 1 && fields[0] == s && !strings.HasPrefix(s, "type=") {
|
||||||
|
if s != "-" {
|
||||||
|
e.Type = client.ExporterLocal
|
||||||
|
e.Destination = s
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
e.Type = client.ExporterTar
|
||||||
|
e.Destination = s
|
||||||
|
}
|
||||||
|
|
||||||
|
if e.Type == "" {
|
||||||
|
for _, field := range fields {
|
||||||
|
parts := strings.SplitN(field, "=", 2)
|
||||||
|
if len(parts) != 2 {
|
||||||
|
return errors.Errorf("invalid value %s", field)
|
||||||
|
}
|
||||||
|
key := strings.TrimSpace(strings.ToLower(parts[0]))
|
||||||
|
value := parts[1]
|
||||||
|
switch key {
|
||||||
|
case "type":
|
||||||
|
e.Type = value
|
||||||
|
case "dest":
|
||||||
|
e.Destination = value
|
||||||
|
default:
|
||||||
|
e.Attrs[key] = value
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return e.validate()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *ExportEntry) validate() error {
|
||||||
|
if e.Type == "" {
|
||||||
|
return errors.Errorf("type is required for output")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func ParseExports(inp []string) ([]*controllerapi.ExportEntry, error) {
|
func ParseExports(inp []string) ([]*controllerapi.ExportEntry, error) {
|
||||||
var outs []*controllerapi.ExportEntry
|
var outs []*controllerapi.ExportEntry
|
||||||
if len(inp) == 0 {
|
if len(inp) == 0 {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
for _, s := range inp {
|
for _, s := range inp {
|
||||||
fields, err := csvvalue.Fields(s, nil)
|
var out ExportEntry
|
||||||
if err != nil {
|
if err := out.UnmarshalText([]byte(s)); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
outs = append(outs, out.ToPB())
|
||||||
out := controllerapi.ExportEntry{
|
|
||||||
Attrs: map[string]string{},
|
|
||||||
}
|
|
||||||
if len(fields) == 1 && fields[0] == s && !strings.HasPrefix(s, "type=") {
|
|
||||||
if s != "-" {
|
|
||||||
outs = append(outs, &controllerapi.ExportEntry{
|
|
||||||
Type: client.ExporterLocal,
|
|
||||||
Destination: s,
|
|
||||||
})
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
out = controllerapi.ExportEntry{
|
|
||||||
Type: client.ExporterTar,
|
|
||||||
Destination: s,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if out.Type == "" {
|
|
||||||
for _, field := range fields {
|
|
||||||
parts := strings.SplitN(field, "=", 2)
|
|
||||||
if len(parts) != 2 {
|
|
||||||
return nil, errors.Errorf("invalid value %s", field)
|
|
||||||
}
|
|
||||||
key := strings.TrimSpace(strings.ToLower(parts[0]))
|
|
||||||
value := parts[1]
|
|
||||||
switch key {
|
|
||||||
case "type":
|
|
||||||
out.Type = value
|
|
||||||
default:
|
|
||||||
out.Attrs[key] = value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if out.Type == "" {
|
|
||||||
return nil, errors.Errorf("type is required for output")
|
|
||||||
}
|
|
||||||
|
|
||||||
if out.Type == "registry" {
|
|
||||||
out.Type = client.ExporterImage
|
|
||||||
if _, ok := out.Attrs["push"]; !ok {
|
|
||||||
out.Attrs["push"] = "true"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if dest, ok := out.Attrs["dest"]; ok {
|
|
||||||
out.Destination = dest
|
|
||||||
delete(out.Attrs, "dest")
|
|
||||||
}
|
|
||||||
|
|
||||||
outs = append(outs, &out)
|
|
||||||
}
|
}
|
||||||
return outs, nil
|
return outs, nil
|
||||||
}
|
}
|
||||||
|
@ -141,3 +208,32 @@ func ParseAnnotations(inp []string) (map[exptypes.AnnotationKey]string, error) {
|
||||||
}
|
}
|
||||||
return annotations, nil
|
return annotations, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type csvBuilder struct {
|
||||||
|
sb strings.Builder
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *csvBuilder) Write(key, value string) {
|
||||||
|
if w.sb.Len() > 0 {
|
||||||
|
w.sb.WriteByte(',')
|
||||||
|
}
|
||||||
|
w.sb.WriteString(key)
|
||||||
|
w.sb.WriteByte('=')
|
||||||
|
w.sb.WriteString(value)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *csvBuilder) WriteAttributes(attrs map[string]string) {
|
||||||
|
keys := make([]string, 0, len(attrs))
|
||||||
|
for key := range attrs {
|
||||||
|
keys = append(keys, key)
|
||||||
|
}
|
||||||
|
sort.Strings(keys)
|
||||||
|
|
||||||
|
for _, key := range keys {
|
||||||
|
w.Write(key, attrs[key])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *csvBuilder) String() string {
|
||||||
|
return w.sb.String()
|
||||||
|
}
|
||||||
|
|
|
@ -8,6 +8,80 @@ import (
|
||||||
"github.com/tonistiigi/go-csvvalue"
|
"github.com/tonistiigi/go-csvvalue"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type Secret struct {
|
||||||
|
ID string `json:"id,omitempty"`
|
||||||
|
FilePath string `json:"src,omitempty"`
|
||||||
|
Env string `json:"env,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Secret) Equal(other *Secret) bool {
|
||||||
|
return s.ID == other.ID && s.FilePath == other.FilePath && s.Env == other.Env
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Secret) String() string {
|
||||||
|
var b csvBuilder
|
||||||
|
if s.ID != "" {
|
||||||
|
b.Write("id", s.ID)
|
||||||
|
}
|
||||||
|
if s.FilePath != "" {
|
||||||
|
b.Write("src", s.FilePath)
|
||||||
|
}
|
||||||
|
if s.Env != "" {
|
||||||
|
b.Write("env", s.Env)
|
||||||
|
}
|
||||||
|
return b.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Secret) ToPB() *controllerapi.Secret {
|
||||||
|
return &controllerapi.Secret{
|
||||||
|
ID: s.ID,
|
||||||
|
FilePath: s.FilePath,
|
||||||
|
Env: s.Env,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Secret) UnmarshalText(text []byte) error {
|
||||||
|
value := string(text)
|
||||||
|
fields, err := csvvalue.Fields(value, nil)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "failed to parse csv secret")
|
||||||
|
}
|
||||||
|
|
||||||
|
*s = Secret{}
|
||||||
|
|
||||||
|
var typ string
|
||||||
|
for _, field := range fields {
|
||||||
|
parts := strings.SplitN(field, "=", 2)
|
||||||
|
key := strings.ToLower(parts[0])
|
||||||
|
|
||||||
|
if len(parts) != 2 {
|
||||||
|
return errors.Errorf("invalid field '%s' must be a key=value pair", field)
|
||||||
|
}
|
||||||
|
|
||||||
|
value := parts[1]
|
||||||
|
switch key {
|
||||||
|
case "type":
|
||||||
|
if value != "file" && value != "env" {
|
||||||
|
return errors.Errorf("unsupported secret type %q", value)
|
||||||
|
}
|
||||||
|
typ = value
|
||||||
|
case "id":
|
||||||
|
s.ID = value
|
||||||
|
case "source", "src":
|
||||||
|
s.FilePath = value
|
||||||
|
case "env":
|
||||||
|
s.Env = value
|
||||||
|
default:
|
||||||
|
return errors.Errorf("unexpected key '%s' in '%s'", key, field)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if typ == "env" && s.Env == "" {
|
||||||
|
s.Env = s.FilePath
|
||||||
|
s.FilePath = ""
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func ParseSecretSpecs(sl []string) ([]*controllerapi.Secret, error) {
|
func ParseSecretSpecs(sl []string) ([]*controllerapi.Secret, error) {
|
||||||
fs := make([]*controllerapi.Secret, 0, len(sl))
|
fs := make([]*controllerapi.Secret, 0, len(sl))
|
||||||
for _, v := range sl {
|
for _, v := range sl {
|
||||||
|
@ -21,42 +95,9 @@ func ParseSecretSpecs(sl []string) ([]*controllerapi.Secret, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseSecret(value string) (*controllerapi.Secret, error) {
|
func parseSecret(value string) (*controllerapi.Secret, error) {
|
||||||
fields, err := csvvalue.Fields(value, nil)
|
var s Secret
|
||||||
if err != nil {
|
if err := s.UnmarshalText([]byte(value)); err != nil {
|
||||||
return nil, errors.Wrap(err, "failed to parse csv secret")
|
return nil, err
|
||||||
}
|
}
|
||||||
|
return s.ToPB(), nil
|
||||||
fs := controllerapi.Secret{}
|
|
||||||
|
|
||||||
var typ string
|
|
||||||
for _, field := range fields {
|
|
||||||
parts := strings.SplitN(field, "=", 2)
|
|
||||||
key := strings.ToLower(parts[0])
|
|
||||||
|
|
||||||
if len(parts) != 2 {
|
|
||||||
return nil, errors.Errorf("invalid field '%s' must be a key=value pair", field)
|
|
||||||
}
|
|
||||||
|
|
||||||
value := parts[1]
|
|
||||||
switch key {
|
|
||||||
case "type":
|
|
||||||
if value != "file" && value != "env" {
|
|
||||||
return nil, errors.Errorf("unsupported secret type %q", value)
|
|
||||||
}
|
|
||||||
typ = value
|
|
||||||
case "id":
|
|
||||||
fs.ID = value
|
|
||||||
case "source", "src":
|
|
||||||
fs.FilePath = value
|
|
||||||
case "env":
|
|
||||||
fs.Env = value
|
|
||||||
default:
|
|
||||||
return nil, errors.Errorf("unexpected key '%s' in '%s'", key, field)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if typ == "env" && fs.Env == "" {
|
|
||||||
fs.Env = fs.FilePath
|
|
||||||
fs.FilePath = ""
|
|
||||||
}
|
|
||||||
return &fs, nil
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,12 +1,60 @@
|
||||||
package buildflags
|
package buildflags
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"cmp"
|
||||||
|
"slices"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
controllerapi "github.com/docker/buildx/controller/pb"
|
controllerapi "github.com/docker/buildx/controller/pb"
|
||||||
"github.com/moby/buildkit/util/gitutil"
|
"github.com/moby/buildkit/util/gitutil"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type SSH struct {
|
||||||
|
ID string `json:"id,omitempty" cty:"id"`
|
||||||
|
Paths []string `json:"paths,omitempty" cty:"paths"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *SSH) Equal(other *SSH) bool {
|
||||||
|
return s.Less(other) == 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *SSH) Less(other *SSH) int {
|
||||||
|
if s.ID != other.ID {
|
||||||
|
return cmp.Compare(s.ID, other.ID)
|
||||||
|
}
|
||||||
|
return slices.Compare(s.Paths, other.Paths)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *SSH) String() string {
|
||||||
|
if len(s.Paths) == 0 {
|
||||||
|
return s.ID
|
||||||
|
}
|
||||||
|
|
||||||
|
var b csvBuilder
|
||||||
|
paths := strings.Join(s.Paths, ",")
|
||||||
|
b.Write(s.ID, paths)
|
||||||
|
return b.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *SSH) ToPB() *controllerapi.SSH {
|
||||||
|
return &controllerapi.SSH{
|
||||||
|
ID: s.ID,
|
||||||
|
Paths: s.Paths,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *SSH) UnmarshalText(text []byte) error {
|
||||||
|
parts := strings.SplitN(string(text), "=", 2)
|
||||||
|
|
||||||
|
s.ID = parts[0]
|
||||||
|
if len(parts) > 1 {
|
||||||
|
s.Paths = strings.Split(parts[1], ",")
|
||||||
|
} else {
|
||||||
|
s.Paths = nil
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func ParseSSHSpecs(sl []string) ([]*controllerapi.SSH, error) {
|
func ParseSSHSpecs(sl []string) ([]*controllerapi.SSH, error) {
|
||||||
var outs []*controllerapi.SSH
|
var outs []*controllerapi.SSH
|
||||||
if len(sl) == 0 {
|
if len(sl) == 0 {
|
||||||
|
@ -14,14 +62,11 @@ func ParseSSHSpecs(sl []string) ([]*controllerapi.SSH, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, s := range sl {
|
for _, s := range sl {
|
||||||
parts := strings.SplitN(s, "=", 2)
|
var out SSH
|
||||||
out := controllerapi.SSH{
|
if err := out.UnmarshalText([]byte(s)); err != nil {
|
||||||
ID: parts[0],
|
return nil, err
|
||||||
}
|
}
|
||||||
if len(parts) > 1 {
|
outs = append(outs, out.ToPB())
|
||||||
out.Paths = strings.Split(parts[1], ",")
|
|
||||||
}
|
|
||||||
outs = append(outs, &out)
|
|
||||||
}
|
}
|
||||||
return outs, nil
|
return outs, nil
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue