mirror of https://github.com/docker/buildx.git

Merge pull request #2027 from thaJeztah/swap_reference
migrate reference github.com/distribution/reference

This commit is contained in commit 3631dc17c9.
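As an editor's aside (not part of this commit): the swap replaces the archived `github.com/docker/distribution/reference` package with its extracted successor `github.com/distribution/reference`, which keeps the same API, so call sites normally change only the import path. A minimal sketch of the new module in use:

```
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	// Parse a "familiar" name into a fully-qualified, normalized reference.
	named, err := reference.ParseNormalizedNamed("redis:7.2")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.String())                  // docker.io/library/redis:7.2
	fmt.Println(reference.FamiliarString(named)) // redis:7.2
}
```

This is consistent with the hunks below, which are import-path changes only.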
@ -23,6 +23,7 @@ import (
"github.com/containerd/containerd/content/local"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/platforms"
"github.com/distribution/reference"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/driver"
"github.com/docker/buildx/localstate"

@ -33,7 +34,6 @@ import (
"github.com/docker/buildx/util/resolver"
"github.com/docker/buildx/util/waitmap"
"github.com/docker/cli/opts"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/builder/remotecontext/urlutil"
"github.com/docker/docker/pkg/jsonmessage"
@ -7,12 +7,12 @@ import (
"os"
"strings"

"github.com/distribution/reference"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/util/cobrautil/completion"
"github.com/docker/buildx/util/imagetools"
"github.com/docker/buildx/util/progress"
"github.com/docker/cli/cli/command"
"github.com/docker/distribution/reference"
"github.com/moby/buildkit/util/appcontext"
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
go.mod (6 changes)

@ -5,15 +5,15 @@ go 1.20
require (
github.com/Masterminds/semver/v3 v3.2.1
github.com/aws/aws-sdk-go-v2/config v1.18.16
github.com/compose-spec/compose-go v1.17.0
github.com/compose-spec/compose-go v1.18.4
github.com/containerd/console v1.0.3
github.com/containerd/containerd v1.7.2
github.com/containerd/continuity v0.4.1
github.com/containerd/typeurl/v2 v2.1.1
github.com/creack/pty v1.1.18
github.com/distribution/reference v0.5.0
github.com/docker/cli v24.0.5+incompatible
github.com/docker/cli-docs-tool v0.6.0
github.com/docker/distribution v2.8.2+incompatible
github.com/docker/docker v24.0.5+incompatible
github.com/docker/go-units v0.5.0
github.com/gofrs/flock v0.8.1

@ -75,7 +75,7 @@ require (
github.com/containerd/ttrpc v1.2.2 // indirect
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/distribution/distribution/v3 v3.0.0-20230214150026-36d8c594d7aa // indirect
github.com/docker/distribution v2.8.2+incompatible // indirect
github.com/docker/docker-credential-helpers v0.7.0 // indirect
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
github.com/docker/go-connections v0.4.0 // indirect
go.sum (10 changes)
@ -48,8 +48,8 @@ github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migc
|
|||
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
|
||||
github.com/Microsoft/hcsshim v0.10.0-rc.8 h1:YSZVvlIIDD1UxQpJp0h+dnpLUw+TrY0cx8obKsp3bek=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/Shopify/logrus-bugsnag v0.0.0-20170309145241-6dbc35f2c30d h1:hi6J4K6DKrR4/ljxn6SF6nURyu785wKMuQcjt7H3VCQ=
|
||||
github.com/Shopify/logrus-bugsnag v0.0.0-20170309145241-6dbc35f2c30d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
|
||||
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs=
|
||||
github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
|
||||
github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
|
||||
github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
|
||||
|
@ -126,8 +126,8 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH
|
|||
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE=
|
||||
github.com/compose-spec/compose-go v1.17.0 h1:cvje90CU94dQyTnJoHJYjx9yE4Iggse1XmGcO3Qi5ts=
|
||||
github.com/compose-spec/compose-go v1.17.0/go.mod h1:zR2tP1+kZHi5vJz7PjpW6oMoDji/Js3GHjP+hfjf70Q=
|
||||
github.com/compose-spec/compose-go v1.18.4 h1:yLYfsc3ATAYZVAJcXyx/V847/JVBmf3pfKfR13mXU4s=
|
||||
github.com/compose-spec/compose-go v1.18.4/go.mod h1:+MdqXV4RA7wdFsahh/Kb8U0pAJqkg7mr4PM9tFKU8RM=
|
||||
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
|
||||
github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw=
|
||||
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
|
||||
|
@ -153,8 +153,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
|
|||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
|
||||
github.com/distribution/distribution/v3 v3.0.0-20230214150026-36d8c594d7aa h1:L9Ay/slwQ4ERSPaurC+TVkZrM0K98GNrEEo1En3e8as=
|
||||
github.com/distribution/distribution/v3 v3.0.0-20230214150026-36d8c594d7aa/go.mod h1:WHNsWjnIn2V1LYOrME7e8KxSeKunYHsxEm4am0BUtcI=
|
||||
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
|
||||
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/docker/cli v24.0.5+incompatible h1:WeBimjvS0eKdH4Ygx+ihVq1Q++xg36M/rMi4aXAvodc=
|
||||
github.com/docker/cli v24.0.5+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/cli-docs-tool v0.6.0 h1:Z9x10SaZgFaB6jHgz3OWooynhSa40CsWkpe5hEnG/qA=
|
||||
|
|
|
@ -4,8 +4,8 @@ import (
"os"
"testing"

"github.com/distribution/reference"
"github.com/docker/buildx/tests/workers"
"github.com/docker/distribution/reference"
"github.com/moby/buildkit/util/testutil/integration"
bkworkers "github.com/moby/buildkit/util/testutil/workers"
)
@ -3,7 +3,7 @@ package buildflags
|
|||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/distribution/reference"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
|
|
|
@ -13,7 +13,7 @@ import (
|
|||
"github.com/containerd/containerd/images"
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/containerd/containerd/remotes"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/distribution/reference"
|
||||
"github.com/moby/buildkit/exporter/containerimage/exptypes"
|
||||
"github.com/moby/buildkit/util/contentutil"
|
||||
"github.com/opencontainers/go-digest"
|
||||
|
|
|
@ -11,9 +11,9 @@ import (
|
|||
"github.com/containerd/containerd/log"
|
||||
"github.com/containerd/containerd/remotes"
|
||||
"github.com/containerd/containerd/remotes/docker"
|
||||
"github.com/distribution/reference"
|
||||
"github.com/docker/buildx/util/resolver"
|
||||
clitypes "github.com/docker/cli/cli/config/types"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/moby/buildkit/util/contentutil"
|
||||
"github.com/moby/buildkit/util/tracing"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
|
|
@ -13,7 +13,7 @@ import (
|
|||
"github.com/containerd/containerd/images"
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/containerd/containerd/remotes"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/distribution/reference"
|
||||
"github.com/moby/buildkit/util/contentutil"
|
||||
"github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
|
|
@ -12,7 +12,7 @@ import (
|
|||
|
||||
"github.com/containerd/containerd/images"
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/distribution/reference"
|
||||
"github.com/opencontainers/go-digest"
|
||||
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
@ -35,6 +36,8 @@ import (
|
|||
|
||||
// ProjectOptions provides common configuration for loading a project.
|
||||
type ProjectOptions struct {
|
||||
ctx context.Context
|
||||
|
||||
// Name is a valid Compose project name to be used or empty.
|
||||
//
|
||||
// If empty, the project loader will automatically infer a reasonable
|
||||
|
@ -63,7 +66,7 @@ type ProjectOptions struct {
|
|||
// NOTE: For security, the loader does not automatically expose any
|
||||
// process environment variables. For convenience, WithOsEnv can be
|
||||
// used if appropriate.
|
||||
Environment map[string]string
|
||||
Environment types.Mapping
|
||||
|
||||
// EnvFiles are file paths to ".env" files with additional environment
|
||||
// variable data.
|
||||
|
@ -191,7 +194,7 @@ func WithEnv(env []string) ProjectOptionsFn {
|
|||
}
|
||||
}
|
||||
|
||||
// WithDiscardEnvFiles sets discards the `env_file` section after resolving to
|
||||
// WithDiscardEnvFile sets discards the `env_file` section after resolving to
|
||||
// the `environment` section
|
||||
func WithDiscardEnvFile(o *ProjectOptions) error {
|
||||
o.loadOptions = append(o.loadOptions, loader.WithDiscardEnvFiles)
|
||||
|
@ -206,6 +209,15 @@ func WithLoadOptions(loadOptions ...func(*loader.Options)) ProjectOptionsFn {
|
|||
}
|
||||
}
|
||||
|
||||
// WithDefaultProfiles uses the provided profiles (if any), and falls back to
// profiles specified via the COMPOSE_PROFILES environment variable otherwise.
func WithDefaultProfiles(profile ...string) ProjectOptionsFn {
	if len(profile) == 0 {
		profile = strings.Split(os.Getenv(consts.ComposeProfiles), ",")
	}
	return WithProfiles(profile)
}
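An editor-added usage sketch (not part of the vendored code) of the new option; the compose file path and the "frontend" profile are hypothetical, and the constructor and loader calls are the existing compose-go cli helpers:

```
package main

import (
	"fmt"

	"github.com/compose-spec/compose-go/cli"
)

func main() {
	// Use the explicit profile if given; otherwise WithDefaultProfiles falls
	// back to whatever COMPOSE_PROFILES contains.
	opts, err := cli.NewProjectOptions(
		[]string{"compose.yaml"}, // hypothetical compose file path
		cli.WithOsEnv,
		cli.WithDotEnv,
		cli.WithDefaultProfiles("frontend"),
	)
	if err != nil {
		panic(err)
	}
	project, err := cli.ProjectFromOptions(opts)
	if err != nil {
		panic(err)
	}
	fmt.Println(project.Name, len(project.Services))
}
```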
|
||||
|
||||
// WithProfiles sets profiles to be activated
|
||||
func WithProfiles(profiles []string) ProjectOptionsFn {
|
||||
return func(o *ProjectOptions) error {
|
||||
|
@ -225,8 +237,9 @@ func WithOsEnv(o *ProjectOptions) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// WithEnvFile set an alternate env file
|
||||
// deprecated - use WithEnvFiles
|
||||
// WithEnvFile sets an alternate env file.
|
||||
//
|
||||
// Deprecated: use WithEnvFiles instead.
|
||||
func WithEnvFile(file string) ProjectOptionsFn {
|
||||
var files []string
|
||||
if file != "" {
|
||||
|
@ -253,11 +266,7 @@ func WithDotEnv(o *ProjectOptions) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for k, v := range envMap {
|
||||
if _, set := o.Environment[k]; !set {
|
||||
o.Environment[k] = v
|
||||
}
|
||||
}
|
||||
o.Environment.Merge(envMap)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -301,6 +310,24 @@ func WithResolvedPaths(resolve bool) ProjectOptionsFn {
|
|||
}
|
||||
}
|
||||
|
||||
// WithContext sets the context used to load model and resources
|
||||
func WithContext(ctx context.Context) ProjectOptionsFn {
|
||||
return func(o *ProjectOptions) error {
|
||||
o.ctx = ctx
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithResourceLoader register support for ResourceLoader to manage remote resources
|
||||
func WithResourceLoader(r loader.ResourceLoader) ProjectOptionsFn {
|
||||
return func(o *ProjectOptions) error {
|
||||
o.loadOptions = append(o.loadOptions, func(options *loader.Options) {
|
||||
options.ResourceLoaders = append(options.ResourceLoaders, r)
|
||||
})
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DefaultFileNames defines the Compose file names for auto-discovery (in order of preference)
|
||||
var DefaultFileNames = []string{"compose.yaml", "compose.yml", "docker-compose.yml", "docker-compose.yaml"}
|
||||
|
||||
|
@ -367,7 +394,12 @@ func ProjectFromOptions(options *ProjectOptions) (*types.Project, error) {
|
|||
withNamePrecedenceLoad(absWorkingDir, options),
|
||||
withConvertWindowsPaths(options))
|
||||
|
||||
project, err := loader.Load(types.ConfigDetails{
|
||||
ctx := options.ctx
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
|
||||
project, err := loader.LoadWithContext(ctx, types.ConfigDetails{
|
||||
ConfigFiles: configs,
|
||||
WorkingDir: workingDir,
|
||||
Environment: options.Environment,
|
||||
|
|
|
@ -17,10 +17,12 @@
|
|||
package loader
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/compose-spec/compose-go/dotenv"
|
||||
interp "github.com/compose-spec/compose-go/interpolation"
|
||||
"github.com/compose-spec/compose-go/types"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
@ -43,12 +45,23 @@ var transformIncludeConfig TransformerFunc = func(data interface{}) (interface{}
|
|||
}
|
||||
}
|
||||
|
||||
func loadInclude(configDetails types.ConfigDetails, model *types.Config, options *Options, loaded []string) (*types.Config, error) {
|
||||
func loadInclude(ctx context.Context, filename string, configDetails types.ConfigDetails, model *types.Config, options *Options, loaded []string) (*types.Config, map[string][]types.IncludeConfig, error) {
|
||||
included := make(map[string][]types.IncludeConfig)
|
||||
for _, r := range model.Include {
|
||||
included[filename] = append(included[filename], r)
|
||||
|
||||
for i, p := range r.Path {
|
||||
if !filepath.IsAbs(p) {
|
||||
r.Path[i] = filepath.Join(configDetails.WorkingDir, p)
|
||||
for _, loader := range options.ResourceLoaders {
|
||||
if loader.Accept(p) {
|
||||
path, err := loader.Load(ctx, p)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
p = path
|
||||
break
|
||||
}
|
||||
}
|
||||
r.Path[i] = absPath(configDetails.WorkingDir, p)
|
||||
}
|
||||
if r.ProjectDirectory == "" {
|
||||
r.ProjectDirectory = filepath.Dir(r.Path[0])
|
||||
|
@ -60,27 +73,36 @@ func loadInclude(configDetails types.ConfigDetails, model *types.Config, options
|
|||
loadOptions.SkipNormalization = true
|
||||
loadOptions.SkipConsistencyCheck = true
|
||||
|
||||
env, err := dotenv.GetEnvFromFile(configDetails.Environment, r.ProjectDirectory, r.EnvFile)
|
||||
envFromFile, err := dotenv.GetEnvFromFile(configDetails.Environment, r.ProjectDirectory, r.EnvFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
imported, err := load(types.ConfigDetails{
|
||||
config := types.ConfigDetails{
|
||||
WorkingDir: r.ProjectDirectory,
|
||||
ConfigFiles: types.ToConfigFiles(r.Path),
|
||||
Environment: env,
|
||||
}, loadOptions, loaded)
|
||||
Environment: configDetails.Environment.Clone().Merge(envFromFile),
|
||||
}
|
||||
loadOptions.Interpolate = &interp.Options{
|
||||
Substitute: options.Interpolate.Substitute,
|
||||
LookupValue: config.LookupEnv,
|
||||
TypeCastMapping: options.Interpolate.TypeCastMapping,
|
||||
}
|
||||
imported, err := load(ctx, config, loadOptions, loaded)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
for k, v := range imported.IncludeReferences {
|
||||
included[k] = append(included[k], v...)
|
||||
}
|
||||
|
||||
err = importResources(model, imported, r.Path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
model.Include = nil
|
||||
return model, nil
|
||||
return model, included, nil
|
||||
}
|
||||
|
||||
// importResources import into model all resources defined by imported, and report error on conflict
|
||||
|
@ -92,6 +114,12 @@ func importResources(model *types.Config, imported *types.Project, path []string
|
|||
}
|
||||
model.Services = append(model.Services, service)
|
||||
}
|
||||
for _, service := range imported.DisabledServices {
|
||||
if _, ok := services[service.Name]; ok {
|
||||
return fmt.Errorf("imported compose file %s defines conflicting service %s", path, service.Name)
|
||||
}
|
||||
model.Services = append(model.Services, service)
|
||||
}
|
||||
for n, network := range imported.Networks {
|
||||
if _, ok := model.Networks[n]; ok {
|
||||
return fmt.Errorf("imported compose file %s defines conflicting network %s", path, n)
|
||||
|
|
|
@ -17,7 +17,10 @@
|
|||
package loader
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
paths "path"
|
||||
"path/filepath"
|
||||
|
@ -68,6 +71,16 @@ type Options struct {
|
|||
projectNameImperativelySet bool
|
||||
// Profiles set profiles to enable
|
||||
Profiles []string
|
||||
// ResourceLoaders manages support for remote resources
|
||||
ResourceLoaders []ResourceLoader
|
||||
}
|
||||
|
||||
// ResourceLoader is a pluggable remote resource resolver
type ResourceLoader interface {
	// Accept returns `true` if the resource reference matches the ResourceLoader's supported protocol(s)
	Accept(path string) bool
	// Load returns the path to a local copy of the remote resource identified by `path`.
	Load(ctx context.Context, path string) (string, error)
}
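As an editor-added sketch (not part of the vendored code): any type with these two methods satisfies the interface. The hypothetical HTTP-backed loader below downloads a remote resource into a cache directory and returns that local path; it could then be wired in through the `WithResourceLoader` project option added later in this diff.

```
package main

import (
	"context"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"strings"
)

// httpLoader is a hypothetical loader.ResourceLoader that fetches remote
// compose resources over HTTP(S) and caches them in dir.
type httpLoader struct{ dir string }

func (l httpLoader) Accept(path string) bool {
	return strings.HasPrefix(path, "http://") || strings.HasPrefix(path, "https://")
}

func (l httpLoader) Load(ctx context.Context, path string) (string, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, path, nil)
	if err != nil {
		return "", err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	// Naively derive a local file name from the URL and copy the body there,
	// then hand the local path back, as the interface requires.
	local := filepath.Join(l.dir, filepath.Base(path))
	f, err := os.Create(local)
	if err != nil {
		return "", err
	}
	defer f.Close()
	if _, err := io.Copy(f, resp.Body); err != nil {
		return "", err
	}
	return local, nil
}
```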
|
||||
|
||||
func (o *Options) clone() *Options {
|
||||
|
@ -85,6 +98,7 @@ func (o *Options) clone() *Options {
|
|||
projectName: o.projectName,
|
||||
projectNameImperativelySet: o.projectNameImperativelySet,
|
||||
Profiles: o.Profiles,
|
||||
ResourceLoaders: o.ResourceLoaders,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -154,7 +168,9 @@ func WithProfiles(profiles []string) func(*Options) {
|
|||
// ParseYAML reads the bytes from a file, parses the bytes into a mapping
// structure, and returns it.
func ParseYAML(source []byte) (map[string]interface{}, error) {
	m, _, err := parseYAML(source)
	r := bytes.NewReader(source)
	decoder := yaml.NewDecoder(r)
	m, _, err := parseYAML(decoder)
	return m, err
}
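The removed `parseYAML(source)` call parsed a single document in one shot; passing a `yaml.Decoder` instead lets callers keep decoding documents from the same stream, which the reworked `load` loop further down uses for multi-document Compose files. An editor-added, standalone sketch of that decode-until-EOF pattern (assuming `gopkg.in/yaml.v3`; the sample YAML is made up):

```
package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"

	"gopkg.in/yaml.v3"
)

func main() {
	// Two YAML documents in one stream, separated by "---".
	src := []byte("services:\n  app:\n    image: alpine\n---\nservices:\n  db:\n    image: postgres\n")

	decoder := yaml.NewDecoder(bytes.NewReader(src))
	for {
		var doc map[string]interface{}
		if err := decoder.Decode(&doc); err != nil {
			if errors.Is(err, io.EOF) {
				break // no more documents in the stream
			}
			panic(err)
		}
		fmt.Println(doc["services"])
	}
}
```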
|
||||
|
||||
|
@ -167,11 +183,11 @@ type PostProcessor interface {
|
|||
Apply(config *types.Config) error
|
||||
}
|
||||
|
||||
func parseYAML(source []byte) (map[string]interface{}, PostProcessor, error) {
|
||||
func parseYAML(decoder *yaml.Decoder) (map[string]interface{}, PostProcessor, error) {
|
||||
var cfg interface{}
|
||||
processor := ResetProcessor{target: &cfg}
|
||||
|
||||
if err := yaml.Unmarshal(source, &processor); err != nil {
|
||||
if err := decoder.Decode(&processor); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
stringMap, ok := cfg.(map[string]interface{})
|
||||
|
@ -193,8 +209,14 @@ func parseYAML(source []byte) (map[string]interface{}, PostProcessor, error) {
|
|||
return converted.(map[string]interface{}), &processor, nil
|
||||
}
|
||||
|
||||
// Load reads a ConfigDetails and returns a fully loaded configuration
|
||||
// Load reads a ConfigDetails and returns a fully loaded configuration.
|
||||
// Deprecated: use LoadWithContext.
|
||||
func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.Project, error) {
|
||||
return LoadWithContext(context.Background(), configDetails, options...)
|
||||
}
|
||||
|
||||
// LoadWithContext reads a ConfigDetails and returns a fully loaded configuration
|
||||
func LoadWithContext(ctx context.Context, configDetails types.ConfigDetails, options ...func(*Options)) (*types.Project, error) {
|
||||
if len(configDetails.ConfigFiles) < 1 {
|
||||
return nil, errors.Errorf("No files specified")
|
||||
}
|
||||
|
@ -217,10 +239,10 @@ func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.
|
|||
return nil, err
|
||||
}
|
||||
opts.projectName = projectName
|
||||
return load(configDetails, opts, nil)
|
||||
return load(ctx, configDetails, opts, nil)
|
||||
}
|
||||
|
||||
func load(configDetails types.ConfigDetails, opts *Options, loaded []string) (*types.Project, error) {
|
||||
func load(ctx context.Context, configDetails types.ConfigDetails, opts *Options, loaded []string) (*types.Project, error) {
|
||||
var model *types.Config
|
||||
|
||||
mainFile := configDetails.ConfigFiles[0].Filename
|
||||
|
@ -232,9 +254,56 @@ func load(configDetails types.ConfigDetails, opts *Options, loaded []string) (*t
|
|||
}
|
||||
loaded = append(loaded, mainFile)
|
||||
|
||||
for i, file := range configDetails.ConfigFiles {
|
||||
includeRefs := make(map[string][]types.IncludeConfig)
|
||||
first := true
|
||||
for _, file := range configDetails.ConfigFiles {
|
||||
var postProcessor PostProcessor
|
||||
configDict := file.Config
|
||||
|
||||
processYaml := func() error {
|
||||
if !opts.SkipValidation {
|
||||
if err := schema.Validate(configDict); err != nil {
|
||||
return fmt.Errorf("validating %s: %w", file.Filename, err)
|
||||
}
|
||||
}
|
||||
|
||||
configDict = groupXFieldsIntoExtensions(configDict)
|
||||
|
||||
cfg, err := loadSections(ctx, file.Filename, configDict, configDetails, opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !opts.SkipInclude {
|
||||
var included map[string][]types.IncludeConfig
|
||||
cfg, included, err = loadInclude(ctx, file.Filename, configDetails, cfg, opts, loaded)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for k, v := range included {
|
||||
includeRefs[k] = append(includeRefs[k], v...)
|
||||
}
|
||||
}
|
||||
|
||||
if first {
|
||||
first = false
|
||||
model = cfg
|
||||
return nil
|
||||
}
|
||||
merged, err := merge([]*types.Config{model, cfg})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if postProcessor != nil {
|
||||
err = postProcessor.Apply(merged)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
model = merged
|
||||
return nil
|
||||
}
|
||||
|
||||
if configDict == nil {
|
||||
if len(file.Content) == 0 {
|
||||
content, err := os.ReadFile(file.Filename)
|
||||
|
@ -243,52 +312,29 @@ func load(configDetails types.ConfigDetails, opts *Options, loaded []string) (*t
|
|||
}
|
||||
file.Content = content
|
||||
}
|
||||
dict, p, err := parseConfig(file.Content, opts)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parsing %s: %w", file.Filename, err)
|
||||
|
||||
r := bytes.NewReader(file.Content)
|
||||
decoder := yaml.NewDecoder(r)
|
||||
for {
|
||||
dict, p, err := parseConfig(decoder, opts)
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
return nil, fmt.Errorf("parsing %s: %w", file.Filename, err)
|
||||
}
|
||||
break
|
||||
}
|
||||
configDict = dict
|
||||
postProcessor = p
|
||||
|
||||
if err := processYaml(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
configDict = dict
|
||||
file.Config = dict
|
||||
configDetails.ConfigFiles[i] = file
|
||||
postProcessor = p
|
||||
}
|
||||
|
||||
if !opts.SkipValidation {
|
||||
if err := schema.Validate(configDict); err != nil {
|
||||
return nil, fmt.Errorf("validating %s: %w", file.Filename, err)
|
||||
}
|
||||
}
|
||||
|
||||
configDict = groupXFieldsIntoExtensions(configDict)
|
||||
|
||||
cfg, err := loadSections(file.Filename, configDict, configDetails, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !opts.SkipInclude {
|
||||
cfg, err = loadInclude(configDetails, cfg, opts, loaded)
|
||||
if err != nil {
|
||||
} else {
|
||||
if err := processYaml(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if i == 0 {
|
||||
model = cfg
|
||||
continue
|
||||
}
|
||||
|
||||
merged, err := merge([]*types.Config{model, cfg})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if postProcessor != nil {
|
||||
err = postProcessor.Apply(merged)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
model = merged
|
||||
}
|
||||
|
||||
project := &types.Project{
|
||||
|
@ -303,6 +349,10 @@ func load(configDetails types.ConfigDetails, opts *Options, loaded []string) (*t
|
|||
Extensions: model.Extensions,
|
||||
}
|
||||
|
||||
if len(includeRefs) != 0 {
|
||||
project.IncludeReferences = includeRefs
|
||||
}
|
||||
|
||||
if !opts.SkipNormalization {
|
||||
err := Normalize(project)
|
||||
if err != nil {
|
||||
|
@ -333,9 +383,6 @@ func load(configDetails types.ConfigDetails, opts *Options, loaded []string) (*t
|
|||
}
|
||||
}
|
||||
|
||||
if profiles, ok := project.Environment[consts.ComposeProfiles]; ok && len(opts.Profiles) == 0 {
|
||||
opts.Profiles = strings.Split(profiles, ",")
|
||||
}
|
||||
project.ApplyProfiles(opts.Profiles)
|
||||
|
||||
err := project.ResolveServicesEnvironment(opts.discardEnvFiles)
|
||||
|
@ -422,8 +469,8 @@ func NormalizeProjectName(s string) string {
|
|||
return strings.TrimLeft(s, "_-")
|
||||
}
|
||||
|
||||
func parseConfig(b []byte, opts *Options) (map[string]interface{}, PostProcessor, error) {
|
||||
yml, postProcessor, err := parseYAML(b)
|
||||
func parseConfig(decoder *yaml.Decoder, opts *Options) (map[string]interface{}, PostProcessor, error) {
|
||||
yml, postProcessor, err := parseYAML(decoder)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
@ -453,7 +500,7 @@ func groupXFieldsIntoExtensions(dict map[string]interface{}) map[string]interfac
|
|||
return dict
|
||||
}
|
||||
|
||||
func loadSections(filename string, config map[string]interface{}, configDetails types.ConfigDetails, opts *Options) (*types.Config, error) {
|
||||
func loadSections(ctx context.Context, filename string, config map[string]interface{}, configDetails types.ConfigDetails, opts *Options) (*types.Config, error) {
|
||||
var err error
|
||||
cfg := types.Config{
|
||||
Filename: filename,
|
||||
|
@ -466,7 +513,7 @@ func loadSections(filename string, config map[string]interface{}, configDetails
|
|||
}
|
||||
}
|
||||
cfg.Name = name
|
||||
cfg.Services, err = LoadServices(filename, getSection(config, "services"), configDetails.WorkingDir, configDetails.LookupEnv, opts)
|
||||
cfg.Services, err = LoadServices(ctx, filename, getSection(config, "services"), configDetails.WorkingDir, configDetails.LookupEnv, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -659,7 +706,7 @@ func formatInvalidKeyError(keyPrefix string, key interface{}) error {
|
|||
|
||||
// LoadServices produces a ServiceConfig map from a compose file Dict
|
||||
// the servicesDict is not validated if directly used. Use Load() to enable validation
|
||||
func LoadServices(filename string, servicesDict map[string]interface{}, workingDir string, lookupEnv template.Mapping, opts *Options) ([]types.ServiceConfig, error) {
|
||||
func LoadServices(ctx context.Context, filename string, servicesDict map[string]interface{}, workingDir string, lookupEnv template.Mapping, opts *Options) ([]types.ServiceConfig, error) {
|
||||
var services []types.ServiceConfig
|
||||
|
||||
x, ok := servicesDict[extensions]
|
||||
|
@ -672,7 +719,7 @@ func LoadServices(filename string, servicesDict map[string]interface{}, workingD
|
|||
}
|
||||
|
||||
for name := range servicesDict {
|
||||
serviceConfig, err := loadServiceWithExtends(filename, name, servicesDict, workingDir, lookupEnv, opts, &cycleTracker{})
|
||||
serviceConfig, err := loadServiceWithExtends(ctx, filename, name, servicesDict, workingDir, lookupEnv, opts, &cycleTracker{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -683,7 +730,7 @@ func LoadServices(filename string, servicesDict map[string]interface{}, workingD
|
|||
return services, nil
|
||||
}
|
||||
|
||||
func loadServiceWithExtends(filename, name string, servicesDict map[string]interface{}, workingDir string, lookupEnv template.Mapping, opts *Options, ct *cycleTracker) (*types.ServiceConfig, error) {
|
||||
func loadServiceWithExtends(ctx context.Context, filename, name string, servicesDict map[string]interface{}, workingDir string, lookupEnv template.Mapping, opts *Options, ct *cycleTracker) (*types.ServiceConfig, error) {
|
||||
if err := ct.Add(filename, name); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -707,11 +754,21 @@ func loadServiceWithExtends(filename, name string, servicesDict map[string]inter
|
|||
var baseService *types.ServiceConfig
|
||||
file := serviceConfig.Extends.File
|
||||
if file == "" {
|
||||
baseService, err = loadServiceWithExtends(filename, baseServiceName, servicesDict, workingDir, lookupEnv, opts, ct)
|
||||
baseService, err = loadServiceWithExtends(ctx, filename, baseServiceName, servicesDict, workingDir, lookupEnv, opts, ct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
for _, loader := range opts.ResourceLoaders {
|
||||
if loader.Accept(file) {
|
||||
path, err := loader.Load(ctx, file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
file = path
|
||||
break
|
||||
}
|
||||
}
|
||||
// Resolve the path to the imported file, and load it.
|
||||
baseFilePath := absPath(workingDir, file)
|
||||
|
||||
|
@ -720,13 +777,16 @@ func loadServiceWithExtends(filename, name string, servicesDict map[string]inter
|
|||
return nil, err
|
||||
}
|
||||
|
||||
baseFile, _, err := parseConfig(b, opts)
|
||||
r := bytes.NewReader(b)
|
||||
decoder := yaml.NewDecoder(r)
|
||||
|
||||
baseFile, _, err := parseConfig(decoder, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
baseFileServices := getSection(baseFile, "services")
|
||||
baseService, err = loadServiceWithExtends(baseFilePath, baseServiceName, baseFileServices, filepath.Dir(baseFilePath), lookupEnv, opts, ct)
|
||||
baseService, err = loadServiceWithExtends(ctx, baseFilePath, baseServiceName, baseFileServices, filepath.Dir(baseFilePath), lookupEnv, opts, ct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -1038,7 +1098,12 @@ var transformServiceDeviceRequest TransformerFunc = func(data interface{}) (inte
|
|||
value["count"] = -1
|
||||
return value, nil
|
||||
}
|
||||
return data, errors.Errorf("invalid string value for 'count' (the only value allowed is 'all')")
|
||||
i, err := strconv.ParseInt(val, 10, 64)
|
||||
if err == nil {
|
||||
value["count"] = i
|
||||
return value, nil
|
||||
}
|
||||
return data, errors.Errorf("invalid string value for 'count' (the only value allowed is 'all' or a number)")
|
||||
default:
|
||||
return data, errors.Errorf("invalid type %T for device count", val)
|
||||
}
|
||||
|
|
|
@ -64,6 +64,25 @@ func ResolveRelativePaths(project *types.Project) error {
|
|||
project.Volumes[name] = config
|
||||
}
|
||||
}
|
||||
|
||||
// don't coerce a nil map to an empty map
|
||||
if project.IncludeReferences != nil {
|
||||
absIncludes := make(map[string][]types.IncludeConfig, len(project.IncludeReferences))
|
||||
for filename, config := range project.IncludeReferences {
|
||||
filename = absPath(project.WorkingDir, filename)
|
||||
absConfigs := make([]types.IncludeConfig, len(config))
|
||||
for i, c := range config {
|
||||
absConfigs[i] = types.IncludeConfig{
|
||||
Path: resolvePaths(project.WorkingDir, c.Path),
|
||||
ProjectDirectory: absPath(project.WorkingDir, c.ProjectDirectory),
|
||||
EnvFile: resolvePaths(project.WorkingDir, c.EnvFile),
|
||||
}
|
||||
}
|
||||
absIncludes[filename] = absConfigs
|
||||
}
|
||||
project.IncludeReferences = absIncludes
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -133,3 +152,14 @@ func isRemoteContext(maybeURL string) bool {
|
|||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func resolvePaths(basePath string, in types.StringList) types.StringList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
ret := make(types.StringList, len(in))
|
||||
for i := range in {
|
||||
ret[i] = absPath(basePath, in[i])
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
|
|
@ -34,7 +34,7 @@ type ConfigDetails struct {
|
|||
Version string
|
||||
WorkingDir string
|
||||
ConfigFiles []ConfigFile
|
||||
Environment map[string]string
|
||||
Environment Mapping
|
||||
}
|
||||
|
||||
// LookupEnv provides a lookup function for environment variables
|
||||
|
|
|
@ -24,10 +24,9 @@ import (
|
|||
"path/filepath"
|
||||
"sort"
|
||||
|
||||
"github.com/compose-spec/compose-go/utils"
|
||||
|
||||
"github.com/compose-spec/compose-go/dotenv"
|
||||
"github.com/distribution/distribution/v3/reference"
|
||||
"github.com/compose-spec/compose-go/utils"
|
||||
"github.com/distribution/reference"
|
||||
godigest "github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
@ -36,16 +35,22 @@ import (
|
|||
|
||||
// Project is the result of loading a set of compose files
|
||||
type Project struct {
|
||||
Name string `yaml:"name,omitempty" json:"name,omitempty"`
|
||||
WorkingDir string `yaml:"-" json:"-"`
|
||||
Services Services `yaml:"services" json:"services"`
|
||||
Networks Networks `yaml:"networks,omitempty" json:"networks,omitempty"`
|
||||
Volumes Volumes `yaml:"volumes,omitempty" json:"volumes,omitempty"`
|
||||
Secrets Secrets `yaml:"secrets,omitempty" json:"secrets,omitempty"`
|
||||
Configs Configs `yaml:"configs,omitempty" json:"configs,omitempty"`
|
||||
Extensions Extensions `yaml:"#extensions,inline" json:"-"` // https://github.com/golang/go/issues/6213
|
||||
ComposeFiles []string `yaml:"-" json:"-"`
|
||||
Environment Mapping `yaml:"-" json:"-"`
|
||||
Name string `yaml:"name,omitempty" json:"name,omitempty"`
|
||||
WorkingDir string `yaml:"-" json:"-"`
|
||||
Services Services `yaml:"services" json:"services"`
|
||||
Networks Networks `yaml:"networks,omitempty" json:"networks,omitempty"`
|
||||
Volumes Volumes `yaml:"volumes,omitempty" json:"volumes,omitempty"`
|
||||
Secrets Secrets `yaml:"secrets,omitempty" json:"secrets,omitempty"`
|
||||
Configs Configs `yaml:"configs,omitempty" json:"configs,omitempty"`
|
||||
Extensions Extensions `yaml:"#extensions,inline" json:"-"` // https://github.com/golang/go/issues/6213
|
||||
|
||||
// IncludeReferences is keyed by Compose YAML filename and contains config for
|
||||
// other Compose YAML files it directly triggered a load of via `include`.
|
||||
//
|
||||
// Note: this is
|
||||
IncludeReferences map[string][]IncludeConfig `yaml:"-" json:"-"`
|
||||
ComposeFiles []string `yaml:"-" json:"-"`
|
||||
Environment Mapping `yaml:"-" json:"-"`
|
||||
|
||||
// DisabledServices track services which have been disable as profile is not active
|
||||
DisabledServices Services `yaml:"-" json:"-"`
|
||||
|
@ -418,18 +423,36 @@ func (p *Project) ForServices(names []string, options ...DependencyOption) error
|
|||
if _, ok := set[s.Name]; ok {
|
||||
for _, option := range options {
|
||||
if option == IgnoreDependencies {
|
||||
s.DependsOn = nil
|
||||
// remove all dependencies but those implied by explicitly selected services
|
||||
dependencies := s.DependsOn
|
||||
for d := range dependencies {
|
||||
if _, ok := set[d]; !ok {
|
||||
delete(dependencies, d)
|
||||
}
|
||||
}
|
||||
s.DependsOn = dependencies
|
||||
}
|
||||
}
|
||||
enabled = append(enabled, s)
|
||||
} else {
|
||||
p.DisabledServices = append(p.DisabledServices, s)
|
||||
p.DisableService(s)
|
||||
}
|
||||
}
|
||||
p.Services = enabled
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Project) DisableService(service ServiceConfig) {
|
||||
// We should remove all dependencies which reference the disabled service
|
||||
for i, s := range p.Services {
|
||||
if _, ok := s.DependsOn[service.Name]; ok {
|
||||
delete(s.DependsOn, service.Name)
|
||||
p.Services[i] = s
|
||||
}
|
||||
}
|
||||
p.DisabledServices = append(p.DisabledServices, service)
|
||||
}
|
||||
|
||||
// ResolveImages updates services images to include digest computed by a resolver function
|
||||
func (p *Project) ResolveImages(resolver func(named reference.Named) (godigest.Digest, error)) error {
|
||||
eg := errgroup.Group{}
|
||||
|
|
|
@ -491,6 +491,16 @@ func NewMapping(values []string) Mapping {
|
|||
return mapping
|
||||
}
|
||||
|
||||
// convert values into a set of KEY=VALUE strings
|
||||
func (m Mapping) Values() []string {
|
||||
values := make([]string, 0, len(m))
|
||||
for k, v := range m {
|
||||
values = append(values, fmt.Sprintf("%s=%s", k, v))
|
||||
}
|
||||
sort.Strings(values)
|
||||
return values
|
||||
}
|
||||
|
||||
// ToMappingWithEquals converts Mapping into a MappingWithEquals with pointer references
|
||||
func (m Mapping) ToMappingWithEquals() MappingWithEquals {
|
||||
mapping := MappingWithEquals{}
|
||||
|
@ -506,6 +516,24 @@ func (m Mapping) Resolve(s string) (string, bool) {
|
|||
return v, ok
|
||||
}
|
||||
|
||||
func (m Mapping) Clone() Mapping {
	clone := Mapping{}
	for k, v := range m {
		clone[k] = v
	}
	return clone
}

// Merge adds all values from second mapping which are not already defined
func (m Mapping) Merge(o Mapping) Mapping {
	for k, v := range o {
		if _, set := m[k]; !set {
			m[k] = v
		}
	}
	return m
}
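An editor-added sketch of how the two new helpers combine, mirroring `loadInclude`, which merges env-file values into a clone of the parent environment (the key names below are made up):

```
package main

import (
	"fmt"

	"github.com/compose-spec/compose-go/types"
)

func main() {
	parent := types.Mapping{"COMPOSE_PROFILES": "frontend"}
	fromEnvFile := types.Mapping{"COMPOSE_PROFILES": "backend", "TAG": "v1"}

	// Clone first so the parent mapping is left untouched;
	// Merge only adds keys that are not already defined.
	merged := parent.Clone().Merge(fromEnvFile)
	fmt.Println(merged["COMPOSE_PROFILES"], merged["TAG"]) // frontend v1
}
```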
|
||||
|
||||
// Labels is a mapping type for labels
|
||||
type Labels map[string]string
|
||||
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
*.go text eol=lf
|
|
@ -0,0 +1,2 @@
|
|||
# Cover profiles
|
||||
*.out
|
|
@ -0,0 +1,18 @@
|
|||
linters:
|
||||
enable:
|
||||
- bodyclose
|
||||
- dupword # Checks for duplicate words in the source code
|
||||
- gofmt
|
||||
- goimports
|
||||
- ineffassign
|
||||
- misspell
|
||||
- revive
|
||||
- staticcheck
|
||||
- unconvert
|
||||
- unused
|
||||
- vet
|
||||
disable:
|
||||
- errcheck
|
||||
|
||||
run:
|
||||
deadline: 2m
|
|
@ -0,0 +1,5 @@
|
|||
# Code of Conduct
|
||||
|
||||
We follow the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
|
||||
|
||||
Please contact the [CNCF Code of Conduct Committee](mailto:conduct@cncf.io) in order to report violations of the Code of Conduct.
|
|
@ -0,0 +1,114 @@
|
|||
# Contributing to the reference library
|
||||
|
||||
## Community help
|
||||
|
||||
If you need help, please ask in the [#distribution](https://cloud-native.slack.com/archives/C01GVR8SY4R) channel on CNCF community slack.
|
||||
[Click here for an invite to the CNCF community slack](https://slack.cncf.io/)
|
||||
|
||||
## Reporting security issues
|
||||
|
||||
The maintainers take security seriously. If you discover a security
|
||||
issue, please bring it to their attention right away!
|
||||
|
||||
Please **DO NOT** file a public issue, instead send your report privately to
|
||||
[cncf-distribution-security@lists.cncf.io](mailto:cncf-distribution-security@lists.cncf.io).
|
||||
|
||||
## Reporting an issue properly
|
||||
|
||||
By following these simple rules you will get better and faster feedback on your issue.
|
||||
|
||||
- search the bugtracker for an already reported issue
|
||||
|
||||
### If you found an issue that describes your problem:
|
||||
|
||||
- please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments
|
||||
- please refrain from adding "same thing here" or "+1" comments
|
||||
- you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button
|
||||
- comment if you have some new, technical and relevant information to add to the case
|
||||
- __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue.
|
||||
|
||||
### If you have not found an existing issue that describes your problem:
|
||||
|
||||
1. create a new issue, with a succinct title that describes your issue:
|
||||
- bad title: "It doesn't work with my docker"
|
||||
- good title: "Private registry push fail: 400 error with E_INVALID_DIGEST"
|
||||
2. copy the output of (or similar for other container tools):
|
||||
- `docker version`
|
||||
- `docker info`
|
||||
- `docker exec <registry-container> registry --version`
|
||||
3. copy the command line you used to launch your Registry
|
||||
4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments)
|
||||
5. reproduce your problem and get your docker daemon logs showing the error
|
||||
6. if relevant, copy your registry logs that show the error
|
||||
7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used)
|
||||
8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry
|
||||
|
||||
## Contributing Code
|
||||
|
||||
Contributions should be made via pull requests. Pull requests will be reviewed
|
||||
by one or more maintainers or reviewers and merged when acceptable.
|
||||
|
||||
You should follow the basic GitHub workflow:
|
||||
|
||||
1. Use your own [fork](https://help.github.com/en/articles/about-forks)
|
||||
2. Create your [change](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#successful-changes)
|
||||
3. Test your code
|
||||
4. [Commit](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#commit-messages) your work, always [sign your commits](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#commit-messages)
|
||||
5. Push your change to your fork and create a [Pull Request](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request-from-a-fork)
|
||||
|
||||
Refer to [containerd's contribution guide](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#successful-changes)
|
||||
for tips on creating a successful contribution.
|
||||
|
||||
## Sign your work
|
||||
|
||||
The sign-off is a simple line at the end of the explanation for the patch. Your
|
||||
signature certifies that you wrote the patch or otherwise have the right to pass
|
||||
it on as an open-source patch. The rules are pretty simple: if you can certify
|
||||
the below (from [developercertificate.org](http://developercertificate.org/)):
|
||||
|
||||
```
|
||||
Developer Certificate of Origin
|
||||
Version 1.1
|
||||
|
||||
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
|
||||
660 York Street, Suite 102,
|
||||
San Francisco, CA 94110 USA
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim copies of this
|
||||
license document, but changing it is not allowed.
|
||||
|
||||
Developer's Certificate of Origin 1.1
|
||||
|
||||
By making a contribution to this project, I certify that:
|
||||
|
||||
(a) The contribution was created in whole or in part by me and I
|
||||
have the right to submit it under the open source license
|
||||
indicated in the file; or
|
||||
|
||||
(b) The contribution is based upon previous work that, to the best
|
||||
of my knowledge, is covered under an appropriate open source
|
||||
license and I have the right under that license to submit that
|
||||
work with modifications, whether created in whole or in part
|
||||
by me, under the same open source license (unless I am
|
||||
permitted to submit under a different license), as indicated
|
||||
in the file; or
|
||||
|
||||
(c) The contribution was provided directly to me by some other
|
||||
person who certified (a), (b) or (c) and I have not modified
|
||||
it.
|
||||
|
||||
(d) I understand and agree that this project and the contribution
|
||||
are public and that a record of the contribution (including all
|
||||
personal information I submit with it, including my sign-off) is
|
||||
maintained indefinitely and may be redistributed consistent with
|
||||
this project or the open source license(s) involved.
|
||||
```
|
||||
|
||||
Then you just add a line to every git commit message:
|
||||
|
||||
Signed-off-by: Joe Smith <joe.smith@email.com>
|
||||
|
||||
Use your real name (sorry, no pseudonyms or anonymous contributions.)
|
||||
|
||||
If you set your `user.name` and `user.email` git configs, you can sign your
|
||||
commit automatically with `git commit -s`.
|
|
@ -0,0 +1,144 @@
|
|||
# distribution/reference Project Governance
|
||||
|
||||
Distribution [Code of Conduct](./CODE-OF-CONDUCT.md) can be found here.
|
||||
|
||||
For specific guidance on practical contribution steps please
|
||||
see our [CONTRIBUTING.md](./CONTRIBUTING.md) guide.
|
||||
|
||||
## Maintainership
|
||||
|
||||
There are different types of maintainers, with different responsibilities, but
|
||||
all maintainers have 3 things in common:
|
||||
|
||||
1) They share responsibility in the project's success.
|
||||
2) They have made a long-term, recurring time investment to improve the project.
|
||||
3) They spend that time doing whatever needs to be done, not necessarily what
|
||||
is the most interesting or fun.
|
||||
|
||||
Maintainers are often under-appreciated, because their work is harder to appreciate.
|
||||
It's easy to appreciate a really cool and technically advanced feature. It's harder
|
||||
to appreciate the absence of bugs, the slow but steady improvement in stability,
|
||||
or the reliability of a release process. But those things distinguish a good
|
||||
project from a great one.
|
||||
|
||||
## Reviewers
|
||||
|
||||
A reviewer is a core role within the project.
|
||||
They share in reviewing issues and pull requests and their LGTM counts towards the
|
||||
required LGTM count to merge a code change into the project.
|
||||
|
||||
Reviewers are part of the organization but do not have write access.
|
||||
Becoming a reviewer is a core aspect in the journey to becoming a maintainer.
|
||||
|
||||
## Adding maintainers
|
||||
|
||||
Maintainers are first and foremost contributors that have shown they are
|
||||
committed to the long term success of a project. Contributors wanting to become
|
||||
maintainers are expected to be deeply involved in contributing code, pull
|
||||
request review, and triage of issues in the project for more than three months.
|
||||
|
||||
Just contributing does not make you a maintainer, it is about building trust
|
||||
with the current maintainers of the project and being a person that they can
|
||||
depend on and trust to make decisions in the best interest of the project.
|
||||
|
||||
Periodically, the existing maintainers curate a list of contributors that have
|
||||
shown regular activity on the project over the prior months. From this list,
|
||||
maintainer candidates are selected and proposed in a pull request or a
|
||||
maintainers communication channel.
|
||||
|
||||
After a candidate has been announced to the maintainers, the existing
|
||||
maintainers are given five business days to discuss the candidate, raise
|
||||
objections and cast their vote. Votes may take place on the communication
|
||||
channel or via pull request comment. Candidates must be approved by at least 66%
|
||||
of the current maintainers by adding their vote on the mailing list. The
|
||||
reviewer role has the same process but only requires 33% of current maintainers.
|
||||
Only maintainers of the repository that the candidate is proposed for are
|
||||
allowed to vote.
|
||||
|
||||
If a candidate is approved, a maintainer will contact the candidate to invite
|
||||
the candidate to open a pull request that adds the contributor to the
|
||||
MAINTAINERS file. The voting process may take place inside a pull request if a
|
||||
maintainer has already discussed the candidacy with the candidate and a
|
||||
maintainer is willing to be a sponsor by opening the pull request. The candidate
|
||||
becomes a maintainer once the pull request is merged.
|
||||
|
||||
## Stepping down policy
|
||||
|
||||
Life priorities, interests, and passions can change. If you're a maintainer but
|
||||
feel you must remove yourself from the list, inform other maintainers that you
|
||||
intend to step down, and if possible, help find someone to pick up your work.
|
||||
At the very least, ensure your work can be continued where you left off.
|
||||
|
||||
After you've informed other maintainers, create a pull request to remove
|
||||
yourself from the MAINTAINERS file.
|
||||
|
||||
## Removal of inactive maintainers
|
||||
|
||||
Similar to the procedure for adding new maintainers, existing maintainers can
|
||||
be removed from the list if they do not show significant activity on the
|
||||
project. Periodically, the maintainers review the list of maintainers and their
|
||||
activity over the last three months.
|
||||
|
||||
If a maintainer has shown insufficient activity over this period, a neutral
|
||||
person will contact the maintainer to ask if they want to continue being
|
||||
a maintainer. If the maintainer decides to step down as a maintainer, they
|
||||
open a pull request to be removed from the MAINTAINERS file.
|
||||
|
||||
If the maintainer wants to remain a maintainer, but is unable to perform the
|
||||
required duties they can be removed with a vote of at least 66% of the current
|
||||
maintainers. In this case, maintainers should first propose the change to
|
||||
maintainers via the maintainers communication channel, then open a pull request
|
||||
for voting. The voting period is five business days. The voting pull request
|
||||
should not come as a surprise to any maintainer and any discussion related to
|
||||
performance must not be discussed on the pull request.
|
||||
|
||||
## How are decisions made?
|
||||
|
||||
Docker distribution is an open-source project with an open design philosophy.
|
||||
This means that the repository is the source of truth for EVERY aspect of the
|
||||
project, including its philosophy, design, road map, and APIs. *If it's part of
|
||||
the project, it's in the repo. If it's in the repo, it's part of the project.*
|
||||
|
||||
As a result, all decisions can be expressed as changes to the repository. An
|
||||
implementation change is a change to the source code. An API change is a change
|
||||
to the API specification. A philosophy change is a change to the philosophy
|
||||
manifesto, and so on.
|
||||
|
||||
All decisions affecting distribution, big and small, follow the same 3 steps:
|
||||
|
||||
* Step 1: Open a pull request. Anyone can do this.
|
||||
|
||||
* Step 2: Discuss the pull request. Anyone can do this.
|
||||
|
||||
* Step 3: Merge or refuse the pull request. Who does this depends on the nature
|
||||
of the pull request and which areas of the project it affects.
|
||||
|
||||
## Helping contributors with the DCO
|
||||
|
||||
The [DCO or `Sign your work`](./CONTRIBUTING.md#sign-your-work)
|
||||
requirement is not intended as a roadblock or speed bump.
|
||||
|
||||
Some contributors are not as familiar with `git`, or have used a web
|
||||
based editor, and thus asking them to `git commit --amend -s` is not the best
|
||||
way forward.
|
||||
|
||||
In this case, maintainers can update the commits based on clause (c) of the DCO.
|
||||
The most trivial way for a contributor to allow the maintainer to do this, is to
|
||||
add a DCO signature in a pull request's comment, or a maintainer can simply
|
||||
note that the change is sufficiently trivial that it does not substantially
|
||||
change the existing contribution - i.e., a spelling change.
|
||||
|
||||
When you add someone's DCO, please also add your own to keep a log.
|
||||
|
||||
## I'm a maintainer. Should I make pull requests too?
|
||||
|
||||
Yes. Nobody should ever push to master directly. All changes should be
|
||||
made through a pull request.
|
||||
|
||||
## Conflict Resolution
|
||||
|
||||
If you have a technical dispute that you feel has reached an impasse with a
|
||||
subset of the community, any contributor may open an issue, specifically
|
||||
calling for a resolution vote of the current core maintainers to resolve the
|
||||
dispute. The same voting quorums required (2/3) for adding and removing
|
||||
maintainers will apply to conflict resolution.
|
|
@ -0,0 +1,26 @@
|
|||
# Distribution project maintainers & reviewers
|
||||
#
|
||||
# See GOVERNANCE.md for maintainer versus reviewer roles
|
||||
#
|
||||
# MAINTAINERS (cncf-distribution-maintainers@lists.cncf.io)
|
||||
# GitHub ID, Name, Email address
|
||||
"chrispat","Chris Patterson","chrispat@github.com"
|
||||
"clarkbw","Bryan Clark","clarkbw@github.com"
|
||||
"corhere","Cory Snider","csnider@mirantis.com"
|
||||
"deleteriousEffect","Hayley Swimelar","hswimelar@gitlab.com"
|
||||
"heww","He Weiwei","hweiwei@vmware.com"
|
||||
"joaodrp","João Pereira","jpereira@gitlab.com"
|
||||
"justincormack","Justin Cormack","justin.cormack@docker.com"
|
||||
"squizzi","Kyle Squizzato","ksquizzato@mirantis.com"
|
||||
"milosgajdos","Milos Gajdos","milosthegajdos@gmail.com"
|
||||
"sargun","Sargun Dhillon","sargun@sargun.me"
|
||||
"wy65701436","Wang Yan","wangyan@vmware.com"
|
||||
"stevelasker","Steve Lasker","steve.lasker@microsoft.com"
|
||||
#
|
||||
# REVIEWERS
|
||||
# GitHub ID, Name, Email address
|
||||
"dmcgowan","Derek McGowan","derek@mcgstyle.net"
|
||||
"stevvooe","Stephen Day","stevvooe@gmail.com"
|
||||
"thajeztah","Sebastiaan van Stijn","github@gone.nl"
|
||||
"DavidSpek", "David van der Spek", "vanderspek.david@gmail.com"
|
||||
"Jamstah", "James Hewitt", "james.hewitt@gmail.com"
|
|
@ -0,0 +1,25 @@
|
|||
# Project packages.
|
||||
PACKAGES=$(shell go list ./...)
|
||||
|
||||
# Flags passed to `go test`
|
||||
BUILDFLAGS ?=
|
||||
TESTFLAGS ?=
|
||||
|
||||
.PHONY: all build test coverage
|
||||
.DEFAULT: all
|
||||
|
||||
all: build
|
||||
|
||||
build: ## no binaries to build, so just check compilation succeeds
|
||||
go build ${BUILDFLAGS} ./...
|
||||
|
||||
test: ## run tests
|
||||
go test ${TESTFLAGS} ./...
|
||||
|
||||
coverage: ## generate coverprofiles from the unit tests
|
||||
rm -f coverage.txt
|
||||
go test ${TESTFLAGS} -cover -coverprofile=cover.out ./...
|
||||
|
||||
.PHONY: help
|
||||
help:
|
||||
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_\/%-]+:.*?##/ { printf " \033[36m%-27s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
|
|
@ -0,0 +1,30 @@
|
|||
# Distribution reference
|
||||
|
||||
Go library to handle references to container images.
|
||||
|
||||
<img src="/distribution-logo.svg" width="200px" />
|
||||
|
||||
[![Build Status](https://github.com/distribution/reference/actions/workflows/test.yml/badge.svg?branch=main&event=push)](https://github.com/distribution/reference/actions?query=workflow%3ACI)
|
||||
[![GoDoc](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/github.com/distribution/reference)
|
||||
[![License: Apache-2.0](https://img.shields.io/badge/License-Apache--2.0-blue.svg)](LICENSE)
|
||||
[![codecov](https://codecov.io/gh/distribution/reference/branch/main/graph/badge.svg)](https://codecov.io/gh/distribution/reference)
|
||||
[![FOSSA Status](https://app.fossa.com/api/projects/custom%2B162%2Fgithub.com%2Fdistribution%2Freference.svg?type=shield)](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fdistribution%2Freference?ref=badge_shield)
|
||||
|
||||
This repository contains a library for handling references to container images held in container registries. Please see [godoc](https://pkg.go.dev/github.com/distribution/reference) for details.
|
||||
|
||||
## Contribution
|
||||
|
||||
Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute
|
||||
issues, fixes, and patches to this project.
|
||||
|
||||
## Communication
|
||||
|
||||
For async communication and long running discussions please use issues and pull requests on the github repo.
|
||||
This will be the best place to discuss design and implementation.
|
||||
|
||||
For sync communication we have a #distribution channel in the [CNCF Slack](https://slack.cncf.io/)
|
||||
that everyone is welcome to join and chat about development.
|
||||
|
||||
## Licenses
|
||||
|
||||
The distribution codebase is released under the [Apache 2.0 license](LICENSE).
|
|
@ -0,0 +1,7 @@
|
|||
# Security Policy
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
The maintainers take security seriously. If you discover a security issue, please bring it to their attention right away!
|
||||
|
||||
Please DO NOT file a public issue, instead send your report privately to cncf-distribution-security@lists.cncf.io.
|
File diff suppressed because one or more lines are too long (new image file added, 8.6 KiB)
|
@ -140,7 +140,7 @@ func splitDockerDomain(name string) (domain, remainder string) {
|
|||
}
|
||||
|
||||
// familiarizeName returns a shortened version of the name familiar
|
||||
// to to the Docker UI. Familiar names have the default domain
|
||||
// to the Docker UI. Familiar names have the default domain
|
||||
// "docker.io" and "library/" repository prefix removed.
|
||||
// For example, "docker.io/library/redis" will have the familiar
|
||||
// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
|
|
@ -119,7 +119,7 @@ github.com/cenkalti/backoff/v4
|
|||
# github.com/cespare/xxhash/v2 v2.2.0
|
||||
## explicit; go 1.11
|
||||
github.com/cespare/xxhash/v2
|
||||
# github.com/compose-spec/compose-go v1.17.0
|
||||
# github.com/compose-spec/compose-go v1.18.4
|
||||
## explicit; go 1.19
|
||||
github.com/compose-spec/compose-go/cli
|
||||
github.com/compose-spec/compose-go/consts
|
||||
|
@ -194,9 +194,9 @@ github.com/cyphar/filepath-securejoin
|
|||
# github.com/davecgh/go-spew v1.1.1
|
||||
## explicit
|
||||
github.com/davecgh/go-spew/spew
|
||||
# github.com/distribution/distribution/v3 v3.0.0-20230214150026-36d8c594d7aa
|
||||
## explicit; go 1.18
|
||||
github.com/distribution/distribution/v3/reference
|
||||
# github.com/distribution/reference v0.5.0
|
||||
## explicit; go 1.20
|
||||
github.com/distribution/reference
|
||||
# github.com/docker/cli v24.0.5+incompatible
|
||||
## explicit
|
||||
github.com/docker/cli/cli
|
||||