vendor: update compose-go to v2.0.0-rc.8

Signed-off-by: CrazyMax <1951866+crazy-max@users.noreply.github.com>
CrazyMax 2024-02-24 16:47:40 +01:00
parent 545a5c97c6
commit 95185e9525
36 changed files with 1185 additions and 496 deletions

go.mod

@ -5,7 +5,7 @@ go 1.21
require (
github.com/Masterminds/semver/v3 v3.2.1
github.com/aws/aws-sdk-go-v2/config v1.26.6
github.com/compose-spec/compose-go/v2 v2.0.0-rc.3
github.com/compose-spec/compose-go/v2 v2.0.0-rc.8
github.com/containerd/console v1.0.4
github.com/containerd/containerd v1.7.13
github.com/containerd/continuity v0.4.3
@ -46,9 +46,9 @@ require (
go.opentelemetry.io/otel/sdk/metric v1.19.0
go.opentelemetry.io/otel/trace v1.19.0
golang.org/x/mod v0.14.0
golang.org/x/sync v0.5.0
golang.org/x/sync v0.6.0
golang.org/x/sys v0.16.0
golang.org/x/term v0.15.0
golang.org/x/term v0.16.0
google.golang.org/grpc v1.59.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.29.2
@ -155,13 +155,13 @@ require (
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 // indirect
go.opentelemetry.io/otel/exporters/prometheus v0.42.0 // indirect
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
golang.org/x/crypto v0.17.0 // indirect
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect
golang.org/x/net v0.19.0 // indirect
golang.org/x/crypto v0.18.0 // indirect
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 // indirect
golang.org/x/net v0.20.0 // indirect
golang.org/x/oauth2 v0.11.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.16.1 // indirect
golang.org/x/tools v0.17.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b // indirect

go.sum

@ -88,8 +88,8 @@ github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+g
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE=
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
github.com/compose-spec/compose-go/v2 v2.0.0-rc.3 h1:t0qajSNkH3zR4HEN2CM+GVU7GBx5AwqiYJk5w800M7w=
github.com/compose-spec/compose-go/v2 v2.0.0-rc.3/go.mod h1:r7CJHU0GaLtRVLm2ch8RCNkJh3GHyaqqc2rSti7VP44=
github.com/compose-spec/compose-go/v2 v2.0.0-rc.8 h1:b7l+GqFF+2W4M4kLQUDRTGhqmTiRwT3bYd9X7xrxp5Q=
github.com/compose-spec/compose-go/v2 v2.0.0-rc.8/go.mod h1:bEPizBkIojlQ20pi2vNluBa58tevvj0Y18oUSHPyfdc=
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro=
@ -518,11 +518,11 @@ golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200422194213-44a606286825/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw=
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o=
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
@ -542,8 +542,8 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU=
golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk=
@ -554,8 +554,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -576,8 +576,8 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@ -593,8 +593,8 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA=
golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=


@ -37,8 +37,6 @@ import (
// ProjectOptions provides common configuration for loading a project.
type ProjectOptions struct {
ctx context.Context
// Name is a valid Compose project name to be used or empty.
//
// If empty, the project loader will automatically infer a reasonable
@ -80,6 +78,10 @@ type ProjectOptions struct {
EnvFiles []string
loadOptions []func(*loader.Options)
// Callbacks to retrieve metadata information during parse defined before
// creating the project
Listeners []loader.Listener
}
type ProjectOptionsFn func(*ProjectOptions) error
@ -89,6 +91,7 @@ func NewProjectOptions(configs []string, opts ...ProjectOptionsFn) (*ProjectOpti
options := &ProjectOptions{
ConfigPaths: configs,
Environment: map[string]string{},
Listeners: []loader.Listener{},
}
for _, o := range opts {
err := o(options)
@ -334,14 +337,6 @@ func WithResolvedPaths(resolve bool) ProjectOptionsFn {
}
}
// WithContext sets the context used to load model and resources
func WithContext(ctx context.Context) ProjectOptionsFn {
return func(o *ProjectOptions) error {
o.ctx = ctx
return nil
}
}
// WithResourceLoader register support for ResourceLoader to manage remote resources
func WithResourceLoader(r loader.ResourceLoader) ProjectOptionsFn {
return func(o *ProjectOptions) error {
@ -352,6 +347,24 @@ func WithResourceLoader(r loader.ResourceLoader) ProjectOptionsFn {
}
}
// WithExtension register a know extension `x-*` with the go struct type to decode into
func WithExtension(name string, typ any) ProjectOptionsFn {
return func(o *ProjectOptions) error {
o.loadOptions = append(o.loadOptions, func(options *loader.Options) {
if options.KnownExtensions == nil {
options.KnownExtensions = map[string]any{}
}
options.KnownExtensions[name] = typ
})
return nil
}
}
// Append listener to event
func (o *ProjectOptions) WithListeners(listeners ...loader.Listener) {
o.Listeners = append(o.Listeners, listeners...)
}
// WithoutEnvironmentResolution disable environment resolution
func WithoutEnvironmentResolution(o *ProjectOptions) error {
o.loadOptions = append(o.loadOptions, func(options *loader.Options) {
@ -368,7 +381,7 @@ var DefaultOverrideFileNames = []string{"compose.override.yml", "compose.overrid
func (o ProjectOptions) GetWorkingDir() (string, error) {
if o.WorkingDir != "" {
return o.WorkingDir, nil
return filepath.Abs(o.WorkingDir)
}
for _, path := range o.ConfigPaths {
if path != "-" {
@ -382,9 +395,8 @@ func (o ProjectOptions) GetWorkingDir() (string, error) {
return os.Getwd()
}
// ProjectFromOptions load a compose project based on command line options
func ProjectFromOptions(options *ProjectOptions) (*types.Project, error) {
configPaths, err := getConfigPathsFromOptions(options)
func (o ProjectOptions) GeConfigFiles() ([]types.ConfigFile, error) {
configPaths, err := o.getConfigPaths()
if err != nil {
return nil, err
}
@ -412,24 +424,25 @@ func ProjectFromOptions(options *ProjectOptions) (*types.Project, error) {
Content: b,
})
}
return configs, err
}
// ProjectFromOptions load a compose project based on command line options
func ProjectFromOptions(ctx context.Context, options *ProjectOptions) (*types.Project, error) {
configs, err := options.GeConfigFiles()
if err != nil {
return nil, err
}
workingDir, err := options.GetWorkingDir()
if err != nil {
return nil, err
}
absWorkingDir, err := filepath.Abs(workingDir)
if err != nil {
return nil, err
}
options.loadOptions = append(options.loadOptions,
withNamePrecedenceLoad(absWorkingDir, options),
withConvertWindowsPaths(options))
ctx := options.ctx
if ctx == nil {
ctx = context.Background()
}
withNamePrecedenceLoad(workingDir, options),
withConvertWindowsPaths(options),
withListeners(options))
project, err := loader.LoadWithContext(ctx, types.ConfigDetails{
ConfigFiles: configs,
@ -440,7 +453,10 @@ func ProjectFromOptions(options *ProjectOptions) (*types.Project, error) {
return nil, err
}
project.ComposeFiles = configPaths
for _, config := range configs {
project.ComposeFiles = append(project.ComposeFiles, config.Filename)
}
return project, nil
}
@ -467,10 +483,17 @@ func withConvertWindowsPaths(options *ProjectOptions) func(*loader.Options) {
}
}
// getConfigPathsFromOptions retrieves the config files for project based on project options
func getConfigPathsFromOptions(options *ProjectOptions) ([]string, error) {
if len(options.ConfigPaths) != 0 {
return absolutePaths(options.ConfigPaths)
// save listeners from ProjectOptions (compose) to loader.Options
func withListeners(options *ProjectOptions) func(*loader.Options) {
return func(opts *loader.Options) {
opts.Listeners = append(opts.Listeners, options.Listeners...)
}
}
// getConfigPaths retrieves the config files for project based on project options
func (o *ProjectOptions) getConfigPaths() ([]string, error) {
if len(o.ConfigPaths) != 0 {
return absolutePaths(o.ConfigPaths)
}
return nil, fmt.Errorf("no configuration file provided: %w", errdefs.ErrNotFound)
}
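
Note: rc.8 reworks the public cli API used above. WithContext is gone and ProjectFromOptions now takes a context.Context as its first argument; listeners can be attached to receive parse events, and GeConfigFiles exposes the resolved config files. A minimal sketch of a caller adapting to the new signatures (the "compose.yaml" path and the printed output are illustrative, not part of this commit):

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/compose-spec/compose-go/v2/cli"
)

func main() {
    ctx := context.Background()
    opts, err := cli.NewProjectOptions([]string{"compose.yaml"})
    if err != nil {
        log.Fatal(err)
    }
    // Collect parse events ("include", "extends", ...) emitted by the loader.
    opts.WithListeners(func(event string, metadata map[string]any) {
        fmt.Println("event:", event, metadata)
    })
    // The context is now passed explicitly instead of via the removed WithContext option.
    project, err := cli.ProjectFromOptions(ctx, opts)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(project.Name)
}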


@ -0,0 +1,59 @@
/*
Copyright 2020 The Compose Specification Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package loader
import (
"fmt"
"github.com/compose-spec/compose-go/v2/types"
)
// Will update the environment variables for the format {- VAR} (without interpolation)
// This function should resolve context environment vars for include (passed in env_file)
func resolveServicesEnvironment(dict map[string]any, config types.ConfigDetails) {
services, ok := dict["services"].(map[string]any)
if !ok {
return
}
for service, cfg := range services {
serviceConfig, ok := cfg.(map[string]any)
if !ok {
continue
}
serviceEnv, ok := serviceConfig["environment"].([]any)
if !ok {
continue
}
envs := []any{}
for _, env := range serviceEnv {
varEnv, ok := env.(string)
if !ok {
continue
}
if found, ok := config.Environment[varEnv]; ok {
envs = append(envs, fmt.Sprintf("%s=%s", varEnv, found))
} else {
// either does not exist or it was already resolved in interpolation
envs = append(envs, varEnv)
}
}
serviceConfig["environment"] = envs
services[service] = serviceConfig
}
dict["services"] = services
}
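
Note: resolveServicesEnvironment is internal to the loader. It rewrites list-form environment entries that carry no value, filling them from the context environment so included files see those values without interpolation. A rough illustration of the data shape it operates on (service and variable names are made up; this fragment only compiles inside the loader package):

dict := map[string]any{
    "services": map[string]any{
        "app": map[string]any{
            "environment": []any{"TAG", "DEBUG=1"},
        },
    },
}
config := types.ConfigDetails{
    Environment: types.Mapping{"TAG": "v1.2.3"},
}
resolveServicesEnvironment(dict, config)
// "TAG" is rewritten to "TAG=v1.2.3" (resolved from config.Environment);
// "DEBUG=1" already carries a value and is kept as-is.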


@ -20,6 +20,7 @@ import (
"context"
"fmt"
"path/filepath"
"strings"
"github.com/compose-spec/compose-go/v2/consts"
"github.com/compose-spec/compose-go/v2/override"
@ -60,11 +61,8 @@ func applyServiceExtends(ctx context.Context, name string, services map[string]a
return s, nil
}
filename := ctx.Value(consts.ComposeFileKey{}).(string)
tracker, err := tracker.Add(filename, name)
if err != nil {
return nil, err
}
var (
err error
ref string
file any
)
@ -72,14 +70,16 @@ func applyServiceExtends(ctx context.Context, name string, services map[string]a
case map[string]any:
ref = v["service"].(string)
file = v["file"]
opts.ProcessEvent("extends", v)
case string:
ref = v
opts.ProcessEvent("extends", map[string]any{"service": ref})
}
var base any
if file != nil {
path := file.(string)
services, err = getExtendsBaseFromFile(ctx, ref, path, opts, tracker)
filename = file.(string)
services, err = getExtendsBaseFromFile(ctx, ref, filename, opts, tracker)
if err != nil {
return nil, err
}
@ -89,6 +89,12 @@ func applyServiceExtends(ctx context.Context, name string, services map[string]a
return nil, fmt.Errorf("cannot extend service %q in %s: service not found", name, filename)
}
}
tracker, err = tracker.Add(filename, name)
if err != nil {
return nil, err
}
// recursively apply `extends`
base, err = applyServiceExtends(ctx, ref, services, opts, tracker, post...)
if err != nil {
@ -99,6 +105,12 @@ func applyServiceExtends(ctx context.Context, name string, services map[string]a
return service, nil
}
source := deepClone(base).(map[string]any)
err = validateExtendSource(source, ref)
if err != nil {
return nil, err
}
for _, processor := range post {
processor.Apply(map[string]any{
"services": map[string]any{
@ -111,9 +123,34 @@ func applyServiceExtends(ctx context.Context, name string, services map[string]a
return nil, err
}
delete(merged, "extends")
services[name] = merged
return merged, nil
}
// validateExtendSource check the source for `extends` doesn't refer to another container/service
func validateExtendSource(source map[string]any, ref string) error {
forbidden := []string{"links", "volumes_from", "depends_on"}
for _, key := range forbidden {
if _, ok := source[key]; ok {
return fmt.Errorf("service %q can't be used with `extends` as it declare `%s`", ref, key)
}
}
sharedNamespace := []string{"network_mode", "ipc", "pid", "net", "cgroup", "userns_mode", "uts"}
for _, key := range sharedNamespace {
if v, ok := source[key]; ok {
val := v.(string)
if strings.HasPrefix(val, types.ContainerPrefix) {
return fmt.Errorf("service %q can't be used with `extends` as it shares `%s` with another container", ref, key)
}
if strings.HasPrefix(val, types.ServicePrefix) {
return fmt.Errorf("service %q can't be used with `extends` as it shares `%s` with another service", ref, key)
}
}
}
return nil
}
func getExtendsBaseFromFile(ctx context.Context, name string, path string, opts *Options, ct *cycleTracker) (map[string]any, error) {
for _, loader := range opts.ResourceLoaders {
if !loader.Accept(path) {
@ -137,6 +174,7 @@ func getExtendsBaseFromFile(ctx context.Context, name string, path string, opts
extendsOpts.SkipInclude = true
extendsOpts.SkipExtends = true // we manage extends recursively based on raw service definition
extendsOpts.SkipValidation = true // we validate the merge result
extendsOpts.SkipDefaultValues = true
source, err := loadYamlModel(ctx, types.ConfigDetails{
WorkingDir: relworkingdir,
ConfigFiles: []types.ConfigFile{


@ -26,7 +26,8 @@ services:
additional_contexts:
foo: ./bar
secrets:
- secret1
- source: secret1
target: /run/secrets/secret1
- source: secret2
target: my_secret
uid: '103'
@ -257,7 +258,8 @@ services:
restart: always
secrets:
- secret1
- source: secret1
target: /run/secrets/secret1
- source: secret2
target: my_secret
uid: '103'


@ -34,6 +34,14 @@ func loadIncludeConfig(source any) ([]types.IncludeConfig, error) {
if source == nil {
return nil, nil
}
configs := source.([]any)
for i, config := range configs {
if v, ok := config.(string); ok {
configs[i] = map[string]any{
"path": v,
}
}
}
var requires []types.IncludeConfig
err := Transform(source, &requires)
return requires, err
@ -45,6 +53,13 @@ func ApplyInclude(ctx context.Context, configDetails types.ConfigDetails, model
return err
}
for _, r := range includeConfig {
for _, listener := range options.Listeners {
listener("include", map[string]any{
"path": r.Path,
"workingdir": configDetails.WorkingDir,
})
}
for i, p := range r.Path {
for _, loader := range options.ResourceLoaders {
if loader.Accept(p) {
@ -56,7 +71,7 @@ func ApplyInclude(ctx context.Context, configDetails types.ConfigDetails, model
break
}
}
r.Path[i] = absPath(configDetails.WorkingDir, p)
r.Path[i] = p
}
mainFile := r.Path[0]
@ -70,17 +85,41 @@ func ApplyInclude(ctx context.Context, configDetails types.ConfigDetails, model
if r.ProjectDirectory == "" {
r.ProjectDirectory = filepath.Dir(mainFile)
}
relworkingdir, err := filepath.Rel(configDetails.WorkingDir, r.ProjectDirectory)
if err != nil {
// included file path is not inside project working directory => use absolute path
relworkingdir = r.ProjectDirectory
}
loadOptions := options.clone()
loadOptions.ResolvePaths = true
loadOptions.SkipNormalization = true
loadOptions.SkipConsistencyCheck = true
loadOptions.ResourceLoaders = append(loadOptions.RemoteResourceLoaders(), localResourceLoader{
WorkingDir: relworkingdir,
})
if len(r.EnvFile) == 0 {
f := filepath.Join(r.ProjectDirectory, ".env")
if s, err := os.Stat(f); err == nil && !s.IsDir() {
r.EnvFile = types.StringList{f}
}
} else {
envFile := []string{}
for _, f := range r.EnvFile {
if !filepath.IsAbs(f) {
f = filepath.Join(configDetails.WorkingDir, f)
s, err := os.Stat(f)
if err != nil {
return err
}
if s.IsDir() {
return fmt.Errorf("%s is not a file", f)
}
}
envFile = append(envFile, f)
}
r.EnvFile = envFile
}
envFromFile, err := dotenv.GetEnvFromFile(configDetails.Environment, r.EnvFile)
@ -89,7 +128,7 @@ func ApplyInclude(ctx context.Context, configDetails types.ConfigDetails, model
}
config := types.ConfigDetails{
WorkingDir: r.ProjectDirectory,
WorkingDir: relworkingdir,
ConfigFiles: types.ToConfigFiles(r.Path),
Environment: configDetails.Environment.Clone().Merge(envFromFile),
}


@ -64,6 +64,8 @@ type Options struct {
SkipInclude bool
// SkipResolveEnvironment will ignore computing `environment` for services
SkipResolveEnvironment bool
// SkipDefaultValues will ignore missing required attributes
SkipDefaultValues bool
// Interpolation options
Interpolate *interp.Options
// Discard 'env_file' entries after resolving to 'environment' section
@ -76,6 +78,19 @@ type Options struct {
Profiles []string
// ResourceLoaders manages support for remote resources
ResourceLoaders []ResourceLoader
// KnownExtensions manages x-* attribute we know and the corresponding go structs
KnownExtensions map[string]any
// Metada for telemetry
Listeners []Listener
}
type Listener = func(event string, metadata map[string]any)
// Invoke all listeners for an event
func (o *Options) ProcessEvent(event string, metadata map[string]any) {
for _, l := range o.Listeners {
l(event, metadata)
}
}
// ResourceLoader is a plugable remote resource resolver
@ -148,6 +163,8 @@ func (o *Options) clone() *Options {
projectNameImperativelySet: o.projectNameImperativelySet,
Profiles: o.Profiles,
ResourceLoaders: o.ResourceLoaders,
KnownExtensions: o.KnownExtensions,
Listeners: o.Listeners,
}
}
@ -288,18 +305,17 @@ func LoadWithContext(ctx context.Context, configDetails types.ConfigDetails, opt
}
opts.ResourceLoaders = append(opts.ResourceLoaders, localResourceLoader{configDetails.WorkingDir})
projectName, err := projectName(configDetails, opts)
err := projectName(configDetails, opts)
if err != nil {
return nil, err
}
opts.projectName = projectName
// TODO(milas): this should probably ALWAYS set (overriding any existing)
if _, ok := configDetails.Environment[consts.ComposeProjectName]; !ok && projectName != "" {
if _, ok := configDetails.Environment[consts.ComposeProjectName]; !ok && opts.projectName != "" {
if configDetails.Environment == nil {
configDetails.Environment = map[string]string{}
}
configDetails.Environment[consts.ComposeProjectName] = projectName
configDetails.Environment[consts.ComposeProjectName] = opts.projectName
}
return load(ctx, configDetails, opts, nil)
@ -312,7 +328,7 @@ func loadYamlModel(ctx context.Context, config types.ConfigDetails, opts *Option
)
for _, file := range config.ConfigFiles {
fctx := context.WithValue(ctx, consts.ComposeFileKey{}, file.Filename)
if len(file.Content) == 0 && file.Config == nil {
if file.Content == nil && file.Config == nil {
content, err := os.ReadFile(file.Filename)
if err != nil {
return nil, err
@ -352,6 +368,14 @@ func loadYamlModel(ctx context.Context, config types.ConfigDetails, opts *Option
}
}
if !opts.SkipInclude {
included = append(included, config.ConfigFiles[0].Filename)
err = ApplyInclude(ctx, config, cfg, opts, included)
if err != nil {
return err
}
}
dict, err = override.Merge(dict, cfg)
if err != nil {
return err
@ -400,9 +424,14 @@ func loadYamlModel(ctx context.Context, config types.ConfigDetails, opts *Option
return nil, err
}
if !opts.SkipInclude {
included = append(included, config.ConfigFiles[0].Filename)
err = ApplyInclude(ctx, config, dict, opts, included)
// Canonical transformation can reveal duplicates, typically as ports can be a range and conflict with an override
dict, err = override.EnforceUnicity(dict)
if err != nil {
return nil, err
}
if !opts.SkipDefaultValues {
dict, err = transform.SetDefaultValues(dict)
if err != nil {
return nil, err
}
@ -424,6 +453,7 @@ func loadYamlModel(ctx context.Context, config types.ConfigDetails, opts *Option
return nil, err
}
}
resolveServicesEnvironment(dict, config)
return dict, nil
}
@ -438,8 +468,6 @@ func load(ctx context.Context, configDetails types.ConfigDetails, opts *Options,
}
loaded = append(loaded, mainFile)
includeRefs := make(map[string][]types.IncludeConfig)
dict, err := loadYamlModel(ctx, configDetails, opts, &cycleTracker{}, nil)
if err != nil {
return nil, err
@ -449,6 +477,10 @@ func load(ctx context.Context, configDetails types.ConfigDetails, opts *Options,
return nil, errors.New("empty compose file")
}
if opts.projectName == "" {
return nil, errors.New("project name must not be empty")
}
project := &types.Project{
Name: opts.projectName,
WorkingDir: configDetails.WorkingDir,
@ -456,14 +488,14 @@ func load(ctx context.Context, configDetails types.ConfigDetails, opts *Options,
}
delete(dict, "name") // project name set by yaml must be identified by caller as opts.projectName
dict = groupXFieldsIntoExtensions(dict, tree.NewPath())
err = Transform(dict, project)
dict, err = processExtensions(dict, tree.NewPath(), opts.KnownExtensions)
if err != nil {
return nil, err
}
if len(includeRefs) != 0 {
project.IncludeReferences = includeRefs
err = Transform(dict, project)
if err != nil {
return nil, err
}
if !opts.SkipNormalization {
@ -516,69 +548,68 @@ func InvalidProjectNameErr(v string) error {
//
// TODO(milas): restructure loading so that we don't need to re-parse the YAML
// here, as it's both wasteful and makes this code error-prone.
func projectName(details types.ConfigDetails, opts *Options) (string, error) {
projectName, projectNameImperativelySet := opts.GetProjectName()
func projectName(details types.ConfigDetails, opts *Options) error {
if opts.projectNameImperativelySet {
if NormalizeProjectName(opts.projectName) != opts.projectName {
return InvalidProjectNameErr(opts.projectName)
}
return nil
}
type named struct {
Name string `yaml:"name"`
}
// if user did NOT provide a name explicitly, then see if one is defined
// in any of the config files
if !projectNameImperativelySet {
var pjNameFromConfigFile string
for _, configFile := range details.ConfigFiles {
content := configFile.Content
if content == nil {
// This can be hit when Filename is set but Content is not. One
// example is when using ToConfigFiles().
d, err := os.ReadFile(configFile.Filename)
if err != nil {
return "", fmt.Errorf("failed to read file %q: %w", configFile.Filename, err)
}
content = d
var pjNameFromConfigFile string
for _, configFile := range details.ConfigFiles {
content := configFile.Content
if content == nil {
// This can be hit when Filename is set but Content is not. One
// example is when using ToConfigFiles().
d, err := os.ReadFile(configFile.Filename)
if err != nil {
return fmt.Errorf("failed to read file %q: %w", configFile.Filename, err)
}
content = d
configFile.Content = d
}
var n named
r := bytes.NewReader(content)
decoder := yaml.NewDecoder(r)
for {
err := decoder.Decode(&n)
if err != nil && errors.Is(err, io.EOF) {
break
}
yml, err := ParseYAML(content)
if err != nil {
// HACK: the way that loading is currently structured, this is
// a duplicative parse just for the `name`. if it fails, we
// give up but don't return the error, knowing that it'll get
// caught downstream for us
return "", nil
break
}
if val, ok := yml["name"]; ok && val != "" {
sVal, ok := val.(string)
if !ok {
// HACK: see above - this is a temporary parsed version
// that hasn't been schema-validated, but we don't want
// to be the ones to actually report that, so give up,
// knowing that it'll get caught downstream for us
return "", nil
}
pjNameFromConfigFile = sVal
if n.Name != "" {
pjNameFromConfigFile = n.Name
}
}
if !opts.SkipInterpolation {
interpolated, err := interp.Interpolate(
map[string]interface{}{"name": pjNameFromConfigFile},
*opts.Interpolate,
)
if err != nil {
return "", err
}
pjNameFromConfigFile = interpolated["name"].(string)
}
pjNameFromConfigFile = NormalizeProjectName(pjNameFromConfigFile)
if pjNameFromConfigFile != "" {
projectName = pjNameFromConfigFile
}
if !opts.SkipInterpolation {
interpolated, err := interp.Interpolate(
map[string]interface{}{"name": pjNameFromConfigFile},
*opts.Interpolate,
)
if err != nil {
return err
}
pjNameFromConfigFile = interpolated["name"].(string)
}
if projectName == "" {
return "", errors.New("project name must not be empty")
pjNameFromConfigFile = NormalizeProjectName(pjNameFromConfigFile)
if pjNameFromConfigFile != "" {
opts.projectName = pjNameFromConfigFile
}
if NormalizeProjectName(projectName) != projectName {
return "", InvalidProjectNameErr(projectName)
}
return projectName, nil
return nil
}
func NormalizeProjectName(s string) string {
@ -596,8 +627,9 @@ var userDefinedKeys = []tree.Path{
"configs",
}
func groupXFieldsIntoExtensions(dict map[string]interface{}, p tree.Path) map[string]interface{} {
extras := map[string]interface{}{}
func processExtensions(dict map[string]any, p tree.Path, extensions map[string]any) (map[string]interface{}, error) {
extras := map[string]any{}
var err error
for key, value := range dict {
skip := false
for _, uk := range userDefinedKeys {
@ -613,19 +645,35 @@ func groupXFieldsIntoExtensions(dict map[string]interface{}, p tree.Path) map[st
}
switch v := value.(type) {
case map[string]interface{}:
dict[key] = groupXFieldsIntoExtensions(v, p.Next(key))
dict[key], err = processExtensions(v, p.Next(key), extensions)
if err != nil {
return nil, err
}
case []interface{}:
for i, e := range v {
if m, ok := e.(map[string]interface{}); ok {
v[i] = groupXFieldsIntoExtensions(m, p.Next(strconv.Itoa(i)))
v[i], err = processExtensions(m, p.Next(strconv.Itoa(i)), extensions)
if err != nil {
return nil, err
}
}
}
}
}
for name, val := range extras {
if typ, ok := extensions[name]; ok {
target := reflect.New(reflect.TypeOf(typ)).Elem().Interface()
err = Transform(val, &target)
if err != nil {
return nil, err
}
extras[name] = target
}
}
if len(extras) > 0 {
dict[consts.Extensions] = extras
}
return dict
return dict, nil
}
// Transform converts the source into the target struct with compose types transformer
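
Note: KnownExtensions lets a caller register a Go type for an x-* key so processExtensions decodes it during load instead of leaving a raw map. A sketch under the assumption that the "x-metadata" key and the Metadata struct are application-defined (they are not part of this commit), and that the decoded value is expected to surface in project.Extensions:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/compose-spec/compose-go/v2/cli"
)

// Metadata is a hypothetical struct registered for the "x-metadata" key.
type Metadata struct {
    Owner string
    Team  string
}

func main() {
    opts, err := cli.NewProjectOptions(
        []string{"compose.yaml"},
        cli.WithExtension("x-metadata", Metadata{}),
    )
    if err != nil {
        log.Fatal(err)
    }
    project, err := cli.ProjectFromOptions(context.Background(), opts)
    if err != nil {
        log.Fatal(err)
    }
    // A top-level x-metadata mapping should now be decoded into the registered type.
    if meta, ok := project.Extensions["x-metadata"].(Metadata); ok {
        fmt.Println(meta.Owner, meta.Team)
    }
}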

View File

@ -37,25 +37,6 @@ func ResolveRelativePaths(project *types.Project) error {
return err
}
project.ComposeFiles = absComposeFiles
// don't coerce a nil map to an empty map
if project.IncludeReferences != nil {
absIncludes := make(map[string][]types.IncludeConfig, len(project.IncludeReferences))
for filename, config := range project.IncludeReferences {
filename = absPath(project.WorkingDir, filename)
absConfigs := make([]types.IncludeConfig, len(config))
for i, c := range config {
absConfigs[i] = types.IncludeConfig{
Path: resolvePaths(project.WorkingDir, c.Path),
ProjectDirectory: absPath(project.WorkingDir, c.ProjectDirectory),
EnvFile: resolvePaths(project.WorkingDir, c.EnvFile),
}
}
absIncludes[filename] = absConfigs
}
project.IncludeReferences = absIncludes
}
return nil
}


@ -29,6 +29,7 @@ import (
// checkConsistency validate a compose model is consistent
func checkConsistency(project *types.Project) error {
containerNames := map[string]string{}
for _, s := range project.Services {
if s.Build == nil && s.Image == "" {
return fmt.Errorf("service %q has neither an image nor a build context specified: %w", s.Name, errdefs.ErrInvalid)
@ -123,6 +124,13 @@ func checkConsistency(project *types.Project) error {
s.Deploy.Replicas = s.Scale
}
if s.ContainerName != "" {
if existing, ok := containerNames[s.ContainerName]; ok {
return fmt.Errorf(`"services.%s": container name "%s" is already in use by "services.%s": %w`, s.Name, s.ContainerName, existing, errdefs.ErrInvalid)
}
containerNames[s.ContainerName] = s.Name
}
if s.GetScale() > 1 && s.ContainerName != "" {
attr := "scale"
if s.Scale == nil {
@ -131,6 +139,15 @@ func checkConsistency(project *types.Project) error {
return fmt.Errorf("services.%s: can't set container_name and %s as container name must be unique: %w", attr,
s.Name, errdefs.ErrInvalid)
}
if s.Develop != nil && s.Develop.Watch != nil {
for _, watch := range s.Develop.Watch {
if watch.Action != types.WatchActionRebuild && watch.Target == "" {
return fmt.Errorf("services.%s.develop.watch: target is required for non-rebuild actions: %w", s.Name, errdefs.ErrInvalid)
}
}
}
}
for name, secret := range project.Secrets {


@ -17,6 +17,7 @@
package override
import (
"cmp"
"fmt"
"strings"
@ -40,10 +41,13 @@ var mergeSpecials = map[tree.Path]merger{}
func init() {
mergeSpecials["networks.*.ipam.config"] = mergeIPAMConfig
mergeSpecials["networks.*.labels"] = mergeToSequence
mergeSpecials["volumes.*.labels"] = mergeToSequence
mergeSpecials["services.*.annotations"] = mergeToSequence
mergeSpecials["services.*.build"] = mergeBuild
mergeSpecials["services.*.build.args"] = mergeToSequence
mergeSpecials["services.*.build.additional_contexts"] = mergeToSequence
mergeSpecials["services.*.build.extra_hosts"] = mergeToSequence
mergeSpecials["services.*.build.labels"] = mergeToSequence
mergeSpecials["services.*.command"] = override
mergeSpecials["services.*.depends_on"] = mergeDependsOn
@ -178,8 +182,8 @@ func convertIntoSequence(value any) []any {
}
i++
}
slices.SortFunc(seq, func(a, b any) bool {
return a.(string) < b.(string)
slices.SortFunc(seq, func(a, b any) int {
return cmp.Compare(a.(string), b.(string))
})
return seq
case []any:
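
Note: the comparator change above follows the newer golang.org/x/exp/slices API pulled in by this vendor bump, where SortFunc takes a three-way int comparison instead of a less-style bool. A tiny stand-alone illustration of the new form, unrelated to the vendored code itself:

package main

import (
    "cmp"
    "fmt"

    "golang.org/x/exp/slices"
)

func main() {
    labels := []any{"b=2", "a=1", "c=3"}
    slices.SortFunc(labels, func(a, b any) int {
        return cmp.Compare(a.(string), b.(string))
    })
    fmt.Println(labels) // [a=1 b=2 c=3]
}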


@ -107,13 +107,17 @@ func enforceUnicity(value any, p tree.Path) (any, error) {
return value, nil
}
func keyValueIndexer(y any, _ tree.Path) (string, error) {
value := y.(string)
key, _, found := strings.Cut(value, "=")
if !found {
return value, nil
func keyValueIndexer(y any, p tree.Path) (string, error) {
switch value := y.(type) {
case string:
key, _, found := strings.Cut(value, "=")
if !found {
return value, nil
}
return key, nil
default:
return "", fmt.Errorf("%s: unexpected type %T", p, y)
}
return key, nil
}
func volumeIndexer(y any, p tree.Path) (string, error) {


@ -455,6 +455,7 @@
"type": "array",
"items": {
"type": "object",
"required": ["path", "action"],
"properties": {
"ignore": {"type": "array", "items": {"type": "string"}},
"path": {"type": "string"},
@ -462,7 +463,6 @@
"target": {"type": "string"}
}
},
"required": ["path", "action"],
"additionalProperties": false,
"patternProperties": {"^x-": {}}
}


@ -258,7 +258,7 @@ func getFirstBraceClosingIndex(s string) int {
return i
}
}
if strings.HasPrefix(s[i:], "${") {
if s[i] == '{' {
openVariableBraces++
i++
}


@ -25,9 +25,6 @@ import (
func transformBuild(data any, p tree.Path) (any, error) {
switch v := data.(type) {
case map[string]any:
if _, ok := v["context"]; !ok {
v["context"] = "." // TODO(ndeloof) maybe we miss an explicit "set-defaults" loading phase
}
return transformMapping(v, p)
case string:
return map[string]any{
@ -37,3 +34,15 @@ func transformBuild(data any, p tree.Path) (any, error) {
return data, fmt.Errorf("%s: invalid type %T for build", p, v)
}
}
func defaultBuildContext(data any, _ tree.Path) (any, error) {
switch v := data.(type) {
case map[string]any:
if _, ok := v["context"]; !ok {
v["context"] = "."
}
return v, nil
default:
return data, nil
}
}


@ -0,0 +1,87 @@
/*
Copyright 2020 The Compose Specification Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package transform
import (
"github.com/compose-spec/compose-go/v2/tree"
)
var defaultValues = map[tree.Path]transformFunc{}
func init() {
defaultValues["services.*.build"] = defaultBuildContext
defaultValues["services.*.secrets.*"] = defaultSecretMount
}
// SetDefaultValues transforms a compose model to set default values to missing attributes
func SetDefaultValues(yaml map[string]any) (map[string]any, error) {
result, err := setDefaults(yaml, tree.NewPath())
if err != nil {
return nil, err
}
return result.(map[string]any), nil
}
func setDefaults(data any, p tree.Path) (any, error) {
for pattern, transformer := range defaultValues {
if p.Matches(pattern) {
t, err := transformer(data, p)
if err != nil {
return nil, err
}
return t, nil
}
}
switch v := data.(type) {
case map[string]any:
a, err := setDefaultsMapping(v, p)
if err != nil {
return a, err
}
return v, nil
case []any:
a, err := setDefaultsSequence(v, p)
if err != nil {
return a, err
}
return v, nil
default:
return data, nil
}
}
func setDefaultsSequence(v []any, p tree.Path) ([]any, error) {
for i, e := range v {
t, err := setDefaults(e, p.Next("[]"))
if err != nil {
return nil, err
}
v[i] = t
}
return v, nil
}
func setDefaultsMapping(v map[string]any, p tree.Path) (map[string]any, error) {
for k, e := range v {
t, err := setDefaults(e, p.Next(k))
if err != nil {
return nil, err
}
v[k] = t
}
return v, nil
}
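
Note: SetDefaultValues is a separate pass over the canonical YAML tree, which lets extends skip it (SkipDefaultValues) and merge raw definitions before defaults are injected. A rough sketch of its effect on a parsed mapping (service and secret names are made up, and it assumes the services.*.secrets.* pattern matches sequence elements the same way the other transforms do):

dict := map[string]any{
    "services": map[string]any{
        "app": map[string]any{
            "build":   map[string]any{}, // no context set
            "secrets": []any{map[string]any{"source": "token"}},
        },
    },
}
dict, err := transform.SetDefaultValues(dict)
if err != nil {
    return err
}
// services.app.build.context is now "."
// the secret entry gains target "/run/secrets/token"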


@ -48,7 +48,7 @@ func transformPorts(data any, p tree.Path) (any, error) {
case string:
parsed, err := types.ParsePortConfig(value)
if err != nil {
return data, err
return data, nil
}
if err != nil {
return nil, err


@ -34,3 +34,16 @@ func transformFileMount(data any, p tree.Path) (any, error) {
return nil, fmt.Errorf("%s: unsupported type %T", p, data)
}
}
func defaultSecretMount(data any, p tree.Path) (any, error) {
switch v := data.(type) {
case map[string]any:
source := v["source"]
if _, ok := v["target"]; !ok {
v["target"] = fmt.Sprintf("/run/secrets/%s", source)
}
return v, nil
default:
return nil, fmt.Errorf("%s: unsupported type %T", p, data)
}
}


@ -17,7 +17,7 @@
package types
type DevelopConfig struct {
Watch []Trigger `json:"watch,omitempty"`
Watch []Trigger `yaml:"watch,omitempty" json:"watch,omitempty"`
Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
}
@ -31,8 +31,8 @@ const (
)
type Trigger struct {
Path string `json:"path,omitempty"`
Action WatchAction `json:"action,omitempty"`
Target string `json:"target,omitempty"`
Ignore []string `json:"ignore,omitempty"`
Path string `yaml:"path" json:"path"`
Action WatchAction `yaml:"action" json:"action"`
Target string `yaml:"target,omitempty" json:"target,omitempty"`
Ignore []string `yaml:"ignore,omitempty" json:"ignore,omitempty"`
}


@ -24,7 +24,33 @@ import (
)
// HostsList is a list of colon-separated host-ip mappings
type HostsList map[string]string
type HostsList map[string][]string
// NewHostsList creates a HostsList from a list of `host=ip` strings
func NewHostsList(hosts []string) (HostsList, error) {
list := HostsList{}
for _, s := range hosts {
var found bool
for _, sep := range hostListSerapators {
host, ip, ok := strings.Cut(s, sep)
if ok {
// Mapping found with this separator, stop here.
if ips, ok := list[host]; ok {
list[host] = append(ips, ip)
} else {
list[host] = []string{ip}
}
found = true
break
}
}
if !found {
return nil, fmt.Errorf("invalid additional host, missing IP: %s", s)
}
}
err := list.cleanup()
return list, err
}
// AsList returns host-ip mappings as a list of strings, using the given
// separator. The Docker Engine API expects ':' separators, the original format
@ -34,7 +60,9 @@ type HostsList map[string]string
func (h HostsList) AsList(sep string) []string {
l := make([]string, 0, len(h))
for k, v := range h {
l = append(l, fmt.Sprintf("%s%s%s", k, sep, v))
for _, ip := range v {
l = append(l, fmt.Sprintf("%s%s%s", k, sep, ip))
}
}
return l
}
@ -51,6 +79,8 @@ func (h HostsList) MarshalJSON() ([]byte, error) {
return json.Marshal(list)
}
var hostListSerapators = []string{"=", ":"}
func (h *HostsList) DecodeMapstructure(value interface{}) error {
switch v := value.(type) {
case map[string]interface{}:
@ -59,25 +89,45 @@ func (h *HostsList) DecodeMapstructure(value interface{}) error {
if e == nil {
e = ""
}
list[i] = fmt.Sprint(e)
list[i] = []string{fmt.Sprint(e)}
}
err := list.cleanup()
if err != nil {
return err
}
*h = list
return nil
case []interface{}:
*h = decodeMapping(v, "=", ":")
s := make([]string, len(v))
for i, e := range v {
s[i] = fmt.Sprint(e)
}
list, err := NewHostsList(s)
if err != nil {
return err
}
*h = list
return nil
default:
return fmt.Errorf("unexpected value type %T for mapping", value)
}
for host, ip := range *h {
}
func (h HostsList) cleanup() error {
for host, ips := range h {
// Check that there is a hostname and that it doesn't contain either
// of the allowed separators, to generate a clearer error than the
// engine would do if it splits the string differently.
if host == "" || strings.ContainsAny(host, ":=") {
return fmt.Errorf("bad host name '%s'", host)
}
// Remove brackets from IP addresses (for example "[::1]" -> "::1").
if len(ip) > 2 && ip[0] == '[' && ip[len(ip)-1] == ']' {
(*h)[host] = ip[1 : len(ip)-1]
for i, ip := range ips {
// Remove brackets from IP addresses (for example "[::1]" -> "::1").
if len(ip) > 2 && ip[0] == '[' && ip[len(ip)-1] == ']' {
ips[i] = ip[1 : len(ip)-1]
}
}
h[host] = ips
}
return nil
}
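
Note: HostsList now maps a hostname to a list of IPs, so extra_hosts can declare the same host several times. A small sketch of the new constructor and list form (host names and addresses are illustrative):

package main

import (
    "fmt"
    "log"

    "github.com/compose-spec/compose-go/v2/types"
)

func main() {
    hosts, err := types.NewHostsList([]string{
        "myhost=10.0.0.1",
        "myhost=10.0.0.2",       // same host, second IP
        "otherhost:192.168.0.1", // the ':' separator is accepted too
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(hosts["myhost"])   // [10.0.0.1 10.0.0.2]
    fmt.Println(hosts.AsList(":")) // e.g. [myhost:10.0.0.1 myhost:10.0.0.2 otherhost:192.168.0.1] (map order)
}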


@ -29,6 +29,7 @@ import (
"github.com/distribution/reference"
"github.com/mitchellh/copystructure"
godigest "github.com/opencontainers/go-digest"
"golang.org/x/exp/maps"
"golang.org/x/sync/errgroup"
"gopkg.in/yaml.v3"
)
@ -46,13 +47,8 @@ type Project struct {
Configs Configs `yaml:"configs,omitempty" json:"configs,omitempty"`
Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` // https://github.com/golang/go/issues/6213
// IncludeReferences is keyed by Compose YAML filename and contains config for
// other Compose YAML files it directly triggered a load of via `include`.
//
// Note: this is
IncludeReferences map[string][]IncludeConfig `yaml:"-" json:"-"`
ComposeFiles []string `yaml:"-" json:"-"`
Environment Mapping `yaml:"-" json:"-"`
ComposeFiles []string `yaml:"-" json:"-"`
Environment Mapping `yaml:"-" json:"-"`
// DisabledServices track services which have been disable as profile is not active
DisabledServices Services `yaml:"-" json:"-"`
@ -119,6 +115,58 @@ func (p *Project) ConfigNames() []string {
return names
}
func (p *Project) ServicesWithBuild() []string {
servicesBuild := p.Services.Filter(func(s ServiceConfig) bool {
return s.Build != nil && s.Build.Context != ""
})
return maps.Keys(servicesBuild)
}
func (p *Project) ServicesWithExtends() []string {
servicesExtends := p.Services.Filter(func(s ServiceConfig) bool {
return s.Extends != nil && *s.Extends != (ExtendsConfig{})
})
return maps.Keys(servicesExtends)
}
func (p *Project) ServicesWithDependsOn() []string {
servicesDependsOn := p.Services.Filter(func(s ServiceConfig) bool {
return len(s.DependsOn) > 0
})
return maps.Keys(servicesDependsOn)
}
func (p *Project) ServicesWithCapabilities() ([]string, []string, []string) {
capabilities := []string{}
gpu := []string{}
tpu := []string{}
for _, service := range p.Services {
deploy := service.Deploy
if deploy == nil {
continue
}
reservation := deploy.Resources.Reservations
if reservation == nil {
continue
}
devices := reservation.Devices
for _, d := range devices {
if len(d.Capabilities) > 0 {
capabilities = append(capabilities, service.Name)
}
for _, c := range d.Capabilities {
if c == "gpu" {
gpu = append(gpu, service.Name)
} else if c == "tpu" {
tpu = append(tpu, service.Name)
}
}
}
}
return utils.RemoveDuplicates(capabilities), utils.RemoveDuplicates(gpu), utils.RemoveDuplicates(tpu)
}
// GetServices retrieve services by names, or return all services if no name specified
func (p *Project) GetServices(names ...string) (Services, error) {
if len(names) == 0 {


@ -33,3 +33,13 @@ func (s Services) GetProfiles() []string {
}
return profiles
}
func (s Services) Filter(predicate func(ServiceConfig) bool) Services {
services := Services{}
for name, service := range s {
if predicate(service) {
services[name] = service
}
}
return services
}
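
Note: Filter and the new Project helpers (ServicesWithBuild, ServicesWithExtends, ServicesWithDependsOn, ServicesWithCapabilities) answer "which services use feature X", which is what the Listeners/telemetry work in this release is after. A minimal sketch, assuming project is an already-loaded *types.Project:

// Hand-rolled filter: services that define a build section.
withBuild := project.Services.Filter(func(s types.ServiceConfig) bool {
    return s.Build != nil
})
for name := range withBuild {
    fmt.Println("buildable service:", name)
}
// Equivalent helper added in rc.8 (which additionally requires a non-empty build context).
fmt.Println(project.ServicesWithBuild())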


@ -162,6 +162,9 @@ func (s *ServiceConfig) NetworksByPriority() []string {
})
}
sort.Slice(keys, func(i, j int) bool {
if keys[i].priority == keys[j].priority {
return keys[i].name < keys[j].name
}
return keys[i].priority > keys[j].priority
})
var sorted []string


@ -51,3 +51,18 @@ func ArrayContains[T comparable](source []T, toCheck []T) bool {
}
return true
}
func RemoveDuplicates[T comparable](slice []T) []T {
// Create a map to store unique elements
seen := make(map[T]bool)
result := []T{}
// Loop through the slice, adding elements to the map if they haven't been seen before
for _, val := range slice {
if _, ok := seen[val]; !ok {
seen[val] = true
result = append(result, val)
}
}
return result
}


@ -1,39 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.13
package poly1305
// Generic fallbacks for the math/bits intrinsics, copied from
// src/math/bits/bits.go. They were added in Go 1.12, but Add64 and Sum64 had
// variable time fallbacks until Go 1.13.
func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) {
sum = x + y + carry
carryOut = ((x & y) | ((x | y) &^ sum)) >> 63
return
}
func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) {
diff = x - y - borrow
borrowOut = ((^x & y) | (^(x ^ y) & diff)) >> 63
return
}
func bitsMul64(x, y uint64) (hi, lo uint64) {
const mask32 = 1<<32 - 1
x0 := x & mask32
x1 := x >> 32
y0 := y & mask32
y1 := y >> 32
w0 := x0 * y0
t := x1*y0 + w0>>32
w1 := t & mask32
w2 := t >> 32
w1 += x0 * y1
hi = x1*y1 + w2 + w1>>32
lo = x * y
return
}


@ -1,21 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.13
package poly1305
import "math/bits"
func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) {
return bits.Add64(x, y, carry)
}
func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) {
return bits.Sub64(x, y, borrow)
}
func bitsMul64(x, y uint64) (hi, lo uint64) {
return bits.Mul64(x, y)
}


@ -7,7 +7,10 @@
package poly1305
import "encoding/binary"
import (
"encoding/binary"
"math/bits"
)
// Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag
// for a 64 bytes message is approximately
@ -114,13 +117,13 @@ type uint128 struct {
}
func mul64(a, b uint64) uint128 {
hi, lo := bitsMul64(a, b)
hi, lo := bits.Mul64(a, b)
return uint128{lo, hi}
}
func add128(a, b uint128) uint128 {
lo, c := bitsAdd64(a.lo, b.lo, 0)
hi, c := bitsAdd64(a.hi, b.hi, c)
lo, c := bits.Add64(a.lo, b.lo, 0)
hi, c := bits.Add64(a.hi, b.hi, c)
if c != 0 {
panic("poly1305: unexpected overflow")
}
@ -155,8 +158,8 @@ func updateGeneric(state *macState, msg []byte) {
// hide leading zeroes. For full chunks, that's 1 << 128, so we can just
// add 1 to the most significant (2¹²⁸) limb, h2.
if len(msg) >= TagSize {
h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0)
h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(msg[8:16]), c)
h0, c = bits.Add64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0)
h1, c = bits.Add64(h1, binary.LittleEndian.Uint64(msg[8:16]), c)
h2 += c + 1
msg = msg[TagSize:]
@ -165,8 +168,8 @@ func updateGeneric(state *macState, msg []byte) {
copy(buf[:], msg)
buf[len(msg)] = 1
h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0)
h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(buf[8:16]), c)
h0, c = bits.Add64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0)
h1, c = bits.Add64(h1, binary.LittleEndian.Uint64(buf[8:16]), c)
h2 += c
msg = nil
@ -219,9 +222,9 @@ func updateGeneric(state *macState, msg []byte) {
m3 := h2r1
t0 := m0.lo
t1, c := bitsAdd64(m1.lo, m0.hi, 0)
t2, c := bitsAdd64(m2.lo, m1.hi, c)
t3, _ := bitsAdd64(m3.lo, m2.hi, c)
t1, c := bits.Add64(m1.lo, m0.hi, 0)
t2, c := bits.Add64(m2.lo, m1.hi, c)
t3, _ := bits.Add64(m3.lo, m2.hi, c)
// Now we have the result as 4 64-bit limbs, and we need to reduce it
// modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do
@ -243,14 +246,14 @@ func updateGeneric(state *macState, msg []byte) {
// To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c.
h0, c = bitsAdd64(h0, cc.lo, 0)
h1, c = bitsAdd64(h1, cc.hi, c)
h0, c = bits.Add64(h0, cc.lo, 0)
h1, c = bits.Add64(h1, cc.hi, c)
h2 += c
cc = shiftRightBy2(cc)
h0, c = bitsAdd64(h0, cc.lo, 0)
h1, c = bitsAdd64(h1, cc.hi, c)
h0, c = bits.Add64(h0, cc.lo, 0)
h1, c = bits.Add64(h1, cc.hi, c)
h2 += c
// h2 is at most 3 + 1 + 1 = 5, making the whole of h at most
@ -287,9 +290,9 @@ func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) {
// in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the
// result if the subtraction underflows, and t otherwise.
hMinusP0, b := bitsSub64(h0, p0, 0)
hMinusP1, b := bitsSub64(h1, p1, b)
_, b = bitsSub64(h2, p2, b)
hMinusP0, b := bits.Sub64(h0, p0, 0)
hMinusP1, b := bits.Sub64(h1, p1, b)
_, b = bits.Sub64(h2, p2, b)
// h = h if h < p else h - p
h0 = select64(b, h0, hMinusP0)
@ -301,8 +304,8 @@ func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) {
//
// by just doing a wide addition with the 128 low bits of h and discarding
// the overflow.
h0, c := bitsAdd64(h0, s[0], 0)
h1, _ = bitsAdd64(h1, s[1], c)
h0, c := bits.Add64(h0, s[0], 0)
h1, _ = bits.Add64(h1, s[1], c)
binary.LittleEndian.PutUint64(out[0:8], h0)
binary.LittleEndian.PutUint64(out[8:16], h1)
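
Note: the bitsAdd64/bitsSub64/bitsMul64 shims only existed for pre-Go 1.13 toolchains; the updated vendored package calls the math/bits intrinsics directly. For reference, the carry/borrow-aware intrinsics behave like this (a tiny stand-alone example, not taken from the vendored code):

package main

import (
    "fmt"
    "math"
    "math/bits"
)

func main() {
    // 64-bit add with carry in/out: (2^64 - 1) + 1 overflows into the carry.
    sum, carry := bits.Add64(math.MaxUint64, 1, 0)
    fmt.Println(sum, carry) // 0 1

    // Full 128-bit product of two 64-bit operands: 2^32 * 2^32 = 2^64.
    hi, lo := bits.Mul64(1<<32, 1<<32)
    fmt.Println(hi, lo) // 1 0
}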

vendor/golang.org/x/exp/slices/cmp.go (new file, generated, vendored)

@ -0,0 +1,44 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package slices
import "golang.org/x/exp/constraints"
// min is a version of the predeclared function from the Go 1.21 release.
func min[T constraints.Ordered](a, b T) T {
if a < b || isNaN(a) {
return a
}
return b
}
// max is a version of the predeclared function from the Go 1.21 release.
func max[T constraints.Ordered](a, b T) T {
if a > b || isNaN(a) {
return a
}
return b
}
// cmpLess is a copy of cmp.Less from the Go 1.21 release.
func cmpLess[T constraints.Ordered](x, y T) bool {
return (isNaN(x) && !isNaN(y)) || x < y
}
// cmpCompare is a copy of cmp.Compare from the Go 1.21 release.
func cmpCompare[T constraints.Ordered](x, y T) int {
xNaN := isNaN(x)
yNaN := isNaN(y)
if xNaN && yNaN {
return 0
}
if xNaN || x < y {
return -1
}
if yNaN || x > y {
return +1
}
return 0
}


@ -3,23 +3,20 @@
// license that can be found in the LICENSE file.
// Package slices defines various functions useful with slices of any type.
// Unless otherwise specified, these functions all apply to the elements
// of a slice at index 0 <= i < len(s).
//
// Note that the less function in IsSortedFunc, SortFunc, SortStableFunc requires a
// strict weak ordering (https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings),
// or the sorting may fail to sort correctly. A common case is when sorting slices of
// floating-point numbers containing NaN values.
package slices
import "golang.org/x/exp/constraints"
import (
"unsafe"
"golang.org/x/exp/constraints"
)
// Equal reports whether two slices are equal: the same length and all
// elements equal. If the lengths are different, Equal returns false.
// Otherwise, the elements are compared in increasing index order, and the
// comparison stops at the first unequal pair.
// Floating point NaNs are not considered equal.
func Equal[E comparable](s1, s2 []E) bool {
func Equal[S ~[]E, E comparable](s1, s2 S) bool {
if len(s1) != len(s2) {
return false
}
@ -31,12 +28,12 @@ func Equal[E comparable](s1, s2 []E) bool {
return true
}
// EqualFunc reports whether two slices are equal using a comparison
// EqualFunc reports whether two slices are equal using an equality
// function on each pair of elements. If the lengths are different,
// EqualFunc returns false. Otherwise, the elements are compared in
// increasing index order, and the comparison stops at the first index
// for which eq returns false.
func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool {
func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool {
if len(s1) != len(s2) {
return false
}
@ -49,45 +46,37 @@ func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool {
return true
}
// Compare compares the elements of s1 and s2.
// The elements are compared sequentially, starting at index 0,
// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair
// of elements. The elements are compared sequentially, starting at index 0,
// until one element is not equal to the other.
// The result of comparing the first non-matching elements is returned.
// If both slices are equal until one of them ends, the shorter slice is
// considered less than the longer one.
// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2.
// Comparisons involving floating point NaNs are ignored.
func Compare[E constraints.Ordered](s1, s2 []E) int {
s2len := len(s2)
func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int {
for i, v1 := range s1 {
if i >= s2len {
if i >= len(s2) {
return +1
}
v2 := s2[i]
switch {
case v1 < v2:
return -1
case v1 > v2:
return +1
if c := cmpCompare(v1, v2); c != 0 {
return c
}
}
if len(s1) < s2len {
if len(s1) < len(s2) {
return -1
}
return 0
}
// CompareFunc is like Compare but uses a comparison function
// on each pair of elements. The elements are compared in increasing
// index order, and the comparisons stop after the first time cmp
// returns non-zero.
// CompareFunc is like [Compare] but uses a custom comparison function on each
// pair of elements.
// The result is the first non-zero result of cmp; if cmp always
// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2),
// and +1 if len(s1) > len(s2).
func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int {
s2len := len(s2)
func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int {
for i, v1 := range s1 {
if i >= s2len {
if i >= len(s2) {
return +1
}
v2 := s2[i]
@ -95,7 +84,7 @@ func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int {
return c
}
}
if len(s1) < s2len {
if len(s1) < len(s2) {
return -1
}
return 0
@ -103,7 +92,7 @@ func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int {
// Index returns the index of the first occurrence of v in s,
// or -1 if not present.
func Index[E comparable](s []E, v E) int {
func Index[S ~[]E, E comparable](s S, v E) int {
for i := range s {
if v == s[i] {
return i
@ -114,7 +103,7 @@ func Index[E comparable](s []E, v E) int {
// IndexFunc returns the first index i satisfying f(s[i]),
// or -1 if none do.
func IndexFunc[E any](s []E, f func(E) bool) int {
func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int {
for i := range s {
if f(s[i]) {
return i
@ -124,39 +113,104 @@ func IndexFunc[E any](s []E, f func(E) bool) int {
}
// Contains reports whether v is present in s.
func Contains[E comparable](s []E, v E) bool {
func Contains[S ~[]E, E comparable](s S, v E) bool {
return Index(s, v) >= 0
}
// ContainsFunc reports whether at least one
// element e of s satisfies f(e).
func ContainsFunc[E any](s []E, f func(E) bool) bool {
func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool {
return IndexFunc(s, f) >= 0
}
// Insert inserts the values v... into s at index i,
// returning the modified slice.
// In the returned slice r, r[i] == v[0].
// The elements at s[i:] are shifted up to make room.
// In the returned slice r, r[i] == v[0],
// and r[i+len(v)] == value originally at r[i].
// Insert panics if i is out of range.
// This function is O(len(s) + len(v)).
func Insert[S ~[]E, E any](s S, i int, v ...E) S {
tot := len(s) + len(v)
if tot <= cap(s) {
s2 := s[:tot]
copy(s2[i+len(v):], s[i:])
m := len(v)
if m == 0 {
return s
}
n := len(s)
if i == n {
return append(s, v...)
}
if n+m > cap(s) {
// Use append rather than make so that we bump the size of
// the slice up to the next storage class.
// This is what Grow does but we don't call Grow because
// that might copy the values twice.
s2 := append(s[:i], make(S, n+m-i)...)
copy(s2[i:], v)
copy(s2[i+m:], s[i:])
return s2
}
s2 := make(S, tot)
copy(s2, s[:i])
copy(s2[i:], v)
copy(s2[i+len(v):], s[i:])
return s2
s = s[:n+m]
// before:
// s: aaaaaaaabbbbccccccccdddd
// ^ ^ ^ ^
// i i+m n n+m
// after:
// s: aaaaaaaavvvvbbbbcccccccc
// ^ ^ ^ ^
// i i+m n n+m
//
// a are the values that don't move in s.
// v are the values copied in from v.
// b and c are the values from s that are shifted up in index.
// d are the values that get overwritten, never to be seen again.
if !overlaps(v, s[i+m:]) {
// Easy case - v does not overlap either the c or d regions.
// (It might be in some of a or b, or elsewhere entirely.)
// The data we copy up doesn't write to v at all, so just do it.
copy(s[i+m:], s[i:])
// Now we have
// s: aaaaaaaabbbbbbbbcccccccc
// ^ ^ ^ ^
// i i+m n n+m
// Note the b values are duplicated.
copy(s[i:], v)
// Now we have
// s: aaaaaaaavvvvbbbbcccccccc
// ^ ^ ^ ^
// i i+m n n+m
// That's the result we want.
return s
}
// The hard case - v overlaps c or d. We can't just shift up
// the data because we'd move or clobber the values we're trying
// to insert.
// So instead, write v on top of d, then rotate.
copy(s[n:], v)
// Now we have
// s: aaaaaaaabbbbccccccccvvvv
// ^ ^ ^ ^
// i i+m n n+m
rotateRight(s[i:], m)
// Now we have
// s: aaaaaaaavvvvbbbbcccccccc
// ^ ^ ^ ^
// i i+m n n+m
// That's the result we want.
return s
}
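The rewritten Insert avoids the old allocate-and-copy path when spare capacity exists and handles the case where v aliases the tail of s by writing v into the freed space and rotating. From the caller's side the contract is unchanged; a minimal sketch:

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	s := []string{"a", "b", "d"}
	s = slices.Insert(s, 2, "c") // r[2] == "c"; later elements shift up
	fmt.Println(s)               // [a b c d]

	s = slices.Insert(s, len(s), "e", "f") // inserting at len(s) simply appends
	fmt.Println(s)                         // [a b c d e f]
}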
// Delete removes the elements s[i:j] from s, returning the modified slice.
// Delete panics if s[i:j] is not a valid slice of s.
// Delete modifies the contents of the slice s; it does not create a new slice.
// Delete is O(len(s)-j), so if many items must be deleted, it is better to
// make a single call deleting them all together than to delete one at a time.
// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those
@ -175,39 +229,106 @@ func Delete[S ~[]E, E any](s S, i, j int) S {
// zeroing those elements so that objects they reference can be garbage
// collected.
func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
i := IndexFunc(s, del)
if i == -1 {
return s
}
// Don't start copying elements until we find one to delete.
for i, v := range s {
if del(v) {
j := i
for i++; i < len(s); i++ {
v = s[i]
if !del(v) {
s[j] = v
j++
}
}
return s[:j]
for j := i + 1; j < len(s); j++ {
if v := s[j]; !del(v) {
s[i] = v
i++
}
}
return s
return s[:i]
}
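The new DeleteFunc body delays any copying until the first element to delete is found, then compacts the kept elements in a single pass. A short usage sketch (the port values are arbitrary):

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	ports := []int{80, 0, 443, 0, 8080}
	ports = slices.DeleteFunc(ports, func(p int) bool { return p == 0 })
	fmt.Println(ports) // [80 443 8080]: in place, single pass, order preserved
}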
// Replace replaces the elements s[i:j] by the given v, and returns the
// modified slice. Replace panics if s[i:j] is not a valid slice of s.
func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
_ = s[i:j] // verify that i:j is a valid subslice
if i == j {
return Insert(s, i, v...)
}
if j == len(s) {
return append(s[:i], v...)
}
tot := len(s[:i]) + len(v) + len(s[j:])
if tot <= cap(s) {
s2 := s[:tot]
copy(s2[i+len(v):], s[j:])
if tot > cap(s) {
// Too big to fit, allocate and copy over.
s2 := append(s[:i], make(S, tot-i)...) // See Insert
copy(s2[i:], v)
copy(s2[i+len(v):], s[j:])
return s2
}
s2 := make(S, tot)
copy(s2, s[:i])
copy(s2[i:], v)
copy(s2[i+len(v):], s[j:])
return s2
r := s[:tot]
if i+len(v) <= j {
// Easy, as v fits in the deleted portion.
copy(r[i:], v)
if i+len(v) != j {
copy(r[i+len(v):], s[j:])
}
return r
}
// We are expanding (v is bigger than j-i).
// The situation is something like this:
// (example has i=4,j=8,len(s)=16,len(v)=6)
// s: aaaaxxxxbbbbbbbbyy
// ^ ^ ^ ^
// i j len(s) tot
// a: prefix of s
// x: deleted range
// b: more of s
// y: area to expand into
if !overlaps(r[i+len(v):], v) {
// Easy, as v is not clobbered by the first copy.
copy(r[i+len(v):], s[j:])
copy(r[i:], v)
return r
}
// This is a situation where we don't have a single place to which
// we can copy v. Parts of it need to go to two different places.
// We want to copy the prefix of v into y and the suffix into x, then
// rotate |y| spots to the right.
//
// v[2:] v[:2]
// | |
// s: aaaavvvvbbbbbbbbvv
// ^ ^ ^ ^
// i j len(s) tot
//
// If either of those two destinations doesn't alias v, then we're good.
// If either of those two destinations doesn't alias v, then we're good.
y := len(v) - (j - i) // length of y portion
if !overlaps(r[i:j], v) {
copy(r[i:j], v[y:])
copy(r[len(s):], v[:y])
rotateRight(r[i:], y)
return r
}
if !overlaps(r[len(s):], v) {
copy(r[len(s):], v[:y])
copy(r[i:j], v[y:])
rotateRight(r[i:], y)
return r
}
// Now we know that v overlaps both x and y.
// That means that the entirety of b is *inside* v.
// So we don't need to preserve b at all; instead we
// can copy v first, then copy the b part of v out of
// v to the right destination.
k := startIdx(v, s[j:])
copy(r[i:], v)
copy(r[i+len(v):], r[i+k:])
return r
}
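Replace now reuses spare capacity and, when v overlaps the region being shifted, falls back to the copy-then-rotate strategy described in the comments. Callers see the same semantics; a minimal sketch:

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	s := []string{"front", "x", "x", "back"}
	// Replace s[1:3] with three values; v may be longer than the replaced range.
	s = slices.Replace(s, 1, 3, "a", "b", "c")
	fmt.Println(s) // [front a b c back]
}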
// Clone returns a copy of the slice.
@ -222,7 +343,8 @@ func Clone[S ~[]E, E any](s S) S {
// Compact replaces consecutive runs of equal elements with a single copy.
// This is like the uniq command found on Unix.
// Compact modifies the contents of the slice s; it does not create a new slice.
// Compact modifies the contents of the slice s and returns the modified slice,
// which may have a smaller length.
// When Compact discards m elements in total, it might not modify the elements
// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
// zeroing those elements so that objects they reference can be garbage collected.
@ -242,7 +364,8 @@ func Compact[S ~[]E, E comparable](s S) S {
return s[:i]
}
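Usage of Compact is unchanged by this update; a one-line reminder of the uniq-style behaviour described above:

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	s := []int{1, 1, 2, 2, 2, 3}
	s = slices.Compact(s) // collapses consecutive runs of equal elements
	fmt.Println(s)        // [1 2 3]
}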
// CompactFunc is like Compact but uses a comparison function.
// CompactFunc is like [Compact] but uses an equality function to compare elements.
// For runs of elements that compare equal, CompactFunc keeps the first one.
func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
if len(s) < 2 {
return s
@ -280,3 +403,97 @@ func Grow[S ~[]E, E any](s S, n int) S {
func Clip[S ~[]E, E any](s S) S {
return s[:len(s):len(s)]
}
// Rotation algorithm explanation:
//
// rotate left by 2
// start with
// 0123456789
// split up like this
// 01 234567 89
// swap first 2 and last 2
// 89 234567 01
// join first parts
// 89234567 01
// recursively rotate first left part by 2
// 23456789 01
// join at the end
// 2345678901
//
// rotate left by 8
// start with
// 0123456789
// split up like this
// 01 234567 89
// swap first 2 and last 2
// 89 234567 01
// join last parts
// 89 23456701
// recursively rotate second part left by 6
// 89 01234567
// join at the end
// 8901234567
// TODO: There are other rotate algorithms.
// This algorithm has the desirable property that it moves each element exactly twice.
// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes.
// The follow-cycles algorithm can be 1-write but it is not very cache friendly.
// rotateLeft rotates b left by n spaces.
// s_final[i] = s_orig[i+r], wrapping around.
func rotateLeft[E any](s []E, r int) {
for r != 0 && r != len(s) {
if r*2 <= len(s) {
swap(s[:r], s[len(s)-r:])
s = s[:len(s)-r]
} else {
swap(s[:len(s)-r], s[r:])
s, r = s[len(s)-r:], r*2-len(s)
}
}
}
func rotateRight[E any](s []E, r int) {
rotateLeft(s, len(s)-r)
}
// swap swaps the contents of x and y. x and y must be equal length and disjoint.
func swap[E any](x, y []E) {
for i := 0; i < len(x); i++ {
x[i], y[i] = y[i], x[i]
}
}
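To make the rotation walkthrough above concrete, here is a standalone copy of rotateLeft and swap (duplicated verbatim from the vendored helpers, which are unexported) plus a tiny driver reproducing the "rotate left by 2" example:

package main

import "fmt"

// rotateLeft and swap mirror the vendored helpers above; they are repeated
// here only so the example can be run on its own.
func rotateLeft[E any](s []E, r int) {
	for r != 0 && r != len(s) {
		if r*2 <= len(s) {
			swap(s[:r], s[len(s)-r:])
			s = s[:len(s)-r]
		} else {
			swap(s[:len(s)-r], s[r:])
			s, r = s[len(s)-r:], r*2-len(s)
		}
	}
}

func swap[E any](x, y []E) {
	for i := 0; i < len(x); i++ {
		x[i], y[i] = y[i], x[i]
	}
}

func main() {
	s := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
	rotateLeft(s, 2)
	fmt.Println(s) // [2 3 4 5 6 7 8 9 0 1], matching the walkthrough above
}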
// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap.
func overlaps[E any](a, b []E) bool {
if len(a) == 0 || len(b) == 0 {
return false
}
elemSize := unsafe.Sizeof(a[0])
if elemSize == 0 {
return false
}
// TODO: use a runtime/unsafe facility once one becomes available. See issue 12445.
// Also see crypto/internal/alias/alias.go:AnyOverlap
return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) &&
uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1)
}
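overlaps is what lets Insert and Replace decide between the easy copy and the copy-then-rotate path. A standalone sketch that duplicates the helper (it is unexported in the vendored package) and probes a few aliasing cases:

package main

import (
	"fmt"
	"unsafe"
)

// overlaps is copied verbatim from the vendored helper above, purely for illustration.
func overlaps[E any](a, b []E) bool {
	if len(a) == 0 || len(b) == 0 {
		return false
	}
	elemSize := unsafe.Sizeof(a[0])
	if elemSize == 0 {
		return false
	}
	return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) &&
		uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1)
}

func main() {
	s := make([]int, 10)
	fmt.Println(overlaps(s[0:4], s[3:8]))     // true: both views share s[3]
	fmt.Println(overlaps(s[0:4], s[4:8]))     // false: disjoint halves of the same array
	fmt.Println(overlaps(s, make([]int, 10))) // false: different backing arrays
}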
// startIdx returns the index in haystack where the needle starts.
// prerequisite: the needle must be aliased entirely inside the haystack.
func startIdx[E any](haystack, needle []E) int {
p := &needle[0]
for i := range haystack {
if p == &haystack[i] {
return i
}
}
// TODO: what if the overlap is by a non-integral number of Es?
panic("needle not found")
}
// Reverse reverses the elements of the slice in place.
func Reverse[S ~[]E, E any](s S) {
for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
s[i], s[j] = s[j], s[i]
}
}

View File

@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp
package slices
import (
@ -11,57 +13,116 @@ import (
)
// Sort sorts a slice of any ordered type in ascending order.
// Sort may fail to sort correctly when sorting slices of floating-point
// numbers containing Not-a-number (NaN) values.
// Use slices.SortFunc(x, func(a, b float64) bool {return a < b || (math.IsNaN(a) && !math.IsNaN(b))})
// instead if the input may contain NaNs.
func Sort[E constraints.Ordered](x []E) {
// When sorting floating-point numbers, NaNs are ordered before other values.
func Sort[S ~[]E, E constraints.Ordered](x S) {
n := len(x)
pdqsortOrdered(x, 0, n, bits.Len(uint(n)))
}
// SortFunc sorts the slice x in ascending order as determined by the less function.
// This sort is not guaranteed to be stable.
// SortFunc sorts the slice x in ascending order as determined by the cmp
// function. This sort is not guaranteed to be stable.
// cmp(a, b) should return a negative number when a < b, a positive number when
// a > b and zero when a == b.
//
// SortFunc requires that less is a strict weak ordering.
// SortFunc requires that cmp is a strict weak ordering.
// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings.
func SortFunc[E any](x []E, less func(a, b E) bool) {
func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
n := len(x)
pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less)
pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp)
}
// SortStableFunc sorts the slice x while keeping the original order of equal
// elements, using less to compare elements.
func SortStableFunc[E any](x []E, less func(a, b E) bool) {
stableLessFunc(x, len(x), less)
// elements, using cmp to compare elements in the same way as [SortFunc].
func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
stableCmpFunc(x, len(x), cmp)
}
// IsSorted reports whether x is sorted in ascending order.
func IsSorted[E constraints.Ordered](x []E) bool {
func IsSorted[S ~[]E, E constraints.Ordered](x S) bool {
for i := len(x) - 1; i > 0; i-- {
if x[i] < x[i-1] {
if cmpLess(x[i], x[i-1]) {
return false
}
}
return true
}
// IsSortedFunc reports whether x is sorted in ascending order, with less as the
// comparison function.
func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool {
// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the
// comparison function as defined by [SortFunc].
func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool {
for i := len(x) - 1; i > 0; i-- {
if less(x[i], x[i-1]) {
if cmp(x[i], x[i-1]) < 0 {
return false
}
}
return true
}
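The sort entry points switch from a less(a, b) bool predicate to a three-way cmp(a, b) int function, which pairs naturally with cmp.Compare from the Go 1.21 standard library. A minimal sketch of the new contract (the service type is made up for illustration):

package main

import (
	"cmp"
	"fmt"

	"golang.org/x/exp/slices"
)

type service struct {
	name     string
	replicas int
}

func main() {
	svcs := []service{{"web", 3}, {"db", 1}, {"cache", 2}}
	byName := func(a, b service) int { return cmp.Compare(a.name, b.name) }

	slices.SortFunc(svcs, byName)               // cmp returns <0, 0 or >0
	fmt.Println(svcs)                           // [{cache 2} {db 1} {web 3}]
	fmt.Println(slices.IsSortedFunc(svcs, byName)) // true
}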
// Min returns the minimal value in x. It panics if x is empty.
// For floating-point numbers, Min propagates NaNs (any NaN value in x
// forces the output to be NaN).
func Min[S ~[]E, E constraints.Ordered](x S) E {
if len(x) < 1 {
panic("slices.Min: empty list")
}
m := x[0]
for i := 1; i < len(x); i++ {
m = min(m, x[i])
}
return m
}
// MinFunc returns the minimal value in x, using cmp to compare elements.
// It panics if x is empty. If there is more than one minimal element
// according to the cmp function, MinFunc returns the first one.
func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
if len(x) < 1 {
panic("slices.MinFunc: empty list")
}
m := x[0]
for i := 1; i < len(x); i++ {
if cmp(x[i], m) < 0 {
m = x[i]
}
}
return m
}
// Max returns the maximal value in x. It panics if x is empty.
// For floating-point E, Max propagates NaNs (any NaN value in x
// forces the output to be NaN).
func Max[S ~[]E, E constraints.Ordered](x S) E {
if len(x) < 1 {
panic("slices.Max: empty list")
}
m := x[0]
for i := 1; i < len(x); i++ {
m = max(m, x[i])
}
return m
}
// MaxFunc returns the maximal value in x, using cmp to compare elements.
// It panics if x is empty. If there is more than one maximal element
// according to the cmp function, MaxFunc returns the first one.
func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
if len(x) < 1 {
panic("slices.MaxFunc: empty list")
}
m := x[0]
for i := 1; i < len(x); i++ {
if cmp(x[i], m) > 0 {
m = x[i]
}
}
return m
}
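Min, Max, MinFunc and MaxFunc are new in this vendored version. A short sketch (values are arbitrary):

package main

import (
	"cmp"
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	sizes := []int{512, 128, 2048}
	fmt.Println(slices.Min(sizes), slices.Max(sizes)) // 128 2048

	// MinFunc/MaxFunc return the first minimal/maximal element under cmp.
	words := []string{"go", "compose", "buildx"}
	longest := slices.MaxFunc(words, func(a, b string) int { return cmp.Compare(len(a), len(b)) })
	fmt.Println(longest) // compose
}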
// BinarySearch searches for target in a sorted slice and returns the position
// where target is found, or the position where target would appear in the
// sort order; it also returns a bool saying whether the target is really found
// in the slice. The slice must be sorted in increasing order.
func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) {
func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) {
// Inlining is faster than calling BinarySearchFunc with a lambda.
n := len(x)
// Define x[-1] < target and x[n] >= target.
@ -70,24 +131,24 @@ func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) {
for i < j {
h := int(uint(i+j) >> 1) // avoid overflow when computing h
// i ≤ h < j
if x[h] < target {
if cmpLess(x[h], target) {
i = h + 1 // preserves x[i-1] < target
} else {
j = h // preserves x[j] >= target
}
}
// i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i.
return i, i < n && x[i] == target
return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target)))
}
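BinarySearch keeps its (index, found) contract and now also treats a NaN target as matching a NaN element. A minimal sketch on a sorted string slice:

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	names := []string{"Alice", "Bob", "Vera"} // must already be sorted
	i, found := slices.BinarySearch(names, "Vera")
	fmt.Println(i, found) // 2 true

	i, found = slices.BinarySearch(names, "Charlie")
	fmt.Println(i, found) // 2 false: insertion position, target absent
}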
// BinarySearchFunc works like BinarySearch, but uses a custom comparison
// BinarySearchFunc works like [BinarySearch], but uses a custom comparison
// function. The slice must be sorted in increasing order, where "increasing"
// is defined by cmp. cmp should return 0 if the slice element matches
// the target, a negative number if the slice element precedes the target,
// or a positive number if the slice element follows the target.
// cmp must implement the same ordering as the slice, such that if
// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice.
func BinarySearchFunc[E, T any](x []E, target T, cmp func(E, T) int) (int, bool) {
func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) {
n := len(x)
// Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 .
// Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0.
@ -126,3 +187,9 @@ func (r *xorshift) Next() uint64 {
func nextPowerOfTwo(length int) uint {
return 1 << bits.Len(uint(length))
}
// isNaN reports whether x is a NaN without requiring the math package.
// This will always return false if T is not floating-point.
func isNaN[T constraints.Ordered](x T) bool {
return x != x
}

View File

@ -6,28 +6,28 @@
package slices
// insertionSortLessFunc sorts data[a:b] using insertion sort.
func insertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
// insertionSortCmpFunc sorts data[a:b] using insertion sort.
func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
for i := a + 1; i < b; i++ {
for j := i; j > a && less(data[j], data[j-1]); j-- {
for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- {
data[j], data[j-1] = data[j-1], data[j]
}
}
}
// siftDownLessFunc implements the heap property on data[lo:hi].
// siftDownCmpFunc implements the heap property on data[lo:hi].
// first is an offset into the array where the root of the heap lies.
func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool) {
func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) {
root := lo
for {
child := 2*root + 1
if child >= hi {
break
}
if child+1 < hi && less(data[first+child], data[first+child+1]) {
if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) {
child++
}
if !less(data[first+root], data[first+child]) {
if !(cmp(data[first+root], data[first+child]) < 0) {
return
}
data[first+root], data[first+child] = data[first+child], data[first+root]
@ -35,30 +35,30 @@ func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool
}
}
func heapSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
first := a
lo := 0
hi := b - a
// Build heap with greatest element at top.
for i := (hi - 1) / 2; i >= 0; i-- {
siftDownLessFunc(data, i, hi, first, less)
siftDownCmpFunc(data, i, hi, first, cmp)
}
// Pop elements, largest first, into end of data.
for i := hi - 1; i >= 0; i-- {
data[first], data[first+i] = data[first+i], data[first]
siftDownLessFunc(data, lo, i, first, less)
siftDownCmpFunc(data, lo, i, first, cmp)
}
}
// pdqsortLessFunc sorts data[a:b].
// pdqsortCmpFunc sorts data[a:b].
// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
// C++ implementation: https://github.com/orlp/pdqsort
// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) {
func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) {
const maxInsertion = 12
var (
@ -70,25 +70,25 @@ func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) {
length := b - a
if length <= maxInsertion {
insertionSortLessFunc(data, a, b, less)
insertionSortCmpFunc(data, a, b, cmp)
return
}
// Fall back to heapsort if too many bad choices were made.
if limit == 0 {
heapSortLessFunc(data, a, b, less)
heapSortCmpFunc(data, a, b, cmp)
return
}
// If the last partitioning was imbalanced, we need to break patterns.
if !wasBalanced {
breakPatternsLessFunc(data, a, b, less)
breakPatternsCmpFunc(data, a, b, cmp)
limit--
}
pivot, hint := choosePivotLessFunc(data, a, b, less)
pivot, hint := choosePivotCmpFunc(data, a, b, cmp)
if hint == decreasingHint {
reverseRangeLessFunc(data, a, b, less)
reverseRangeCmpFunc(data, a, b, cmp)
// The chosen pivot was pivot-a elements after the start of the array.
// After reversing it is pivot-a elements before the end of the array.
// The idea came from Rust's implementation.
@ -98,48 +98,48 @@ func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) {
// The slice is likely already sorted.
if wasBalanced && wasPartitioned && hint == increasingHint {
if partialInsertionSortLessFunc(data, a, b, less) {
if partialInsertionSortCmpFunc(data, a, b, cmp) {
return
}
}
// Probably the slice contains many duplicate elements, partition the slice into
// elements equal to and elements greater than the pivot.
if a > 0 && !less(data[a-1], data[pivot]) {
mid := partitionEqualLessFunc(data, a, b, pivot, less)
if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) {
mid := partitionEqualCmpFunc(data, a, b, pivot, cmp)
a = mid
continue
}
mid, alreadyPartitioned := partitionLessFunc(data, a, b, pivot, less)
mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp)
wasPartitioned = alreadyPartitioned
leftLen, rightLen := mid-a, b-mid
balanceThreshold := length / 8
if leftLen < rightLen {
wasBalanced = leftLen >= balanceThreshold
pdqsortLessFunc(data, a, mid, limit, less)
pdqsortCmpFunc(data, a, mid, limit, cmp)
a = mid + 1
} else {
wasBalanced = rightLen >= balanceThreshold
pdqsortLessFunc(data, mid+1, b, limit, less)
pdqsortCmpFunc(data, mid+1, b, limit, cmp)
b = mid
}
}
}
// partitionLessFunc does one quicksort partition.
// partitionCmpFunc does one quicksort partition.
// Let p = data[pivot]
// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
// On return, data[newpivot] = p
func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int, alreadyPartitioned bool) {
func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) {
data[a], data[pivot] = data[pivot], data[a]
i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
for i <= j && less(data[i], data[a]) {
for i <= j && (cmp(data[i], data[a]) < 0) {
i++
}
for i <= j && !less(data[j], data[a]) {
for i <= j && !(cmp(data[j], data[a]) < 0) {
j--
}
if i > j {
@ -151,10 +151,10 @@ func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool)
j--
for {
for i <= j && less(data[i], data[a]) {
for i <= j && (cmp(data[i], data[a]) < 0) {
i++
}
for i <= j && !less(data[j], data[a]) {
for i <= j && !(cmp(data[j], data[a]) < 0) {
j--
}
if i > j {
@ -168,17 +168,17 @@ func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool)
return j, false
}
// partitionEqualLessFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
// It assumes that data[a:b] does not contain elements smaller than data[pivot].
func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int) {
func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) {
data[a], data[pivot] = data[pivot], data[a]
i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
for {
for i <= j && !less(data[a], data[i]) {
for i <= j && !(cmp(data[a], data[i]) < 0) {
i++
}
for i <= j && less(data[a], data[j]) {
for i <= j && (cmp(data[a], data[j]) < 0) {
j--
}
if i > j {
@ -191,15 +191,15 @@ func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E)
return i
}
// partialInsertionSortLessFunc partially sorts a slice, returns true if the slice is sorted at the end.
func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) bool {
// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end.
func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool {
const (
maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
shortestShifting = 50 // don't shift any elements on short arrays
)
i := a + 1
for j := 0; j < maxSteps; j++ {
for i < b && !less(data[i], data[i-1]) {
for i < b && !(cmp(data[i], data[i-1]) < 0) {
i++
}
@ -216,7 +216,7 @@ func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) b
// Shift the smaller one to the left.
if i-a >= 2 {
for j := i - 1; j >= 1; j-- {
if !less(data[j], data[j-1]) {
if !(cmp(data[j], data[j-1]) < 0) {
break
}
data[j], data[j-1] = data[j-1], data[j]
@ -225,7 +225,7 @@ func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) b
// Shift the greater one to the right.
if b-i >= 2 {
for j := i + 1; j < b; j++ {
if !less(data[j], data[j-1]) {
if !(cmp(data[j], data[j-1]) < 0) {
break
}
data[j], data[j-1] = data[j-1], data[j]
@ -235,9 +235,9 @@ func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) b
return false
}
// breakPatternsLessFunc scatters some elements around in an attempt to break some patterns
// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns
// that might cause imbalanced partitions in quicksort.
func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
length := b - a
if length >= 8 {
random := xorshift(length)
@ -253,12 +253,12 @@ func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
}
}
// choosePivotLessFunc chooses a pivot in data[a:b].
// choosePivotCmpFunc chooses a pivot in data[a:b].
//
// [0,8): chooses a static pivot.
// [8,shortestNinther): uses the simple median-of-three method.
// [shortestNinther,∞): uses the Tukey ninther method.
func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (pivot int, hint sortedHint) {
func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) {
const (
shortestNinther = 50
maxSwaps = 4 * 3
@ -276,12 +276,12 @@ func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (piv
if l >= 8 {
if l >= shortestNinther {
// Tukey ninther method, the idea came from Rust's implementation.
i = medianAdjacentLessFunc(data, i, &swaps, less)
j = medianAdjacentLessFunc(data, j, &swaps, less)
k = medianAdjacentLessFunc(data, k, &swaps, less)
i = medianAdjacentCmpFunc(data, i, &swaps, cmp)
j = medianAdjacentCmpFunc(data, j, &swaps, cmp)
k = medianAdjacentCmpFunc(data, k, &swaps, cmp)
}
// Find the median among i, j, k and store it into j.
j = medianLessFunc(data, i, j, k, &swaps, less)
j = medianCmpFunc(data, i, j, k, &swaps, cmp)
}
switch swaps {
@ -294,29 +294,29 @@ func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (piv
}
}
// order2LessFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
func order2LessFunc[E any](data []E, a, b int, swaps *int, less func(a, b E) bool) (int, int) {
if less(data[b], data[a]) {
// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) {
if cmp(data[b], data[a]) < 0 {
*swaps++
return b, a
}
return a, b
}
// medianLessFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
func medianLessFunc[E any](data []E, a, b, c int, swaps *int, less func(a, b E) bool) int {
a, b = order2LessFunc(data, a, b, swaps, less)
b, c = order2LessFunc(data, b, c, swaps, less)
a, b = order2LessFunc(data, a, b, swaps, less)
// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int {
a, b = order2CmpFunc(data, a, b, swaps, cmp)
b, c = order2CmpFunc(data, b, c, swaps, cmp)
a, b = order2CmpFunc(data, a, b, swaps, cmp)
return b
}
// medianAdjacentLessFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
func medianAdjacentLessFunc[E any](data []E, a int, swaps *int, less func(a, b E) bool) int {
return medianLessFunc(data, a-1, a, a+1, swaps, less)
// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int {
return medianCmpFunc(data, a-1, a, a+1, swaps, cmp)
}
func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
i := a
j := b - 1
for i < j {
@ -326,37 +326,37 @@ func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
}
}
func swapRangeLessFunc[E any](data []E, a, b, n int, less func(a, b E) bool) {
func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) {
for i := 0; i < n; i++ {
data[a+i], data[b+i] = data[b+i], data[a+i]
}
}
func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) {
func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) {
blockSize := 20 // must be > 0
a, b := 0, blockSize
for b <= n {
insertionSortLessFunc(data, a, b, less)
insertionSortCmpFunc(data, a, b, cmp)
a = b
b += blockSize
}
insertionSortLessFunc(data, a, n, less)
insertionSortCmpFunc(data, a, n, cmp)
for blockSize < n {
a, b = 0, 2*blockSize
for b <= n {
symMergeLessFunc(data, a, a+blockSize, b, less)
symMergeCmpFunc(data, a, a+blockSize, b, cmp)
a = b
b += 2 * blockSize
}
if m := a + blockSize; m < n {
symMergeLessFunc(data, a, m, n, less)
symMergeCmpFunc(data, a, m, n, cmp)
}
blockSize *= 2
}
}
// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using
// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using
// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
@ -375,7 +375,7 @@ func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) {
// symMerge assumes non-degenerate arguments: a < m && m < b.
// Having the caller check this condition eliminates many leaf recursion calls,
// which improves performance.
func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
// Avoid unnecessary recursions of symMerge
// by direct insertion of data[a] into data[m:b]
// if data[a:m] only contains one element.
@ -387,7 +387,7 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
j := b
for i < j {
h := int(uint(i+j) >> 1)
if less(data[h], data[a]) {
if cmp(data[h], data[a]) < 0 {
i = h + 1
} else {
j = h
@ -411,7 +411,7 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
j := m
for i < j {
h := int(uint(i+j) >> 1)
if !less(data[m], data[h]) {
if !(cmp(data[m], data[h]) < 0) {
i = h + 1
} else {
j = h
@ -438,7 +438,7 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
for start < r {
c := int(uint(start+r) >> 1)
if !less(data[p-c], data[c]) {
if !(cmp(data[p-c], data[c]) < 0) {
start = c + 1
} else {
r = c
@ -447,33 +447,33 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
end := n - start
if start < m && m < end {
rotateLessFunc(data, start, m, end, less)
rotateCmpFunc(data, start, m, end, cmp)
}
if a < start && start < mid {
symMergeLessFunc(data, a, start, mid, less)
symMergeCmpFunc(data, a, start, mid, cmp)
}
if mid < end && end < b {
symMergeLessFunc(data, mid, end, b, less)
symMergeCmpFunc(data, mid, end, b, cmp)
}
}
// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
// Data of the form 'x u v y' is changed to 'x v u y'.
// rotateCmpFunc performs at most b-a element swaps,
// and it assumes non-degenerate arguments: a < m && m < b.
func rotateLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
i := m - a
j := b - m
for i != j {
if i > j {
swapRangeLessFunc(data, m-i, m, j, less)
swapRangeCmpFunc(data, m-i, m, j, cmp)
i -= j
} else {
swapRangeLessFunc(data, m-i, m+j-i, i, less)
swapRangeCmpFunc(data, m-i, m+j-i, i, cmp)
j -= i
}
}
// i == j
swapRangeLessFunc(data, m-i, m, i, less)
swapRangeCmpFunc(data, m-i, m, i, cmp)
}

View File

@ -11,7 +11,7 @@ import "golang.org/x/exp/constraints"
// insertionSortOrdered sorts data[a:b] using insertion sort.
func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) {
for i := a + 1; i < b; i++ {
for j := i; j > a && (data[j] < data[j-1]); j-- {
for j := i; j > a && cmpLess(data[j], data[j-1]); j-- {
data[j], data[j-1] = data[j-1], data[j]
}
}
@ -26,10 +26,10 @@ func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) {
if child >= hi {
break
}
if child+1 < hi && (data[first+child] < data[first+child+1]) {
if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) {
child++
}
if !(data[first+root] < data[first+child]) {
if !cmpLess(data[first+root], data[first+child]) {
return
}
data[first+root], data[first+child] = data[first+child], data[first+root]
@ -107,7 +107,7 @@ func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {
// Probably the slice contains many duplicate elements, partition the slice into
// elements equal to and elements greater than the pivot.
if a > 0 && !(data[a-1] < data[pivot]) {
if a > 0 && !cmpLess(data[a-1], data[pivot]) {
mid := partitionEqualOrdered(data, a, b, pivot)
a = mid
continue
@ -138,10 +138,10 @@ func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivo
data[a], data[pivot] = data[pivot], data[a]
i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
for i <= j && (data[i] < data[a]) {
for i <= j && cmpLess(data[i], data[a]) {
i++
}
for i <= j && !(data[j] < data[a]) {
for i <= j && !cmpLess(data[j], data[a]) {
j--
}
if i > j {
@ -153,10 +153,10 @@ func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivo
j--
for {
for i <= j && (data[i] < data[a]) {
for i <= j && cmpLess(data[i], data[a]) {
i++
}
for i <= j && !(data[j] < data[a]) {
for i <= j && !cmpLess(data[j], data[a]) {
j--
}
if i > j {
@ -177,10 +177,10 @@ func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (ne
i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
for {
for i <= j && !(data[a] < data[i]) {
for i <= j && !cmpLess(data[a], data[i]) {
i++
}
for i <= j && (data[a] < data[j]) {
for i <= j && cmpLess(data[a], data[j]) {
j--
}
if i > j {
@ -201,7 +201,7 @@ func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool
)
i := a + 1
for j := 0; j < maxSteps; j++ {
for i < b && !(data[i] < data[i-1]) {
for i < b && !cmpLess(data[i], data[i-1]) {
i++
}
@ -218,7 +218,7 @@ func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool
// Shift the smaller one to the left.
if i-a >= 2 {
for j := i - 1; j >= 1; j-- {
if !(data[j] < data[j-1]) {
if !cmpLess(data[j], data[j-1]) {
break
}
data[j], data[j-1] = data[j-1], data[j]
@ -227,7 +227,7 @@ func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool
// Shift the greater one to the right.
if b-i >= 2 {
for j := i + 1; j < b; j++ {
if !(data[j] < data[j-1]) {
if !cmpLess(data[j], data[j-1]) {
break
}
data[j], data[j-1] = data[j-1], data[j]
@ -298,7 +298,7 @@ func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, h
// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) {
if data[b] < data[a] {
if cmpLess(data[b], data[a]) {
*swaps++
return b, a
}
@ -389,7 +389,7 @@ func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
j := b
for i < j {
h := int(uint(i+j) >> 1)
if data[h] < data[a] {
if cmpLess(data[h], data[a]) {
i = h + 1
} else {
j = h
@ -413,7 +413,7 @@ func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
j := m
for i < j {
h := int(uint(i+j) >> 1)
if !(data[m] < data[h]) {
if !cmpLess(data[m], data[h]) {
i = h + 1
} else {
j = h
@ -440,7 +440,7 @@ func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
for start < r {
c := int(uint(start+r) >> 1)
if !(data[p-c] < data[c]) {
if !cmpLess(data[p-c], data[c]) {
start = c + 1
} else {
r = c

View File

@ -4,6 +4,9 @@
// Package errgroup provides synchronization, error propagation, and Context
// cancelation for groups of goroutines working on subtasks of a common task.
//
// [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks
// returning errors.
package errgroup
import (
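The added doc sentence positions errgroup.Group as a sync.WaitGroup with error handling. For context, a minimal usage sketch of the package's existing API (the URLs are placeholders):

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, ctx := errgroup.WithContext(context.Background())
	urls := []string{"https://example.com/a", "https://example.com/b"}
	for _, u := range urls {
		u := u
		g.Go(func() error {
			// A real fetch(ctx, u) would go here; returning a non-nil error
			// cancels ctx for the other goroutines.
			_ = ctx
			fmt.Println("processed", u)
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("first error:", err)
	}
}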

View File

@ -2,11 +2,14 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.22
// +build !go1.22
package versions
// Note: If we use build tags to use go/versions when go >=1.22,
// we run into go.dev/issue/53737. Under some operations users would see an
// import of "go/versions" even if they would not compile the file.
// For example, during `go get -u ./...` (go.dev/issue/64490) we do not try to include
// For this reason, this library is just a clone of go/versions for the moment.
// Lang returns the Go language version for version x.
// If x is not a valid version, Lang returns the empty string.
// For example:

View File

@ -1,38 +0,0 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.22
// +build go1.22
package versions
import (
"go/version"
)
// Lang returns the Go language version for version x.
// If x is not a valid version, Lang returns the empty string.
// For example:
//
// Lang("go1.21rc2") = "go1.21"
// Lang("go1.21.2") = "go1.21"
// Lang("go1.21") = "go1.21"
// Lang("go1") = "go1"
// Lang("bad") = ""
// Lang("1.21") = ""
func Lang(x string) string { return version.Lang(x) }
// Compare returns -1, 0, or +1 depending on whether
// x < y, x == y, or x > y, interpreted as Go versions.
// The versions x and y must begin with a "go" prefix: "go1.21" not "1.21".
// Invalid versions, including the empty string, compare less than
// valid versions and equal to each other.
// The language version "go1.21" compares less than the
// release candidate and eventual releases "go1.21rc1" and "go1.21.0".
// Custom toolchain suffixes are ignored during comparison:
// "go1.21.0" and "go1.21.0-bigcorp" are equal.
func Compare(x, y string) int { return version.Compare(x, y) }
// IsValid reports whether the version x is valid.
func IsValid(x string) bool { return version.IsValid(x) }

16
vendor/modules.txt vendored
View File

@ -131,8 +131,8 @@ github.com/cenkalti/backoff/v4
# github.com/cespare/xxhash/v2 v2.2.0
## explicit; go 1.11
github.com/cespare/xxhash/v2
# github.com/compose-spec/compose-go/v2 v2.0.0-rc.3
## explicit; go 1.20
# github.com/compose-spec/compose-go/v2 v2.0.0-rc.8
## explicit; go 1.21
github.com/compose-spec/compose-go/v2/cli
github.com/compose-spec/compose-go/v2/consts
github.com/compose-spec/compose-go/v2/dotenv
@ -832,7 +832,7 @@ go.opentelemetry.io/proto/otlp/common/v1
go.opentelemetry.io/proto/otlp/metrics/v1
go.opentelemetry.io/proto/otlp/resource/v1
go.opentelemetry.io/proto/otlp/trace/v1
# golang.org/x/crypto v0.17.0
# golang.org/x/crypto v0.18.0
## explicit; go 1.18
golang.org/x/crypto/bcrypt
golang.org/x/crypto/blowfish
@ -847,7 +847,7 @@ golang.org/x/crypto/pbkdf2
golang.org/x/crypto/ssh
golang.org/x/crypto/ssh/agent
golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
# golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1
# golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3
## explicit; go 1.20
golang.org/x/exp/constraints
golang.org/x/exp/maps
@ -857,7 +857,7 @@ golang.org/x/exp/slices
golang.org/x/mod/internal/lazyregexp
golang.org/x/mod/module
golang.org/x/mod/semver
# golang.org/x/net v0.19.0
# golang.org/x/net v0.20.0
## explicit; go 1.18
golang.org/x/net/context
golang.org/x/net/html
@ -874,7 +874,7 @@ golang.org/x/net/trace
## explicit; go 1.18
golang.org/x/oauth2
golang.org/x/oauth2/internal
# golang.org/x/sync v0.5.0
# golang.org/x/sync v0.6.0
## explicit; go 1.18
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
@ -885,7 +885,7 @@ golang.org/x/sys/plan9
golang.org/x/sys/unix
golang.org/x/sys/windows
golang.org/x/sys/windows/registry
# golang.org/x/term v0.15.0
# golang.org/x/term v0.16.0
## explicit; go 1.18
golang.org/x/term
# golang.org/x/text v0.14.0
@ -898,7 +898,7 @@ golang.org/x/text/width
# golang.org/x/time v0.3.0
## explicit
golang.org/x/time/rate
# golang.org/x/tools v0.16.1
# golang.org/x/tools v0.17.0
## explicit; go 1.18
golang.org/x/tools/cmd/stringer
golang.org/x/tools/go/gcexportdata