Mirror of https://github.com/docker/buildx.git (synced 2024-11-22 15:37:16 +08:00)
vendor: update buildkit to v0.8
Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
This commit is contained in: parent 080e9981c7, commit 69a1419ab1
go.mod (17 changed lines)
@@ -10,12 +10,12 @@ require (
 	github.com/cenkalti/backoff v2.1.1+incompatible // indirect
 	github.com/cloudflare/cfssl v0.0.0-20181213083726-b94e044bb51e // indirect
 	github.com/containerd/console v1.0.1
-	github.com/containerd/containerd v1.4.1-0.20200903181227-d4e78200d6da
+	github.com/containerd/containerd v1.4.1-0.20201117152358-0edc412565dc
 	github.com/denisenkom/go-mssqldb v0.0.0-20190315220205-a8ed825ac853 // indirect
-	github.com/docker/cli v0.0.0-20200911150641-2955ece02443
+	github.com/docker/cli v20.10.0-beta1.0.20201029214301-1d20b15adc38+incompatible
 	github.com/docker/compose-on-kubernetes v0.4.19-0.20190128150448-356b2919c496 // indirect
 	github.com/docker/distribution v2.7.1+incompatible
-	github.com/docker/docker v17.12.0-ce-rc1.0.20200730172259-9f28837c1d93+incompatible
+	github.com/docker/docker v20.10.0-beta1.0.20201110211921-af34b94a78a1+incompatible
 	github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
 	github.com/docker/go-metrics v0.0.1 // indirect
 	github.com/docker/libtrust v0.0.0-20150526203908-9cbd2a1374f4 // indirect
@@ -23,28 +23,25 @@ require (
 	github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484 // indirect
 	github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 // indirect
 	github.com/fvbommel/sortorder v1.0.1 // indirect
-	github.com/go-sql-driver/mysql v1.4.1 // indirect
 	github.com/gofrs/flock v0.7.3
 	github.com/gofrs/uuid v3.2.0+incompatible // indirect
 	github.com/google/certificate-transparency-go v1.0.21 // indirect
-	github.com/google/shlex v0.0.0-20150127133951-6f45313302b9
+	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
 	github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
 	github.com/hashicorp/hcl/v2 v2.6.0
 	github.com/jinzhu/gorm v1.9.2 // indirect
 	github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a // indirect
 	github.com/jinzhu/now v1.0.0 // indirect
 	github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
-	github.com/lib/pq v1.0.0 // indirect
 	github.com/mattn/go-sqlite3 v1.10.0 // indirect
 	github.com/miekg/pkcs11 v0.0.0-20190322140431-074fd7a1ed19 // indirect
-	github.com/moby/buildkit v0.7.1-0.20200917171726-2943a0838929
-	github.com/moby/term v0.0.0-20200911173544-4fc2018d01d9 // indirect
+	github.com/moby/buildkit v0.8.1-0.20201205083753-0af7b1b9c693
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/opencontainers/image-spec v1.0.1
 	github.com/pkg/errors v0.9.1
 	github.com/prometheus/client_golang v1.7.1 // indirect
 	github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002
-	github.com/sirupsen/logrus v1.6.0
+	github.com/sirupsen/logrus v1.7.0
 	github.com/spf13/cobra v1.0.0
 	github.com/spf13/pflag v1.0.5
 	github.com/stretchr/testify v1.5.1
@@ -52,7 +49,7 @@ require (
 	github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea
 	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
 	github.com/zclconf/go-cty v1.4.0
-	golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
+	golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208
 	gopkg.in/dancannon/gorethink.v3 v3.0.5 // indirect
 	gopkg.in/fatih/pool.v2 v2.0.0 // indirect
 	gopkg.in/gorethink/gorethink.v3 v3.0.5 // indirect
vendor/cloud.google.com/go/compute/metadata/.repo-metadata.json (generated, vendored; 12 lines deleted)
@@ -1,12 +0,0 @@
-{
-  "name": "metadata",
-  "name_pretty": "Google Compute Engine Metadata API",
-  "product_documentation": "https://cloud.google.com/compute/docs/storing-retrieving-metadata",
-  "client_documentation": "https://godoc.org/cloud.google.com/go/compute/metadata",
-  "release_level": "ga",
-  "language": "go",
-  "repo": "googleapis/google-cloud-go",
-  "distribution_name": "cloud.google.com/go/compute/metadata",
-  "api_id": "compute:metadata",
-  "requires_billing": false
-}
vendor/cloud.google.com/go/compute/metadata/metadata.go (generated, vendored; 41 changed lines)
@@ -61,25 +61,14 @@ var (
 	instID = &cachedValue{k: "instance/id", trim: true}
 )
 
-var (
-	defaultClient = &Client{hc: &http.Client{
-		Transport: &http.Transport{
-			Dial: (&net.Dialer{
-				Timeout:   2 * time.Second,
-				KeepAlive: 30 * time.Second,
-			}).Dial,
-			ResponseHeaderTimeout: 2 * time.Second,
-		},
-	}}
-	subscribeClient = &Client{hc: &http.Client{
-		Transport: &http.Transport{
-			Dial: (&net.Dialer{
-				Timeout:   2 * time.Second,
-				KeepAlive: 30 * time.Second,
-			}).Dial,
-		},
-	}}
-)
+var defaultClient = &Client{hc: &http.Client{
+	Transport: &http.Transport{
+		Dial: (&net.Dialer{
+			Timeout:   2 * time.Second,
+			KeepAlive: 30 * time.Second,
+		}).Dial,
+	},
+}}
 
 // NotDefinedError is returned when requested metadata is not defined.
 //
@@ -206,10 +195,9 @@ func systemInfoSuggestsGCE() bool {
 	return name == "Google" || name == "Google Compute Engine"
 }
 
-// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no
-// ResponseHeaderTimeout).
+// Subscribe calls Client.Subscribe on the default client.
 func Subscribe(suffix string, fn func(v string, ok bool) error) error {
-	return subscribeClient.Subscribe(suffix, fn)
+	return defaultClient.Subscribe(suffix, fn)
 }
 
 // Get calls Client.Get on the default client.
@@ -280,9 +268,14 @@ type Client struct {
 	hc *http.Client
 }
 
-// NewClient returns a Client that can be used to fetch metadata. All HTTP requests
-// will use the given http.Client instead of the default client.
+// NewClient returns a Client that can be used to fetch metadata.
+// Returns the client that uses the specified http.Client for HTTP requests.
+// If nil is specified, returns the default client.
 func NewClient(c *http.Client) *Client {
+	if c == nil {
+		return defaultClient
+	}
+
 	return &Client{hc: c}
 }
 
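With the hunks above, the vendored metadata package now treats a nil *http.Client passed to NewClient as a request for the shared default client, and the dedicated subscribeClient is gone. A minimal caller-side sketch of that behavior; the InstanceID call and the GCE-only availability are assumptions about the package, not something this diff introduces:

package main

import (
	"fmt"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// nil now falls back to the package-level defaultClient instead of
	// wrapping a nil http.Client.
	c := metadata.NewClient(nil)

	// Only succeeds when running on GCE; elsewhere an error is returned.
	id, err := c.InstanceID()
	if err != nil {
		fmt.Println("metadata unavailable:", err)
		return
	}
	fmt.Println("instance id:", id)
}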
vendor/github.com/Microsoft/go-winio/pipe.go (generated, vendored; 17 changed lines)
@@ -182,13 +182,14 @@ func (s pipeAddress) String() string {
 }
 
 // tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout.
-func tryDialPipe(ctx context.Context, path *string) (syscall.Handle, error) {
+func tryDialPipe(ctx context.Context, path *string, access uint32) (syscall.Handle, error) {
 	for {
+
 		select {
 		case <-ctx.Done():
 			return syscall.Handle(0), ctx.Err()
 		default:
-			h, err := createFile(*path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
+			h, err := createFile(*path, access, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
 			if err == nil {
 				return h, nil
 			}
@@ -197,7 +198,7 @@ func tryDialPipe(ctx context.Context, path *string) (syscall.Handle, error) {
 			}
 			// Wait 10 msec and try again. This is a rather simplistic
 			// view, as we always try each 10 milliseconds.
-			time.Sleep(time.Millisecond * 10)
+			time.Sleep(10 * time.Millisecond)
 		}
 	}
 }
@@ -210,7 +211,7 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
 	if timeout != nil {
 		absTimeout = time.Now().Add(*timeout)
 	} else {
-		absTimeout = time.Now().Add(time.Second * 2)
+		absTimeout = time.Now().Add(2 * time.Second)
 	}
 	ctx, _ := context.WithDeadline(context.Background(), absTimeout)
 	conn, err := DialPipeContext(ctx, path)
@@ -223,9 +224,15 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
 // DialPipeContext attempts to connect to a named pipe by `path` until `ctx`
 // cancellation or timeout.
 func DialPipeContext(ctx context.Context, path string) (net.Conn, error) {
+	return DialPipeAccess(ctx, path, syscall.GENERIC_READ|syscall.GENERIC_WRITE)
+}
+
+// DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx`
+// cancellation or timeout.
+func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) {
 	var err error
 	var h syscall.Handle
-	h, err = tryDialPipe(ctx, &path)
+	h, err = tryDialPipe(ctx, &path, access)
 	if err != nil {
 		return nil, err
 	}
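The new DialPipeAccess entry point above lets a caller choose the access mask explicitly, while DialPipeContext keeps its historical read/write behavior by delegating to it. A hedged, Windows-only sketch of calling the vendored go-winio package; the pipe name is a placeholder:

package main

import (
	"context"
	"log"
	"syscall"
	"time"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	// Named pipes and go-winio are Windows-only constructs.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// Equivalent to DialPipeContext: request both read and write access.
	rw, err := winio.DialPipeAccess(ctx, `\\.\pipe\example`, syscall.GENERIC_READ|syscall.GENERIC_WRITE)
	if err != nil {
		log.Fatal(err)
	}
	defer rw.Close()

	// Write-only connection, which the old API could not express.
	wo, err := winio.DialPipeAccess(ctx, `\\.\pipe\example`, syscall.GENERIC_WRITE)
	if err != nil {
		log.Fatal(err)
	}
	defer wo.Close()
}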
vendor/github.com/Microsoft/hcsshim/appveyor.yml (generated, vendored; 2 added lines)
@@ -18,6 +18,7 @@ build_script:
 - go build ./cmd/runhcs
 - go build ./cmd/tar2ext4
 - go build ./cmd/wclayer
+- go build ./cmd/device-util
 - go build ./internal/tools/grantvmgroupaccess
 - go build ./internal/tools/uvmboot
 - go build ./internal/tools/zapdir
@@ -33,6 +34,7 @@ artifacts:
 - path: 'containerd-shim-runhcs-v1.exe'
 - path: 'runhcs.exe'
 - path: 'tar2ext4.exe'
+- path: 'device-util.exe'
 - path: 'wclayer.exe'
 - path: 'grantvmgroupaccess.exe'
 - path: 'uvmboot.exe'
vendor/github.com/Microsoft/hcsshim/go.mod (generated, vendored; 12 changed lines)
@@ -3,8 +3,8 @@ module github.com/Microsoft/hcsshim
 go 1.13
 
 require (
-	github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5
-	github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f
+	github.com/Microsoft/go-winio v0.4.15-0.20200908182639-5b44b70ab3ab
+	github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59
 	github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1
 	github.com/containerd/containerd v1.3.2
 	github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc // indirect
@@ -17,16 +17,16 @@ require (
 	github.com/kr/pretty v0.1.0 // indirect
 	github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2 // indirect
 	github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f // indirect
-	github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700
-	github.com/pkg/errors v0.8.1
+	github.com/opencontainers/runtime-spec v1.0.2
+	github.com/pkg/errors v0.9.1
 	github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7 // indirect
 	github.com/sirupsen/logrus v1.4.2
 	github.com/stretchr/testify v1.4.0 // indirect
-	github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5
+	github.com/urfave/cli v1.22.2
 	go.opencensus.io v0.22.0
 	golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 // indirect
 	golang.org/x/sync v0.0.0-20190423024810-112230192c58
-	golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3
+	golang.org/x/sys v0.0.0-20200120151820-655fe14d7479
 	google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 // indirect
 	google.golang.org/grpc v1.23.1
 	gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
vendor/github.com/Microsoft/hcsshim/go.sum (generated, vendored; 42 changed lines)
@@ -1,11 +1,12 @@
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6trr62pF5DucadTWGdEB4mEyvzi0e2nbcmcyA=
-github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
+github.com/Microsoft/go-winio v0.4.15-0.20200908182639-5b44b70ab3ab h1:9pygWVFqbY9lPxM0peffumuVDyMuIMzNLyO9uFjJuQo=
+github.com/Microsoft/go-winio v0.4.15-0.20200908182639-5b44b70ab3ab/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
+github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f h1:tSNMc+rJDfmYntojat8lljbt1mgKNpTxUZJsSzJ9Y1s=
-github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
+github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59 h1:qWj4qVYZ95vLWwqyNJCQg7rDsG5wPdze0UaPolH7DUk=
+github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
 github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1 h1:uict5mhHFTzKLUCufdSLym7z/J0CbBJT59lYbP9wtbg=
 github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
 github.com/containerd/containerd v1.3.2 h1:ForxmXkA6tPIvffbrDAcPUIB32QgXkt2XFj+F0UxetA=
@@ -20,17 +21,16 @@ github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de h1:dlfGmNcE3jDAec
 github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
 github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd h1:JNn81o/xG+8NEo3bC/vx9pbi/g2WI8mtP2/nXzu297Y=
 github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
 github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8=
-github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
-github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
 github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
@@ -47,11 +47,12 @@ github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
 github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -61,14 +62,20 @@ github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2 h1:QhPf3A
 github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f h1:a969LJ4IQFwRHYqonHtUDMSh9i54WcKggeEkQ3fZMl4=
 github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700 h1:eNUVfm/RFLIi1G7flU5/ZRTHvd4kcVuzfRnL6OFlzCI=
-github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0=
+github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7 h1:hhvfGDVThBnd4kYisSFmYuHYeUhglxcwag7FhVPH9zM=
 github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
 github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k=
 github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
 github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
@@ -79,8 +86,8 @@ github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5 h1:MCfT24H3f//U5+UCrZp1/riVO3B50BovxtDiNn0XKkk=
-github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo=
+github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
 go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
 go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -109,14 +116,15 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3 h1:7TYNF4UdlohbFwpNH04CoPMp1cHUZgO1Ebq5r2hIjfo=
 golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200120151820-655fe14d7479 h1:LhLiKguPgZL+Tglay4GhVtfF0kb8cvOJ0dHTCBO8YNI=
+golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go (generated, vendored; new file, 49 lines)
@@ -0,0 +1,49 @@
+package hcs
+
+import (
+	"context"
+	"encoding/json"
+
+	hcsschema "github.com/Microsoft/hcsshim/internal/schema2"
+	"github.com/Microsoft/hcsshim/internal/vmcompute"
+)
+
+// GetServiceProperties returns properties of the host compute service.
+func GetServiceProperties(ctx context.Context, q hcsschema.PropertyQuery) (*hcsschema.ServiceProperties, error) {
+	operation := "hcsshim::GetServiceProperties"
+
+	queryb, err := json.Marshal(q)
+	if err != nil {
+		return nil, err
+	}
+	propertiesJSON, resultJSON, err := vmcompute.HcsGetServiceProperties(ctx, string(queryb))
+	events := processHcsResult(ctx, resultJSON)
+	if err != nil {
+		return nil, &HcsError{Op: operation, Err: err, Events: events}
+	}
+
+	if propertiesJSON == "" {
+		return nil, ErrUnexpectedValue
+	}
+	properties := &hcsschema.ServiceProperties{}
+	if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil {
+		return nil, err
+	}
+	return properties, nil
+}
+
+// ModifyServiceSettings modifies settings of the host compute service.
+func ModifyServiceSettings(ctx context.Context, settings hcsschema.ModificationRequest) error {
+	operation := "hcsshim::ModifyServiceSettings"
+
+	settingsJSON, err := json.Marshal(settings)
+	if err != nil {
+		return err
+	}
+	resultJSON, err := vmcompute.HcsModifyServiceSettings(ctx, string(settingsJSON))
+	events := processHcsResult(ctx, resultJSON)
+	if err != nil {
+		return &HcsError{Op: operation, Err: err, Events: events}
+	}
+	return nil
+}
vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go (generated, vendored; 3 changed lines)
@@ -28,8 +28,7 @@ type System struct {
 	waitBlock chan struct{}
 	waitError error
 	exitError error
-
-	os, typ string
+	os, typ string
 }
 
 func newSystem(id string) *System {
vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go (generated, vendored; 3 added lines)
@@ -17,12 +17,15 @@ type HNSEndpoint struct {
 	Policies []json.RawMessage `json:",omitempty"`
 	MacAddress string `json:",omitempty"`
 	IPAddress net.IP `json:",omitempty"`
+	IPv6Address net.IP `json:",omitempty"`
 	DNSSuffix string `json:",omitempty"`
 	DNSServerList string `json:",omitempty"`
 	GatewayAddress string `json:",omitempty"`
+	GatewayAddressV6 string `json:",omitempty"`
 	EnableInternalDNS bool `json:",omitempty"`
 	DisableICC bool `json:",omitempty"`
 	PrefixLength uint8 `json:",omitempty"`
+	IPv6PrefixLength uint8 `json:",omitempty"`
 	IsRemoteEndpoint bool `json:",omitempty"`
 	EnableLowMetric bool `json:",omitempty"`
 	Namespace *Namespace `json:",omitempty"`
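The three added fields give HNSEndpoint IPv6 counterparts to the existing IPv4 address, prefix, and gateway fields. A hedged sketch of populating them from code inside the hcsshim module (the addresses are placeholders; only fields visible in the hunk above are used):

package hnsexample // hypothetical helper package inside the hcsshim module

import (
	"encoding/json"
	"fmt"
	"net"

	"github.com/Microsoft/hcsshim/internal/hns"
)

// DualStackEndpointJSON shows how the new IPv6 fields marshal next to the IPv4 ones.
func DualStackEndpointJSON() ([]byte, error) {
	ep := hns.HNSEndpoint{
		IPAddress:        net.ParseIP("10.0.0.10"),
		PrefixLength:     24,
		GatewayAddress:   "10.0.0.1",
		IPv6Address:      net.ParseIP("fd00::10"), // added field
		IPv6PrefixLength: 64,                      // added field
		GatewayAddressV6: "fd00::1",               // added field
	}
	b, err := json.Marshal(ep)
	if err != nil {
		return nil, err
	}
	fmt.Println(string(b)) // unset fields are dropped by the ,omitempty tags
	return b, nil
}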
vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go (generated, vendored; 158 changed lines)
@@ -11,72 +11,11 @@ import (
 	"unsafe"
 
 	"github.com/Microsoft/hcsshim/internal/longpath"
+	"github.com/Microsoft/hcsshim/internal/winapi"
 
 	winio "github.com/Microsoft/go-winio"
 )
 
-//go:generate go run $GOROOT\src\syscall\mksyscall_windows.go -output zsyscall_windows.go safeopen.go
-
-//sys ntCreateFile(handle *uintptr, accessMask uint32, oa *objectAttributes, iosb *ioStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) = ntdll.NtCreateFile
-//sys ntSetInformationFile(handle uintptr, iosb *ioStatusBlock, information uintptr, length uint32, class uint32) (status uint32) = ntdll.NtSetInformationFile
-//sys rtlNtStatusToDosError(status uint32) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb
-//sys localAlloc(flags uint32, size int) (ptr uintptr) = kernel32.LocalAlloc
-//sys localFree(ptr uintptr) = kernel32.LocalFree
-
-type ioStatusBlock struct {
-	Status, Information uintptr
-}
-
-type objectAttributes struct {
-	Length             uintptr
-	RootDirectory      uintptr
-	ObjectName         uintptr
-	Attributes         uintptr
-	SecurityDescriptor uintptr
-	SecurityQoS        uintptr
-}
-
-type unicodeString struct {
-	Length        uint16
-	MaximumLength uint16
-	Buffer        uintptr
-}
-
-type fileLinkInformation struct {
-	ReplaceIfExists bool
-	RootDirectory   uintptr
-	FileNameLength  uint32
-	FileName        [1]uint16
-}
-
-type fileDispositionInformationEx struct {
-	Flags uintptr
-}
-
-const (
-	_FileLinkInformation          = 11
-	_FileDispositionInformationEx = 64
-
-	FILE_READ_ATTRIBUTES  = 0x0080
-	FILE_WRITE_ATTRIBUTES = 0x0100
-	DELETE                = 0x10000
-
-	FILE_OPEN   = 1
-	FILE_CREATE = 2
-
-	FILE_DIRECTORY_FILE          = 0x00000001
-	FILE_SYNCHRONOUS_IO_NONALERT = 0x00000020
-	FILE_DELETE_ON_CLOSE         = 0x00001000
-	FILE_OPEN_FOR_BACKUP_INTENT  = 0x00004000
-	FILE_OPEN_REPARSE_POINT      = 0x00200000
-
-	FILE_DISPOSITION_DELETE = 0x00000001
-
-	_OBJ_DONT_REPARSE = 0x1000
-
-	_STATUS_REPARSE_POINT_ENCOUNTERED = 0xC000050B
-)
-
 func OpenRoot(path string) (*os.File, error) {
 	longpath, err := longpath.LongAbs(path)
 	if err != nil {
@@ -85,16 +24,24 @@ func OpenRoot(path string) (*os.File, error) {
 	return winio.OpenForBackup(longpath, syscall.GENERIC_READ, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, syscall.OPEN_EXISTING)
 }
 
-func ntRelativePath(path string) ([]uint16, error) {
+func cleanGoStringRelativePath(path string) (string, error) {
 	path = filepath.Clean(path)
 	if strings.Contains(path, ":") {
 		// Since alternate data streams must follow the file they
 		// are attached to, finding one here (out of order) is invalid.
-		return nil, errors.New("path contains invalid character `:`")
+		return "", errors.New("path contains invalid character `:`")
 	}
 	fspath := filepath.FromSlash(path)
 	if len(fspath) > 0 && fspath[0] == '\\' {
-		return nil, errors.New("expected relative path")
+		return "", errors.New("expected relative path")
+	}
+	return fspath, nil
+}
+
+func ntRelativePath(path string) ([]uint16, error) {
+	fspath, err := cleanGoStringRelativePath(path)
+	if err != nil {
+		return nil, err
 	}
 
 	path16 := utf16.Encode(([]rune)(fspath))
@@ -110,11 +57,11 @@ func ntRelativePath(path string) ([]uint16, error) {
 func openRelativeInternal(path string, root *os.File, accessMask uint32, shareFlags uint32, createDisposition uint32, flags uint32) (*os.File, error) {
 	var (
 		h    uintptr
-		iosb ioStatusBlock
-		oa   objectAttributes
+		iosb winapi.IOStatusBlock
+		oa   winapi.ObjectAttributes
 	)
 
-	path16, err := ntRelativePath(path)
+	cleanRelativePath, err := cleanGoStringRelativePath(path)
 	if err != nil {
 		return nil, err
 	}
@@ -123,20 +70,16 @@ func openRelativeInternal(path string, root *os.File, accessMask uint32, shareFl
 		return nil, errors.New("missing root directory")
 	}
 
-	upathBuffer := localAlloc(0, int(unsafe.Sizeof(unicodeString{}))+len(path16)*2)
-	defer localFree(upathBuffer)
-
-	upath := (*unicodeString)(unsafe.Pointer(upathBuffer))
-	upath.Length = uint16(len(path16) * 2)
-	upath.MaximumLength = upath.Length
-	upath.Buffer = upathBuffer + unsafe.Sizeof(*upath)
-	copy((*[32768]uint16)(unsafe.Pointer(upath.Buffer))[:], path16)
+	pathUnicode, err := winapi.NewUnicodeString(cleanRelativePath)
+	if err != nil {
+		return nil, err
+	}
 
 	oa.Length = unsafe.Sizeof(oa)
-	oa.ObjectName = upathBuffer
+	oa.ObjectName = uintptr(unsafe.Pointer(pathUnicode))
 	oa.RootDirectory = uintptr(root.Fd())
-	oa.Attributes = _OBJ_DONT_REPARSE
-	status := ntCreateFile(
+	oa.Attributes = winapi.OBJ_DONT_REPARSE
+	status := winapi.NtCreateFile(
 		&h,
 		accessMask|syscall.SYNCHRONIZE,
 		&oa,
@@ -145,12 +88,12 @@ func openRelativeInternal(path string, root *os.File, accessMask uint32, shareFl
 		0,
 		shareFlags,
 		createDisposition,
-		FILE_OPEN_FOR_BACKUP_INTENT|FILE_SYNCHRONOUS_IO_NONALERT|flags,
+		winapi.FILE_OPEN_FOR_BACKUP_INTENT|winapi.FILE_SYNCHRONOUS_IO_NONALERT|flags,
 		nil,
 		0,
 	)
 	if status != 0 {
-		return nil, rtlNtStatusToDosError(status)
+		return nil, winapi.RtlNtStatusToDosError(status)
 	}
 
 	fullPath, err := longpath.LongAbs(filepath.Join(root.Name(), path))
@@ -182,7 +125,7 @@ func LinkRelative(oldname string, oldroot *os.File, newname string, newroot *os.
 		oldroot,
 		syscall.FILE_WRITE_ATTRIBUTES,
 		syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
-		FILE_OPEN,
+		winapi.FILE_OPEN,
 		0,
 	)
 	if err != nil {
@@ -199,8 +142,8 @@ func LinkRelative(oldname string, oldroot *os.File, newname string, newroot *os.
 		newroot,
 		syscall.GENERIC_READ,
 		syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
-		FILE_OPEN,
-		FILE_DIRECTORY_FILE)
+		winapi.FILE_OPEN,
+		winapi.FILE_DIRECTORY_FILE)
 	if err != nil {
 		return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(newroot.Name(), newname), Err: err}
 	}
@@ -211,7 +154,7 @@ func LinkRelative(oldname string, oldroot *os.File, newname string, newroot *os.
 			return err
 		}
 		if (fi.FileAttributes & syscall.FILE_ATTRIBUTE_REPARSE_POINT) != 0 {
-			return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(newroot.Name(), newname), Err: rtlNtStatusToDosError(_STATUS_REPARSE_POINT_ENCOUNTERED)}
+			return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(newroot.Name(), newname), Err: winapi.RtlNtStatusToDosError(winapi.STATUS_REPARSE_POINT_ENCOUNTERED)}
 		}
 
 	} else {
@@ -227,24 +170,25 @@ func LinkRelative(oldname string, oldroot *os.File, newname string, newroot *os.
 			return err
 		}
 
-		size := int(unsafe.Offsetof(fileLinkInformation{}.FileName)) + len(newbase16)*2
-		linkinfoBuffer := localAlloc(0, size)
-		defer localFree(linkinfoBuffer)
-		linkinfo := (*fileLinkInformation)(unsafe.Pointer(linkinfoBuffer))
+		size := int(unsafe.Offsetof(winapi.FileLinkInformation{}.FileName)) + len(newbase16)*2
+		linkinfoBuffer := winapi.LocalAlloc(0, size)
+		defer winapi.LocalFree(linkinfoBuffer)
+
+		linkinfo := (*winapi.FileLinkInformation)(unsafe.Pointer(linkinfoBuffer))
 		linkinfo.RootDirectory = parent.Fd()
 		linkinfo.FileNameLength = uint32(len(newbase16) * 2)
 		copy((*[32768]uint16)(unsafe.Pointer(&linkinfo.FileName[0]))[:], newbase16)
 
-		var iosb ioStatusBlock
-		status := ntSetInformationFile(
+		var iosb winapi.IOStatusBlock
+		status := winapi.NtSetInformationFile(
 			oldf.Fd(),
 			&iosb,
 			linkinfoBuffer,
 			uint32(size),
-			_FileLinkInformation,
+			winapi.FileLinkInformationClass,
 		)
 		if status != 0 {
-			return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(parent.Name(), newbase), Err: rtlNtStatusToDosError(status)}
+			return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(parent.Name(), newbase), Err: winapi.RtlNtStatusToDosError(status)}
 		}
 
 		return nil
@@ -252,17 +196,17 @@ func LinkRelative(oldname string, oldroot *os.File, newname string, newroot *os.
 
 // deleteOnClose marks a file to be deleted when the handle is closed.
 func deleteOnClose(f *os.File) error {
-	disposition := fileDispositionInformationEx{Flags: FILE_DISPOSITION_DELETE}
-	var iosb ioStatusBlock
-	status := ntSetInformationFile(
+	disposition := winapi.FileDispositionInformationEx{Flags: winapi.FILE_DISPOSITION_DELETE}
+	var iosb winapi.IOStatusBlock
+	status := winapi.NtSetInformationFile(
 		f.Fd(),
 		&iosb,
 		uintptr(unsafe.Pointer(&disposition)),
 		uint32(unsafe.Sizeof(disposition)),
-		_FileDispositionInformationEx,
+		winapi.FileDispositionInformationExClass,
 	)
 	if status != 0 {
-		return rtlNtStatusToDosError(status)
+		return winapi.RtlNtStatusToDosError(status)
 	}
 	return nil
 }
@@ -291,10 +235,10 @@ func RemoveRelative(path string, root *os.File) error {
 	f, err := openRelativeInternal(
 		path,
 		root,
-		FILE_READ_ATTRIBUTES|FILE_WRITE_ATTRIBUTES|DELETE,
+		winapi.FILE_READ_ATTRIBUTES|winapi.FILE_WRITE_ATTRIBUTES|winapi.DELETE,
 		syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
-		FILE_OPEN,
-		FILE_OPEN_REPARSE_POINT)
+		winapi.FILE_OPEN,
+		winapi.FILE_OPEN_REPARSE_POINT)
 	if err == nil {
 		defer f.Close()
 		err = deleteOnClose(f)
@@ -385,8 +329,8 @@ func MkdirRelative(path string, root *os.File) error {
 		root,
 		0,
 		syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
-		FILE_CREATE,
-		FILE_DIRECTORY_FILE)
+		winapi.FILE_CREATE,
+		winapi.FILE_DIRECTORY_FILE)
 	if err == nil {
 		f.Close()
 	} else {
@@ -401,10 +345,10 @@ func LstatRelative(path string, root *os.File) (os.FileInfo, error) {
 	f, err := openRelativeInternal(
 		path,
 		root,
-		FILE_READ_ATTRIBUTES,
+		winapi.FILE_READ_ATTRIBUTES,
 		syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
-		FILE_OPEN,
-		FILE_OPEN_REPARSE_POINT)
+		winapi.FILE_OPEN,
+		winapi.FILE_OPEN_REPARSE_POINT)
 	if err != nil {
 		return nil, &os.PathError{Op: "stat", Path: filepath.Join(root.Name(), path), Err: err}
 	}
@@ -421,7 +365,7 @@ func EnsureNotReparsePointRelative(path string, root *os.File) error {
 		root,
 		0,
 		syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
-		FILE_OPEN,
+		winapi.FILE_OPEN,
 		0)
 	if err != nil {
 		return err
vendor/github.com/Microsoft/hcsshim/internal/safefile/zsyscall_windows.go (generated, vendored; 79 lines deleted)
@@ -1,79 +0,0 @@
-// Code generated by 'go generate'; DO NOT EDIT.
-
-package safefile
-
-import (
-	"syscall"
-	"unsafe"
-
-	"golang.org/x/sys/windows"
-)
-
-var _ unsafe.Pointer
-
-// Do the interface allocations only once for common
-// Errno values.
-const (
-	errnoERROR_IO_PENDING = 997
-)
-
-var (
-	errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
-)
-
-// errnoErr returns common boxed Errno values, to prevent
-// allocations at runtime.
-func errnoErr(e syscall.Errno) error {
-	switch e {
-	case 0:
-		return nil
-	case errnoERROR_IO_PENDING:
-		return errERROR_IO_PENDING
-	}
-	// TODO: add more here, after collecting data on the common
-	// error values see on Windows. (perhaps when running
-	// all.bat?)
-	return e
-}
-
-var (
-	modntdll    = windows.NewLazySystemDLL("ntdll.dll")
-	modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
-
-	procNtCreateFile               = modntdll.NewProc("NtCreateFile")
-	procNtSetInformationFile       = modntdll.NewProc("NtSetInformationFile")
-	procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
-	procLocalAlloc                 = modkernel32.NewProc("LocalAlloc")
-	procLocalFree                  = modkernel32.NewProc("LocalFree")
-)
-
-func ntCreateFile(handle *uintptr, accessMask uint32, oa *objectAttributes, iosb *ioStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) {
-	r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(fileAttributes), uintptr(shareAccess), uintptr(createDisposition), uintptr(createOptions), uintptr(unsafe.Pointer(eaBuffer)), uintptr(eaLength), 0)
-	status = uint32(r0)
-	return
-}
-
-func ntSetInformationFile(handle uintptr, iosb *ioStatusBlock, information uintptr, length uint32, class uint32) (status uint32) {
-	r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(information), uintptr(length), uintptr(class), 0)
-	status = uint32(r0)
-	return
-}
-
-func rtlNtStatusToDosError(status uint32) (winerr error) {
-	r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0)
-	if r0 != 0 {
-		winerr = syscall.Errno(r0)
-	}
-	return
-}
-
-func localAlloc(flags uint32, size int) (ptr uintptr) {
-	r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(size), 0)
-	ptr = uintptr(r0)
-	return
-}
-
-func localFree(ptr uintptr) {
-	syscall.Syscall(procLocalFree.Addr(), 1, uintptr(ptr), 0, 0)
-	return
-}
16 vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_add_instance_request.go generated vendored Normal file
@@ -0,0 +1,16 @@
/*
 * HCS API
 *
 * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
 *
 * API version: 2.4
 * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
 */

package hcsschema

type ContainerCredentialGuardAddInstanceRequest struct {
    Id             string `json:"Id,omitempty"`
    CredentialSpec string `json:"CredentialSpec,omitempty"`
    Transport      string `json:"Transport,omitempty"`
}
15 vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_hv_socket_service_config.go generated vendored Normal file
@@ -0,0 +1,15 @@
/*
 * HCS API
 *
 * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
 *
 * API version: 2.4
 * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
 */

package hcsschema

type ContainerCredentialGuardHvSocketServiceConfig struct {
    ServiceId     string                 `json:"ServiceId,omitempty"`
    ServiceConfig *HvSocketServiceConfig `json:"ServiceConfig,omitempty"`
}
16 vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_instance.go generated vendored Normal file
@@ -0,0 +1,16 @@
/*
 * HCS API
 *
 * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
 *
 * API version: 2.4
 * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
 */

package hcsschema

type ContainerCredentialGuardInstance struct {
    Id              string                                         `json:"Id,omitempty"`
    CredentialGuard *ContainerCredentialGuardState                 `json:"CredentialGuard,omitempty"`
    HvSocketConfig  *ContainerCredentialGuardHvSocketServiceConfig `json:"HvSocketConfig,omitempty"`
}
17 vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_modify_operation.go generated vendored Normal file
@@ -0,0 +1,17 @@
/*
 * HCS API
 *
 * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
 *
 * API version: 2.4
 * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
 */

package hcsschema

type ContainerCredentialGuardModifyOperation string

const (
    AddInstance    ContainerCredentialGuardModifyOperation = "AddInstance"
    RemoveInstance ContainerCredentialGuardModifyOperation = "RemoveInstance"
)
15 vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_operation_request.go generated vendored Normal file
@@ -0,0 +1,15 @@
/*
 * HCS API
 *
 * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
 *
 * API version: 2.4
 * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
 */

package hcsschema

type ContainerCredentialGuardOperationRequest struct {
    Operation        ContainerCredentialGuardModifyOperation `json:"Operation,omitempty"`
    OperationDetails interface{}                             `json:"OperationDetails,omitempty"`
}
14 vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_remove_instance_request.go generated vendored Normal file
@@ -0,0 +1,14 @@
/*
 * HCS API
 *
 * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
 *
 * API version: 2.4
 * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
 */

package hcsschema

type ContainerCredentialGuardRemoveInstanceRequest struct {
    Id string `json:"Id,omitempty"`
}
14 vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_system_info.go generated vendored Normal file
@@ -0,0 +1,14 @@
/*
 * HCS API
 *
 * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
 *
 * API version: 2.4
 * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
 */

package hcsschema

type ContainerCredentialGuardSystemInfo struct {
    Instances []ContainerCredentialGuardInstance `json:"Instances,omitempty"`
}
15 vendor/github.com/Microsoft/hcsshim/internal/schema2/device.go generated vendored
@@ -9,8 +9,19 @@

 package hcsschema

-type Device struct {
-    // The interface class guid of the device to assign to container.
+type DeviceType string
+
+const (
+    ClassGUID      DeviceType = "ClassGuid"
+    DeviceInstance            = "DeviceInstance"
+    GPUMirror                 = "GpuMirror"
+)
+
+type Device struct {
+    // The type of device to assign to the container.
+    Type DeviceType `json:"Type,omitempty"`
+    // The interface class guid of the device interfaces to assign to the container. Only used when Type is ClassGuid.
     InterfaceClassGuid string `json:"InterfaceClassGuid,omitempty"`
+    // The location path of the device to assign to the container. Only used when Type is DeviceInstance.
+    LocationPath string `json:"LocationPath,omitempty"`
 }
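For illustration only, a minimal standalone sketch of how the new Type and LocationPath fields serialize. It declares a local mirror of the Device struct, since internal/schema2 cannot be imported from outside hcsshim, and the location path value is a made-up placeholder.

package main

import (
    "encoding/json"
    "fmt"
)

// Device mirrors the fields added in the diff above (illustrative copy only).
type Device struct {
    Type               string `json:"Type,omitempty"`
    InterfaceClassGuid string `json:"InterfaceClassGuid,omitempty"`
    LocationPath       string `json:"LocationPath,omitempty"`
}

func main() {
    // Assign a device by instance location path rather than by interface class GUID.
    d := Device{Type: "DeviceInstance", LocationPath: `PCIROOT(0)#PCI(0300)`}
    b, _ := json.Marshal(d)
    fmt.Println(string(b)) // {"Type":"DeviceInstance","LocationPath":"PCIROOT(0)#PCI(0300)"}
}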
17 vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_address.go generated vendored Normal file
@@ -0,0 +1,17 @@
/*
 * HCS API
 *
 * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
 *
 * API version: 2.4
 * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
 */

package hcsschema

// This class defines address settings applied to a VM
// by the GCS every time a VM starts or restores.
type HvSocketAddress struct {
    LocalAddress  string `json:"LocalAddress,omitempty"`
    ParentAddress string `json:"ParentAddress,omitempty"`
}
18 vendor/github.com/Microsoft/hcsshim/internal/schema2/logical_processor.go generated vendored Normal file
@@ -0,0 +1,18 @@
/*
 * HCS API
 *
 * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
 *
 * API version: 2.4
 * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
 */

package hcsschema

type LogicalProcessor struct {
    LpIndex     uint32 `json:"LpIndex,omitempty"`
    NodeNumber  uint8  `json:"NodeNumber, omitempty"`
    PackageId   uint32 `json:"PackageId, omitempty"`
    CoreId      uint32 `json:"CoreId, omitempty"`
    RootVpIndex int32  `json:"RootVpIndex, omitempty"`
}
2 vendor/github.com/Microsoft/hcsshim/internal/schema2/memory.go generated vendored
@@ -10,5 +10,5 @@
 package hcsschema

 type Memory struct {
-    SizeInMB int32 `json:"SizeInMB,omitempty"`
+    SizeInMB uint64 `json:"SizeInMB,omitempty"`
 }
2 vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_2.go generated vendored
@@ -10,7 +10,7 @@
 package hcsschema

 type Memory2 struct {
-    SizeInMB int32 `json:"SizeInMB,omitempty"`
+    SizeInMB uint64 `json:"SizeInMB,omitempty"`

     AllowOvercommit bool `json:"AllowOvercommit,omitempty"`

15 vendor/github.com/Microsoft/hcsshim/internal/schema2/modification_request.go generated vendored Normal file
@@ -0,0 +1,15 @@
/*
 * HCS API
 *
 * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
 *
 * API version: 2.4
 * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
 */

package hcsschema

type ModificationRequest struct {
    PropertyType PropertyType `json:"PropertyType,omitempty"`
    Settings     interface{}  `json:"Settings,omitempty"`
}
15 vendor/github.com/Microsoft/hcsshim/internal/schema2/processor_topology.go generated vendored Normal file
@@ -0,0 +1,15 @@
/*
 * HCS API
 *
 * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
 *
 * API version: 2.4
 * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
 */

package hcsschema

type ProcessorTopology struct {
    LogicalProcessorCount uint32             `json:"LogicalProcessorCount,omitempty"`
    LogicalProcessors     []LogicalProcessor `json:"LogicalProcessors,omitempty"`
}
2 vendor/github.com/Microsoft/hcsshim/internal/schema2/property_type.go generated vendored
@@ -18,6 +18,8 @@ const (
     PTProcessList                 PropertyType = "ProcessList"
     PTTerminateOnLastHandleClosed PropertyType = "TerminateOnLastHandleClosed"
     PTSharedMemoryRegion          PropertyType = "SharedMemoryRegion"
+    PTContainerCredentialGuard    PropertyType = "ContainerCredentialGuard" // This field is not generated by swagger. This was added manually.
     PTGuestConnection             PropertyType = "GuestConnection"
     PTICHeartbeatStatus           PropertyType = "ICHeartbeatStatus"
+    PTProcessorTopology           PropertyType = "ProcessorTopology"
 )
18 vendor/github.com/Microsoft/hcsshim/internal/schema2/service_properties.go generated vendored Normal file
@@ -0,0 +1,18 @@
/*
 * HCS API
 *
 * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
 *
 * API version: 2.4
 * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
 */

package hcsschema

import "encoding/json"

type ServiceProperties struct {
    // Changed Properties field to []json.RawMessage from []interface{} to avoid having to
    // remarshal sp.Properties[n] and unmarshal into the type(s) we want.
    Properties []json.RawMessage `json:"Properties,omitempty"`
}
22 vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go generated vendored
@@ -26,6 +26,7 @@ import (
 //sys hcsResumeComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsResumeComputeSystem?
 //sys hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetComputeSystemProperties?
 //sys hcsModifyComputeSystem(computeSystem HcsSystem, configuration string, result **uint16) (hr error) = vmcompute.HcsModifyComputeSystem?
+//sys hcsModifyServiceSettings(settings string, result **uint16) (hr error) = vmcompute.HcsModifyServiceSettings?
 //sys hcsRegisterComputeSystemCallback(computeSystem HcsSystem, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) = vmcompute.HcsRegisterComputeSystemCallback?
 //sys hcsUnregisterComputeSystemCallback(callbackHandle HcsCallback) (hr error) = vmcompute.HcsUnregisterComputeSystemCallback?

@@ -337,6 +338,27 @@ func HcsModifyComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, confi
     })
 }

+func HcsModifyServiceSettings(ctx gcontext.Context, settings string) (result string, hr error) {
+    ctx, span := trace.StartSpan(ctx, "HcsModifyServiceSettings")
+    defer span.End()
+    defer func() {
+        if result != "" {
+            span.AddAttributes(trace.StringAttribute("result", result))
+        }
+        oc.SetSpanStatus(span, hr)
+    }()
+    span.AddAttributes(trace.StringAttribute("settings", settings))
+
+    return result, execute(ctx, timeout.SyscallWatcher, func() error {
+        var resultp *uint16
+        err := hcsModifyServiceSettings(settings, &resultp)
+        if resultp != nil {
+            result = interop.ConvertAndFreeCoTaskMemString(resultp)
+        }
+        return err
+    })
+}
+
 func HcsRegisterComputeSystemCallback(ctx gcontext.Context, computeSystem HcsSystem, callback uintptr, context uintptr) (callbackHandle HcsCallback, hr error) {
     ctx, span := trace.StartSpan(ctx, "HcsRegisterComputeSystemCallback")
     defer span.End()
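A hedged sketch of the kind of JSON payload a caller might hand to the new HcsModifyServiceSettings wrapper: a ModificationRequest whose Settings carry a container credential guard operation request, matching the schema2 types added earlier in this diff. The structs below are local mirrors (the real packages are internal to hcsshim) and the Id, CredentialSpec and Transport values are placeholders.

package main

import (
    "encoding/json"
    "fmt"
)

// Local mirrors of hcsschema.ModificationRequest and the credential guard
// request types shown in the diff above; illustrative only.
type modificationRequest struct {
    PropertyType string      `json:"PropertyType,omitempty"`
    Settings     interface{} `json:"Settings,omitempty"`
}

type ccgOperationRequest struct {
    Operation        string      `json:"Operation,omitempty"`
    OperationDetails interface{} `json:"OperationDetails,omitempty"`
}

type ccgAddInstanceRequest struct {
    Id             string `json:"Id,omitempty"`
    CredentialSpec string `json:"CredentialSpec,omitempty"`
    Transport      string `json:"Transport,omitempty"`
}

func main() {
    // Hypothetical service-settings payload: add a credential guard instance.
    req := modificationRequest{
        PropertyType: "ContainerCredentialGuard",
        Settings: ccgOperationRequest{
            Operation:        "AddInstance",
            OperationDetails: ccgAddInstanceRequest{Id: "example-id", CredentialSpec: "{...}", Transport: "LRPC"},
        },
    }
    b, _ := json.MarshalIndent(req, "", "  ")
    fmt.Println(string(b))
}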
24 vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go generated vendored
@@ -50,6 +50,7 @@ var (
     procHcsResumeComputeSystem             = modvmcompute.NewProc("HcsResumeComputeSystem")
     procHcsGetComputeSystemProperties      = modvmcompute.NewProc("HcsGetComputeSystemProperties")
     procHcsModifyComputeSystem             = modvmcompute.NewProc("HcsModifyComputeSystem")
+    procHcsModifyServiceSettings           = modvmcompute.NewProc("HcsModifyServiceSettings")
     procHcsRegisterComputeSystemCallback   = modvmcompute.NewProc("HcsRegisterComputeSystemCallback")
     procHcsUnregisterComputeSystemCallback = modvmcompute.NewProc("HcsUnregisterComputeSystemCallback")
     procHcsCreateProcess                   = modvmcompute.NewProc("HcsCreateProcess")
@@ -314,6 +315,29 @@ func _hcsModifyComputeSystem(computeSystem HcsSystem, configuration *uint16, res
     return
 }

+func hcsModifyServiceSettings(settings string, result **uint16) (hr error) {
+    var _p0 *uint16
+    _p0, hr = syscall.UTF16PtrFromString(settings)
+    if hr != nil {
+        return
+    }
+    return _hcsModifyServiceSettings(_p0, result)
+}
+
+func _hcsModifyServiceSettings(settings *uint16, result **uint16) (hr error) {
+    if hr = procHcsModifyServiceSettings.Find(); hr != nil {
+        return
+    }
+    r0, _, _ := syscall.Syscall(procHcsModifyServiceSettings.Addr(), 2, uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)), 0)
+    if int32(r0) < 0 {
+        if r0&0x1fff0000 == 0x00070000 {
+            r0 &= 0xffff
+        }
+        hr = syscall.Errno(r0)
+    }
+    return
+}
+
 func hcsRegisterComputeSystemCallback(computeSystem HcsSystem, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) {
     if hr = procHcsRegisterComputeSystemCallback.Find(); hr != nil {
         return
12 vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go generated vendored
@@ -11,6 +11,7 @@ import (
     "github.com/Microsoft/hcsshim/internal/hcserror"
     "github.com/Microsoft/hcsshim/internal/oc"
     "github.com/Microsoft/hcsshim/internal/safefile"
+    "github.com/Microsoft/hcsshim/internal/winapi"
     "go.opencensus.io/trace"
 )

@@ -37,7 +38,7 @@ type dirInfo struct {
 func reapplyDirectoryTimes(root *os.File, dis []dirInfo) error {
     for i := range dis {
         di := &dis[len(dis)-i-1] // reverse order: process child directories first
-        f, err := safefile.OpenRelative(di.path, root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, safefile.FILE_OPEN, safefile.FILE_DIRECTORY_FILE)
+        f, err := safefile.OpenRelative(di.path, root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, winapi.FILE_OPEN, winapi.FILE_DIRECTORY_FILE|syscall.FILE_FLAG_OPEN_REPARSE_POINT)
         if err != nil {
             return err
         }
@@ -47,6 +48,7 @@ func reapplyDirectoryTimes(root *os.File, dis []dirInfo) error {
         if err != nil {
             return err
         }
+
     }
     return nil
 }
@@ -92,14 +94,12 @@ func (w *baseLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) (err e

     extraFlags := uint32(0)
     if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
-        extraFlags |= safefile.FILE_DIRECTORY_FILE
-        if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT == 0 {
-            w.dirInfo = append(w.dirInfo, dirInfo{name, *fileInfo})
-        }
+        extraFlags |= winapi.FILE_DIRECTORY_FILE
+        w.dirInfo = append(w.dirInfo, dirInfo{name, *fileInfo})
     }

     mode := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | winio.WRITE_DAC | winio.WRITE_OWNER | winio.ACCESS_SYSTEM_SECURITY)
-    f, err = safefile.OpenRelative(name, w.root, mode, syscall.FILE_SHARE_READ, safefile.FILE_CREATE, extraFlags)
+    f, err = safefile.OpenRelative(name, w.root, mode, syscall.FILE_SHARE_READ, winapi.FILE_CREATE, extraFlags)
     if err != nil {
         return hcserror.New(err, "Failed to safefile.OpenRelative", name)
     }
4 vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go generated vendored
@@ -10,9 +10,7 @@ import (
 )

 // CreateScratchLayer creates and populates new read-write layer for use by a container.
-// This requires both the id of the direct parent layer, as well as the full list
-// of paths to all parent layers up to the base (and including the direct parent
-// whose id was provided).
+// This requires the full list of paths to all parent layers up to the base
 func CreateScratchLayer(ctx context.Context, path string, parentLayerPaths []string) (err error) {
     title := "hcsshim::CreateScratchLayer"
     ctx, span := trace.StartSpan(ctx, title)
13 vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go generated vendored
@@ -93,6 +93,19 @@ func (r *legacyLayerWriterWrapper) Close() (err error) {
             return err
         }
     }
+
+    // The reapplyDirectoryTimes must be called AFTER we are done with Tombstone
+    // deletion and hard link creation. This is because Tombstone deletion and hard link
+    // creation updates the directory last write timestamps so that will change the
+    // timestamps added by the `Add` call. Some container applications depend on the
+    // correctness of these timestamps and so we should change the timestamps back to
+    // the original value (i.e the value provided in the Add call) after this
+    // processing is done.
+    err = reapplyDirectoryTimes(r.destRoot, r.changedDi)
+    if err != nil {
+        return err
+    }
+
     // Prepare the utility VM for use if one is present in the layer.
     if r.HasUtilityVM {
         err := safefile.EnsureNotReparsePointRelative("UtilityVM", r.destRoot)
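A minimal standalone demonstration of why the directory times are re-applied last, as the comment above explains: writing an entry into a directory bumps the directory's own last-write time, so timestamps set during Add would otherwise be clobbered by later tombstone deletion or hard-link creation. The file and directory names here are placeholders.

package main

import (
    "fmt"
    "os"
    "path/filepath"
    "time"
)

func main() {
    dir, _ := os.MkdirTemp("", "layer")
    defer os.RemoveAll(dir)

    before, _ := os.Stat(dir)
    time.Sleep(10 * time.Millisecond) // stay above filesystem timestamp granularity

    // Mutating the directory's contents updates the directory's own mtime.
    os.WriteFile(filepath.Join(dir, "newfile"), []byte("x"), 0o644)

    after, _ := os.Stat(dir)
    fmt.Println("directory mtime changed:", !after.ModTime().Equal(before.ModTime()))
}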
34 vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go generated vendored
@@ -15,6 +15,7 @@ import (
     "github.com/Microsoft/go-winio"
     "github.com/Microsoft/hcsshim/internal/longpath"
     "github.com/Microsoft/hcsshim/internal/safefile"
+    "github.com/Microsoft/hcsshim/internal/winapi"
 )

 var errorIterationCanceled = errors.New("")
@@ -341,7 +342,7 @@ type legacyLayerWriter struct {
     backupWriter *winio.BackupFileWriter
     Tombstones   []string
     HasUtilityVM bool
-    uvmDi        []dirInfo
+    changedDi    []dirInfo
     addedFiles   map[string]bool
     PendingLinks []pendingLink
     pendingDirs  []pendingDir
@@ -472,8 +473,8 @@ func copyFileWithMetadata(srcRoot, destRoot *os.File, subPath string, isDir bool
         srcRoot,
         syscall.GENERIC_READ|winio.ACCESS_SYSTEM_SECURITY,
         syscall.FILE_SHARE_READ,
-        safefile.FILE_OPEN,
-        safefile.FILE_OPEN_REPARSE_POINT)
+        winapi.FILE_OPEN,
+        winapi.FILE_OPEN_REPARSE_POINT)
     if err != nil {
         return nil, err
     }
@@ -488,14 +489,14 @@

     extraFlags := uint32(0)
     if isDir {
-        extraFlags |= safefile.FILE_DIRECTORY_FILE
+        extraFlags |= winapi.FILE_DIRECTORY_FILE
     }
     dest, err := safefile.OpenRelative(
         subPath,
         destRoot,
         syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY,
         syscall.FILE_SHARE_READ,
-        safefile.FILE_CREATE,
+        winapi.FILE_CREATE,
         extraFlags)
     if err != nil {
         return nil, err
@@ -555,7 +556,7 @@ func cloneTree(srcRoot *os.File, destRoot *os.File, subPath string, mutatedFiles
         if err != nil {
             return err
         }
-        if isDir && !isReparsePoint {
+        if isDir {
             di = append(di, dirInfo{path: relPath, fileInfo: *fi})
         }
     } else {
@@ -583,6 +584,10 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro
         return w.initUtilityVM()
     }

+    if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
+        w.changedDi = append(w.changedDi, dirInfo{path: name, fileInfo: *fileInfo})
+    }
+
     name = filepath.Clean(name)
     if hasPathPrefix(name, utilityVMPath) {
         if !w.HasUtilityVM {
@@ -591,7 +596,7 @@
         if !hasPathPrefix(name, utilityVMFilesPath) && name != utilityVMFilesPath {
             return errors.New("invalid UtilityVM layer")
         }
-        createDisposition := uint32(safefile.FILE_OPEN)
+        createDisposition := uint32(winapi.FILE_OPEN)
         if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
             st, err := safefile.LstatRelative(name, w.destRoot)
             if err != nil && !os.IsNotExist(err) {
@@ -612,16 +617,13 @@
                 return err
             }
         }
-        if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT == 0 {
-            w.uvmDi = append(w.uvmDi, dirInfo{path: name, fileInfo: *fileInfo})
-        }
     } else {
         // Overwrite any existing hard link.
         err := safefile.RemoveRelative(name, w.destRoot)
         if err != nil && !os.IsNotExist(err) {
             return err
         }
-        createDisposition = safefile.FILE_CREATE
+        createDisposition = winapi.FILE_CREATE
     }

     f, err := safefile.OpenRelative(
@@ -630,7 +632,7 @@
         syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY,
         syscall.FILE_SHARE_READ,
         createDisposition,
-        safefile.FILE_OPEN_REPARSE_POINT,
+        winapi.FILE_OPEN_REPARSE_POINT,
     )
     if err != nil {
         return err
@@ -667,7 +669,7 @@
     w.currentIsDir = true
 }

-    f, err := safefile.OpenRelative(fname, w.root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, safefile.FILE_CREATE, 0)
+    f, err := safefile.OpenRelative(fname, w.root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, winapi.FILE_CREATE, 0)
     if err != nil {
         return err
     }
@@ -805,11 +807,5 @@ func (w *legacyLayerWriter) Close() error {
             return err
         }
     }
-    if w.HasUtilityVM {
-        err := reapplyDirectoryTimes(w.destRoot, w.uvmDi)
-        if err != nil {
-            return err
-        }
-    }
     return nil
 }
3 vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go generated vendored
@@ -1,3 +1,6 @@
+// Package wclayer provides bindings to HCS's legacy layer management API and
+// provides a higher level interface around these calls for container layer
+// management.
 package wclayer

 import "github.com/Microsoft/go-winio/pkg/guid"
13 vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go generated vendored Normal file
@@ -0,0 +1,13 @@
package winapi

import "github.com/Microsoft/go-winio/pkg/guid"

//sys CMGetDeviceIDListSize(pulLen *uint32, pszFilter *byte, uFlags uint32) (hr error) = cfgmgr32.CM_Get_Device_ID_List_SizeA
//sys CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags uint32) (hr error)= cfgmgr32.CM_Get_Device_ID_ListA
//sys CMLocateDevNode(pdnDevInst *uint32, pDeviceID string, uFlags uint32) (hr error) = cfgmgr32.CM_Locate_DevNodeW
//sys CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyType *uint32, propertyBuffer *uint16, propertyBufferSize *uint32, uFlags uint32) (hr error) = cfgmgr32.CM_Get_DevNode_PropertyW

type DevPropKey struct {
    Fmtid guid.GUID
    Pid   uint32
}
15 vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go generated vendored Normal file
@@ -0,0 +1,15 @@
package winapi

import "syscall"

//sys RtlNtStatusToDosError(status uint32) (winerr error) = ntdll.RtlNtStatusToDosError

const (
    STATUS_REPARSE_POINT_ENCOUNTERED = 0xC000050B
    ERROR_NO_MORE_ITEMS              = 0x103
    ERROR_MORE_DATA    syscall.Errno = 234
)

func NTSuccess(status uint32) bool {
    return status == 0
}
61 vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go generated vendored Normal file
@@ -0,0 +1,61 @@
package winapi

//sys NtCreateFile(handle *uintptr, accessMask uint32, oa *ObjectAttributes, iosb *IOStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) = ntdll.NtCreateFile
//sys NtSetInformationFile(handle uintptr, iosb *IOStatusBlock, information uintptr, length uint32, class uint32) (status uint32) = ntdll.NtSetInformationFile

//sys NtOpenDirectoryObject(handle *uintptr, accessMask uint32, oa *ObjectAttributes) (status uint32) = ntdll.NtOpenDirectoryObject
//sys NtQueryDirectoryObject(handle uintptr, buffer *byte, length uint32, singleEntry bool, restartScan bool, context *uint32, returnLength *uint32)(status uint32) = ntdll.NtQueryDirectoryObject

const (
    FileLinkInformationClass          = 11
    FileDispositionInformationExClass = 64

    FILE_READ_ATTRIBUTES  = 0x0080
    FILE_WRITE_ATTRIBUTES = 0x0100
    DELETE                = 0x10000

    FILE_OPEN   = 1
    FILE_CREATE = 2

    FILE_LIST_DIRECTORY          = 0x00000001
    FILE_DIRECTORY_FILE          = 0x00000001
    FILE_SYNCHRONOUS_IO_NONALERT = 0x00000020
    FILE_OPEN_FOR_BACKUP_INTENT  = 0x00004000
    FILE_OPEN_REPARSE_POINT      = 0x00200000

    FILE_DISPOSITION_DELETE = 0x00000001

    OBJ_DONT_REPARSE = 0x1000

    STATUS_MORE_ENTRIES    = 0x105
    STATUS_NO_MORE_ENTRIES = 0x8000001a
)

type FileDispositionInformationEx struct {
    Flags uintptr
}

type IOStatusBlock struct {
    Status, Information uintptr
}

type ObjectAttributes struct {
    Length             uintptr
    RootDirectory      uintptr
    ObjectName         uintptr
    Attributes         uintptr
    SecurityDescriptor uintptr
    SecurityQoS        uintptr
}

type ObjectDirectoryInformation struct {
    Name     UnicodeString
    TypeName UnicodeString
}

type FileLinkInformation struct {
    ReplaceIfExists bool
    RootDirectory   uintptr
    FileNameLength  uint32
    FileName        [1]uint16
}
120 vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go generated vendored Normal file
@@ -0,0 +1,120 @@
package winapi

import (
    "golang.org/x/sys/windows"
)

// Messages that can be received from an assigned io completion port.
// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_associate_completion_port
const (
    JOB_OBJECT_MSG_END_OF_JOB_TIME       = 1
    JOB_OBJECT_MSG_END_OF_PROCESS_TIME   = 2
    JOB_OBJECT_MSG_ACTIVE_PROCESS_LIMIT  = 3
    JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO   = 4
    JOB_OBJECT_MSG_NEW_PROCESS           = 6
    JOB_OBJECT_MSG_EXIT_PROCESS          = 7
    JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS = 8
    JOB_OBJECT_MSG_PROCESS_MEMORY_LIMIT  = 9
    JOB_OBJECT_MSG_JOB_MEMORY_LIMIT      = 10
    JOB_OBJECT_MSG_NOTIFICATION_LIMIT    = 11
)

// IO limit flags
//
// https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/ns-jobapi2-jobobject_io_rate_control_information
const JOB_OBJECT_IO_RATE_CONTROL_ENABLE = 0x1

// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_cpu_rate_control_information
const (
    JOB_OBJECT_CPU_RATE_CONTROL_ENABLE = 1 << iota
    JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED
    JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP
    JOB_OBJECT_CPU_RATE_CONTROL_NOTIFY
    JOB_OBJECT_CPU_RATE_CONTROL_MIN_MAX_RATE
)

// JobObjectInformationClass values. Used for a call to QueryInformationJobObject
//
// https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/nf-jobapi2-queryinformationjobobject
const (
    JobObjectBasicAccountingInformation      uint32 = 1
    JobObjectBasicProcessIdList              uint32 = 3
    JobObjectBasicAndIoAccountingInformation uint32 = 8
    JobObjectLimitViolationInformation       uint32 = 13
    JobObjectNotificationLimitInformation2   uint32 = 33
)

// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_limit_information
type JOBOBJECT_BASIC_LIMIT_INFORMATION struct {
    PerProcessUserTimeLimit int64
    PerJobUserTimeLimit     int64
    LimitFlags              uint32
    MinimumWorkingSetSize   uintptr
    MaximumWorkingSetSize   uintptr
    ActiveProcessLimit      uint32
    Affinity                uintptr
    PriorityClass           uint32
    SchedulingClass         uint32
}

// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_cpu_rate_control_information
type JOBOBJECT_CPU_RATE_CONTROL_INFORMATION struct {
    ControlFlags uint32
    Rate         uint32
}

// https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/ns-jobapi2-jobobject_io_rate_control_information
type JOBOBJECT_IO_RATE_CONTROL_INFORMATION struct {
    MaxIops         int64
    MaxBandwidth    int64
    ReservationIops int64
    BaseIOSize      uint32
    VolumeName      string
    ControlFlags    uint32
}

// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_process_id_list
type JOBOBJECT_BASIC_PROCESS_ID_LIST struct {
    NumberOfAssignedProcesses uint32
    NumberOfProcessIdsInList  uint32
    ProcessIdList             [1]uintptr
}

// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_associate_completion_port
type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct {
    CompletionKey  uintptr
    CompletionPort windows.Handle
}

// BOOL IsProcessInJob(
//    HANDLE ProcessHandle,
//    HANDLE JobHandle,
//    PBOOL  Result
// );
//
//sys IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) = kernel32.IsProcessInJob

// BOOL QueryInformationJobObject(
//    HANDLE             hJob,
//    JOBOBJECTINFOCLASS JobObjectInformationClass,
//    LPVOID             lpJobObjectInformation,
//    DWORD              cbJobObjectInformationLength,
//    LPDWORD            lpReturnLength
// );
//
//sys QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) = kernel32.QueryInformationJobObject

// HANDLE OpenJobObjectW(
//    DWORD   dwDesiredAccess,
//    BOOL    bInheritHandle,
//    LPCWSTR lpName
// );
//
//sys OpenJobObject(desiredAccess uint32, inheritHandle bool, lpName *uint16) (handle windows.Handle, err error) = kernel32.OpenJobObjectW

// DWORD SetIoRateControlInformationJobObject(
//    HANDLE                                hJob,
//    JOBOBJECT_IO_RATE_CONTROL_INFORMATION *IoRateControlInfo
// );
//
//sys SetIoRateControlInformationJobObject(jobHandle windows.Handle, ioRateControlInfo *JOBOBJECT_IO_RATE_CONTROL_INFORMATION) (ret uint32, err error) = kernel32.SetIoRateControlInformationJobObject
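A small standalone sketch (using local mirror constants, not the internal/winapi package) of how the CPU rate-control flags above combine: they are bit flags built with 1 << iota, so a hard CPU cap is expressed by OR-ing the enable and hard-cap bits and supplying a rate out of 10000 (percentage times 100, per the linked Microsoft docs).

package main

import "fmt"

// Mirrors of JOB_OBJECT_CPU_RATE_CONTROL_* for illustration only.
const (
    cpuRateControlEnable = 1 << iota
    cpuRateControlWeightBased
    cpuRateControlHardCap
    cpuRateControlNotify
    cpuRateControlMinMaxRate
)

type cpuRateControlInformation struct {
    ControlFlags uint32
    Rate         uint32
}

func main() {
    // Cap a job at roughly 50% of the CPU: rate is expressed out of 10000.
    info := cpuRateControlInformation{
        ControlFlags: cpuRateControlEnable | cpuRateControlHardCap,
        Rate:         5000,
    }
    fmt.Printf("flags=%#x rate=%d\n", info.ControlFlags, info.Rate)
}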
30 vendor/github.com/Microsoft/hcsshim/internal/winapi/logon.go generated vendored Normal file
@@ -0,0 +1,30 @@
package winapi

// BOOL LogonUserA(
//   LPCWSTR lpszUsername,
//   LPCWSTR lpszDomain,
//   LPCWSTR lpszPassword,
//   DWORD   dwLogonType,
//   DWORD   dwLogonProvider,
//   PHANDLE phToken
// );
//
//sys LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *windows.Token) (err error) = advapi32.LogonUserW

// Logon types
const (
    LOGON32_LOGON_INTERACTIVE       uint32 = 2
    LOGON32_LOGON_NETWORK           uint32 = 3
    LOGON32_LOGON_BATCH             uint32 = 4
    LOGON32_LOGON_SERVICE           uint32 = 5
    LOGON32_LOGON_UNLOCK            uint32 = 7
    LOGON32_LOGON_NETWORK_CLEARTEXT uint32 = 8
    LOGON32_LOGON_NEW_CREDENTIALS   uint32 = 9
)

// Logon providers
const (
    LOGON32_PROVIDER_DEFAULT uint32 = 0
    LOGON32_PROVIDER_WINNT40 uint32 = 2
    LOGON32_PROVIDER_WINNT50 uint32 = 3
)
11 vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go generated vendored Normal file
@@ -0,0 +1,11 @@
package winapi

// VOID RtlMoveMemory(
//   _Out_       VOID UNALIGNED *Destination,
//   _In_  const VOID UNALIGNED *Source,
//   _In_        SIZE_T         Length
// );
//sys RtlMoveMemory(destination *byte, source *byte, length uintptr) (err error) = kernel32.RtlMoveMemory

//sys LocalAlloc(flags uint32, size int) (ptr uintptr) = kernel32.LocalAlloc
//sys LocalFree(ptr uintptr) = kernel32.LocalFree
11 vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go generated vendored Normal file
@@ -0,0 +1,11 @@
package winapi

// DWORD SearchPathW(
//   LPCWSTR lpPath,
//   LPCWSTR lpFileName,
//   LPCWSTR lpExtension,
//   DWORD   nBufferLength,
//   LPWSTR  lpBuffer,
//   LPWSTR  *lpFilePart
// );
//sys SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBufferLength uint32, lpBuffer *uint16, lpFilePath **uint16) (size uint32, err error) = kernel32.SearchPathW
3 vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go generated vendored Normal file
@@ -0,0 +1,3 @@
package winapi

const PROCESS_ALL_ACCESS uint32 = 2097151
7 vendor/github.com/Microsoft/hcsshim/internal/winapi/processor.go generated vendored Normal file
@@ -0,0 +1,7 @@
package winapi

// Get count from all processor groups.
// https://docs.microsoft.com/en-us/windows/win32/procthread/processor-groups
const ALL_PROCESSOR_GROUPS = 0xFFFF

//sys GetActiveProcessorCount(groupNumber uint16) (amount uint32) = kernel32.GetActiveProcessorCount
60 vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go generated vendored Normal file
@@ -0,0 +1,60 @@
package winapi

import (
    "errors"
    "syscall"
    "unicode/utf16"
    "unsafe"
)

type UnicodeString struct {
    Length        uint16
    MaximumLength uint16
    Buffer        *uint16
}

//String converts a UnicodeString to a golang string
func (uni UnicodeString) String() string {
    p := (*[0xffff]uint16)(unsafe.Pointer(uni.Buffer))

    // UnicodeString is not guaranteed to be null terminated, therefore
    // use the UnicodeString's Length field
    lengthInChars := uni.Length / 2
    return syscall.UTF16ToString(p[:lengthInChars])
}

// NewUnicodeString allocates a new UnicodeString and copies `s` into
// the buffer of the new UnicodeString.
func NewUnicodeString(s string) (*UnicodeString, error) {
    ws := utf16.Encode(([]rune)(s))
    if len(ws) > 32767 {
        return nil, syscall.ENAMETOOLONG
    }

    uni := &UnicodeString{
        Length:        uint16(len(ws) * 2),
        MaximumLength: uint16(len(ws) * 2),
        Buffer:        &make([]uint16, len(ws))[0],
    }
    copy((*[32768]uint16)(unsafe.Pointer(uni.Buffer))[:], ws)
    return uni, nil
}

// ConvertStringSetToSlice is a helper function used to convert the contents of
// `buf` into a string slice. `buf` contains a set of null terminated strings
// with an additional null at the end to indicate the end of the set.
func ConvertStringSetToSlice(buf []byte) ([]string, error) {
    var results []string
    prev := 0
    for i := range buf {
        if buf[i] == 0 {
            if prev == i {
                // found two null characters in a row, return result
                return results, nil
            }
            results = append(results, string(buf[prev:i]))
            prev = i + 1
        }
    }
    return nil, errors.New("string set malformed: missing null terminator at end of buffer")
}
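An illustrative sketch of the doubly null-terminated string-set format that ConvertStringSetToSlice parses (the format returned by APIs such as CM_Get_Device_ID_ListA). The parser is a local copy of the vendored helper above, since internal/winapi is not importable from outside hcsshim, and the device IDs are placeholders.

package main

import (
    "errors"
    "fmt"
)

// convertStringSetToSlice mirrors winapi.ConvertStringSetToSlice: it splits a
// buffer of null-terminated strings ending in an extra null into a slice.
func convertStringSetToSlice(buf []byte) ([]string, error) {
    var results []string
    prev := 0
    for i := range buf {
        if buf[i] == 0 {
            if prev == i {
                // two nulls in a row: end of the set
                return results, nil
            }
            results = append(results, string(buf[prev:i]))
            prev = i + 1
        }
    }
    return nil, errors.New("string set malformed: missing null terminator at end of buffer")
}

func main() {
    buf := []byte("PCI\\VEN_8086\x00USB\\ROOT_HUB30\x00\x00")
    ids, err := convertStringSetToSlice(buf)
    fmt.Println(ids, err) // [PCI\VEN_8086 USB\ROOT_HUB30] <nil>
}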
5 vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go generated vendored Normal file
@@ -0,0 +1,5 @@
// Package winapi contains various low-level bindings to Windows APIs. It can
// be thought of as an extension to golang.org/x/sys/windows.
package winapi

//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go jobobject.go path.go logon.go memory.go processor.go devices.go filesystem.go errors.go
271 vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go generated vendored Normal file
@@ -0,0 +1,271 @@
// Code generated mksyscall_windows.exe DO NOT EDIT

package winapi

import (
    "syscall"
    "unsafe"

    "golang.org/x/sys/windows"
)

var _ unsafe.Pointer

// Do the interface allocations only once for common
// Errno values.
const (
    errnoERROR_IO_PENDING = 997
)

var (
    errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
)

// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
    switch e {
    case 0:
        return nil
    case errnoERROR_IO_PENDING:
        return errERROR_IO_PENDING
    }
    // TODO: add more here, after collecting data on the common
    // error values see on Windows. (perhaps when running
    // all.bat?)
    return e
}

var (
    modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
    modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
    modcfgmgr32 = windows.NewLazySystemDLL("cfgmgr32.dll")
    modntdll    = windows.NewLazySystemDLL("ntdll.dll")

    procIsProcessInJob                       = modkernel32.NewProc("IsProcessInJob")
    procQueryInformationJobObject            = modkernel32.NewProc("QueryInformationJobObject")
    procOpenJobObjectW                       = modkernel32.NewProc("OpenJobObjectW")
    procSetIoRateControlInformationJobObject = modkernel32.NewProc("SetIoRateControlInformationJobObject")
    procSearchPathW                          = modkernel32.NewProc("SearchPathW")
    procLogonUserW                           = modadvapi32.NewProc("LogonUserW")
    procRtlMoveMemory                        = modkernel32.NewProc("RtlMoveMemory")
    procLocalAlloc                           = modkernel32.NewProc("LocalAlloc")
    procLocalFree                            = modkernel32.NewProc("LocalFree")
    procGetActiveProcessorCount              = modkernel32.NewProc("GetActiveProcessorCount")
    procCM_Get_Device_ID_List_SizeA          = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA")
    procCM_Get_Device_ID_ListA               = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA")
    procCM_Locate_DevNodeW                   = modcfgmgr32.NewProc("CM_Locate_DevNodeW")
    procCM_Get_DevNode_PropertyW             = modcfgmgr32.NewProc("CM_Get_DevNode_PropertyW")
    procNtCreateFile                         = modntdll.NewProc("NtCreateFile")
    procNtSetInformationFile                 = modntdll.NewProc("NtSetInformationFile")
    procNtOpenDirectoryObject                = modntdll.NewProc("NtOpenDirectoryObject")
    procNtQueryDirectoryObject               = modntdll.NewProc("NtQueryDirectoryObject")
    procRtlNtStatusToDosError                = modntdll.NewProc("RtlNtStatusToDosError")
)

func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) {
    r1, _, e1 := syscall.Syscall(procIsProcessInJob.Addr(), 3, uintptr(procHandle), uintptr(jobHandle), uintptr(unsafe.Pointer(result)))
    if r1 == 0 {
        if e1 != 0 {
            err = errnoErr(e1)
        } else {
            err = syscall.EINVAL
        }
    }
    return
}

func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) {
    r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(jobHandle), uintptr(infoClass), uintptr(jobObjectInfo), uintptr(jobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength)), 0)
    if r1 == 0 {
        if e1 != 0 {
            err = errnoErr(e1)
        } else {
            err = syscall.EINVAL
        }
    }
    return
}

func OpenJobObject(desiredAccess uint32, inheritHandle bool, lpName *uint16) (handle windows.Handle, err error) {
    var _p0 uint32
    if inheritHandle {
        _p0 = 1
    } else {
        _p0 = 0
    }
    r0, _, e1 := syscall.Syscall(procOpenJobObjectW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(lpName)))
    handle = windows.Handle(r0)
    if handle == 0 {
        if e1 != 0 {
            err = errnoErr(e1)
        } else {
            err = syscall.EINVAL
        }
    }
    return
}

func SetIoRateControlInformationJobObject(jobHandle windows.Handle, ioRateControlInfo *JOBOBJECT_IO_RATE_CONTROL_INFORMATION) (ret uint32, err error) {
    r0, _, e1 := syscall.Syscall(procSetIoRateControlInformationJobObject.Addr(), 2, uintptr(jobHandle), uintptr(unsafe.Pointer(ioRateControlInfo)), 0)
    ret = uint32(r0)
    if ret == 0 {
        if e1 != 0 {
            err = errnoErr(e1)
        } else {
            err = syscall.EINVAL
        }
    }
    return
}

func SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBufferLength uint32, lpBuffer *uint16, lpFilePath **uint16) (size uint32, err error) {
    r0, _, e1 := syscall.Syscall6(procSearchPathW.Addr(), 6, uintptr(unsafe.Pointer(lpPath)), uintptr(unsafe.Pointer(lpFileName)), uintptr(unsafe.Pointer(lpExtension)), uintptr(nBufferLength), uintptr(unsafe.Pointer(lpBuffer)), uintptr(unsafe.Pointer(lpFilePath)))
    size = uint32(r0)
    if size == 0 {
        if e1 != 0 {
            err = errnoErr(e1)
        } else {
            err = syscall.EINVAL
        }
    }
    return
}

func LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *windows.Token) (err error) {
    r1, _, e1 := syscall.Syscall6(procLogonUserW.Addr(), 6, uintptr(unsafe.Pointer(username)), uintptr(unsafe.Pointer(domain)), uintptr(unsafe.Pointer(password)), uintptr(logonType), uintptr(logonProvider), uintptr(unsafe.Pointer(token)))
    if r1 == 0 {
        if e1 != 0 {
            err = errnoErr(e1)
        } else {
            err = syscall.EINVAL
        }
    }
    return
}

func RtlMoveMemory(destination *byte, source *byte, length uintptr) (err error) {
    r1, _, e1 := syscall.Syscall(procRtlMoveMemory.Addr(), 3, uintptr(unsafe.Pointer(destination)), uintptr(unsafe.Pointer(source)), uintptr(length))
    if r1 == 0 {
        if e1 != 0 {
            err = errnoErr(e1)
        } else {
            err = syscall.EINVAL
        }
    }
    return
}

func LocalAlloc(flags uint32, size int) (ptr uintptr) {
    r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(size), 0)
    ptr = uintptr(r0)
    return
}

func LocalFree(ptr uintptr) {
    syscall.Syscall(procLocalFree.Addr(), 1, uintptr(ptr), 0, 0)
    return
}

func GetActiveProcessorCount(groupNumber uint16) (amount uint32) {
    r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0)
    amount = uint32(r0)
    return
}

func CMGetDeviceIDListSize(pulLen *uint32, pszFilter *byte, uFlags uint32) (hr error) {
    r0, _, _ := syscall.Syscall(procCM_Get_Device_ID_List_SizeA.Addr(), 3, uintptr(unsafe.Pointer(pulLen)), uintptr(unsafe.Pointer(pszFilter)), uintptr(uFlags))
    if int32(r0) < 0 {
        if r0&0x1fff0000 == 0x00070000 {
            r0 &= 0xffff
        }
        hr = syscall.Errno(r0)
    }
    return
}

func CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags uint32) (hr error) {
    r0, _, _ := syscall.Syscall6(procCM_Get_Device_ID_ListA.Addr(), 4, uintptr(unsafe.Pointer(pszFilter)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(uFlags), 0, 0)
    if int32(r0) < 0 {
        if r0&0x1fff0000 == 0x00070000 {
            r0 &= 0xffff
        }
        hr = syscall.Errno(r0)
    }
    return
}

func CMLocateDevNode(pdnDevInst *uint32, pDeviceID string, uFlags uint32) (hr error) {
    var _p0 *uint16
    _p0, hr = syscall.UTF16PtrFromString(pDeviceID)
    if hr != nil {
        return
    }
    return _CMLocateDevNode(pdnDevInst, _p0, uFlags)
}

func _CMLocateDevNode(pdnDevInst *uint32, pDeviceID *uint16, uFlags uint32) (hr error) {
    r0, _, _ := syscall.Syscall(procCM_Locate_DevNodeW.Addr(), 3, uintptr(unsafe.Pointer(pdnDevInst)), uintptr(unsafe.Pointer(pDeviceID)), uintptr(uFlags))
    if int32(r0) < 0 {
        if r0&0x1fff0000 == 0x00070000 {
            r0 &= 0xffff
        }
        hr = syscall.Errno(r0)
    }
    return
}

func CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyType *uint32, propertyBuffer *uint16, propertyBufferSize *uint32, uFlags uint32) (hr error) {
    r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_PropertyW.Addr(), 6, uintptr(dnDevInst), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(unsafe.Pointer(propertyBufferSize)), uintptr(uFlags))
    if int32(r0) < 0 {
        if r0&0x1fff0000 == 0x00070000 {
            r0 &= 0xffff
        }
        hr = syscall.Errno(r0)
    }
    return
}

func NtCreateFile(handle *uintptr, accessMask uint32, oa *ObjectAttributes, iosb *IOStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) {
    r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(fileAttributes), uintptr(shareAccess), uintptr(createDisposition), uintptr(createOptions), uintptr(unsafe.Pointer(eaBuffer)), uintptr(eaLength), 0)
|
||||||
|
status = uint32(r0)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func NtSetInformationFile(handle uintptr, iosb *IOStatusBlock, information uintptr, length uint32, class uint32) (status uint32) {
|
||||||
|
r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(information), uintptr(length), uintptr(class), 0)
|
||||||
|
status = uint32(r0)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func NtOpenDirectoryObject(handle *uintptr, accessMask uint32, oa *ObjectAttributes) (status uint32) {
|
||||||
|
r0, _, _ := syscall.Syscall(procNtOpenDirectoryObject.Addr(), 3, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)))
|
||||||
|
status = uint32(r0)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func NtQueryDirectoryObject(handle uintptr, buffer *byte, length uint32, singleEntry bool, restartScan bool, context *uint32, returnLength *uint32) (status uint32) {
|
||||||
|
var _p0 uint32
|
||||||
|
if singleEntry {
|
||||||
|
_p0 = 1
|
||||||
|
} else {
|
||||||
|
_p0 = 0
|
||||||
|
}
|
||||||
|
var _p1 uint32
|
||||||
|
if restartScan {
|
||||||
|
_p1 = 1
|
||||||
|
} else {
|
||||||
|
_p1 = 0
|
||||||
|
}
|
||||||
|
r0, _, _ := syscall.Syscall9(procNtQueryDirectoryObject.Addr(), 7, uintptr(handle), uintptr(unsafe.Pointer(buffer)), uintptr(length), uintptr(_p0), uintptr(_p1), uintptr(unsafe.Pointer(context)), uintptr(unsafe.Pointer(returnLength)), 0, 0)
|
||||||
|
status = uint32(r0)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func RtlNtStatusToDosError(status uint32) (winerr error) {
|
||||||
|
r0, _, _ := syscall.Syscall(procRtlNtStatusToDosError.Addr(), 1, uintptr(status), 0, 0)
|
||||||
|
if r0 != 0 {
|
||||||
|
winerr = syscall.Errno(r0)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
8  vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go  generated  vendored

@@ -24,4 +24,12 @@ const (
 	// V19H1 (version 1903) corresponds to Windows Server 1903 (semi-annual
 	// channel).
 	V19H1 = 18362
+
+	// V19H2 (version 1909) corresponds to Windows Server 1909 (semi-annual
+	// channel).
+	V19H2 = 18363
+
+	// V20H1 (version 2004) corresponds to Windows Server 2004 (semi-annual
+	// channel).
+	V20H1 = 19041
 )
44  vendor/github.com/containerd/containerd/content/helpers.go  generated  vendored

@@ -229,9 +229,47 @@ func seekReader(r io.Reader, offset, size int64) (io.Reader, error) {
 	return r, nil
 }
 
+// copyWithBuffer is very similar to io.CopyBuffer https://golang.org/pkg/io/#CopyBuffer
+// but instead of using Read to read from the src, we use ReadAtLeast to make sure we have
+// a full buffer before we do a write operation to dst to reduce overheads associated
+// with the write operations of small buffers.
 func copyWithBuffer(dst io.Writer, src io.Reader) (written int64, err error) {
-	buf := bufPool.Get().(*[]byte)
-	written, err = io.CopyBuffer(dst, src, *buf)
-	bufPool.Put(buf)
+	// If the reader has a WriteTo method, use it to do the copy.
+	// Avoids an allocation and a copy.
+	if wt, ok := src.(io.WriterTo); ok {
+		return wt.WriteTo(dst)
+	}
+	// Similarly, if the writer has a ReadFrom method, use it to do the copy.
+	if rt, ok := dst.(io.ReaderFrom); ok {
+		return rt.ReadFrom(src)
+	}
+	bufRef := bufPool.Get().(*[]byte)
+	defer bufPool.Put(bufRef)
+	buf := *bufRef
+	for {
+		nr, er := io.ReadAtLeast(src, buf, len(buf))
+		if nr > 0 {
+			nw, ew := dst.Write(buf[0:nr])
+			if nw > 0 {
+				written += int64(nw)
+			}
+			if ew != nil {
+				err = ew
+				break
+			}
+			if nr != nw {
+				err = io.ErrShortWrite
+				break
+			}
+		}
+		if er != nil {
+			// If an EOF happens after reading fewer than the requested bytes,
+			// ReadAtLeast returns ErrUnexpectedEOF.
+			if er != io.EOF && er != io.ErrUnexpectedEOF {
+				err = er
+			}
+			break
+		}
+	}
 	return
 }
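The rewritten copyWithBuffer first tries the io.WriterTo / io.ReaderFrom fast paths and only then falls back to a pooled buffer. A rough standalone sketch of the same shape follows; the names are illustrative and not part of the vendored code, and it falls back to io.CopyBuffer rather than reproducing the ReadAtLeast loop above.

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
	"sync"
)

// bufPool hands out reusable 32 KiB buffers so each copy avoids a fresh allocation.
var bufPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 32*1024)
		return &b
	},
}

// copyPooled mirrors the shape of the vendored helper: prefer the
// source/destination fast paths, otherwise copy through a pooled buffer.
func copyPooled(dst io.Writer, src io.Reader) (int64, error) {
	if wt, ok := src.(io.WriterTo); ok {
		return wt.WriteTo(dst) // the source knows how to write itself
	}
	if rf, ok := dst.(io.ReaderFrom); ok {
		return rf.ReadFrom(src) // the destination knows how to read the source
	}
	bufRef := bufPool.Get().(*[]byte)
	defer bufPool.Put(bufRef)
	return io.CopyBuffer(dst, src, *bufRef)
}

func main() {
	var dst bytes.Buffer
	n, err := copyPooled(&dst, strings.NewReader("hello"))
	fmt.Println(n, err, dst.String()) // 5 <nil> hello
}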
2  vendor/github.com/containerd/containerd/content/local/store_unix.go  generated  vendored

@@ -1,4 +1,4 @@
-// +build linux solaris darwin freebsd
+// +build linux solaris darwin freebsd netbsd
 
 /*
    Copyright The containerd Authors.
8  vendor/github.com/containerd/containerd/content/proxy/content_reader.go  generated  vendored

@@ -40,7 +40,13 @@ func (ra *remoteReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
 		Offset: off,
 		Size_:  int64(len(p)),
 	}
-	rc, err := ra.client.Read(ra.ctx, rr)
+	// we need a child context with cancel, or the eventually called
+	// grpc.NewStream will leak the goroutine until the whole thing is cleared.
+	// See comment at https://godoc.org/google.golang.org/grpc#ClientConn.NewStream
+	childCtx, cancel := context.WithCancel(ra.ctx)
+	// we MUST cancel the child context; see comment above
+	defer cancel()
+	rc, err := ra.client.Read(childCtx, rr)
 	if err != nil {
 		return 0, err
 	}
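The comments added in this hunk describe a general pattern: derive a cancellable child context for a stream and always cancel it, so any goroutine tied to that stream is released when the caller returns. A small self-contained sketch of the pattern, with a stand-in for the stream (nothing below is from the vendored code):

package main

import (
	"context"
	"fmt"
	"time"
)

// openStream stands in for any call (such as a gRPC client stream) whose
// background goroutine lives until its context is cancelled.
func openStream(ctx context.Context) <-chan string {
	out := make(chan string)
	go func() {
		defer close(out)
		select {
		case <-time.After(10 * time.Millisecond):
			out <- "payload"
		case <-ctx.Done():
			// cancelled before producing anything: exit instead of leaking
		}
	}()
	return out
}

func readOnce(parent context.Context) string {
	// Derive a child context and always cancel it, so the goroutine behind
	// the stream is released as soon as this call returns.
	ctx, cancel := context.WithCancel(parent)
	defer cancel()
	return <-openStream(ctx)
}

func main() {
	fmt.Println(readOnce(context.Background())) // payload
}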
32  vendor/github.com/containerd/containerd/defaults/defaults.go  generated  vendored  Normal file

@@ -0,0 +1,32 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package defaults

const (
	// DefaultMaxRecvMsgSize defines the default maximum message size for
	// receiving protobufs passed over the GRPC API.
	DefaultMaxRecvMsgSize = 16 << 20
	// DefaultMaxSendMsgSize defines the default maximum message size for
	// sending protobufs passed over the GRPC API.
	DefaultMaxSendMsgSize = 16 << 20
	// DefaultRuntimeNSLabel defines the namespace label to check for the
	// default runtime
	DefaultRuntimeNSLabel = "containerd.io/defaults/runtime"
	// DefaultSnapshotterNSLabel defines the namespace label to check for the
	// default snapshotter
	DefaultSnapshotterNSLabel = "containerd.io/defaults/snapshotter"
)
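Since the shift notation in the constants above is easy to misread, a one-line check (plain Go, not from the vendored package): 16 << 20 is 16 MiB.

package main

import "fmt"

func main() {
	// 16 << 20 shifts 16 left by 20 bits: 16 * 1024 * 1024 bytes.
	const defaultMaxMsgSize = 16 << 20
	fmt.Println(defaultMaxMsgSize) // 16777216
}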
37  vendor/github.com/containerd/containerd/defaults/defaults_unix.go  generated  vendored  Normal file

@@ -0,0 +1,37 @@
// +build !windows

/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package defaults

const (
	// DefaultRootDir is the default location used by containerd to store
	// persistent data
	DefaultRootDir = "/var/lib/containerd"
	// DefaultStateDir is the default location used by containerd to store
	// transient data
	DefaultStateDir = "/run/containerd"
	// DefaultAddress is the default unix socket address
	DefaultAddress = "/run/containerd/containerd.sock"
	// DefaultDebugAddress is the default unix socket address for pprof data
	DefaultDebugAddress = "/run/containerd/debug.sock"
	// DefaultFIFODir is the default location used by client-side cio library
	// to store FIFOs.
	DefaultFIFODir = "/run/containerd/fifo"
	// DefaultRuntime is the default linux runtime
	DefaultRuntime = "io.containerd.runc.v2"
)
45  vendor/github.com/containerd/containerd/defaults/defaults_windows.go  generated  vendored  Normal file

@@ -0,0 +1,45 @@
// +build windows

/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package defaults

import (
	"os"
	"path/filepath"
)

var (
	// DefaultRootDir is the default location used by containerd to store
	// persistent data
	DefaultRootDir = filepath.Join(os.Getenv("ProgramData"), "containerd", "root")
	// DefaultStateDir is the default location used by containerd to store
	// transient data
	DefaultStateDir = filepath.Join(os.Getenv("ProgramData"), "containerd", "state")
)

const (
	// DefaultAddress is the default winpipe address
	DefaultAddress = `\\.\pipe\containerd-containerd`
	// DefaultDebugAddress is the default winpipe address for pprof data
	DefaultDebugAddress = `\\.\pipe\containerd-debug`
	// DefaultFIFODir is the default location used by client-side cio library
	// to store FIFOs. Unused on Windows.
	DefaultFIFODir = ""
	// DefaultRuntime is the default windows runtime
	DefaultRuntime = "io.containerd.runhcs.v1"
)
19  vendor/github.com/containerd/containerd/defaults/doc.go  generated  vendored  Normal file

@@ -0,0 +1,19 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

// Package defaults provides several common defaults for interacting with
// containerd. These can be used on the client-side or server-side.
package defaults
9  vendor/github.com/containerd/containerd/platforms/cpuinfo.go  generated  vendored

@@ -96,6 +96,15 @@ func getCPUVariant() string {
 		return ""
 	}
 
+	// handle edge case for Raspberry Pi ARMv6 devices (which due to a kernel quirk, report "CPU architecture: 7")
+	// https://www.raspberrypi.org/forums/viewtopic.php?t=12614
+	if runtime.GOARCH == "arm" && variant == "7" {
+		model, err := getCPUInfo("model name")
+		if err == nil && strings.HasPrefix(strings.ToLower(model), "armv6-compatible") {
+			variant = "6"
+		}
+	}
+
 	switch strings.ToLower(variant) {
 	case "8", "aarch64":
 		// special case: if running a 32-bit userspace on aarch64, the variant should be "v7"
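The added branch keys off the /proc/cpuinfo "model name" field rather than the misreported "CPU architecture" field. A hedged illustration of just that prefix check; the sample model string below is a typical ARMv6 Raspberry Pi value assumed for the example, not taken from the diff.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// An ARMv6 Raspberry Pi commonly reports a line like:
	//   model name : ARMv6-compatible processor rev 7 (v6l)
	// even though "CPU architecture" says 7.
	model := "ARMv6-compatible processor rev 7 (v6l)"
	variant := "7"
	if strings.HasPrefix(strings.ToLower(model), "armv6-compatible") {
		variant = "6"
	}
	fmt.Println(variant) // 6
}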
12  vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go  generated  vendored

@@ -106,10 +106,8 @@ func FetchTokenWithOAuth(ctx context.Context, client *http.Client, headers http.
 		return nil, err
 	}
 	req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
-	if headers != nil {
-		for k, v := range headers {
-			req.Header[k] = append(req.Header[k], v...)
-		}
-	}
+	for k, v := range headers {
+		req.Header[k] = append(req.Header[k], v...)
+	}
 
 	resp, err := ctxhttp.Do(ctx, client, req)
@@ -152,10 +150,8 @@ func FetchToken(ctx context.Context, client *http.Client, headers http.Header, t
 		return nil, err
 	}
 
-	if headers != nil {
-		for k, v := range headers {
-			req.Header[k] = append(req.Header[k], v...)
-		}
-	}
+	for k, v := range headers {
+		req.Header[k] = append(req.Header[k], v...)
+	}
 
 	reqParams := req.URL.Query()
2  vendor/github.com/containerd/containerd/remotes/docker/fetcher.go  generated  vendored

@@ -45,7 +45,7 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R
 		return nil, errors.Wrap(errdefs.ErrNotFound, "no pull hosts")
 	}
 
-	ctx, err := contextWithRepositoryScope(ctx, r.refspec, false)
+	ctx, err := ContextWithRepositoryScope(ctx, r.refspec, false)
 	if err != nil {
 		return nil, err
 	}
2  vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go  generated  vendored

@@ -121,7 +121,7 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) {
 
 	rc, err := hrs.open(hrs.offset)
 	if err != nil {
-		return nil, errors.Wrapf(err, "httpReaderSeeker: failed open")
+		return nil, errors.Wrapf(err, "httpReadSeeker: failed open")
 	}
 
 	if hrs.rc != nil {
20  vendor/github.com/containerd/containerd/remotes/docker/pusher.go  generated  vendored

@@ -45,7 +45,7 @@ type dockerPusher struct {
 }
 
 func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) {
-	ctx, err := contextWithRepositoryScope(ctx, p.refspec, true)
+	ctx, err := ContextWithRepositoryScope(ctx, p.refspec, true)
 	if err != nil {
 		return nil, err
 	}
@@ -130,7 +130,7 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten
 		var resp *http.Response
 		if fromRepo := selectRepositoryMountCandidate(p.refspec, desc.Annotations); fromRepo != "" {
 			preq := requestWithMountFrom(req, desc.Digest.String(), fromRepo)
-			pctx := contextWithAppendPullRepositoryScope(ctx, fromRepo)
+			pctx := ContextWithAppendPullRepositoryScope(ctx, fromRepo)
 
 			// NOTE: the fromRepo might be private repo and
 			// auth service still can grant token without error.
@@ -222,7 +222,7 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten
 	// TODO: Support chunked upload
 
 	pr, pw := io.Pipe()
-	respC := make(chan *http.Response, 1)
+	respC := make(chan response, 1)
 	body := ioutil.NopCloser(pr)
 
 	req.body = func() (io.ReadCloser, error) {
@@ -240,6 +240,7 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten
 		defer close(respC)
 		resp, err := req.do(ctx)
 		if err != nil {
+			respC <- response{err: err}
 			pr.CloseWithError(err)
 			return
 		}
@@ -251,7 +252,7 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten
 			log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response")
 			pr.CloseWithError(err)
 		}
-		respC <- resp
+		respC <- response{Response: resp}
 	}()
 
 	return &pushWriter{
@@ -284,12 +285,17 @@ func getManifestPath(object string, dgst digest.Digest) []string {
 	return []string{"manifests", object}
 }
 
+type response struct {
+	*http.Response
+	err error
+}
+
 type pushWriter struct {
 	base *dockerBase
 	ref  string
 
 	pipe       *io.PipeWriter
-	responseC  <-chan *http.Response
+	responseC  <-chan response
 	isManifest bool
 
 	expected digest.Digest
@@ -339,8 +345,8 @@ func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Di
 
 	// TODO: timeout waiting for response
 	resp := <-pw.responseC
-	if resp == nil {
-		return errors.New("no response")
+	if resp.err != nil {
+		return resp.err
 	}
 
 	// 201 is specified return status, some registries return
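These hunks replace the bare *http.Response channel with a small response struct so the producing goroutine can hand back either a result or an error over the same channel. A minimal, self-contained sketch of that pattern, with illustrative names only:

package main

import (
	"errors"
	"fmt"
)

// result carries either a value or the error from the goroutine that
// produced it, so the receiver does not have to infer failure from a nil value.
type result struct {
	value string
	err   error
}

func upload(fail bool) <-chan result {
	ch := make(chan result, 1)
	go func() {
		defer close(ch)
		if fail {
			ch <- result{err: errors.New("upload failed")}
			return
		}
		ch <- result{value: "201 Created"}
	}()
	return ch
}

func main() {
	for _, fail := range []bool{false, true} {
		r := <-upload(fail)
		if r.err != nil {
			fmt.Println("error:", r.err)
			continue
		}
		fmt.Println("ok:", r.value)
	}
}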
2  vendor/github.com/containerd/containerd/remotes/docker/resolver.go  generated  vendored

@@ -263,7 +263,7 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp
 		return "", ocispec.Descriptor{}, errors.Wrap(errdefs.ErrNotFound, "no resolve hosts")
 	}
 
-	ctx, err = contextWithRepositoryScope(ctx, refspec, false)
+	ctx, err = ContextWithRepositoryScope(ctx, refspec, false)
 	if err != nil {
 		return "", ocispec.Descriptor{}, err
 	}
14  vendor/github.com/containerd/containerd/remotes/docker/scope.go  generated  vendored

@@ -26,10 +26,10 @@ import (
 	"github.com/containerd/containerd/reference"
 )
 
-// repositoryScope returns a repository scope string such as "repository:foo/bar:pull"
+// RepositoryScope returns a repository scope string such as "repository:foo/bar:pull"
 // for "host/foo/bar:baz".
 // When push is true, both pull and push are added to the scope.
-func repositoryScope(refspec reference.Spec, push bool) (string, error) {
+func RepositoryScope(refspec reference.Spec, push bool) (string, error) {
 	u, err := url.Parse("dummy://" + refspec.Locator)
 	if err != nil {
 		return "", err
@@ -45,9 +45,9 @@ func repositoryScope(refspec reference.Spec, push bool) (string, error) {
 // value: []string (e.g. {"registry:foo/bar:pull"})
 type tokenScopesKey struct{}
 
-// contextWithRepositoryScope returns a context with tokenScopesKey{} and the repository scope value.
-func contextWithRepositoryScope(ctx context.Context, refspec reference.Spec, push bool) (context.Context, error) {
-	s, err := repositoryScope(refspec, push)
+// ContextWithRepositoryScope returns a context with tokenScopesKey{} and the repository scope value.
+func ContextWithRepositoryScope(ctx context.Context, refspec reference.Spec, push bool) (context.Context, error) {
+	s, err := RepositoryScope(refspec, push)
 	if err != nil {
 		return nil, err
 	}
@@ -66,9 +66,9 @@ func WithScope(ctx context.Context, scope string) context.Context {
 	return context.WithValue(ctx, tokenScopesKey{}, scopes)
 }
 
-// contextWithAppendPullRepositoryScope is used to append repository pull
+// ContextWithAppendPullRepositoryScope is used to append repository pull
 // scope into existing scopes indexed by the tokenScopesKey{}.
-func contextWithAppendPullRepositoryScope(ctx context.Context, repo string) context.Context {
+func ContextWithAppendPullRepositoryScope(ctx context.Context, repo string) context.Context {
 	return WithScope(ctx, fmt.Sprintf("repository:%s:pull", repo))
 }
 
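The doc comment above describes the scope-string shape these helpers produce. A tiny illustrative snippet of just that string format, assuming the "pull,push" form when push access is requested (this is not code from the vendored package):

package main

import "fmt"

func main() {
	// Registry token-auth scopes follow "repository:<name>:<actions>".
	repo, push := "foo/bar", true
	scope := "repository:" + repo + ":pull"
	if push {
		scope += ",push"
	}
	fmt.Println(scope) // repository:foo/bar:pull,push
}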
2  vendor/github.com/containerd/containerd/sys/stat_bsd.go  generated  vendored

@@ -1,4 +1,4 @@
-// +build darwin freebsd
+// +build darwin freebsd netbsd
 
 /*
    Copyright The containerd Authors.
22  vendor/github.com/docker/cli/cli-plugins/manager/manager.go  generated  vendored

@@ -1,7 +1,6 @@
 package manager
 
 import (
-	"fmt"
 	"io/ioutil"
 	"os"
 	"os/exec"
@@ -30,16 +29,6 @@ func (e errPluginNotFound) Error() string {
 	return "Error: No such CLI plugin: " + string(e)
 }
 
-type errPluginRequireExperimental string
-
-// Note: errPluginRequireExperimental implements notFound so that the plugin
-// is skipped when listing the plugins.
-func (e errPluginRequireExperimental) NotFound() {}
-
-func (e errPluginRequireExperimental) Error() string {
-	return fmt.Sprintf("plugin candidate %q: requires experimental CLI", string(e))
-}
-
 type notFound interface{ NotFound() }
 
 // IsNotFound is true if the given error is due to a plugin not being found.
@@ -133,7 +122,7 @@ func ListPlugins(dockerCli command.Cli, rootcmd *cobra.Command) ([]Plugin, error
 			continue
 		}
 		c := &candidate{paths[0]}
-		p, err := newPlugin(c, rootcmd, dockerCli.ClientInfo().HasExperimental)
+		p, err := newPlugin(c, rootcmd)
 		if err != nil {
 			return nil, err
 		}
@@ -181,19 +170,12 @@ func PluginRunCommand(dockerCli command.Cli, name string, rootcmd *cobra.Command
 	}
 
 	c := &candidate{path: path}
-	plugin, err := newPlugin(c, rootcmd, dockerCli.ClientInfo().HasExperimental)
+	plugin, err := newPlugin(c, rootcmd)
 	if err != nil {
 		return nil, err
 	}
 	if plugin.Err != nil {
 		// TODO: why are we not returning plugin.Err?
-		err := plugin.Err.(*pluginError).Cause()
-		// if an experimental plugin was invoked directly while experimental mode is off
-		// provide a more useful error message than "not found".
-		if err, ok := err.(errPluginRequireExperimental); ok {
-			return nil, err
-		}
 		return nil, errPluginNotFound(name)
 	}
 	cmd := exec.Command(plugin.Path, args...)
2  vendor/github.com/docker/cli/cli-plugins/manager/metadata.go  generated  vendored

@@ -23,6 +23,6 @@ type Metadata struct {
 	// URL is a pointer to the plugin's homepage.
 	URL string `json:",omitempty"`
 	// Experimental specifies whether the plugin is experimental.
-	// Experimental plugins are not displayed on non-experimental CLIs.
+	// Deprecated: experimental features are now always enabled in the CLI
 	Experimental bool `json:",omitempty"`
 }
6  vendor/github.com/docker/cli/cli-plugins/manager/plugin.go  generated  vendored

@@ -35,7 +35,7 @@ type Plugin struct {
 // non-recoverable error.
 //
 // nolint: gocyclo
-func newPlugin(c Candidate, rootcmd *cobra.Command, allowExperimental bool) (Plugin, error) {
+func newPlugin(c Candidate, rootcmd *cobra.Command) (Plugin, error) {
 	path := c.Path()
 	if path == "" {
 		return Plugin{}, errors.New("plugin candidate path cannot be empty")
@@ -96,10 +96,6 @@ func newPlugin(c Candidate, rootcmd *cobra.Command, allowExperimental bool) (Plu
 		p.Err = wrapAsPluginError(err, "invalid metadata")
 		return p, nil
 	}
-	if p.Experimental && !allowExperimental {
-		p.Err = &pluginError{errPluginRequireExperimental(p.Name)}
-		return p, nil
-	}
 	if p.Metadata.SchemaVersion != "0.1.0" {
 		p.Err = NewPluginError("plugin SchemaVersion %q is not valid, must be 0.1.0", p.Metadata.SchemaVersion)
 		return p, nil
27  vendor/github.com/docker/cli/cli/cobra.go  generated  vendored

@@ -35,6 +35,7 @@ func setupCommonRootCommand(rootCmd *cobra.Command) (*cliflags.ClientOptions, *p
 	cobra.AddTemplateFunc("vendorAndVersion", vendorAndVersion)
 	cobra.AddTemplateFunc("invalidPluginReason", invalidPluginReason)
 	cobra.AddTemplateFunc("isPlugin", isPlugin)
+	cobra.AddTemplateFunc("isExperimental", isExperimental)
 	cobra.AddTemplateFunc("decoratedName", decoratedName)
 
 	rootCmd.SetUsageTemplate(usageTemplate)
@@ -191,6 +192,19 @@ var helpCommand = &cobra.Command{
 	},
 }
 
+func isExperimental(cmd *cobra.Command) bool {
+	if _, ok := cmd.Annotations["experimentalCLI"]; ok {
+		return true
+	}
+	var experimental bool
+	cmd.VisitParents(func(cmd *cobra.Command) {
+		if _, ok := cmd.Annotations["experimentalCLI"]; ok {
+			experimental = true
+		}
+	})
+	return experimental
+}
+
 func isPlugin(cmd *cobra.Command) bool {
 	return cmd.Annotations[pluginmanager.CommandAnnotationPlugin] == "true"
 }
@@ -282,11 +296,20 @@ func invalidPluginReason(cmd *cobra.Command) string {
 
 var usageTemplate = `Usage:
 
 {{- if not .HasSubCommands}} {{.UseLine}}{{end}}
 {{- if .HasSubCommands}} {{ .CommandPath}}{{- if .HasAvailableFlags}} [OPTIONS]{{end}} COMMAND{{end}}
 
 {{if ne .Long ""}}{{ .Long | trim }}{{ else }}{{ .Short | trim }}{{end}}
+{{- if isExperimental .}}
+
+EXPERIMENTAL:
+{{.CommandPath}} is an experimental feature.
+Experimental features provide early access to product functionality. These
+features may change between releases without warning, or can be removed from a
+future release. Learn more about experimental features in our documentation:
+https://docs.docker.com/go/experimental/
+
+{{- end}}
 {{- if gt .Aliases 0}}
 
 Aliases:
31  vendor/github.com/docker/cli/cli/command/cli.go  generated  vendored

@@ -152,16 +152,6 @@ func (cli *DockerCli) ClientInfo() ClientInfo {
 }
 
 func (cli *DockerCli) loadClientInfo() error {
-	var experimentalValue string
-	// Environment variable always overrides configuration
-	if experimentalValue = os.Getenv("DOCKER_CLI_EXPERIMENTAL"); experimentalValue == "" {
-		experimentalValue = cli.ConfigFile().Experimental
-	}
-	hasExperimental, err := isEnabled(experimentalValue)
-	if err != nil {
-		return errors.Wrap(err, "Experimental field")
-	}
-
 	var v string
 	if cli.client != nil {
 		v = cli.client.ClientVersion()
@@ -170,7 +160,7 @@ func (cli *DockerCli) loadClientInfo() error {
 	}
 	cli.clientInfo = &ClientInfo{
 		DefaultVersion:  v,
-		HasExperimental: hasExperimental,
+		HasExperimental: true,
 	}
 	return nil
 }
@@ -308,9 +298,9 @@ func newAPIClientFromEndpoint(ep docker.Endpoint, configFile *configfile.ConfigF
 	if err != nil {
 		return nil, err
 	}
-	customHeaders := configFile.HTTPHeaders
-	if customHeaders == nil {
-		customHeaders = map[string]string{}
+	customHeaders := make(map[string]string, len(configFile.HTTPHeaders))
+	for k, v := range configFile.HTTPHeaders {
+		customHeaders[k] = v
 	}
 	customHeaders["User-Agent"] = UserAgent()
 	clientOpts = append(clientOpts, client.WithHTTPHeaders(customHeaders))
@@ -358,17 +348,6 @@ func resolveDefaultDockerEndpoint(opts *cliflags.CommonOptions) (docker.Endpoint
 	}, nil
 }
 
-func isEnabled(value string) (bool, error) {
-	switch value {
-	case "enabled":
-		return true, nil
-	case "", "disabled":
-		return false, nil
-	default:
-		return false, errors.Errorf("%q is not valid, should be either enabled or disabled", value)
-	}
-}
-
 func (cli *DockerCli) initializeFromClient() {
 	ctx := context.Background()
 	if strings.HasPrefix(cli.DockerEndpoint().Host, "tcp://") {
@@ -471,6 +450,8 @@ type ServerInfo struct {
 
 // ClientInfo stores details about the supported features of the client
 type ClientInfo struct {
+	// Deprecated: experimental CLI features always enabled. This field is kept
+	// for backward-compatibility, and is always "true".
 	HasExperimental bool
 	DefaultVersion  string
 }
9  vendor/github.com/docker/cli/cli/command/registry.go  generated  vendored

@@ -93,17 +93,18 @@ func GetDefaultAuthConfig(cli Cli, checkCredStore bool, serverAddress string, is
 	if !isDefaultRegistry {
 		serverAddress = registry.ConvertToHostname(serverAddress)
 	}
-	var authconfig configtypes.AuthConfig
+	var authconfig = configtypes.AuthConfig{}
 	var err error
 	if checkCredStore {
 		authconfig, err = cli.ConfigFile().GetAuthConfig(serverAddress)
-	} else {
-		authconfig = configtypes.AuthConfig{}
+		if err != nil {
+			return nil, err
+		}
 	}
 	authconfig.ServerAddress = serverAddress
 	authconfig.IdentityToken = ""
 	res := types.AuthConfig(authconfig)
-	return &res, err
+	return &res, nil
 }
 
 // ConfigureAuth handles prompting of user's username and password if needed
7  vendor/github.com/docker/cli/cli/config/configfile/file.go  generated  vendored

@@ -169,6 +169,13 @@ func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error {
 	configFile.AuthConfigs = tmpAuthConfigs
 	defer func() { configFile.AuthConfigs = saveAuthConfigs }()
 
+	// User-Agent header is automatically set, and should not be stored in the configuration
+	for v := range configFile.HTTPHeaders {
+		if strings.EqualFold(v, "User-Agent") {
+			delete(configFile.HTTPHeaders, v)
+		}
+	}
+
 	data, err := json.MarshalIndent(configFile, "", "\t")
 	if err != nil {
 		return err
2  vendor/github.com/docker/cli/opts/weightdevice.go  generated  vendored

@@ -20,7 +20,7 @@ func ValidateWeightDevice(val string) (*blkiodev.WeightDevice, error) {
 	if !strings.HasPrefix(split[0], "/dev/") {
 		return nil, fmt.Errorf("bad format for device path: %s", val)
 	}
-	weight, err := strconv.ParseUint(split[1], 10, 0)
+	weight, err := strconv.ParseUint(split[1], 10, 16)
 	if err != nil {
 		return nil, fmt.Errorf("invalid weight for device: %s", val)
 	}
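The change above tightens strconv.ParseUint's bitSize from 0 (platform word size) to 16, so out-of-range weights now fail the parse. A brief standalone illustration of that effect (not from the vendored file):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// With bitSize 16, values that do not fit in a uint16 are rejected
	// by ParseUint instead of being accepted silently.
	if _, err := strconv.ParseUint("70000", 10, 16); err != nil {
		fmt.Println("rejected:", err)
	}
	v, _ := strconv.ParseUint("500", 10, 16)
	fmt.Println("accepted:", v) // accepted: 500
}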
62  vendor/github.com/docker/docker/AUTHORS  generated  vendored

@@ -45,6 +45,7 @@ AJ Bowen <aj@soulshake.net>
 Ajey Charantimath <ajey.charantimath@gmail.com>
 ajneu <ajneu@users.noreply.github.com>
 Akash Gupta <akagup@microsoft.com>
+Akhil Mohan <akhil.mohan@mayadata.io>
 Akihiro Matsushima <amatsusbit@gmail.com>
 Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
 Akim Demaille <akim.demaille@docker.com>
@@ -52,10 +53,12 @@ Akira Koyasu <mail@akirakoyasu.net>
 Akshay Karle <akshay.a.karle@gmail.com>
 Al Tobey <al@ooyala.com>
 alambike <alambike@gmail.com>
+Alan Hoyle <alan@alanhoyle.com>
 Alan Scherger <flyinprogrammer@gmail.com>
 Alan Thompson <cloojure@gmail.com>
 Albert Callarisa <shark234@gmail.com>
 Albert Zhang <zhgwenming@gmail.com>
+Albin Kerouanton <albin@akerouanton.name>
 Alejandro González Hevia <alejandrgh11@gmail.com>
 Aleksa Sarai <asarai@suse.de>
 Aleksandrs Fadins <aleks@s-ko.net>
@@ -109,6 +112,7 @@ Amy Lindburg <amy.lindburg@docker.com>
 Anand Patil <anand.prabhakar.patil@gmail.com>
 AnandkumarPatel <anandkumarpatel@gmail.com>
 Anatoly Borodin <anatoly.borodin@gmail.com>
+Anca Iordache <anca.iordache@docker.com>
 Anchal Agrawal <aagrawa4@illinois.edu>
 Anda Xu <anda.xu@docker.com>
 Anders Janmyr <anders@janmyr.com>
@@ -215,10 +219,12 @@ Benjamin Atkin <ben@benatkin.com>
 Benjamin Baker <Benjamin.baker@utexas.edu>
 Benjamin Boudreau <boudreau.benjamin@gmail.com>
 Benjamin Yolken <yolken@stripe.com>
+Benny Ng <benny.tpng@gmail.com>
 Benoit Chesneau <bchesneau@gmail.com>
 Bernerd Schaefer <bj.schaefer@gmail.com>
 Bernhard M. Wiedemann <bwiedemann@suse.de>
 Bert Goethals <bert@bertg.be>
+Bertrand Roussel <broussel@sierrawireless.com>
 Bevisy Zhang <binbin36520@gmail.com>
 Bharath Thiruveedula <bharath_ves@hotmail.com>
 Bhiraj Butala <abhiraj.butala@gmail.com>
@@ -231,6 +237,7 @@ Bingshen Wang <bingshen.wbs@alibaba-inc.com>
 Blake Geno <blakegeno@gmail.com>
 Boaz Shuster <ripcurld.github@gmail.com>
 bobby abbott <ttobbaybbob@gmail.com>
+Boqin Qin <bobbqqin@gmail.com>
 Boris Pruessmann <boris@pruessmann.org>
 Boshi Lian <farmer1992@gmail.com>
 Bouke Haarsma <bouke@webatoom.nl>
@@ -334,7 +341,7 @@ Chris Gibson <chris@chrisg.io>
 Chris Khoo <chris.khoo@gmail.com>
 Chris McKinnel <chris.mckinnel@tangentlabs.co.uk>
 Chris McKinnel <chrismckinnel@gmail.com>
-Chris Price <chris.price@docker.com>
+Chris Price <cprice@mirantis.com>
 Chris Seto <chriskseto@gmail.com>
 Chris Snow <chsnow123@gmail.com>
 Chris St. Pierre <chris.a.st.pierre@gmail.com>
@@ -361,7 +368,7 @@ Christopher Currie <codemonkey+github@gmail.com>
 Christopher Jones <tophj@linux.vnet.ibm.com>
 Christopher Latham <sudosurootdev@gmail.com>
 Christopher Rigor <crigor@gmail.com>
-Christy Perez <christy@linux.vnet.ibm.com>
+Christy Norman <christy@linux.vnet.ibm.com>
 Chun Chen <ramichen@tencent.com>
 Ciro S. Costa <ciro.costa@usp.br>
 Clayton Coleman <ccoleman@redhat.com>
@@ -381,8 +388,10 @@ Corey Farrell <git@cfware.com>
 Cory Forsyth <cory.forsyth@gmail.com>
 cressie176 <github@stephen-cresswell.net>
 CrimsonGlory <CrimsonGlory@users.noreply.github.com>
+Cristian Ariza <dev@cristianrz.com>
 Cristian Staretu <cristian.staretu@gmail.com>
 cristiano balducci <cristiano.balducci@gmail.com>
+Cristina Yenyxe Gonzalez Garcia <cristina.yenyxe@gmail.com>
 Cruceru Calin-Cristian <crucerucalincristian@gmail.com>
 CUI Wei <ghostplant@qq.com>
 Cyprian Gracz <cyprian.gracz@micro-jumbo.eu>
@@ -409,12 +418,14 @@ Dan Williams <me@deedubs.com>
 Dani Hodovic <dani.hodovic@gmail.com>
 Dani Louca <dani.louca@docker.com>
 Daniel Antlinger <d.antlinger@gmx.at>
+Daniel Black <daniel@linux.ibm.com>
 Daniel Dao <dqminh@cloudflare.com>
 Daniel Exner <dex@dragonslave.de>
 Daniel Farrell <dfarrell@redhat.com>
 Daniel Garcia <daniel@danielgarcia.info>
 Daniel Gasienica <daniel@gasienica.ch>
 Daniel Grunwell <mwgrunny@gmail.com>
+Daniel Helfand <helfand.4@gmail.com>
 Daniel Hiltgen <daniel.hiltgen@docker.com>
 Daniel J Walsh <dwalsh@redhat.com>
 Daniel Menet <membership@sontags.ch>
@@ -496,6 +507,7 @@ Derek McGowan <derek@mcgstyle.net>
 Deric Crago <deric.crago@gmail.com>
 Deshi Xiao <dxiao@redhat.com>
 devmeyster <arthurfbi@yahoo.com>
+Devon Estes <devon.estes@klarna.com>
 Devvyn Murphy <devvyn@devvyn.com>
 Dharmit Shah <shahdharmit@gmail.com>
 Dhawal Yogesh Bhanushali <dbhanushali@vmware.com>
@@ -545,7 +557,7 @@ Douglas Curtis <dougcurtis1@gmail.com>
 Dr Nic Williams <drnicwilliams@gmail.com>
 dragon788 <dragon788@users.noreply.github.com>
 Dražen Lučanin <kermit666@gmail.com>
-Drew Erny <drew.erny@docker.com>
+Drew Erny <derny@mirantis.com>
 Drew Hubl <drew.hubl@gmail.com>
 Dustin Sallings <dustin@spy.net>
 Ed Costello <epc@epcostello.com>
@@ -607,6 +619,7 @@ Evan Phoenix <evan@fallingsnow.net>
 Evan Wies <evan@neomantra.net>
 Evelyn Xu <evelynhsu21@gmail.com>
 Everett Toews <everett.toews@rackspace.com>
+Evgeniy Makhrov <e.makhrov@corp.badoo.com>
 Evgeny Shmarnev <shmarnev@gmail.com>
 Evgeny Vereshchagin <evvers@ya.ru>
 Ewa Czechowska <ewa@ai-traders.com>
@@ -653,6 +666,7 @@ Florian <FWirtz@users.noreply.github.com>
 Florian Klein <florian.klein@free.fr>
 Florian Maier <marsmensch@users.noreply.github.com>
 Florian Noeding <noeding@adobe.com>
+Florian Schmaus <flo@geekplace.eu>
 Florian Weingarten <flo@hackvalue.de>
 Florin Asavoaie <florin.asavoaie@gmail.com>
 Florin Patan <florinpatan@gmail.com>
@@ -689,7 +703,7 @@ Gareth Rushgrove <gareth@morethanseven.net>
 Garrett Barboza <garrett@garrettbarboza.com>
 Gary Schaetz <gary@schaetzkc.com>
 Gaurav <gaurav.gosec@gmail.com>
-gautam, prasanna <prasannagautam@gmail.com>
+Gaurav Singh <gaurav1086@gmail.com>
 Gaël PORTAY <gael.portay@savoirfairelinux.com>
 Genki Takiuchi <genki@s21g.com>
 GennadySpb <lipenkov@gmail.com>
@@ -720,7 +734,7 @@ Gopikannan Venugopalsamy <gopikannan.venugopalsamy@gmail.com>
 Gosuke Miyashita <gosukenator@gmail.com>
 Gou Rao <gou@portworx.com>
 Govinda Fichtner <govinda.fichtner@googlemail.com>
-Grant Millar <grant@cylo.io>
+Grant Millar <rid@cylo.io>
 Grant Reaber <grant.reaber@gmail.com>
 Graydon Hoare <graydon@pobox.com>
 Greg Fausak <greg@tacodata.com>
@@ -743,6 +757,7 @@ Haichao Yang <yang.haichao@zte.com.cn>
 haikuoliu <haikuo@amazon.com>
 Hakan Özler <hakan.ozler@kodcu.com>
 Hamish Hutchings <moredhel@aoeu.me>
+Hannes Ljungberg <hannes@5monkeys.se>
 Hans Kristian Flaatten <hans@starefossen.com>
 Hans Rødtang <hansrodtang@gmail.com>
 Hao Shu Wei <haosw@cn.ibm.com>
@@ -769,6 +784,8 @@ Hollie Teal <hollie@docker.com>
 Hong Xu <hong@topbug.net>
 Hongbin Lu <hongbin034@gmail.com>
 Hongxu Jia <hongxu.jia@windriver.com>
+Honza Pokorny <me@honza.ca>
+Hsing-Hui Hsu <hsinghui@amazon.com>
 hsinko <21551195@zju.edu.cn>
 Hu Keping <hukeping@huawei.com>
 Hu Tao <hutao@cn.fujitsu.com>
@@ -809,6 +826,7 @@ Ingo Gottwald <in.gottwald@gmail.com>
 Innovimax <innovimax@gmail.com>
 Isaac Dupree <antispam@idupree.com>
 Isabel Jimenez <contact.isabeljimenez@gmail.com>
+Isaiah Grace <irgkenya4@gmail.com>
 Isao Jonas <isao.jonas@gmail.com>
 Iskander Sharipov <quasilyte@gmail.com>
 Ivan Babrou <ibobrik@gmail.com>
@@ -824,6 +842,7 @@ Jacob Edelman <edelman.jd@gmail.com>
 Jacob Tomlinson <jacob@tom.linson.uk>
 Jacob Vallejo <jakeev@amazon.com>
 Jacob Wen <jian.w.wen@oracle.com>
+Jaime Cepeda <jcepedavillamayor@gmail.com>
 Jaivish Kothari <janonymous.codevulture@gmail.com>
 Jake Champlin <jake.champlin.27@gmail.com>
 Jake Moshenko <jake@devtable.com>
@@ -838,12 +857,13 @@ James Kyburz <james.kyburz@gmail.com>
 James Kyle <james@jameskyle.org>
 James Lal <james@lightsofapollo.com>
 James Mills <prologic@shortcircuit.net.au>
-James Nesbitt <james.nesbitt@wunderkraut.com>
+James Nesbitt <jnesbitt@mirantis.com>
 James Nugent <james@jen20.com>
 James Turnbull <james@lovedthanlost.net>
 James Watkins-Harvey <jwatkins@progi-media.com>
 Jamie Hannaford <jamie@limetree.org>
 Jamshid Afshar <jafshar@yahoo.com>
+Jan Chren <dev.rindeal@gmail.com>
 Jan Keromnes <janx@linux.com>
 Jan Koprowski <jan.koprowski@gmail.com>
 Jan Pazdziora <jpazdziora@redhat.com>
@@ -858,6 +878,7 @@ Jared Hocutt <jaredh@netapp.com>
 Jaroslaw Zabiello <hipertracker@gmail.com>
 jaseg <jaseg@jaseg.net>
 Jasmine Hegman <jasmine@jhegman.com>
+Jason A. Donenfeld <Jason@zx2c4.com>
 Jason Divock <jdivock@gmail.com>
 Jason Giedymin <jasong@apache.org>
 Jason Green <Jason.Green@AverInformatics.Com>
@@ -905,7 +926,7 @@ Jeroen Franse <jeroenfranse@gmail.com>
 Jeroen Jacobs <github@jeroenj.be>
 Jesse Dearing <jesse.dearing@gmail.com>
 Jesse Dubay <jesse@thefortytwo.net>
-Jessica Frazelle <acidburn@microsoft.com>
+Jessica Frazelle <jess@oxide.computer>
 Jezeniel Zapanta <jpzapanta22@gmail.com>
 Jhon Honce <jhonce@redhat.com>
 Ji.Zhilong <zhilongji@gmail.com>
@@ -913,6 +934,7 @@ Jian Liao <jliao@alauda.io>
 Jian Zhang <zhangjian.fnst@cn.fujitsu.com>
 Jiang Jinyang <jjyruby@gmail.com>
|
Dhawal Yogesh Bhanushali <dbhanushali@vmware.com>
|
||||||
@ -545,7 +557,7 @@ Douglas Curtis <dougcurtis1@gmail.com>
|
|||||||
Dr Nic Williams <drnicwilliams@gmail.com>
|
Dr Nic Williams <drnicwilliams@gmail.com>
|
||||||
dragon788 <dragon788@users.noreply.github.com>
|
dragon788 <dragon788@users.noreply.github.com>
|
||||||
Dražen Lučanin <kermit666@gmail.com>
|
Dražen Lučanin <kermit666@gmail.com>
|
||||||
Drew Erny <drew.erny@docker.com>
|
Drew Erny <derny@mirantis.com>
|
||||||
Drew Hubl <drew.hubl@gmail.com>
|
Drew Hubl <drew.hubl@gmail.com>
|
||||||
Dustin Sallings <dustin@spy.net>
|
Dustin Sallings <dustin@spy.net>
|
||||||
Ed Costello <epc@epcostello.com>
|
Ed Costello <epc@epcostello.com>
|
||||||
@ -607,6 +619,7 @@ Evan Phoenix <evan@fallingsnow.net>
|
|||||||
Evan Wies <evan@neomantra.net>
|
Evan Wies <evan@neomantra.net>
|
||||||
Evelyn Xu <evelynhsu21@gmail.com>
|
Evelyn Xu <evelynhsu21@gmail.com>
|
||||||
Everett Toews <everett.toews@rackspace.com>
|
Everett Toews <everett.toews@rackspace.com>
|
||||||
|
Evgeniy Makhrov <e.makhrov@corp.badoo.com>
|
||||||
Evgeny Shmarnev <shmarnev@gmail.com>
|
Evgeny Shmarnev <shmarnev@gmail.com>
|
||||||
Evgeny Vereshchagin <evvers@ya.ru>
|
Evgeny Vereshchagin <evvers@ya.ru>
|
||||||
Ewa Czechowska <ewa@ai-traders.com>
|
Ewa Czechowska <ewa@ai-traders.com>
|
@@ -653,6 +666,7 @@ Florian <FWirtz@users.noreply.github.com>
 Florian Klein <florian.klein@free.fr>
 Florian Maier <marsmensch@users.noreply.github.com>
 Florian Noeding <noeding@adobe.com>
+Florian Schmaus <flo@geekplace.eu>
 Florian Weingarten <flo@hackvalue.de>
 Florin Asavoaie <florin.asavoaie@gmail.com>
 Florin Patan <florinpatan@gmail.com>
@@ -689,7 +703,7 @@ Gareth Rushgrove <gareth@morethanseven.net>
 Garrett Barboza <garrett@garrettbarboza.com>
 Gary Schaetz <gary@schaetzkc.com>
 Gaurav <gaurav.gosec@gmail.com>
-gautam, prasanna <prasannagautam@gmail.com>
+Gaurav Singh <gaurav1086@gmail.com>
 Gaël PORTAY <gael.portay@savoirfairelinux.com>
 Genki Takiuchi <genki@s21g.com>
 GennadySpb <lipenkov@gmail.com>
@@ -720,7 +734,7 @@ Gopikannan Venugopalsamy <gopikannan.venugopalsamy@gmail.com>
 Gosuke Miyashita <gosukenator@gmail.com>
 Gou Rao <gou@portworx.com>
 Govinda Fichtner <govinda.fichtner@googlemail.com>
-Grant Millar <grant@cylo.io>
+Grant Millar <rid@cylo.io>
 Grant Reaber <grant.reaber@gmail.com>
 Graydon Hoare <graydon@pobox.com>
 Greg Fausak <greg@tacodata.com>
@@ -743,6 +757,7 @@ Haichao Yang <yang.haichao@zte.com.cn>
 haikuoliu <haikuo@amazon.com>
 Hakan Özler <hakan.ozler@kodcu.com>
 Hamish Hutchings <moredhel@aoeu.me>
+Hannes Ljungberg <hannes@5monkeys.se>
 Hans Kristian Flaatten <hans@starefossen.com>
 Hans Rødtang <hansrodtang@gmail.com>
 Hao Shu Wei <haosw@cn.ibm.com>
@@ -769,6 +784,8 @@ Hollie Teal <hollie@docker.com>
 Hong Xu <hong@topbug.net>
 Hongbin Lu <hongbin034@gmail.com>
 Hongxu Jia <hongxu.jia@windriver.com>
+Honza Pokorny <me@honza.ca>
+Hsing-Hui Hsu <hsinghui@amazon.com>
 hsinko <21551195@zju.edu.cn>
 Hu Keping <hukeping@huawei.com>
 Hu Tao <hutao@cn.fujitsu.com>
@@ -809,6 +826,7 @@ Ingo Gottwald <in.gottwald@gmail.com>
 Innovimax <innovimax@gmail.com>
 Isaac Dupree <antispam@idupree.com>
 Isabel Jimenez <contact.isabeljimenez@gmail.com>
+Isaiah Grace <irgkenya4@gmail.com>
 Isao Jonas <isao.jonas@gmail.com>
 Iskander Sharipov <quasilyte@gmail.com>
 Ivan Babrou <ibobrik@gmail.com>
@@ -824,6 +842,7 @@ Jacob Edelman <edelman.jd@gmail.com>
 Jacob Tomlinson <jacob@tom.linson.uk>
 Jacob Vallejo <jakeev@amazon.com>
 Jacob Wen <jian.w.wen@oracle.com>
+Jaime Cepeda <jcepedavillamayor@gmail.com>
 Jaivish Kothari <janonymous.codevulture@gmail.com>
 Jake Champlin <jake.champlin.27@gmail.com>
 Jake Moshenko <jake@devtable.com>
@@ -838,12 +857,13 @@ James Kyburz <james.kyburz@gmail.com>
 James Kyle <james@jameskyle.org>
 James Lal <james@lightsofapollo.com>
 James Mills <prologic@shortcircuit.net.au>
-James Nesbitt <james.nesbitt@wunderkraut.com>
+James Nesbitt <jnesbitt@mirantis.com>
 James Nugent <james@jen20.com>
 James Turnbull <james@lovedthanlost.net>
 James Watkins-Harvey <jwatkins@progi-media.com>
 Jamie Hannaford <jamie@limetree.org>
 Jamshid Afshar <jafshar@yahoo.com>
+Jan Chren <dev.rindeal@gmail.com>
 Jan Keromnes <janx@linux.com>
 Jan Koprowski <jan.koprowski@gmail.com>
 Jan Pazdziora <jpazdziora@redhat.com>
@@ -858,6 +878,7 @@ Jared Hocutt <jaredh@netapp.com>
 Jaroslaw Zabiello <hipertracker@gmail.com>
 jaseg <jaseg@jaseg.net>
 Jasmine Hegman <jasmine@jhegman.com>
+Jason A. Donenfeld <Jason@zx2c4.com>
 Jason Divock <jdivock@gmail.com>
 Jason Giedymin <jasong@apache.org>
 Jason Green <Jason.Green@AverInformatics.Com>
@@ -905,7 +926,7 @@ Jeroen Franse <jeroenfranse@gmail.com>
 Jeroen Jacobs <github@jeroenj.be>
 Jesse Dearing <jesse.dearing@gmail.com>
 Jesse Dubay <jesse@thefortytwo.net>
-Jessica Frazelle <acidburn@microsoft.com>
+Jessica Frazelle <jess@oxide.computer>
 Jezeniel Zapanta <jpzapanta22@gmail.com>
 Jhon Honce <jhonce@redhat.com>
 Ji.Zhilong <zhilongji@gmail.com>
@@ -913,6 +934,7 @@ Jian Liao <jliao@alauda.io>
 Jian Zhang <zhangjian.fnst@cn.fujitsu.com>
 Jiang Jinyang <jjyruby@gmail.com>
 Jie Luo <luo612@zju.edu.cn>
+Jie Ma <jienius@outlook.com>
 Jihyun Hwang <jhhwang@telcoware.com>
 Jilles Oldenbeuving <ojilles@gmail.com>
 Jim Alateras <jima@comware.com.au>
@@ -969,6 +991,7 @@ Jon Johnson <jonjohnson@google.com>
 Jon Surrell <jon.surrell@gmail.com>
 Jon Wedaman <jweede@gmail.com>
 Jonas Dohse <jonas@dohse.ch>
+Jonas Heinrich <Jonas@JonasHeinrich.com>
 Jonas Pfenniger <jonas@pfenniger.name>
 Jonathan A. Schweder <jonathanschweder@gmail.com>
 Jonathan A. Sternberg <jonathansternberg@gmail.com>
@@ -1018,6 +1041,8 @@ Julien Dubois <julien.dubois@gmail.com>
 Julien Kassar <github@kassisol.com>
 Julien Maitrehenry <julien.maitrehenry@me.com>
 Julien Pervillé <julien.perville@perfect-memory.com>
+Julien Pivotto <roidelapluie@inuits.eu>
+Julio Guerra <julio@sqreen.com>
 Julio Montes <imc.coder@gmail.com>
 Jun-Ru Chang <jrjang@gmail.com>
 Jussi Nummelin <jussi.nummelin@gmail.com>
@@ -1191,7 +1216,6 @@ Lukasz Zajaczkowski <Lukasz.Zajaczkowski@ts.fujitsu.com>
 Luke Marsden <me@lukemarsden.net>
 Lyn <energylyn@zju.edu.cn>
 Lynda O'Leary <lyndaoleary29@gmail.com>
-lzhfromutsc <lzhfromustc@gmail.com>
 Lénaïc Huard <lhuard@amadeus.com>
 Ma Müller <mueller-ma@users.noreply.github.com>
 Ma Shimiao <mashimiao.fnst@cn.fujitsu.com>
@@ -1285,6 +1309,7 @@ Matthieu Hauglustaine <matt.hauglustaine@gmail.com>
 Mattias Jernberg <nostrad@gmail.com>
 Mauricio Garavaglia <mauricio@medallia.com>
 mauriyouth <mauriyouth@gmail.com>
+Max Harmathy <max.harmathy@web.de>
 Max Shytikov <mshytikov@gmail.com>
 Maxim Fedchyshyn <sevmax@gmail.com>
 Maxim Ivanov <ivanov.maxim@gmail.com>
@@ -1342,6 +1367,7 @@ Miguel Morales <mimoralea@gmail.com>
 Mihai Borobocea <MihaiBorob@gmail.com>
 Mihuleacc Sergiu <mihuleac.sergiu@gmail.com>
 Mike Brown <brownwm@us.ibm.com>
+Mike Bush <mpbush@gmail.com>
 Mike Casas <mkcsas0@gmail.com>
 Mike Chelen <michael.chelen@gmail.com>
 Mike Danese <mikedanese@google.com>
@@ -1434,6 +1460,7 @@ Nik Nyby <nikolas@gnu.org>
 Nikhil Chawla <chawlanikhil24@gmail.com>
 NikolaMandic <mn080202@gmail.com>
 Nikolas Garofil <nikolas.garofil@uantwerpen.be>
+Nikolay Edigaryev <edigaryev@gmail.com>
 Nikolay Milovanov <nmil@itransformers.net>
 Nirmal Mehta <nirmalkmehta@gmail.com>
 Nishant Totla <nishanttotla@gmail.com>
@@ -1637,6 +1664,7 @@ Roland Kammerer <roland.kammerer@linbit.com>
 Roland Moriz <rmoriz@users.noreply.github.com>
 Roma Sokolov <sokolov.r.v@gmail.com>
 Roman Dudin <katrmr@gmail.com>
+Roman Mazur <roman@balena.io>
 Roman Strashkin <roman.strashkin@gmail.com>
 Ron Smits <ron.smits@gmail.com>
 Ron Williams <ron.a.williams@gmail.com>
@@ -1793,6 +1821,7 @@ Srini Brahmaroutu <srbrahma@us.ibm.com>
 Srinivasan Srivatsan <srinivasan.srivatsan@hpe.com>
 Staf Wagemakers <staf@wagemakers.be>
 Stanislav Bondarenko <stanislav.bondarenko@gmail.com>
+Stanislav Levin <slev@altlinux.org>
 Steeve Morin <steeve.morin@gmail.com>
 Stefan Berger <stefanb@linux.vnet.ibm.com>
 Stefan J. Wernli <swernli@microsoft.com>
@@ -1804,7 +1833,7 @@ Stefan Weil <sw@weilnetz.de>
 Stephan Spindler <shutefan@gmail.com>
 Stephen Benjamin <stephen@redhat.com>
 Stephen Crosby <stevecrozz@gmail.com>
-Stephen Day <stephen.day@docker.com>
+Stephen Day <stevvooe@gmail.com>
 Stephen Drake <stephen@xenolith.net>
 Stephen Rust <srust@blockbridge.com>
 Steve Desmond <steve@vtsv.ca>
@@ -1875,6 +1904,7 @@ Tianyi Wang <capkurmagati@gmail.com>
 Tibor Vass <teabee89@gmail.com>
 Tiffany Jernigan <tiffany.f.j@gmail.com>
 Tiffany Low <tiffany@box.com>
+Till Wegmüller <toasterson@gmail.com>
 Tim <elatllat@gmail.com>
 Tim Bart <tim@fewagainstmany.com>
 Tim Bosse <taim@bosboot.org>
@@ -1927,7 +1957,7 @@ Tony Miller <mcfiredrill@gmail.com>
 toogley <toogley@mailbox.org>
 Torstein Husebø <torstein@huseboe.net>
 Tõnis Tiigi <tonistiigi@gmail.com>
-tpng <benny.tpng@gmail.com>
+Trace Andreason <tandreason@gmail.com>
 tracylihui <793912329@qq.com>
 Trapier Marshall <trapier.marshall@docker.com>
 Travis Cline <travis.cline@gmail.com>
@@ -1950,6 +1980,7 @@ Utz Bacher <utz.bacher@de.ibm.com>
 vagrant <vagrant@ubuntu-14.04-amd64-vbox>
 Vaidas Jablonskis <jablonskis@gmail.com>
 vanderliang <lansheng@meili-inc.com>
+Velko Ivanov <vivanov@deeperplane.com>
 Veres Lajos <vlajos@gmail.com>
 Victor Algaze <valgaze@gmail.com>
 Victor Coisne <victor.coisne@dotcloud.com>
@@ -1961,12 +1992,13 @@ Victor Palma <palma.victor@gmail.com>
 Victor Vieux <victor.vieux@docker.com>
 Victoria Bialas <victoria.bialas@docker.com>
 Vijaya Kumar K <vijayak@caviumnetworks.com>
-Vikram bir Singh <vikrambir.singh@docker.com>
+Vikram bir Singh <vsingh@mirantis.com>
 Viktor Stanchev <me@viktorstanchev.com>
 Viktor Vojnovski <viktor.vojnovski@amadeus.com>
 VinayRaghavanKS <raghavan.vinay@gmail.com>
 Vincent Batts <vbatts@redhat.com>
 Vincent Bernat <Vincent.Bernat@exoscale.ch>
+Vincent Boulineau <vincent.boulineau@datadoghq.com>
 Vincent Demeester <vincent.demeester@docker.com>
 Vincent Giersch <vincent.giersch@ovh.net>
 Vincent Mayers <vincent.mayers@inbloom.org>
@@ -1997,6 +2029,8 @@ Wang Long <long.wanglong@huawei.com>
 Wang Ping <present.wp@icloud.com>
 Wang Xing <hzwangxing@corp.netease.com>
 Wang Yuexiao <wang.yuexiao@zte.com.cn>
+Wang Yumu <37442693@qq.com>
+wanghuaiqing <wanghuaiqing@loongson.cn>
 Ward Vandewege <ward@jhvc.com>
 WarheadsSE <max@warheads.net>
 Wassim Dhif <wassimdhif@gmail.com>
@@ -2013,6 +2047,7 @@ Wen Cheng Ma <wenchma@cn.ibm.com>
 Wendel Fleming <wfleming@usc.edu>
 Wenjun Tang <tangwj2@lenovo.com>
 Wenkai Yin <yinw@vmware.com>
+wenlxie <wenlxie@ebay.com>
 Wentao Zhang <zhangwentao234@huawei.com>
 Wenxuan Zhao <viz@linux.com>
 Wenyu You <21551128@zju.edu.cn>
@@ -2030,6 +2065,8 @@ William Hubbs <w.d.hubbs@gmail.com>
 William Martin <wmartin@pivotal.io>
 William Riancho <wr.wllm@gmail.com>
 William Thurston <thurstw@amazon.com>
+Wilson Júnior <wilsonpjunior@gmail.com>
+Wing-Kam Wong <wingkwong.code@gmail.com>
 WiseTrem <shepelyov.g@gmail.com>
 Wolfgang Powisch <powo@powo.priv.at>
 Wonjun Kim <wonjun.kim@navercorp.com>
@@ -2039,6 +2076,7 @@ Xianglin Gao <xlgao@zju.edu.cn>
 Xianlu Bird <xianlubird@gmail.com>
 Xiao YongBiao <xyb4638@gmail.com>
 XiaoBing Jiang <s7v7nislands@gmail.com>
+Xiaodong Liu <liuxiaodong@loongson.cn>
 Xiaodong Zhang <a4012017@sina.com>
 Xiaoxi He <xxhe@alauda.io>
 Xiaoxu Chen <chenxiaoxu14@otcaix.iscas.ac.cn>
@@ -2109,6 +2147,7 @@ Zhenan Ye <21551168@zju.edu.cn>
 zhenghenghuo <zhenghenghuo@zju.edu.cn>
 Zhenhai Gao <gaozh1988@live.com>
 Zhenkun Bi <bi.zhenkun@zte.com.cn>
+zhipengzuo <zuozhipeng@baidu.com>
 Zhou Hao <zhouhao@cn.fujitsu.com>
 Zhoulin Xie <zhoulin.xie@daocloud.io>
 Zhu Guihua <zhugh.fnst@cn.fujitsu.com>
@@ -2129,6 +2168,7 @@ Zunayed Ali <zunayed@gmail.com>
 Álvaro Lázaro <alvaro.lazaro.g@gmail.com>
 Átila Camurça Alves <camurca.home@gmail.com>
 尹吉峰 <jifeng.yin@gmail.com>
+屈骏 <qujun@tiduyun.com>
 徐俊杰 <paco.xu@daocloud.io>
 慕陶 <jihui.xjh@alibaba-inc.com>
 搏通 <yufeng.pyf@alibaba-inc.com>
24  vendor/github.com/docker/docker/api/swagger.yaml generated vendored
@@ -3306,6 +3306,22 @@ definitions:
               type: "string"
             example:
               - "CAP_NET_RAW"
+          Ulimits:
+            description: |
+              A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`"
+            type: "array"
+            items:
+              type: "object"
+              properties:
+                Name:
+                  description: "Name of ulimit"
+                  type: "string"
+                Soft:
+                  description: "Soft limit"
+                  type: "integer"
+                Hard:
+                  description: "Hard limit"
+                  type: "integer"
       NetworkAttachmentSpec:
         description: |
           Read-only spec type for non-swarm containers attached to swarm overlay
@@ -4590,7 +4606,7 @@ definitions:
         example: 4
       MemTotal:
         description: |
-          Total amount of physical memory available on the host, in kilobytes (kB).
+          Total amount of physical memory available on the host, in bytes.
         type: "integer"
         format: "int64"
         example: 2095882240
@@ -6907,7 +6923,7 @@ paths:
           type: "string"
         - name: "v"
           in: "query"
-          description: "Remove the volumes associated with the container."
+          description: "Remove anonymous volumes associated with the container."
          type: "boolean"
           default: false
         - name: "force"
@@ -8007,7 +8023,7 @@ paths:
             API-Version:
               type: "string"
               description: "Max API Version the server supports"
-            BuildKit-Version:
+            Builder-Version:
               type: "string"
               description: "Default version of docker image builder"
             Docker-Experimental:
@@ -8046,7 +8062,7 @@ paths:
             API-Version:
               type: "string"
               description: "Max API Version the server supports"
-            BuildKit-Version:
+            Builder-Version:
               type: "string"
               description: "Default version of docker image builder"
             Docker-Experimental:
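The `Builder-Version` header documented above is surfaced by the Go client as the `BuilderVersion` field of the Ping response. A minimal sketch of checking which default builder a daemon advertises, assuming a daemon reachable through the usual `DOCKER_*` environment variables:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	// FromEnv honours DOCKER_HOST, DOCKER_API_VERSION, DOCKER_CERT_PATH and DOCKER_TLS_VERIFY.
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Ping reads the /_ping response headers, including Builder-Version.
	ping, err := cli.Ping(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	if ping.BuilderVersion == types.BuilderBuildKit {
		fmt.Println("daemon defaults to BuildKit")
	} else {
		fmt.Println("daemon defaults to the legacy builder")
	}
}
```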
2  vendor/github.com/docker/docker/api/types/mount/mount.go generated vendored
@@ -113,7 +113,7 @@ type TmpfsOptions struct {
 	// TODO(stevvooe): There are several more tmpfs flags, specified in the
 	// daemon, that are accepted. Only the most basic are added for now.
 	//
-	// From docker/docker/pkg/mount/flags.go:
+	// From https://github.com/moby/sys/blob/mount/v0.1.1/mount/flags.go#L47-L56
 	//
 	// var validFlags = map[string]bool{
 	// 	"": true,
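For reference, `TmpfsOptions` is what callers fill in when declaring a tmpfs mount on a container or service. A minimal sketch with illustrative values (the target path and size are not taken from this commit):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/mount"
)

func main() {
	// A tmpfs mount at /run/cache, capped at 64 MiB and private to the owner.
	m := mount.Mount{
		Type:   mount.TypeTmpfs,
		Target: "/run/cache",
		TmpfsOptions: &mount.TmpfsOptions{
			SizeBytes: 64 * 1024 * 1024, // corresponds to the tmpfs "size" flag
			Mode:      0o700,            // corresponds to the tmpfs "mode" flag
		},
	}
	fmt.Printf("%+v\n", m)
}
```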
94  vendor/github.com/docker/docker/api/types/seccomp.go generated vendored
@ -1,94 +0,0 @@
|
|||||||
package types // import "github.com/docker/docker/api/types"
|
|
||||||
|
|
||||||
// Seccomp represents the config for a seccomp profile for syscall restriction.
|
|
||||||
type Seccomp struct {
|
|
||||||
DefaultAction Action `json:"defaultAction"`
|
|
||||||
// Architectures is kept to maintain backward compatibility with the old
|
|
||||||
// seccomp profile.
|
|
||||||
Architectures []Arch `json:"architectures,omitempty"`
|
|
||||||
ArchMap []Architecture `json:"archMap,omitempty"`
|
|
||||||
Syscalls []*Syscall `json:"syscalls"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Architecture is used to represent a specific architecture
|
|
||||||
// and its sub-architectures
|
|
||||||
type Architecture struct {
|
|
||||||
Arch Arch `json:"architecture"`
|
|
||||||
SubArches []Arch `json:"subArchitectures"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Arch used for architectures
|
|
||||||
type Arch string
|
|
||||||
|
|
||||||
// Additional architectures permitted to be used for system calls
|
|
||||||
// By default only the native architecture of the kernel is permitted
|
|
||||||
const (
|
|
||||||
ArchX86 Arch = "SCMP_ARCH_X86"
|
|
||||||
ArchX86_64 Arch = "SCMP_ARCH_X86_64"
|
|
||||||
ArchX32 Arch = "SCMP_ARCH_X32"
|
|
||||||
ArchARM Arch = "SCMP_ARCH_ARM"
|
|
||||||
ArchAARCH64 Arch = "SCMP_ARCH_AARCH64"
|
|
||||||
ArchMIPS Arch = "SCMP_ARCH_MIPS"
|
|
||||||
ArchMIPS64 Arch = "SCMP_ARCH_MIPS64"
|
|
||||||
ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32"
|
|
||||||
ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL"
|
|
||||||
ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64"
|
|
||||||
ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32"
|
|
||||||
ArchPPC Arch = "SCMP_ARCH_PPC"
|
|
||||||
ArchPPC64 Arch = "SCMP_ARCH_PPC64"
|
|
||||||
ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE"
|
|
||||||
ArchS390 Arch = "SCMP_ARCH_S390"
|
|
||||||
ArchS390X Arch = "SCMP_ARCH_S390X"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Action taken upon Seccomp rule match
|
|
||||||
type Action string
|
|
||||||
|
|
||||||
// Define actions for Seccomp rules
|
|
||||||
const (
|
|
||||||
ActKill Action = "SCMP_ACT_KILL"
|
|
||||||
ActTrap Action = "SCMP_ACT_TRAP"
|
|
||||||
ActErrno Action = "SCMP_ACT_ERRNO"
|
|
||||||
ActTrace Action = "SCMP_ACT_TRACE"
|
|
||||||
ActAllow Action = "SCMP_ACT_ALLOW"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Operator used to match syscall arguments in Seccomp
|
|
||||||
type Operator string
|
|
||||||
|
|
||||||
// Define operators for syscall arguments in Seccomp
|
|
||||||
const (
|
|
||||||
OpNotEqual Operator = "SCMP_CMP_NE"
|
|
||||||
OpLessThan Operator = "SCMP_CMP_LT"
|
|
||||||
OpLessEqual Operator = "SCMP_CMP_LE"
|
|
||||||
OpEqualTo Operator = "SCMP_CMP_EQ"
|
|
||||||
OpGreaterEqual Operator = "SCMP_CMP_GE"
|
|
||||||
OpGreaterThan Operator = "SCMP_CMP_GT"
|
|
||||||
OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Arg used for matching specific syscall arguments in Seccomp
|
|
||||||
type Arg struct {
|
|
||||||
Index uint `json:"index"`
|
|
||||||
Value uint64 `json:"value"`
|
|
||||||
ValueTwo uint64 `json:"valueTwo"`
|
|
||||||
Op Operator `json:"op"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Filter is used to conditionally apply Seccomp rules
|
|
||||||
type Filter struct {
|
|
||||||
Caps []string `json:"caps,omitempty"`
|
|
||||||
Arches []string `json:"arches,omitempty"`
|
|
||||||
MinKernel string `json:"minKernel,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Syscall is used to match a group of syscalls in Seccomp
|
|
||||||
type Syscall struct {
|
|
||||||
Name string `json:"name,omitempty"`
|
|
||||||
Names []string `json:"names,omitempty"`
|
|
||||||
Action Action `json:"action"`
|
|
||||||
Args []*Arg `json:"args"`
|
|
||||||
Comment string `json:"comment"`
|
|
||||||
Includes Filter `json:"includes"`
|
|
||||||
Excludes Filter `json:"excludes"`
|
|
||||||
}
|
|
2  vendor/github.com/docker/docker/api/types/swarm/container.go generated vendored
@@ -5,6 +5,7 @@ import (
 
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/mount"
+	"github.com/docker/go-units"
 )
 
 // DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf)
@@ -75,4 +76,5 @@ type ContainerSpec struct {
 	Sysctls        map[string]string `json:",omitempty"`
 	CapabilityAdd  []string          `json:",omitempty"`
 	CapabilityDrop []string          `json:",omitempty"`
+	Ulimits        []*units.Ulimit   `json:",omitempty"`
 }
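The new `Ulimits` field carries the same `{"Name", "Soft", "Hard"}` triples described in the swagger change above. A minimal sketch of populating it on a service container spec (the image name and limit values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
	units "github.com/docker/go-units"
)

func main() {
	// Raise the open-file limit for tasks created from this spec.
	spec := swarm.ContainerSpec{
		Image: "nginx:alpine",
		Ulimits: []*units.Ulimit{
			{Name: "nofile", Soft: 1024, Hard: 2048},
		},
	}
	fmt.Printf("%+v\n", *spec.Ulimits[0])
}
```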
4  vendor/github.com/docker/docker/client/client.go generated vendored
@@ -7,8 +7,8 @@ https://docs.docker.com/engine/reference/api/
 Usage
 
 You use the library by creating a client object and calling methods on it. The
-client can be created either from environment variables with NewEnvClient, or
-configured manually with NewClient.
+client can be created either from environment variables with NewClientWithOpts(client.FromEnv),
+or configured manually with NewClient().
 
 For example, to list running containers (the equivalent of "docker ps"):
 
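A minimal sketch of the pattern the doc comment now describes, assuming a daemon reachable through the standard `DOCKER_*` environment variables (the version-negotiation option is an extra convenience, not part of the quoted comment):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	// Create the client from the environment and negotiate the API version with the daemon.
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Equivalent of "docker ps": list the running containers.
	containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range containers {
		fmt.Println(c.ID[:12], c.Image)
	}
}
```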
3  vendor/github.com/docker/docker/client/request.go generated vendored
@@ -134,8 +134,7 @@ func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResp
 
 	// Don't decorate context sentinel errors; users may be comparing to
 	// them directly.
-	switch err {
-	case context.Canceled, context.DeadlineExceeded:
+	if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
 		return serverResp, err
 	}
 
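The switch from an exact `switch err` comparison to `errors.Is` matters once callers wrap errors. A small standalone illustration of the difference:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

func main() {
	// A wrapped cancellation error, as returned by many callers today.
	err := fmt.Errorf("request failed: %w", context.Canceled)

	// An exact comparison misses the wrapped sentinel...
	fmt.Println(err == context.Canceled) // false

	// ...while errors.Is unwraps and still recognises it.
	fmt.Println(errors.Is(err, context.Canceled)) // true
}
```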
2  vendor/github.com/docker/docker/pkg/system/rm.go generated vendored
@@ -1,3 +1,5 @@
+// +build !darwin,!windows
+
 package system // import "github.com/docker/docker/pkg/system"
 
 import (
6  vendor/github.com/docker/docker/pkg/system/rm_windows.go generated vendored Normal file
@@ -0,0 +1,6 @@
+package system
+
+import "os"
+
+// EnsureRemoveAll is an alias to os.RemoveAll on Windows
+var EnsureRemoveAll = os.RemoveAll
@@ -1,3 +1,5 @@
+// +build freebsd netbsd
+
 package system // import "github.com/docker/docker/pkg/system"
 
 import "syscall"
13  vendor/github.com/docker/docker/pkg/system/stat_solaris.go generated vendored Normal file
@@ -0,0 +1,13 @@
+package system // import "github.com/docker/docker/pkg/system"
+
+import "syscall"
+
+// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+	return &StatT{size: s.Size,
+		mode: s.Mode,
+		uid:  s.Uid,
+		gid:  s.Gid,
+		rdev: s.Rdev,
+		mtim: s.Mtim}, nil
+}
21  vendor/github.com/docker/docker/pkg/tarsum/builder_context.go generated vendored
@ -1,21 +0,0 @@
|
|||||||
package tarsum // import "github.com/docker/docker/pkg/tarsum"
|
|
||||||
|
|
||||||
// BuilderContext is an interface extending TarSum by adding the Remove method.
|
|
||||||
// In general there was concern about adding this method to TarSum itself
|
|
||||||
// so instead it is being added just to "BuilderContext" which will then
|
|
||||||
// only be used during the .dockerignore file processing
|
|
||||||
// - see builder/evaluator.go
|
|
||||||
type BuilderContext interface {
|
|
||||||
TarSum
|
|
||||||
Remove(string)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bc *tarSum) Remove(filename string) {
|
|
||||||
for i, fis := range bc.sums {
|
|
||||||
if fis.Name() == filename {
|
|
||||||
bc.sums = append(bc.sums[:i], bc.sums[i+1:]...)
|
|
||||||
// Note, we don't just return because there could be
|
|
||||||
// more than one with this name
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
133  vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go generated vendored
@ -1,133 +0,0 @@
|
|||||||
package tarsum // import "github.com/docker/docker/pkg/tarsum"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"runtime"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// FileInfoSumInterface provides an interface for accessing file checksum
|
|
||||||
// information within a tar file. This info is accessed through interface
|
|
||||||
// so the actual name and sum cannot be melded with.
|
|
||||||
type FileInfoSumInterface interface {
|
|
||||||
// File name
|
|
||||||
Name() string
|
|
||||||
// Checksum of this particular file and its headers
|
|
||||||
Sum() string
|
|
||||||
// Position of file in the tar
|
|
||||||
Pos() int64
|
|
||||||
}
|
|
||||||
|
|
||||||
type fileInfoSum struct {
|
|
||||||
name string
|
|
||||||
sum string
|
|
||||||
pos int64
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fis fileInfoSum) Name() string {
|
|
||||||
return fis.name
|
|
||||||
}
|
|
||||||
func (fis fileInfoSum) Sum() string {
|
|
||||||
return fis.sum
|
|
||||||
}
|
|
||||||
func (fis fileInfoSum) Pos() int64 {
|
|
||||||
return fis.pos
|
|
||||||
}
|
|
||||||
|
|
||||||
// FileInfoSums provides a list of FileInfoSumInterfaces.
|
|
||||||
type FileInfoSums []FileInfoSumInterface
|
|
||||||
|
|
||||||
// GetFile returns the first FileInfoSumInterface with a matching name.
|
|
||||||
func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface {
|
|
||||||
// We do case insensitive matching on Windows as c:\APP and c:\app are
|
|
||||||
// the same. See issue #33107.
|
|
||||||
for i := range fis {
|
|
||||||
if (runtime.GOOS == "windows" && strings.EqualFold(fis[i].Name(), name)) ||
|
|
||||||
(runtime.GOOS != "windows" && fis[i].Name() == name) {
|
|
||||||
return fis[i]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetAllFile returns a FileInfoSums with all matching names.
|
|
||||||
func (fis FileInfoSums) GetAllFile(name string) FileInfoSums {
|
|
||||||
f := FileInfoSums{}
|
|
||||||
for i := range fis {
|
|
||||||
if fis[i].Name() == name {
|
|
||||||
f = append(f, fis[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return f
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetDuplicatePaths returns a FileInfoSums with all duplicated paths.
|
|
||||||
func (fis FileInfoSums) GetDuplicatePaths() (dups FileInfoSums) {
|
|
||||||
seen := make(map[string]int, len(fis)) // allocate earl. no need to grow this map.
|
|
||||||
for i := range fis {
|
|
||||||
f := fis[i]
|
|
||||||
if _, ok := seen[f.Name()]; ok {
|
|
||||||
dups = append(dups, f)
|
|
||||||
} else {
|
|
||||||
seen[f.Name()] = 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return dups
|
|
||||||
}
|
|
||||||
|
|
||||||
// Len returns the size of the FileInfoSums.
|
|
||||||
func (fis FileInfoSums) Len() int { return len(fis) }
|
|
||||||
|
|
||||||
// Swap swaps two FileInfoSum values if a FileInfoSums list.
|
|
||||||
func (fis FileInfoSums) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] }
|
|
||||||
|
|
||||||
// SortByPos sorts FileInfoSums content by position.
|
|
||||||
func (fis FileInfoSums) SortByPos() {
|
|
||||||
sort.Sort(byPos{fis})
|
|
||||||
}
|
|
||||||
|
|
||||||
// SortByNames sorts FileInfoSums content by name.
|
|
||||||
func (fis FileInfoSums) SortByNames() {
|
|
||||||
sort.Sort(byName{fis})
|
|
||||||
}
|
|
||||||
|
|
||||||
// SortBySums sorts FileInfoSums content by sums.
|
|
||||||
func (fis FileInfoSums) SortBySums() {
|
|
||||||
dups := fis.GetDuplicatePaths()
|
|
||||||
if len(dups) > 0 {
|
|
||||||
sort.Sort(bySum{fis, dups})
|
|
||||||
} else {
|
|
||||||
sort.Sort(bySum{fis, nil})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// byName is a sort.Sort helper for sorting by file names.
|
|
||||||
// If names are the same, order them by their appearance in the tar archive
|
|
||||||
type byName struct{ FileInfoSums }
|
|
||||||
|
|
||||||
func (bn byName) Less(i, j int) bool {
|
|
||||||
if bn.FileInfoSums[i].Name() == bn.FileInfoSums[j].Name() {
|
|
||||||
return bn.FileInfoSums[i].Pos() < bn.FileInfoSums[j].Pos()
|
|
||||||
}
|
|
||||||
return bn.FileInfoSums[i].Name() < bn.FileInfoSums[j].Name()
|
|
||||||
}
|
|
||||||
|
|
||||||
// bySum is a sort.Sort helper for sorting by the sums of all the fileinfos in the tar archive
|
|
||||||
type bySum struct {
|
|
||||||
FileInfoSums
|
|
||||||
dups FileInfoSums
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bs bySum) Less(i, j int) bool {
|
|
||||||
if bs.dups != nil && bs.FileInfoSums[i].Name() == bs.FileInfoSums[j].Name() {
|
|
||||||
return bs.FileInfoSums[i].Pos() < bs.FileInfoSums[j].Pos()
|
|
||||||
}
|
|
||||||
return bs.FileInfoSums[i].Sum() < bs.FileInfoSums[j].Sum()
|
|
||||||
}
|
|
||||||
|
|
||||||
// byPos is a sort.Sort helper for sorting by the sums of all the fileinfos by their original order
|
|
||||||
type byPos struct{ FileInfoSums }
|
|
||||||
|
|
||||||
func (bp byPos) Less(i, j int) bool {
|
|
||||||
return bp.FileInfoSums[i].Pos() < bp.FileInfoSums[j].Pos()
|
|
||||||
}
|
|
301  vendor/github.com/docker/docker/pkg/tarsum/tarsum.go generated vendored
@ -1,301 +0,0 @@
|
|||||||
// Package tarsum provides algorithms to perform checksum calculation on
|
|
||||||
// filesystem layers.
|
|
||||||
//
|
|
||||||
// The transportation of filesystems, regarding Docker, is done with tar(1)
|
|
||||||
// archives. There are a variety of tar serialization formats [2], and a key
|
|
||||||
// concern here is ensuring a repeatable checksum given a set of inputs from a
|
|
||||||
// generic tar archive. Types of transportation include distribution to and from a
|
|
||||||
// registry endpoint, saving and loading through commands or Docker daemon APIs,
|
|
||||||
// transferring the build context from client to Docker daemon, and committing the
|
|
||||||
// filesystem of a container to become an image.
|
|
||||||
//
|
|
||||||
// As tar archives are used for transit, but not preserved in many situations, the
|
|
||||||
// focus of the algorithm is to ensure the integrity of the preserved filesystem,
|
|
||||||
// while maintaining a deterministic accountability. This includes neither
|
|
||||||
// constraining the ordering or manipulation of the files during the creation or
|
|
||||||
// unpacking of the archive, nor include additional metadata state about the file
|
|
||||||
// system attributes.
|
|
||||||
package tarsum // import "github.com/docker/docker/pkg/tarsum"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"archive/tar"
|
|
||||||
"bytes"
|
|
||||||
"compress/gzip"
|
|
||||||
"crypto"
|
|
||||||
"crypto/sha256"
|
|
||||||
"encoding/hex"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"hash"
|
|
||||||
"io"
|
|
||||||
"path"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
buf8K = 8 * 1024
|
|
||||||
buf16K = 16 * 1024
|
|
||||||
buf32K = 32 * 1024
|
|
||||||
)
|
|
||||||
|
|
||||||
// NewTarSum creates a new interface for calculating a fixed time checksum of a
|
|
||||||
// tar archive.
|
|
||||||
//
|
|
||||||
// This is used for calculating checksums of layers of an image, in some cases
|
|
||||||
// including the byte payload of the image's json metadata as well, and for
|
|
||||||
// calculating the checksums for buildcache.
|
|
||||||
func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) {
|
|
||||||
return NewTarSumHash(r, dc, v, DefaultTHash)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewTarSumHash creates a new TarSum, providing a THash to use rather than
|
|
||||||
// the DefaultTHash.
|
|
||||||
func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) {
|
|
||||||
headerSelector, err := getTarHeaderSelector(v)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash}
|
|
||||||
err = ts.initTarSum()
|
|
||||||
return ts, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewTarSumForLabel creates a new TarSum using the provided TarSum version+hash label.
|
|
||||||
func NewTarSumForLabel(r io.Reader, disableCompression bool, label string) (TarSum, error) {
|
|
||||||
parts := strings.SplitN(label, "+", 2)
|
|
||||||
if len(parts) != 2 {
|
|
||||||
return nil, errors.New("tarsum label string should be of the form: {tarsum_version}+{hash_name}")
|
|
||||||
}
|
|
||||||
|
|
||||||
versionName, hashName := parts[0], parts[1]
|
|
||||||
|
|
||||||
version, ok := tarSumVersionsByName[versionName]
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("unknown TarSum version name: %q", versionName)
|
|
||||||
}
|
|
||||||
|
|
||||||
hashConfig, ok := standardHashConfigs[hashName]
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("unknown TarSum hash name: %q", hashName)
|
|
||||||
}
|
|
||||||
|
|
||||||
tHash := NewTHash(hashConfig.name, hashConfig.hash.New)
|
|
||||||
|
|
||||||
return NewTarSumHash(r, disableCompression, version, tHash)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TarSum is the generic interface for calculating fixed time
|
|
||||||
// checksums of a tar archive.
|
|
||||||
type TarSum interface {
|
|
||||||
io.Reader
|
|
||||||
GetSums() FileInfoSums
|
|
||||||
Sum([]byte) string
|
|
||||||
Version() Version
|
|
||||||
Hash() THash
|
|
||||||
}
|
|
||||||
|
|
||||||
// tarSum struct is the structure for a Version0 checksum calculation.
|
|
||||||
type tarSum struct {
|
|
||||||
io.Reader
|
|
||||||
tarR *tar.Reader
|
|
||||||
tarW *tar.Writer
|
|
||||||
writer writeCloseFlusher
|
|
||||||
bufTar *bytes.Buffer
|
|
||||||
bufWriter *bytes.Buffer
|
|
||||||
bufData []byte
|
|
||||||
h hash.Hash
|
|
||||||
tHash THash
|
|
||||||
sums FileInfoSums
|
|
||||||
fileCounter int64
|
|
||||||
currentFile string
|
|
||||||
finished bool
|
|
||||||
first bool
|
|
||||||
DisableCompression bool // false by default. When false, the output gzip compressed.
|
|
||||||
tarSumVersion Version // this field is not exported so it can not be mutated during use
|
|
||||||
headerSelector tarHeaderSelector // handles selecting and ordering headers for files in the archive
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ts tarSum) Hash() THash {
|
|
||||||
return ts.tHash
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ts tarSum) Version() Version {
|
|
||||||
return ts.tarSumVersion
|
|
||||||
}
|
|
||||||
|
|
||||||
// THash provides a hash.Hash type generator and its name.
|
|
||||||
type THash interface {
|
|
||||||
Hash() hash.Hash
|
|
||||||
Name() string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewTHash is a convenience method for creating a THash.
|
|
||||||
func NewTHash(name string, h func() hash.Hash) THash {
|
|
||||||
return simpleTHash{n: name, h: h}
|
|
||||||
}
|
|
||||||
|
|
||||||
type tHashConfig struct {
|
|
||||||
name string
|
|
||||||
hash crypto.Hash
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
// NOTE: DO NOT include MD5 or SHA1, which are considered insecure.
|
|
||||||
standardHashConfigs = map[string]tHashConfig{
|
|
||||||
"sha256": {name: "sha256", hash: crypto.SHA256},
|
|
||||||
"sha512": {name: "sha512", hash: crypto.SHA512},
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
// DefaultTHash is default TarSum hashing algorithm - "sha256".
|
|
||||||
var DefaultTHash = NewTHash("sha256", sha256.New)
|
|
||||||
|
|
||||||
type simpleTHash struct {
|
|
||||||
n string
|
|
||||||
h func() hash.Hash
|
|
||||||
}
|
|
||||||
|
|
||||||
func (sth simpleTHash) Name() string { return sth.n }
|
|
||||||
func (sth simpleTHash) Hash() hash.Hash { return sth.h() }
|
|
||||||
|
|
||||||
func (ts *tarSum) encodeHeader(h *tar.Header) error {
|
|
||||||
for _, elem := range ts.headerSelector.selectHeaders(h) {
|
|
||||||
// Ignore these headers to be compatible with versions
|
|
||||||
// before go 1.10
|
|
||||||
if elem[0] == "gname" || elem[0] == "uname" {
|
|
||||||
elem[1] = ""
|
|
||||||
}
|
|
||||||
if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ts *tarSum) initTarSum() error {
|
|
||||||
ts.bufTar = bytes.NewBuffer([]byte{})
|
|
||||||
ts.bufWriter = bytes.NewBuffer([]byte{})
|
|
||||||
ts.tarR = tar.NewReader(ts.Reader)
|
|
||||||
ts.tarW = tar.NewWriter(ts.bufTar)
|
|
||||||
if !ts.DisableCompression {
|
|
||||||
ts.writer = gzip.NewWriter(ts.bufWriter)
|
|
||||||
} else {
|
|
||||||
ts.writer = &nopCloseFlusher{Writer: ts.bufWriter}
|
|
||||||
}
|
|
||||||
if ts.tHash == nil {
|
|
||||||
ts.tHash = DefaultTHash
|
|
||||||
}
|
|
||||||
ts.h = ts.tHash.Hash()
|
|
||||||
ts.h.Reset()
|
|
||||||
ts.first = true
|
|
||||||
ts.sums = FileInfoSums{}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ts *tarSum) Read(buf []byte) (int, error) {
|
|
||||||
if ts.finished {
|
|
||||||
return ts.bufWriter.Read(buf)
|
|
||||||
}
|
|
||||||
if len(ts.bufData) < len(buf) {
|
|
||||||
switch {
|
|
||||||
case len(buf) <= buf8K:
|
|
||||||
ts.bufData = make([]byte, buf8K)
|
|
||||||
case len(buf) <= buf16K:
|
|
||||||
ts.bufData = make([]byte, buf16K)
|
|
||||||
case len(buf) <= buf32K:
|
|
||||||
ts.bufData = make([]byte, buf32K)
|
|
||||||
default:
|
|
||||||
ts.bufData = make([]byte, len(buf))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
buf2 := ts.bufData[:len(buf)]
|
|
||||||
|
|
||||||
n, err := ts.tarR.Read(buf2)
|
|
||||||
if err != nil {
|
|
||||||
if err == io.EOF {
|
|
||||||
if _, err := ts.h.Write(buf2[:n]); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
if !ts.first {
|
|
||||||
ts.sums = append(ts.sums, fileInfoSum{name: ts.currentFile, sum: hex.EncodeToString(ts.h.Sum(nil)), pos: ts.fileCounter})
|
|
||||||
ts.fileCounter++
|
|
||||||
ts.h.Reset()
|
|
||||||
} else {
|
|
||||||
ts.first = false
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := ts.tarW.Write(buf2[:n]); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
currentHeader, err := ts.tarR.Next()
|
|
||||||
if err != nil {
|
|
||||||
if err == io.EOF {
|
|
||||||
if err := ts.tarW.Close(); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
if _, err := io.Copy(ts.writer, ts.bufTar); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
if err := ts.writer.Close(); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
ts.finished = true
|
|
||||||
return ts.bufWriter.Read(buf)
|
|
||||||
}
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
ts.currentFile = path.Join(".", path.Join("/", currentHeader.Name))
|
|
||||||
if err := ts.encodeHeader(currentHeader); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
if err := ts.tarW.WriteHeader(currentHeader); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := io.Copy(ts.writer, ts.bufTar); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
ts.writer.Flush()
|
|
||||||
|
|
||||||
return ts.bufWriter.Read(buf)
|
|
||||||
}
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Filling the hash buffer
|
|
||||||
if _, err = ts.h.Write(buf2[:n]); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Filling the tar writer
|
|
||||||
if _, err = ts.tarW.Write(buf2[:n]); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Filling the output writer
|
|
||||||
if _, err = io.Copy(ts.writer, ts.bufTar); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
ts.writer.Flush()
|
|
||||||
|
|
||||||
return ts.bufWriter.Read(buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ts *tarSum) Sum(extra []byte) string {
|
|
||||||
ts.sums.SortBySums()
|
|
||||||
h := ts.tHash.Hash()
|
|
||||||
if extra != nil {
|
|
||||||
h.Write(extra)
|
|
||||||
}
|
|
||||||
for _, fis := range ts.sums {
|
|
||||||
h.Write([]byte(fis.Sum()))
|
|
||||||
}
|
|
||||||
checksum := ts.Version().String() + "+" + ts.tHash.Name() + ":" + hex.EncodeToString(h.Sum(nil))
|
|
||||||
return checksum
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ts *tarSum) GetSums() FileInfoSums {
|
|
||||||
return ts.sums
|
|
||||||
}
|
|
230  vendor/github.com/docker/docker/pkg/tarsum/tarsum_spec.md generated vendored
@ -1,230 +0,0 @@
|
|||||||
page_title: TarSum checksum specification
|
|
||||||
page_description: Documentation for algorithms used in the TarSum checksum calculation
|
|
||||||
page_keywords: docker, checksum, validation, tarsum
|
|
||||||
|
|
||||||
# TarSum Checksum Specification
|
|
||||||
|
|
||||||
## Abstract
|
|
||||||
|
|
||||||
This document describes the algorithms used in performing the TarSum checksum
|
|
||||||
calculation on filesystem layers, the need for this method over existing
|
|
||||||
methods, and the versioning of this calculation.
|
|
||||||
|
|
||||||
## Warning
|
|
||||||
|
|
||||||
This checksum algorithm is for best-effort comparison of file trees with fuzzy logic.
|
|
||||||
|
|
||||||
This is _not_ a cryptographic attestation, and should not be considered secure.
|
|
||||||
|
|
||||||
## Introduction
|
|
||||||
|
|
||||||
The transportation of filesystems, regarding Docker, is done with tar(1)
|
|
||||||
archives. There are a variety of tar serialization formats [2], and a key
|
|
||||||
concern here is ensuring a repeatable checksum given a set of inputs from a
|
|
||||||
generic tar archive. Types of transportation include distribution to and from a
|
|
||||||
registry endpoint, saving and loading through commands or Docker daemon APIs,
|
|
||||||
transferring the build context from client to Docker daemon, and committing the
|
|
||||||
filesystem of a container to become an image.
|
|
||||||
|
|
||||||
As tar archives are used for transit, but not preserved in many situations, the
|
|
||||||
focus of the algorithm is to ensure the integrity of the preserved filesystem,
|
|
||||||
while maintaining a deterministic accountability. This includes neither
|
|
||||||
constraining the ordering or manipulation of the files during the creation or
|
|
||||||
unpacking of the archive, nor include additional metadata state about the file
|
|
||||||
system attributes.
|
|
||||||
|
|
||||||
## Intended Audience
|
|
||||||
|
|
||||||
This document is outlining the methods used for consistent checksum calculation
|
|
||||||
for filesystems transported via tar archives.
|
|
||||||
|
|
||||||
Auditing these methodologies is an open and iterative process. This document
|
|
||||||
should accommodate the review of source code. Ultimately, this document should
|
|
||||||
be the starting point of further refinements to the algorithm and its future
|
|
||||||
versions.
|
|
||||||
|
|
||||||
## Concept
|
|
||||||
|
|
||||||
The checksum mechanism must ensure the integrity and assurance of the
|
|
||||||
filesystem payload.
|
|
||||||
|
|
||||||
## Checksum Algorithm Profile
|
|
||||||
|
|
||||||
A checksum mechanism must define the following operations and attributes:
|
|
||||||
|
|
||||||
* Associated hashing cipher - used to checksum each file payload and attribute
|
|
||||||
information.
|
|
||||||
* Checksum list - each file of the filesystem archive has its checksum
|
|
||||||
calculated from the payload and attributes of the file. The final checksum is
|
|
||||||
calculated from this list, with specific ordering.
|
|
||||||
* Version - as the algorithm adapts to requirements, there are behaviors of the
|
|
||||||
algorithm to manage by versioning.
|
|
||||||
* Archive being calculated - the tar archive having its checksum calculated
|
|
||||||
|
|
||||||
## Elements of TarSum checksum
|
|
||||||
|
|
||||||
The calculated sum output is a text string. The elements included in the output
|
|
||||||
of the calculated sum comprise the information needed for validation of the sum
|
|
||||||
(TarSum version and hashing cipher used) and the expected checksum in hexadecimal
|
|
||||||
form.
|
|
||||||
|
|
||||||
There are two delimiters used:
|
|
||||||
* '+' separates TarSum version from hashing cipher
|
|
||||||
* ':' separates calculation mechanics from expected hash
|
|
||||||
|
|
||||||
Example:
|
|
||||||
|
|
||||||
```
|
|
||||||
"tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e"
|
|
||||||
| | \ |
|
|
||||||
| | \ |
|
|
||||||
|_version_|_cipher__|__ |
|
|
||||||
| \ |
|
|
||||||
|_calculation_mechanics_|______________________expected_sum_______________________|
|
|
||||||
```
|
|
||||||
|
|
||||||
## Versioning
|
|
||||||
|
|
||||||
Versioning was introduced [0] to accommodate differences in calculation needed,
|
|
||||||
and ability to maintain reverse compatibility.
|
|
||||||
|
|
||||||
The general algorithm will be describe further in the 'Calculation'.
|
|
||||||
|
|
||||||
### Version0
|
|
||||||
|
|
||||||
This is the initial version of TarSum.
|
|
||||||
|
|
||||||
Its element in the TarSum checksum string is `tarsum`.
|
|
||||||
|
|
||||||
### Version1
|
|
||||||
|
|
||||||
Its element in the TarSum checksum is `tarsum.v1`.
|
|
||||||
|
|
||||||
The notable changes in this version:
|
|
||||||
* Exclusion of file `mtime` from the file information headers, in each file
|
|
||||||
checksum calculation
|
|
||||||
* Inclusion of extended attributes (`xattrs`. Also seen as `SCHILY.xattr.` prefixed Pax
|
|
||||||
tar file info headers) keys and values in each file checksum calculation
|
|
||||||
|
|
||||||
### VersionDev
|
|
||||||
|
|
||||||
*Do not use unless validating refinements to the checksum algorithm*
|
|
||||||
|
|
||||||
Its element in the TarSum checksum is `tarsum.dev`.
|
|
||||||
|
|
||||||
This is a floating place holder for a next version and grounds for testing
|
|
||||||
changes. The methods used for calculation are subject to change without notice,
|
|
||||||
and this version is for testing and not for production use.
|
|
||||||
|
|
||||||
## Ciphers
|
|
||||||
|
|
||||||
The official default and standard hashing cipher used in the calculation mechanic
|
|
||||||
is `sha256`. This refers to SHA256 hash algorithm as defined in FIPS 180-4.
|
|
||||||
|
|
||||||
Though the TarSum algorithm itself is not exclusively bound to the single
|
|
||||||
hashing cipher `sha256`, support for alternate hashing ciphers was later added
|
|
||||||
[1]. Use cases for alternate cipher could include future-proofing TarSum
|
|
||||||
checksum format and using faster cipher hashes for tar filesystem checksums.
|
|
||||||
|
|
||||||
## Calculation
|
|
||||||
|
|
||||||
### Requirement
|
|
||||||
|
|
||||||
As mentioned earlier, the calculation is such that it takes into consideration
|
|
||||||
the lifecycle of the tar archive. In that the tar archive is not an immutable,
|
|
||||||
permanent artifact. Otherwise options like relying on a known hashing cipher
|
|
||||||
checksum of the archive itself would be reliable enough. The tar archive of the
|
|
||||||
filesystem is used as a transportation medium for Docker images, and the
|
|
||||||
archive is discarded once its contents are extracted. Therefore, for consistent
|
|
||||||
validation items such as order of files in the tar archive and time stamps are
|
|
||||||
subject to change once an image is received.
|
|
||||||
|
|
||||||
### Process
|
|
||||||
|
|
||||||
The method is typically iterative due to reading tar info headers from the
|
|
||||||
archive stream, though this is not a strict requirement.
|
|
||||||
|
|
||||||
#### Files
|
|
||||||
|
|
||||||
Each file in the tar archive have their contents (headers and body) checksummed
|
|
||||||
individually using the designated associated hashing cipher. The ordered
|
|
||||||
headers of the file are written to the checksum calculation first, and then the
|
|
||||||
payload of the file body.
|
|
||||||
|
|
||||||
The resulting checksum of the file is appended to the list of file sums. The
|
|
||||||
sum is encoded as a string of the hexadecimal digest. Additionally, the file
|
|
||||||
name and position in the archive is kept as reference for special ordering.
|
|
||||||
|
|
||||||
#### Headers
|
|
||||||
|
|
||||||
The following headers are read, in this
|
|
||||||
order ( and the corresponding representation of its value):
|
|
||||||
* 'name' - string
|
|
||||||
* 'mode' - string of the base10 integer
|
|
||||||
* 'uid' - string of the integer
|
|
||||||
* 'gid' - string of the integer
|
|
||||||
* 'size' - string of the integer
|
|
||||||
* 'mtime' (_Version0 only_) - string of integer of the seconds since 1970-01-01 00:00:00 UTC
|
|
||||||
* 'typeflag' - string of the char
|
|
||||||
* 'linkname' - string
|
|
||||||
* 'uname' - string
|
|
||||||
* 'gname' - string
|
|
||||||
* 'devmajor' - string of the integer
|
|
||||||
* 'devminor' - string of the integer
|
|
||||||
|
|
||||||
For >= Version1, the extended attribute headers ("SCHILY.xattr." prefixed pax
|
|
||||||
headers) included after the above list. These xattrs key/values are first
|
|
||||||
sorted by the keys.
|
|
||||||
|
|
||||||
#### Header Format
|
|
||||||
|
|
||||||
The ordered headers are written to the hash in the format of
|
|
||||||
|
|
||||||
"{.key}{.value}"
|
|
||||||
|
|
||||||
with no newline.
|
|
||||||
|
|
||||||
#### Body
|
|
||||||
|
|
||||||
After the order headers of the file have been added to the checksum for the
|
|
||||||
file, the body of the file is written to the hash.
|
|
||||||
|
|
||||||
#### List of file sums
|
|
||||||
|
|
||||||
The list of file sums is sorted by the string of the hexadecimal digest.
|
|
||||||
|
|
||||||
If there are two files in the tar with matching paths, the order of occurrence
|
|
||||||
for that path is reflected for the sums of the corresponding file header and
|
|
||||||
body.
|
|
||||||
|
|
||||||
#### Final Checksum
|
|
||||||
|
|
||||||
Begin with a fresh or initial state of the associated hash cipher. If there is
|
|
||||||
additional payload to include in the TarSum calculation for the archive, it is
|
|
||||||
written first. Then each checksum from the ordered list of file sums is written
|
|
||||||
to the hash.
|
|
||||||
|
|
||||||
The resulting digest is formatted per the Elements of TarSum checksum,
|
|
||||||
including the TarSum version, the associated hash cipher and the hexadecimal
|
|
||||||
encoded checksum digest.

## Security Considerations

The initial version of TarSum has undergone one update that could invalidate
handcrafted tar archives. The tar archive format supports appending files with
the same names as prior files in the archive; the latter file clobbers the
prior file of the same path. Because of this, the algorithm now accounts for
files with matching paths and orders the list of file sums accordingly [3].

## Footnotes

* [0] Versioning https://github.com/docker/docker/commit/747f89cd327db9d50251b17797c4d825162226d0
* [1] Alternate ciphers https://github.com/docker/docker/commit/4e9925d780665149b8bc940d5ba242ada1973c4e
* [2] Tar http://en.wikipedia.org/wiki/Tar_%28computing%29
* [3] Name collision https://github.com/docker/docker/commit/c5e6362c53cbbc09ddbabd5a7323e04438b57d31

## Acknowledgments

Thanks to Joffrey F (shin-) and Guillaume J. Charmes (creack) for the initial
work on the TarSum calculation.

158 vendor/github.com/docker/docker/pkg/tarsum/versioning.go generated vendored
@@ -1,158 +0,0 @@
package tarsum // import "github.com/docker/docker/pkg/tarsum"

import (
	"archive/tar"
	"errors"
	"io"
	"sort"
	"strconv"
	"strings"
)

// Version is used for versioning of the TarSum algorithm
// based on the prefix of the hash used
// i.e. "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b"
type Version int

// Prefix of "tarsum"
const (
	Version0 Version = iota
	Version1
	// VersionDev this constant will be either the latest or an unsettled next-version of the TarSum calculation
	VersionDev
)

// WriteV1Header writes a tar header to a writer in V1 tarsum format.
func WriteV1Header(h *tar.Header, w io.Writer) {
	for _, elem := range v1TarHeaderSelect(h) {
		w.Write([]byte(elem[0] + elem[1]))
	}
}

// VersionLabelForChecksum returns the label for the given tarsum
// checksum, i.e., everything before the first `+` character in
// the string or an empty string if no label separator is found.
func VersionLabelForChecksum(checksum string) string {
	// Checksums are in the form: {versionLabel}+{hashID}:{hex}
	sepIndex := strings.Index(checksum, "+")
	if sepIndex < 0 {
		return ""
	}
	return checksum[:sepIndex]
}

// GetVersions gets a list of all known tarsum versions.
func GetVersions() []Version {
	v := []Version{}
	for k := range tarSumVersions {
		v = append(v, k)
	}
	return v
}

var (
	tarSumVersions = map[Version]string{
		Version0:   "tarsum",
		Version1:   "tarsum.v1",
		VersionDev: "tarsum.dev",
	}
	tarSumVersionsByName = map[string]Version{
		"tarsum":     Version0,
		"tarsum.v1":  Version1,
		"tarsum.dev": VersionDev,
	}
)

func (tsv Version) String() string {
	return tarSumVersions[tsv]
}

// GetVersionFromTarsum returns the Version from the provided string.
func GetVersionFromTarsum(tarsum string) (Version, error) {
	tsv := tarsum
	if strings.Contains(tarsum, "+") {
		tsv = strings.SplitN(tarsum, "+", 2)[0]
	}
	for v, s := range tarSumVersions {
		if s == tsv {
			return v, nil
		}
	}
	return -1, ErrNotVersion
}

// Errors that may be returned by functions in this package
var (
	ErrNotVersion            = errors.New("string does not include a TarSum Version")
	ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented")
)

// tarHeaderSelector is the interface which different versions
// of tarsum should use for selecting and ordering tar headers
// for each item in the archive.
type tarHeaderSelector interface {
	selectHeaders(h *tar.Header) (orderedHeaders [][2]string)
}

type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string)

func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) {
	return f(h)
}

func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
	return [][2]string{
		{"name", h.Name},
		{"mode", strconv.FormatInt(h.Mode, 10)},
		{"uid", strconv.Itoa(h.Uid)},
		{"gid", strconv.Itoa(h.Gid)},
		{"size", strconv.FormatInt(h.Size, 10)},
		{"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)},
		{"typeflag", string([]byte{h.Typeflag})},
		{"linkname", h.Linkname},
		{"uname", h.Uname},
		{"gname", h.Gname},
		{"devmajor", strconv.FormatInt(h.Devmajor, 10)},
		{"devminor", strconv.FormatInt(h.Devminor, 10)},
	}
}

func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
	// Get extended attributes.
	xAttrKeys := make([]string, len(h.Xattrs))
	for k := range h.Xattrs {
		xAttrKeys = append(xAttrKeys, k)
	}
	sort.Strings(xAttrKeys)

	// Make the slice with enough capacity to hold the 11 basic headers
	// we want from the v0 selector plus however many xattrs we have.
	orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys))

	// Copy all headers from v0 excluding the 'mtime' header (the 5th element).
	v0headers := v0TarHeaderSelect(h)
	orderedHeaders = append(orderedHeaders, v0headers[0:5]...)
	orderedHeaders = append(orderedHeaders, v0headers[6:]...)

	// Finally, append the sorted xattrs.
	for _, k := range xAttrKeys {
		orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]})
	}

	return
}

var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{
	Version0:   v0TarHeaderSelect,
	Version1:   v1TarHeaderSelect,
	VersionDev: v1TarHeaderSelect,
}

func getTarHeaderSelector(v Version) (tarHeaderSelector, error) {
	headerSelector, ok := registeredHeaderSelectors[v]
	if !ok {
		return nil, ErrVersionNotImplemented
	}

	return headerSelector, nil
}

22 vendor/github.com/docker/docker/pkg/tarsum/writercloser.go generated vendored
@@ -1,22 +0,0 @@
package tarsum // import "github.com/docker/docker/pkg/tarsum"

import (
	"io"
)

type writeCloseFlusher interface {
	io.WriteCloser
	Flush() error
}

type nopCloseFlusher struct {
	io.Writer
}

func (n *nopCloseFlusher) Close() error {
	return nil
}

func (n *nopCloseFlusher) Flush() error {
	return nil
}
64 vendor/github.com/docker/docker/registry/auth.go generated vendored
@@ -1,7 +1,6 @@
|
|||||||
package registry // import "github.com/docker/docker/registry"
|
package registry // import "github.com/docker/docker/registry"
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"strings"
|
"strings"
|
||||||
@ -12,7 +11,6 @@ import (
|
|||||||
"github.com/docker/distribution/registry/client/transport"
|
"github.com/docker/distribution/registry/client/transport"
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/docker/api/types"
|
||||||
registrytypes "github.com/docker/docker/api/types/registry"
|
registrytypes "github.com/docker/docker/api/types/registry"
|
||||||
"github.com/docker/docker/errdefs"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
@ -22,51 +20,6 @@ const (
|
|||||||
AuthClientID = "docker"
|
AuthClientID = "docker"
|
||||||
)
|
)
|
||||||
|
|
||||||
// loginV1 tries to register/login to the v1 registry server.
|
|
||||||
func loginV1(authConfig *types.AuthConfig, apiEndpoint APIEndpoint, userAgent string) (string, string, error) {
|
|
||||||
registryEndpoint := apiEndpoint.ToV1Endpoint(userAgent, nil)
|
|
||||||
serverAddress := registryEndpoint.String()
|
|
||||||
|
|
||||||
logrus.Debugf("attempting v1 login to registry endpoint %s", serverAddress)
|
|
||||||
|
|
||||||
if serverAddress == "" {
|
|
||||||
return "", "", errdefs.System(errors.New("server Error: Server Address not set"))
|
|
||||||
}
|
|
||||||
|
|
||||||
req, err := http.NewRequest(http.MethodGet, serverAddress+"users/", nil)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", err
|
|
||||||
}
|
|
||||||
req.SetBasicAuth(authConfig.Username, authConfig.Password)
|
|
||||||
resp, err := registryEndpoint.client.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
// fallback when request could not be completed
|
|
||||||
return "", "", fallbackError{
|
|
||||||
err: err,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
body, err := ioutil.ReadAll(resp.Body)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", errdefs.System(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
switch resp.StatusCode {
|
|
||||||
case http.StatusOK:
|
|
||||||
return "Login Succeeded", "", nil
|
|
||||||
case http.StatusUnauthorized:
|
|
||||||
return "", "", errdefs.Unauthorized(errors.New("Wrong login/password, please try again"))
|
|
||||||
case http.StatusForbidden:
|
|
||||||
// *TODO: Use registry configuration to determine what this says, if anything?
|
|
||||||
return "", "", errdefs.Forbidden(errors.Errorf("Login: Account is not active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress))
|
|
||||||
case http.StatusInternalServerError:
|
|
||||||
logrus.Errorf("%s returned status code %d. Response Body :\n%s", req.URL.String(), resp.StatusCode, body)
|
|
||||||
return "", "", errdefs.System(errors.New("Internal Server Error"))
|
|
||||||
}
|
|
||||||
return "", "", errdefs.System(errors.Errorf("Login: %s (Code: %d; Headers: %s)", body,
|
|
||||||
resp.StatusCode, resp.Header))
|
|
||||||
}
|
|
||||||
|
|
||||||
type loginCredentialStore struct {
|
type loginCredentialStore struct {
|
||||||
authConfig *types.AuthConfig
|
authConfig *types.AuthConfig
|
||||||
}
|
}
|
||||||
@ -124,22 +77,21 @@ func (err fallbackError) Error() string {
|
|||||||
// endpoint will be pinged to get authorization challenges. These challenges
|
// endpoint will be pinged to get authorization challenges. These challenges
|
||||||
// will be used to authenticate against the registry to validate credentials.
|
// will be used to authenticate against the registry to validate credentials.
|
||||||
func loginV2(authConfig *types.AuthConfig, endpoint APIEndpoint, userAgent string) (string, string, error) {
|
func loginV2(authConfig *types.AuthConfig, endpoint APIEndpoint, userAgent string) (string, string, error) {
|
||||||
logrus.Debugf("attempting v2 login to registry endpoint %s", strings.TrimRight(endpoint.URL.String(), "/")+"/v2/")
|
var (
|
||||||
|
endpointStr = strings.TrimRight(endpoint.URL.String(), "/") + "/v2/"
|
||||||
|
modifiers = Headers(userAgent, nil)
|
||||||
|
authTransport = transport.NewTransport(NewTransport(endpoint.TLSConfig), modifiers...)
|
||||||
|
credentialAuthConfig = *authConfig
|
||||||
|
creds = loginCredentialStore{authConfig: &credentialAuthConfig}
|
||||||
|
)
|
||||||
|
|
||||||
modifiers := Headers(userAgent, nil)
|
logrus.Debugf("attempting v2 login to registry endpoint %s", endpointStr)
|
||||||
authTransport := transport.NewTransport(NewTransport(endpoint.TLSConfig), modifiers...)
|
|
||||||
|
|
||||||
credentialAuthConfig := *authConfig
|
|
||||||
creds := loginCredentialStore{
|
|
||||||
authConfig: &credentialAuthConfig,
|
|
||||||
}
|
|
||||||
|
|
||||||
loginClient, foundV2, err := v2AuthHTTPClient(endpoint.URL, authTransport, modifiers, creds, nil)
|
loginClient, foundV2, err := v2AuthHTTPClient(endpoint.URL, authTransport, modifiers, creds, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", "", err
|
return "", "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
endpointStr := strings.TrimRight(endpoint.URL.String(), "/") + "/v2/"
|
|
||||||
req, err := http.NewRequest(http.MethodGet, endpointStr, nil)
|
req, err := http.NewRequest(http.MethodGet, endpointStr, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if !foundV2 {
|
if !foundV2 {
|
||||||
|
15 vendor/github.com/docker/docker/registry/config.go generated vendored
@@ -26,7 +26,7 @@ type serviceConfig struct {
|
|||||||
registrytypes.ServiceConfig
|
registrytypes.ServiceConfig
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
const (
|
||||||
// DefaultNamespace is the default namespace
|
// DefaultNamespace is the default namespace
|
||||||
DefaultNamespace = "docker.io"
|
DefaultNamespace = "docker.io"
|
||||||
// DefaultRegistryVersionHeader is the name of the default HTTP header
|
// DefaultRegistryVersionHeader is the name of the default HTTP header
|
||||||
@ -39,29 +39,26 @@ var (
|
|||||||
IndexServer = "https://" + IndexHostname + "/v1/"
|
IndexServer = "https://" + IndexHostname + "/v1/"
|
||||||
// IndexName is the name of the index
|
// IndexName is the name of the index
|
||||||
IndexName = "docker.io"
|
IndexName = "docker.io"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
// DefaultV2Registry is the URI of the default v2 registry
|
// DefaultV2Registry is the URI of the default v2 registry
|
||||||
DefaultV2Registry = &url.URL{
|
DefaultV2Registry = &url.URL{
|
||||||
Scheme: "https",
|
Scheme: "https",
|
||||||
Host: "registry-1.docker.io",
|
Host: "registry-1.docker.io",
|
||||||
}
|
}
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// ErrInvalidRepositoryName is an error returned if the repository name did
|
// ErrInvalidRepositoryName is an error returned if the repository name did
|
||||||
// not have the correct form
|
// not have the correct form
|
||||||
ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")")
|
ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")")
|
||||||
|
|
||||||
emptyServiceConfig, _ = newServiceConfig(ServiceOptions{})
|
emptyServiceConfig, _ = newServiceConfig(ServiceOptions{})
|
||||||
)
|
validHostPortRegex = regexp.MustCompile(`^` + reference.DomainRegexp.String() + `$`)
|
||||||
|
|
||||||
var (
|
// for mocking in unit tests
|
||||||
validHostPortRegex = regexp.MustCompile(`^` + reference.DomainRegexp.String() + `$`)
|
lookupIP = net.LookupIP
|
||||||
)
|
)
|
||||||
|
|
||||||
// for mocking in unit tests
|
|
||||||
var lookupIP = net.LookupIP
|
|
||||||
|
|
||||||
// newServiceConfig returns a new instance of ServiceConfig
|
// newServiceConfig returns a new instance of ServiceConfig
|
||||||
func newServiceConfig(options ServiceOptions) (*serviceConfig, error) {
|
func newServiceConfig(options ServiceOptions) (*serviceConfig, error) {
|
||||||
config := &serviceConfig{
|
config := &serviceConfig{
|
||||||
|
8 vendor/github.com/docker/docker/registry/errors.go generated vendored
@@ -7,14 +7,6 @@ import (
|
|||||||
"github.com/docker/docker/errdefs"
|
"github.com/docker/docker/errdefs"
|
||||||
)
|
)
|
||||||
|
|
||||||
type notFoundError string
|
|
||||||
|
|
||||||
func (e notFoundError) Error() string {
|
|
||||||
return string(e)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (notFoundError) NotFound() {}
|
|
||||||
|
|
||||||
func translateV2AuthError(err error) error {
|
func translateV2AuthError(err error) error {
|
||||||
switch e := err.(type) {
|
switch e := err.(type) {
|
||||||
case *url.Error:
|
case *url.Error:
|
||||||
|
96 vendor/github.com/docker/docker/registry/resumable/resumablerequestreader.go generated vendored
@@ -1,96 +0,0 @@
|
|||||||
package resumable // import "github.com/docker/docker/registry/resumable"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
)
|
|
||||||
|
|
||||||
type requestReader struct {
|
|
||||||
client *http.Client
|
|
||||||
request *http.Request
|
|
||||||
lastRange int64
|
|
||||||
totalSize int64
|
|
||||||
currentResponse *http.Response
|
|
||||||
failures uint32
|
|
||||||
maxFailures uint32
|
|
||||||
waitDuration time.Duration
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewRequestReader makes it possible to resume reading a request's body transparently
|
|
||||||
// maxfail is the number of times we retry to make requests again (not resumes)
|
|
||||||
// totalsize is the total length of the body; auto detect if not provided
|
|
||||||
func NewRequestReader(c *http.Client, r *http.Request, maxfail uint32, totalsize int64) io.ReadCloser {
|
|
||||||
return &requestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, waitDuration: 5 * time.Second}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewRequestReaderWithInitialResponse makes it possible to resume
|
|
||||||
// reading the body of an already initiated request.
|
|
||||||
func NewRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser {
|
|
||||||
return &requestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse, waitDuration: 5 * time.Second}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *requestReader) Read(p []byte) (n int, err error) {
|
|
||||||
if r.client == nil || r.request == nil {
|
|
||||||
return 0, fmt.Errorf("client and request can't be nil")
|
|
||||||
}
|
|
||||||
isFreshRequest := false
|
|
||||||
if r.lastRange != 0 && r.currentResponse == nil {
|
|
||||||
readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize)
|
|
||||||
r.request.Header.Set("Range", readRange)
|
|
||||||
time.Sleep(r.waitDuration)
|
|
||||||
}
|
|
||||||
if r.currentResponse == nil {
|
|
||||||
r.currentResponse, err = r.client.Do(r.request)
|
|
||||||
isFreshRequest = true
|
|
||||||
}
|
|
||||||
if err != nil && r.failures+1 != r.maxFailures {
|
|
||||||
r.cleanUpResponse()
|
|
||||||
r.failures++
|
|
||||||
time.Sleep(time.Duration(r.failures) * r.waitDuration)
|
|
||||||
return 0, nil
|
|
||||||
} else if err != nil {
|
|
||||||
r.cleanUpResponse()
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
if r.currentResponse.StatusCode == http.StatusRequestedRangeNotSatisfiable && r.lastRange == r.totalSize && r.currentResponse.ContentLength == 0 {
|
|
||||||
r.cleanUpResponse()
|
|
||||||
return 0, io.EOF
|
|
||||||
} else if r.currentResponse.StatusCode != http.StatusPartialContent && r.lastRange != 0 && isFreshRequest {
|
|
||||||
r.cleanUpResponse()
|
|
||||||
return 0, fmt.Errorf("the server doesn't support byte ranges")
|
|
||||||
}
|
|
||||||
if r.totalSize == 0 {
|
|
||||||
r.totalSize = r.currentResponse.ContentLength
|
|
||||||
} else if r.totalSize <= 0 {
|
|
||||||
r.cleanUpResponse()
|
|
||||||
return 0, fmt.Errorf("failed to auto detect content length")
|
|
||||||
}
|
|
||||||
n, err = r.currentResponse.Body.Read(p)
|
|
||||||
r.lastRange += int64(n)
|
|
||||||
if err != nil {
|
|
||||||
r.cleanUpResponse()
|
|
||||||
}
|
|
||||||
if err != nil && err != io.EOF {
|
|
||||||
logrus.Infof("encountered error during pull and clearing it before resume: %s", err)
|
|
||||||
err = nil
|
|
||||||
}
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *requestReader) Close() error {
|
|
||||||
r.cleanUpResponse()
|
|
||||||
r.client = nil
|
|
||||||
r.request = nil
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *requestReader) cleanUpResponse() {
|
|
||||||
if r.currentResponse != nil {
|
|
||||||
r.currentResponse.Body.Close()
|
|
||||||
r.currentResponse = nil
|
|
||||||
}
|
|
||||||
}
|
|
80 vendor/github.com/docker/docker/registry/service.go generated vendored
@@ -108,36 +108,35 @@ func (s *DefaultService) LoadInsecureRegistries(registries []string) error {
|
|||||||
// It can be used to verify the validity of a client's credentials.
|
// It can be used to verify the validity of a client's credentials.
|
||||||
func (s *DefaultService) Auth(ctx context.Context, authConfig *types.AuthConfig, userAgent string) (status, token string, err error) {
|
func (s *DefaultService) Auth(ctx context.Context, authConfig *types.AuthConfig, userAgent string) (status, token string, err error) {
|
||||||
// TODO Use ctx when searching for repositories
|
// TODO Use ctx when searching for repositories
|
||||||
serverAddress := authConfig.ServerAddress
|
var registryHostName = IndexHostname
|
||||||
if serverAddress == "" {
|
|
||||||
serverAddress = IndexServer
|
if authConfig.ServerAddress != "" {
|
||||||
}
|
serverAddress := authConfig.ServerAddress
|
||||||
if !strings.HasPrefix(serverAddress, "https://") && !strings.HasPrefix(serverAddress, "http://") {
|
if !strings.HasPrefix(serverAddress, "https://") && !strings.HasPrefix(serverAddress, "http://") {
|
||||||
serverAddress = "https://" + serverAddress
|
serverAddress = "https://" + serverAddress
|
||||||
}
|
}
|
||||||
u, err := url.Parse(serverAddress)
|
u, err := url.Parse(serverAddress)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", "", errdefs.InvalidParameter(errors.Errorf("unable to parse server address: %v", err))
|
return "", "", errdefs.InvalidParameter(errors.Errorf("unable to parse server address: %v", err))
|
||||||
|
}
|
||||||
|
registryHostName = u.Host
|
||||||
}
|
}
|
||||||
|
|
||||||
endpoints, err := s.LookupPushEndpoints(u.Host)
|
// Lookup endpoints for authentication using "LookupPushEndpoints", which
|
||||||
|
// excludes mirrors to prevent sending credentials of the upstream registry
|
||||||
|
// to a mirror.
|
||||||
|
endpoints, err := s.LookupPushEndpoints(registryHostName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", "", errdefs.InvalidParameter(err)
|
return "", "", errdefs.InvalidParameter(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, endpoint := range endpoints {
|
for _, endpoint := range endpoints {
|
||||||
login := loginV2
|
status, token, err = loginV2(authConfig, endpoint, userAgent)
|
||||||
if endpoint.Version == APIVersion1 {
|
|
||||||
login = loginV1
|
|
||||||
}
|
|
||||||
|
|
||||||
status, token, err = login(authConfig, endpoint, userAgent)
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if fErr, ok := err.(fallbackError); ok {
|
if fErr, ok := err.(fallbackError); ok {
|
||||||
err = fErr.err
|
logrus.WithError(fErr.err).Infof("Error logging in to endpoint, trying next endpoint")
|
||||||
logrus.Infof("Error logging in to %s endpoint, trying next endpoint: %v", endpoint.Version, err)
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -150,18 +149,13 @@ func (s *DefaultService) Auth(ctx context.Context, authConfig *types.AuthConfig,
|
|||||||
// splitReposSearchTerm breaks a search term into an index name and remote name
|
// splitReposSearchTerm breaks a search term into an index name and remote name
|
||||||
func splitReposSearchTerm(reposName string) (string, string) {
|
func splitReposSearchTerm(reposName string) (string, string) {
|
||||||
nameParts := strings.SplitN(reposName, "/", 2)
|
nameParts := strings.SplitN(reposName, "/", 2)
|
||||||
var indexName, remoteName string
|
|
||||||
if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") &&
|
if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") &&
|
||||||
!strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") {
|
!strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") {
|
||||||
// This is a Docker Index repos (ex: samalba/hipache or ubuntu)
|
// This is a Docker Hub repository (ex: samalba/hipache or ubuntu),
|
||||||
// 'docker.io'
|
// use the default Docker Hub registry (docker.io)
|
||||||
indexName = IndexName
|
return IndexName, reposName
|
||||||
remoteName = reposName
|
|
||||||
} else {
|
|
||||||
indexName = nameParts[0]
|
|
||||||
remoteName = nameParts[1]
|
|
||||||
}
|
}
|
||||||
return indexName, remoteName
|
return nameParts[0], nameParts[1]
|
||||||
}
|
}
|
||||||
|
|
||||||
// Search queries the public registry for images matching the specified
|
// Search queries the public registry for images matching the specified
|
||||||
@ -184,7 +178,7 @@ func (s *DefaultService) Search(ctx context.Context, term string, limit int, aut
|
|||||||
}
|
}
|
||||||
|
|
||||||
// *TODO: Search multiple indexes.
|
// *TODO: Search multiple indexes.
|
||||||
endpoint, err := NewV1Endpoint(index, userAgent, http.Header(headers))
|
endpoint, err := NewV1Endpoint(index, userAgent, headers)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -228,13 +222,8 @@ func (s *DefaultService) Search(ctx context.Context, term string, limit int, aut
|
|||||||
r := newSession(client, authConfig, endpoint)
|
r := newSession(client, authConfig, endpoint)
|
||||||
|
|
||||||
if index.Official {
|
if index.Official {
|
||||||
localName := remoteName
|
// If pull "library/foo", it's stored locally under "foo"
|
||||||
if strings.HasPrefix(localName, "library/") {
|
remoteName = strings.TrimPrefix(remoteName, "library/")
|
||||||
// If pull "library/foo", it's stored locally under "foo"
|
|
||||||
localName = strings.SplitN(localName, "/", 2)[1]
|
|
||||||
}
|
|
||||||
|
|
||||||
return r.SearchRepositories(localName, limit)
|
|
||||||
}
|
}
|
||||||
return r.SearchRepositories(remoteName, limit)
|
return r.SearchRepositories(remoteName, limit)
|
||||||
}
|
}
|
||||||
@ -259,6 +248,7 @@ type APIEndpoint struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ToV1Endpoint returns a V1 API endpoint based on the APIEndpoint
|
// ToV1Endpoint returns a V1 API endpoint based on the APIEndpoint
|
||||||
|
// Deprecated: this function is deprecated and will be removed in a future update
|
||||||
func (e APIEndpoint) ToV1Endpoint(userAgent string, metaHeaders http.Header) *V1Endpoint {
|
func (e APIEndpoint) ToV1Endpoint(userAgent string, metaHeaders http.Header) *V1Endpoint {
|
||||||
return newV1Endpoint(*e.URL, e.TLSConfig, userAgent, metaHeaders)
|
return newV1Endpoint(*e.URL, e.TLSConfig, userAgent, metaHeaders)
|
||||||
}
|
}
|
||||||
@ -280,24 +270,22 @@ func (s *DefaultService) tlsConfigForMirror(mirrorURL *url.URL) (*tls.Config, er
|
|||||||
return s.tlsConfig(mirrorURL.Host)
|
return s.tlsConfig(mirrorURL.Host)
|
||||||
}
|
}
|
||||||
|
|
||||||
// LookupPullEndpoints creates a list of endpoints to try to pull from, in order of preference.
|
// LookupPullEndpoints creates a list of v2 endpoints to try to pull from, in order of preference.
|
||||||
// It gives preference to v2 endpoints over v1, mirrors over the actual
|
// It gives preference to mirrors over the actual registry, and HTTPS over plain HTTP.
|
||||||
// registry, and HTTPS over plain HTTP.
|
|
||||||
func (s *DefaultService) LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) {
|
func (s *DefaultService) LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) {
|
||||||
s.mu.Lock()
|
s.mu.Lock()
|
||||||
defer s.mu.Unlock()
|
defer s.mu.Unlock()
|
||||||
|
|
||||||
return s.lookupEndpoints(hostname)
|
return s.lookupV2Endpoints(hostname)
|
||||||
}
|
}
|
||||||
|
|
||||||
// LookupPushEndpoints creates a list of endpoints to try to push to, in order of preference.
|
// LookupPushEndpoints creates a list of v2 endpoints to try to push to, in order of preference.
|
||||||
// It gives preference to v2 endpoints over v1, and HTTPS over plain HTTP.
|
// It gives preference to HTTPS over plain HTTP. Mirrors are not included.
|
||||||
// Mirrors are not included.
|
|
||||||
func (s *DefaultService) LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) {
|
func (s *DefaultService) LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) {
|
||||||
s.mu.Lock()
|
s.mu.Lock()
|
||||||
defer s.mu.Unlock()
|
defer s.mu.Unlock()
|
||||||
|
|
||||||
allEndpoints, err := s.lookupEndpoints(hostname)
|
allEndpoints, err := s.lookupV2Endpoints(hostname)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
for _, endpoint := range allEndpoints {
|
for _, endpoint := range allEndpoints {
|
||||||
if !endpoint.Mirror {
|
if !endpoint.Mirror {
|
||||||
@ -307,7 +295,3 @@ func (s *DefaultService) LookupPushEndpoints(hostname string) (endpoints []APIEn
|
|||||||
}
|
}
|
||||||
return endpoints, err
|
return endpoints, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *DefaultService) lookupEndpoints(hostname string) (endpoints []APIEndpoint, err error) {
|
|
||||||
return s.lookupV2Endpoints(hostname)
|
|
||||||
}
|
|
||||||
|
5 vendor/github.com/docker/docker/registry/service_v2.go generated vendored
@@ -10,7 +10,6 @@ import (
|
|||||||
func (s *DefaultService) lookupV2Endpoints(hostname string) (endpoints []APIEndpoint, err error) {
|
func (s *DefaultService) lookupV2Endpoints(hostname string) (endpoints []APIEndpoint, err error) {
|
||||||
tlsConfig := tlsconfig.ServerDefault()
|
tlsConfig := tlsconfig.ServerDefault()
|
||||||
if hostname == DefaultNamespace || hostname == IndexHostname {
|
if hostname == DefaultNamespace || hostname == IndexHostname {
|
||||||
// v2 mirrors
|
|
||||||
for _, mirror := range s.config.Mirrors {
|
for _, mirror := range s.config.Mirrors {
|
||||||
if !strings.HasPrefix(mirror, "http://") && !strings.HasPrefix(mirror, "https://") {
|
if !strings.HasPrefix(mirror, "http://") && !strings.HasPrefix(mirror, "https://") {
|
||||||
mirror = "https://" + mirror
|
mirror = "https://" + mirror
|
||||||
@ -24,15 +23,13 @@ func (s *DefaultService) lookupV2Endpoints(hostname string) (endpoints []APIEndp
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
endpoints = append(endpoints, APIEndpoint{
|
endpoints = append(endpoints, APIEndpoint{
|
||||||
URL: mirrorURL,
|
URL: mirrorURL,
|
||||||
// guess mirrors are v2
|
|
||||||
Version: APIVersion2,
|
Version: APIVersion2,
|
||||||
Mirror: true,
|
Mirror: true,
|
||||||
TrimHostname: true,
|
TrimHostname: true,
|
||||||
TLSConfig: mirrorTLSConfig,
|
TLSConfig: mirrorTLSConfig,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
// v2 registry
|
|
||||||
endpoints = append(endpoints, APIEndpoint{
|
endpoints = append(endpoints, APIEndpoint{
|
||||||
URL: DefaultV2Registry,
|
URL: DefaultV2Registry,
|
||||||
Version: APIVersion2,
|
Version: APIVersion2,
|
||||||
|
563 vendor/github.com/docker/docker/registry/session.go generated vendored
@@ -1,43 +1,26 @@
|
|||||||
package registry // import "github.com/docker/docker/registry"
|
package registry // import "github.com/docker/docker/registry"
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"crypto/sha256"
|
|
||||||
|
|
||||||
// this is required for some certificates
|
// this is required for some certificates
|
||||||
_ "crypto/sha512"
|
_ "crypto/sha512"
|
||||||
"encoding/hex"
|
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/http/cookiejar"
|
"net/http/cookiejar"
|
||||||
"net/url"
|
"net/url"
|
||||||
"strconv"
|
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"github.com/docker/distribution/reference"
|
|
||||||
"github.com/docker/distribution/registry/api/errcode"
|
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/docker/api/types"
|
||||||
registrytypes "github.com/docker/docker/api/types/registry"
|
registrytypes "github.com/docker/docker/api/types/registry"
|
||||||
"github.com/docker/docker/errdefs"
|
"github.com/docker/docker/errdefs"
|
||||||
"github.com/docker/docker/pkg/ioutils"
|
"github.com/docker/docker/pkg/ioutils"
|
||||||
"github.com/docker/docker/pkg/jsonmessage"
|
"github.com/docker/docker/pkg/jsonmessage"
|
||||||
"github.com/docker/docker/pkg/stringid"
|
"github.com/docker/docker/pkg/stringid"
|
||||||
"github.com/docker/docker/pkg/tarsum"
|
|
||||||
"github.com/docker/docker/registry/resumable"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
|
||||||
// ErrRepoNotFound is returned if the repository didn't exist on the
|
|
||||||
// remote side
|
|
||||||
ErrRepoNotFound notFoundError = "Repository not found"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Session is used to communicate with a V1 registry
|
// A Session is used to communicate with a V1 registry
|
||||||
type Session struct {
|
type Session struct {
|
||||||
indexEndpoint *V1Endpoint
|
indexEndpoint *V1Endpoint
|
||||||
@ -214,527 +197,6 @@ func NewSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1E
|
|||||||
return newSession(client, authConfig, endpoint), nil
|
return newSession(client, authConfig, endpoint), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// ID returns this registry session's ID.
|
|
||||||
func (r *Session) ID() string {
|
|
||||||
return r.id
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetRemoteHistory retrieves the history of a given image from the registry.
|
|
||||||
// It returns a list of the parent's JSON files (including the requested image).
|
|
||||||
func (r *Session) GetRemoteHistory(imgID, registry string) ([]string, error) {
|
|
||||||
res, err := r.client.Get(registry + "images/" + imgID + "/ancestry")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer res.Body.Close()
|
|
||||||
if res.StatusCode != http.StatusOK {
|
|
||||||
if res.StatusCode == http.StatusUnauthorized {
|
|
||||||
return nil, errcode.ErrorCodeUnauthorized.WithArgs()
|
|
||||||
}
|
|
||||||
return nil, newJSONError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res)
|
|
||||||
}
|
|
||||||
|
|
||||||
var history []string
|
|
||||||
if err := json.NewDecoder(res.Body).Decode(&history); err != nil {
|
|
||||||
return nil, fmt.Errorf("Error while reading the http response: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
logrus.Debugf("Ancestry: %v", history)
|
|
||||||
return history, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// LookupRemoteImage checks if an image exists in the registry
|
|
||||||
func (r *Session) LookupRemoteImage(imgID, registry string) error {
|
|
||||||
res, err := r.client.Get(registry + "images/" + imgID + "/json")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
res.Body.Close()
|
|
||||||
if res.StatusCode != http.StatusOK {
|
|
||||||
return newJSONError(fmt.Sprintf("HTTP code %d", res.StatusCode), res)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetRemoteImageJSON retrieves an image's JSON metadata from the registry.
|
|
||||||
func (r *Session) GetRemoteImageJSON(imgID, registry string) ([]byte, int64, error) {
|
|
||||||
res, err := r.client.Get(registry + "images/" + imgID + "/json")
|
|
||||||
if err != nil {
|
|
||||||
return nil, -1, fmt.Errorf("Failed to download json: %s", err)
|
|
||||||
}
|
|
||||||
defer res.Body.Close()
|
|
||||||
if res.StatusCode != http.StatusOK {
|
|
||||||
return nil, -1, newJSONError(fmt.Sprintf("HTTP code %d", res.StatusCode), res)
|
|
||||||
}
|
|
||||||
// if the size header is not present, then set it to '-1'
|
|
||||||
imageSize := int64(-1)
|
|
||||||
if hdr := res.Header.Get("X-Docker-Size"); hdr != "" {
|
|
||||||
imageSize, err = strconv.ParseInt(hdr, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return nil, -1, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
jsonString, err := ioutil.ReadAll(res.Body)
|
|
||||||
if err != nil {
|
|
||||||
return nil, -1, fmt.Errorf("Failed to parse downloaded json: %v (%s)", err, jsonString)
|
|
||||||
}
|
|
||||||
return jsonString, imageSize, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetRemoteImageLayer retrieves an image layer from the registry
|
|
||||||
func (r *Session) GetRemoteImageLayer(imgID, registry string, imgSize int64) (io.ReadCloser, error) {
|
|
||||||
var (
|
|
||||||
statusCode = 0
|
|
||||||
res *http.Response
|
|
||||||
err error
|
|
||||||
imageURL = fmt.Sprintf("%simages/%s/layer", registry, imgID)
|
|
||||||
)
|
|
||||||
|
|
||||||
req, err := http.NewRequest(http.MethodGet, imageURL, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Error while getting from the server: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
res, err = r.client.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
logrus.Debugf("Error contacting registry %s: %v", registry, err)
|
|
||||||
// the only case err != nil && res != nil is https://golang.org/src/net/http/client.go#L515
|
|
||||||
if res != nil {
|
|
||||||
if res.Body != nil {
|
|
||||||
res.Body.Close()
|
|
||||||
}
|
|
||||||
statusCode = res.StatusCode
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)",
|
|
||||||
statusCode, imgID)
|
|
||||||
}
|
|
||||||
|
|
||||||
if res.StatusCode != http.StatusOK {
|
|
||||||
res.Body.Close()
|
|
||||||
return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)",
|
|
||||||
res.StatusCode, imgID)
|
|
||||||
}
|
|
||||||
|
|
||||||
if res.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 {
|
|
||||||
logrus.Debug("server supports resume")
|
|
||||||
return resumable.NewRequestReaderWithInitialResponse(r.client, req, 5, imgSize, res), nil
|
|
||||||
}
|
|
||||||
logrus.Debug("server doesn't support resume")
|
|
||||||
return res.Body, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetRemoteTag retrieves the tag named in the askedTag argument from the given
|
|
||||||
// repository. It queries each of the registries supplied in the registries
|
|
||||||
// argument, and returns data from the first one that answers the query
|
|
||||||
// successfully.
|
|
||||||
func (r *Session) GetRemoteTag(registries []string, repositoryRef reference.Named, askedTag string) (string, error) {
|
|
||||||
repository := reference.Path(repositoryRef)
|
|
||||||
|
|
||||||
if strings.Count(repository, "/") == 0 {
|
|
||||||
// This will be removed once the registry supports auto-resolution on
|
|
||||||
// the "library" namespace
|
|
||||||
repository = "library/" + repository
|
|
||||||
}
|
|
||||||
for _, host := range registries {
|
|
||||||
endpoint := fmt.Sprintf("%srepositories/%s/tags/%s", host, repository, askedTag)
|
|
||||||
res, err := r.client.Get(endpoint)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint)
|
|
||||||
defer res.Body.Close()
|
|
||||||
|
|
||||||
if res.StatusCode == 404 {
|
|
||||||
return "", ErrRepoNotFound
|
|
||||||
}
|
|
||||||
if res.StatusCode != http.StatusOK {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
var tagID string
|
|
||||||
if err := json.NewDecoder(res.Body).Decode(&tagID); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return tagID, nil
|
|
||||||
}
|
|
||||||
return "", fmt.Errorf("Could not reach any registry endpoint")
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetRemoteTags retrieves all tags from the given repository. It queries each
|
|
||||||
// of the registries supplied in the registries argument, and returns data from
|
|
||||||
// the first one that answers the query successfully. It returns a map with
|
|
||||||
// tag names as the keys and image IDs as the values.
|
|
||||||
func (r *Session) GetRemoteTags(registries []string, repositoryRef reference.Named) (map[string]string, error) {
|
|
||||||
repository := reference.Path(repositoryRef)
|
|
||||||
|
|
||||||
if strings.Count(repository, "/") == 0 {
|
|
||||||
// This will be removed once the registry supports auto-resolution on
|
|
||||||
// the "library" namespace
|
|
||||||
repository = "library/" + repository
|
|
||||||
}
|
|
||||||
for _, host := range registries {
|
|
||||||
endpoint := fmt.Sprintf("%srepositories/%s/tags", host, repository)
|
|
||||||
res, err := r.client.Get(endpoint)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint)
|
|
||||||
defer res.Body.Close()
|
|
||||||
|
|
||||||
if res.StatusCode == 404 {
|
|
||||||
return nil, ErrRepoNotFound
|
|
||||||
}
|
|
||||||
if res.StatusCode != http.StatusOK {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
result := make(map[string]string)
|
|
||||||
if err := json.NewDecoder(res.Body).Decode(&result); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("Could not reach any registry endpoint")
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildEndpointsList(headers []string, indexEp string) ([]string, error) {
|
|
||||||
var endpoints []string
|
|
||||||
parsedURL, err := url.Parse(indexEp)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var urlScheme = parsedURL.Scheme
|
|
||||||
// The registry's URL scheme has to match the Index'
|
|
||||||
for _, ep := range headers {
|
|
||||||
epList := strings.Split(ep, ",")
|
|
||||||
for _, epListElement := range epList {
|
|
||||||
endpoints = append(
|
|
||||||
endpoints,
|
|
||||||
fmt.Sprintf("%s://%s/v1/", urlScheme, strings.TrimSpace(epListElement)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return endpoints, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetRepositoryData returns lists of images and endpoints for the repository
|
|
||||||
func (r *Session) GetRepositoryData(name reference.Named) (*RepositoryData, error) {
|
|
||||||
repositoryTarget := fmt.Sprintf("%srepositories/%s/images", r.indexEndpoint.String(), reference.Path(name))
|
|
||||||
|
|
||||||
logrus.Debugf("[registry] Calling GET %s", repositoryTarget)
|
|
||||||
|
|
||||||
req, err := http.NewRequest(http.MethodGet, repositoryTarget, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
// this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests
|
|
||||||
req.Header.Set("X-Docker-Token", "true")
|
|
||||||
res, err := r.client.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
// check if the error is because of i/o timeout
|
|
||||||
// and return a non-obtuse error message for users
|
|
||||||
// "Get https://index.docker.io/v1/repositories/library/busybox/images: i/o timeout"
|
|
||||||
// was a top search on the docker user forum
|
|
||||||
if isTimeout(err) {
|
|
||||||
return nil, fmt.Errorf("network timed out while trying to connect to %s. You may want to check your internet connection or if you are behind a proxy", repositoryTarget)
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("Error while pulling image: %v", err)
|
|
||||||
}
|
|
||||||
defer res.Body.Close()
|
|
||||||
if res.StatusCode == http.StatusUnauthorized {
|
|
||||||
return nil, errcode.ErrorCodeUnauthorized.WithArgs()
|
|
||||||
}
|
|
||||||
// TODO: Right now we're ignoring checksums in the response body.
|
|
||||||
// In the future, we need to use them to check image validity.
|
|
||||||
if res.StatusCode == 404 {
|
|
||||||
return nil, newJSONError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res)
|
|
||||||
} else if res.StatusCode != http.StatusOK {
|
|
||||||
errBody, err := ioutil.ReadAll(res.Body)
|
|
||||||
if err != nil {
|
|
||||||
logrus.Debugf("Error reading response body: %s", err)
|
|
||||||
}
|
|
||||||
return nil, newJSONError(fmt.Sprintf("Error: Status %d trying to pull repository %s: %q", res.StatusCode, reference.Path(name), errBody), res)
|
|
||||||
}
|
|
||||||
|
|
||||||
var endpoints []string
|
|
||||||
if res.Header.Get("X-Docker-Endpoints") != "" {
|
|
||||||
endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.String())
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Assume the endpoint is on the same host
|
|
||||||
endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", r.indexEndpoint.URL.Scheme, req.URL.Host))
|
|
||||||
}
|
|
||||||
|
|
||||||
remoteChecksums := []*ImgData{}
|
|
||||||
if err := json.NewDecoder(res.Body).Decode(&remoteChecksums); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Forge a better object from the retrieved data
|
|
||||||
imgsData := make(map[string]*ImgData, len(remoteChecksums))
|
|
||||||
for _, elem := range remoteChecksums {
|
|
||||||
imgsData[elem.ID] = elem
|
|
||||||
}
|
|
||||||
|
|
||||||
return &RepositoryData{
|
|
||||||
ImgList: imgsData,
|
|
||||||
Endpoints: endpoints,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// PushImageChecksumRegistry uploads checksums for an image
|
|
||||||
func (r *Session) PushImageChecksumRegistry(imgData *ImgData, registry string) error {
|
|
||||||
u := registry + "images/" + imgData.ID + "/checksum"
|
|
||||||
|
|
||||||
logrus.Debugf("[registry] Calling PUT %s", u)
|
|
||||||
|
|
||||||
req, err := http.NewRequest(http.MethodPut, u, nil)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
req.Header.Set("X-Docker-Checksum", imgData.Checksum)
|
|
||||||
req.Header.Set("X-Docker-Checksum-Payload", imgData.ChecksumPayload)
|
|
||||||
|
|
||||||
res, err := r.client.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Failed to upload metadata: %v", err)
|
|
||||||
}
|
|
||||||
defer res.Body.Close()
|
|
||||||
if len(res.Cookies()) > 0 {
|
|
||||||
r.client.Jar.SetCookies(req.URL, res.Cookies())
|
|
||||||
}
|
|
||||||
if res.StatusCode != http.StatusOK {
|
|
||||||
errBody, err := ioutil.ReadAll(res.Body)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err)
|
|
||||||
}
|
|
||||||
var jsonBody map[string]string
|
|
||||||
if err := json.Unmarshal(errBody, &jsonBody); err != nil {
|
|
||||||
errBody = []byte(err.Error())
|
|
||||||
} else if jsonBody["error"] == "Image already exists" {
|
|
||||||
return ErrAlreadyExists
|
|
||||||
}
|
|
||||||
return fmt.Errorf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// PushImageJSONRegistry pushes JSON metadata for a local image to the registry
|
|
||||||
func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, registry string) error {
|
|
||||||
|
|
||||||
u := registry + "images/" + imgData.ID + "/json"
|
|
||||||
|
|
||||||
logrus.Debugf("[registry] Calling PUT %s", u)
|
|
||||||
|
|
||||||
req, err := http.NewRequest(http.MethodPut, u, bytes.NewReader(jsonRaw))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
req.Header.Add("Content-type", "application/json")
|
|
||||||
|
|
||||||
res, err := r.client.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Failed to upload metadata: %s", err)
|
|
||||||
}
|
|
||||||
defer res.Body.Close()
|
|
||||||
if res.StatusCode == http.StatusUnauthorized && strings.HasPrefix(registry, "http://") {
|
|
||||||
return newJSONError("HTTP code 401, Docker will not send auth headers over HTTP.", res)
|
|
||||||
}
|
|
||||||
if res.StatusCode != http.StatusOK {
|
|
||||||
errBody, err := ioutil.ReadAll(res.Body)
|
|
||||||
if err != nil {
|
|
||||||
return newJSONError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res)
|
|
||||||
}
|
|
||||||
var jsonBody map[string]string
|
|
||||||
if err := json.Unmarshal(errBody, &jsonBody); err != nil {
|
|
||||||
errBody = []byte(err.Error())
|
|
||||||
} else if jsonBody["error"] == "Image already exists" {
|
|
||||||
return ErrAlreadyExists
|
|
||||||
}
|
|
||||||
return newJSONError(fmt.Sprintf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody), res)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// PushImageLayerRegistry sends the checksum of an image layer to the registry
|
|
||||||
func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, jsonRaw []byte) (checksum string, checksumPayload string, err error) {
|
|
||||||
u := registry + "images/" + imgID + "/layer"
|
|
||||||
|
|
||||||
logrus.Debugf("[registry] Calling PUT %s", u)
|
|
||||||
|
|
||||||
tarsumLayer, err := tarsum.NewTarSum(layer, false, tarsum.Version0)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", err
|
|
||||||
}
|
|
||||||
h := sha256.New()
|
|
||||||
h.Write(jsonRaw)
|
|
||||||
h.Write([]byte{'\n'})
|
|
||||||
checksumLayer := io.TeeReader(tarsumLayer, h)
|
|
||||||
|
|
||||||
req, err := http.NewRequest(http.MethodPut, u, checksumLayer)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", err
|
|
||||||
}
|
|
||||||
req.Header.Add("Content-Type", "application/octet-stream")
|
|
||||||
req.ContentLength = -1
|
|
||||||
req.TransferEncoding = []string{"chunked"}
|
|
||||||
res, err := r.client.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", fmt.Errorf("Failed to upload layer: %v", err)
|
|
||||||
}
|
|
||||||
if rc, ok := layer.(io.Closer); ok {
|
|
||||||
if err := rc.Close(); err != nil {
|
|
||||||
return "", "", err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
defer res.Body.Close()
|
|
||||||
|
|
||||||
if res.StatusCode != http.StatusOK {
|
|
||||||
errBody, err := ioutil.ReadAll(res.Body)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", newJSONError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res)
|
|
||||||
}
|
|
||||||
return "", "", newJSONError(fmt.Sprintf("Received HTTP code %d while uploading layer: %q", res.StatusCode, errBody), res)
|
|
||||||
}
|
|
||||||
|
|
||||||
checksumPayload = "sha256:" + hex.EncodeToString(h.Sum(nil))
|
|
||||||
return tarsumLayer.Sum(jsonRaw), checksumPayload, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// PushRegistryTag pushes a tag on the registry.
|
|
||||||
// Remote has the format '<user>/<repo>
|
|
||||||
func (r *Session) PushRegistryTag(remote reference.Named, revision, tag, registry string) error {
|
|
||||||
// "jsonify" the string
|
|
||||||
revision = "\"" + revision + "\""
|
|
||||||
path := fmt.Sprintf("repositories/%s/tags/%s", reference.Path(remote), tag)
|
|
||||||
|
|
||||||
req, err := http.NewRequest(http.MethodPut, registry+path, strings.NewReader(revision))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
req.Header.Add("Content-type", "application/json")
|
|
||||||
req.ContentLength = int64(len(revision))
|
|
||||||
res, err := r.client.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
res.Body.Close()
|
|
||||||
if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusCreated {
|
|
||||||
return newJSONError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, reference.Path(remote)), res)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// PushImageJSONIndex uploads an image list to the repository
|
|
||||||
func (r *Session) PushImageJSONIndex(remote reference.Named, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) {
|
|
||||||
cleanImgList := []*ImgData{}
|
|
||||||
if validate {
|
|
||||||
for _, elem := range imgList {
|
|
||||||
if elem.Checksum != "" {
|
|
||||||
cleanImgList = append(cleanImgList, elem)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
cleanImgList = imgList
|
|
||||||
}
|
|
||||||
|
|
||||||
imgListJSON, err := json.Marshal(cleanImgList)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var suffix string
|
|
||||||
if validate {
|
|
||||||
suffix = "images"
|
|
||||||
}
|
|
||||||
u := fmt.Sprintf("%srepositories/%s/%s", r.indexEndpoint.String(), reference.Path(remote), suffix)
|
|
||||||
logrus.Debugf("[registry] PUT %s", u)
|
|
||||||
logrus.Debugf("Image list pushed to index:\n%s", imgListJSON)
|
|
||||||
headers := map[string][]string{
|
|
||||||
"Content-type": {"application/json"},
|
|
||||||
// this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests
|
|
||||||
"X-Docker-Token": {"true"},
|
    }
    if validate {
        headers["X-Docker-Endpoints"] = regs
    }

    // Redirect if necessary
    var res *http.Response
    for {
        if res, err = r.putImageRequest(u, headers, imgListJSON); err != nil {
            return nil, err
        }
        if !shouldRedirect(res) {
            break
        }
        res.Body.Close()
        u = res.Header.Get("Location")
        logrus.Debugf("Redirected to %s", u)
    }
    defer res.Body.Close()

    if res.StatusCode == http.StatusUnauthorized {
        return nil, errcode.ErrorCodeUnauthorized.WithArgs()
    }

    var tokens, endpoints []string
    if !validate {
        if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusCreated {
            errBody, err := ioutil.ReadAll(res.Body)
            if err != nil {
                logrus.Debugf("Error reading response body: %s", err)
            }
            return nil, newJSONError(fmt.Sprintf("Error: Status %d trying to push repository %s: %q", res.StatusCode, reference.Path(remote), errBody), res)
        }
        tokens = res.Header["X-Docker-Token"]
        logrus.Debugf("Auth token: %v", tokens)

        if res.Header.Get("X-Docker-Endpoints") == "" {
            return nil, fmt.Errorf("Index response didn't contain any endpoints")
        }
        endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.String())
        if err != nil {
            return nil, err
        }
    } else {
        if res.StatusCode != http.StatusNoContent {
            errBody, err := ioutil.ReadAll(res.Body)
            if err != nil {
                logrus.Debugf("Error reading response body: %s", err)
            }
            return nil, newJSONError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %q", res.StatusCode, reference.Path(remote), errBody), res)
        }
    }

    return &RepositoryData{
        Endpoints: endpoints,
    }, nil
}

func (r *Session) putImageRequest(u string, headers map[string][]string, body []byte) (*http.Response, error) {
    req, err := http.NewRequest(http.MethodPut, u, bytes.NewReader(body))
    if err != nil {
        return nil, err
    }
    req.ContentLength = int64(len(body))
    for k, v := range headers {
        req.Header[k] = v
    }
    response, err := r.client.Do(req)
    if err != nil {
        return nil, err
    }
    return response, nil
}

func shouldRedirect(response *http.Response) bool {
    return response.StatusCode >= 300 && response.StatusCode < 400
}

// SearchRepositories performs a search against the remote repository
func (r *Session) SearchRepositories(term string, limit int) (*registrytypes.SearchResults, error) {
    if limit < 1 || limit > 100 {
@@ -755,28 +217,11 @@ func (r *Session) SearchRepositories(term string, limit int) (*registrytypes.SearchResults, error) {
    }
    defer res.Body.Close()
    if res.StatusCode != http.StatusOK {
-        return nil, &jsonmessage.JSONError{
-            Message: fmt.Sprintf("Unexpected status code %d", res.StatusCode),
-            Code:    res.StatusCode,
-        }
+        return nil, newJSONError(fmt.Sprintf("Unexpected status code %d", res.StatusCode), res)
    }
    result := new(registrytypes.SearchResults)
    return result, errors.Wrap(json.NewDecoder(res.Body).Decode(result), "error decoding registry search results")
}

func isTimeout(err error) bool {
    type timeout interface {
        Timeout() bool
    }
    e := err
    switch urlErr := err.(type) {
    case *url.Error:
        e = urlErr.Err
    }
    t, ok := e.(timeout)
    return ok && t.Timeout()
}

func newJSONError(msg string, res *http.Response) error {
    return &jsonmessage.JSONError{
        Message: msg,
        Code:    res.StatusCode,
    }
}
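Aside: the two small helpers above, newJSONError and isTimeout, capture the whole error-handling pattern this session code relies on. The standalone sketch below mirrors that pattern rather than importing the vendored file (the helpers are unexported); the httptest server standing in for a registry endpoint and its 503 behaviour are illustrative assumptions, not part of the diff.

package main

import (
    "fmt"
    "net/http"
    "net/http/httptest"
    "net/url"

    "github.com/docker/docker/pkg/jsonmessage"
)

// newJSONError mirrors the vendored helper: wrap a non-OK response in a
// jsonmessage.JSONError that carries the HTTP status code.
func newJSONError(msg string, res *http.Response) error {
    return &jsonmessage.JSONError{Message: msg, Code: res.StatusCode}
}

// isTimeout mirrors the vendored helper: unwrap *url.Error and ask the
// underlying error whether it was a timeout.
func isTimeout(err error) bool {
    type timeout interface{ Timeout() bool }
    e := err
    if urlErr, ok := err.(*url.Error); ok {
        e = urlErr.Err
    }
    t, ok := e.(timeout)
    return ok && t.Timeout()
}

func main() {
    // Hypothetical endpoint that always answers 503 Service Unavailable.
    srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        w.WriteHeader(http.StatusServiceUnavailable)
    }))
    defer srv.Close()

    res, err := http.Get(srv.URL)
    if err != nil {
        // Transport-level failure: classify timeouts separately.
        fmt.Println("transport error, timeout:", isTimeout(err))
        return
    }
    defer res.Body.Close()
    if res.StatusCode != http.StatusOK {
        // HTTP-level failure: wrap the status code as a JSONError.
        fmt.Println(newJSONError(fmt.Sprintf("Unexpected status code %d", res.StatusCode), res))
    }
}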
1290 vendor/github.com/golang/protobuf/jsonpb/jsonpb.go generated vendored Normal file
File diff suppressed because it is too large

338 vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go generated vendored Normal file
@@ -0,0 +1,338 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/struct.proto

package structpb

import (
    fmt "fmt"
    proto "github.com/golang/protobuf/proto"
    math "math"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package

// `NullValue` is a singleton enumeration to represent the null value for the
// `Value` type union.
//
// The JSON representation for `NullValue` is JSON `null`.
type NullValue int32

const (
    // Null value.
    NullValue_NULL_VALUE NullValue = 0
)

var NullValue_name = map[int32]string{
    0: "NULL_VALUE",
}

var NullValue_value = map[string]int32{
    "NULL_VALUE": 0,
}

func (x NullValue) String() string {
    return proto.EnumName(NullValue_name, int32(x))
}

func (NullValue) EnumDescriptor() ([]byte, []int) {
    return fileDescriptor_df322afd6c9fb402, []int{0}
}

func (NullValue) XXX_WellKnownType() string { return "NullValue" }

// `Struct` represents a structured data value, consisting of fields
// which map to dynamically typed values. In some languages, `Struct`
// might be supported by a native representation. For example, in
// scripting languages like JS a struct is represented as an
// object. The details of that representation are described together
// with the proto support for the language.
//
// The JSON representation for `Struct` is JSON object.
type Struct struct {
    // Unordered map of dynamically typed values.
    Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}

func (m *Struct) Reset()         { *m = Struct{} }
func (m *Struct) String() string { return proto.CompactTextString(m) }
func (*Struct) ProtoMessage()    {}
func (*Struct) Descriptor() ([]byte, []int) {
    return fileDescriptor_df322afd6c9fb402, []int{0}
}

func (*Struct) XXX_WellKnownType() string { return "Struct" }

func (m *Struct) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_Struct.Unmarshal(m, b)
}
func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_Struct.Marshal(b, m, deterministic)
}
func (m *Struct) XXX_Merge(src proto.Message) {
    xxx_messageInfo_Struct.Merge(m, src)
}
func (m *Struct) XXX_Size() int {
    return xxx_messageInfo_Struct.Size(m)
}
func (m *Struct) XXX_DiscardUnknown() {
    xxx_messageInfo_Struct.DiscardUnknown(m)
}

var xxx_messageInfo_Struct proto.InternalMessageInfo

func (m *Struct) GetFields() map[string]*Value {
    if m != nil {
        return m.Fields
    }
    return nil
}

// `Value` represents a dynamically typed value which can be either
// null, a number, a string, a boolean, a recursive struct value, or a
// list of values. A producer of value is expected to set one of that
// variants, absence of any variant indicates an error.
//
// The JSON representation for `Value` is JSON value.
type Value struct {
    // The kind of value.
    //
    // Types that are valid to be assigned to Kind:
    //	*Value_NullValue
    //	*Value_NumberValue
    //	*Value_StringValue
    //	*Value_BoolValue
    //	*Value_StructValue
    //	*Value_ListValue
    Kind isValue_Kind `protobuf_oneof:"kind"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}

func (m *Value) Reset()         { *m = Value{} }
func (m *Value) String() string { return proto.CompactTextString(m) }
func (*Value) ProtoMessage()    {}
func (*Value) Descriptor() ([]byte, []int) {
    return fileDescriptor_df322afd6c9fb402, []int{1}
}

func (*Value) XXX_WellKnownType() string { return "Value" }

func (m *Value) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_Value.Unmarshal(m, b)
}
func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_Value.Marshal(b, m, deterministic)
}
func (m *Value) XXX_Merge(src proto.Message) {
    xxx_messageInfo_Value.Merge(m, src)
}
func (m *Value) XXX_Size() int {
    return xxx_messageInfo_Value.Size(m)
}
func (m *Value) XXX_DiscardUnknown() {
    xxx_messageInfo_Value.DiscardUnknown(m)
}

var xxx_messageInfo_Value proto.InternalMessageInfo

type isValue_Kind interface {
    isValue_Kind()
}

type Value_NullValue struct {
    NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
}

type Value_NumberValue struct {
    NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"`
}

type Value_StringValue struct {
    StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"`
}

type Value_BoolValue struct {
    BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"`
}

type Value_StructValue struct {
    StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"`
}

type Value_ListValue struct {
    ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"`
}

func (*Value_NullValue) isValue_Kind() {}

func (*Value_NumberValue) isValue_Kind() {}

func (*Value_StringValue) isValue_Kind() {}

func (*Value_BoolValue) isValue_Kind() {}

func (*Value_StructValue) isValue_Kind() {}

func (*Value_ListValue) isValue_Kind() {}

func (m *Value) GetKind() isValue_Kind {
    if m != nil {
        return m.Kind
    }
    return nil
}

func (m *Value) GetNullValue() NullValue {
    if x, ok := m.GetKind().(*Value_NullValue); ok {
        return x.NullValue
    }
    return NullValue_NULL_VALUE
}

func (m *Value) GetNumberValue() float64 {
    if x, ok := m.GetKind().(*Value_NumberValue); ok {
        return x.NumberValue
    }
    return 0
}

func (m *Value) GetStringValue() string {
    if x, ok := m.GetKind().(*Value_StringValue); ok {
        return x.StringValue
    }
    return ""
}

func (m *Value) GetBoolValue() bool {
    if x, ok := m.GetKind().(*Value_BoolValue); ok {
        return x.BoolValue
    }
    return false
}

func (m *Value) GetStructValue() *Struct {
    if x, ok := m.GetKind().(*Value_StructValue); ok {
        return x.StructValue
    }
    return nil
}

func (m *Value) GetListValue() *ListValue {
    if x, ok := m.GetKind().(*Value_ListValue); ok {
        return x.ListValue
    }
    return nil
}

// XXX_OneofWrappers is for the internal use of the proto package.
func (*Value) XXX_OneofWrappers() []interface{} {
    return []interface{}{
        (*Value_NullValue)(nil),
        (*Value_NumberValue)(nil),
        (*Value_StringValue)(nil),
        (*Value_BoolValue)(nil),
        (*Value_StructValue)(nil),
        (*Value_ListValue)(nil),
    }
}

// `ListValue` is a wrapper around a repeated field of values.
//
// The JSON representation for `ListValue` is JSON array.
type ListValue struct {
    // Repeated field of dynamically typed values.
    Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}

func (m *ListValue) Reset()         { *m = ListValue{} }
func (m *ListValue) String() string { return proto.CompactTextString(m) }
func (*ListValue) ProtoMessage()    {}
func (*ListValue) Descriptor() ([]byte, []int) {
    return fileDescriptor_df322afd6c9fb402, []int{2}
}

func (*ListValue) XXX_WellKnownType() string { return "ListValue" }

func (m *ListValue) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_ListValue.Unmarshal(m, b)
}
func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_ListValue.Marshal(b, m, deterministic)
}
func (m *ListValue) XXX_Merge(src proto.Message) {
    xxx_messageInfo_ListValue.Merge(m, src)
}
func (m *ListValue) XXX_Size() int {
    return xxx_messageInfo_ListValue.Size(m)
}
func (m *ListValue) XXX_DiscardUnknown() {
    xxx_messageInfo_ListValue.DiscardUnknown(m)
}

var xxx_messageInfo_ListValue proto.InternalMessageInfo

func (m *ListValue) GetValues() []*Value {
    if m != nil {
        return m.Values
    }
    return nil
}

func init() {
    proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value)
    proto.RegisterType((*Struct)(nil), "google.protobuf.Struct")
    proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry")
    proto.RegisterType((*Value)(nil), "google.protobuf.Value")
    proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue")
}

func init() {
    proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_df322afd6c9fb402)
}

var fileDescriptor_df322afd6c9fb402 = []byte{
    // 417 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40,
    0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09,
    0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94,
    0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa,
    0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff,
    0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc,
    0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15,
    0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d,
    0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce,
    0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39,
    0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab,
    0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84,
    0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48,
    0x6c, 0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f,
    0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59,
    0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a,
    0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64,
    0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 0x6c, 0x78, 0xef, 0x92,
    0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25,
    0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37,
    0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6,
    0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 0x28, 0x4a, 0xfe, 0x55, 0xa4,
    0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda,
    0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9,
    0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53,
    0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00,
    0x00,
}
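For orientation, a minimal sketch of how the generated types above are used from Go. It assumes the import path this vendored package declares (github.com/golang/protobuf/ptypes/struct, package structpb); the field names in the example payload are made up.

package main

import (
    "fmt"

    structpb "github.com/golang/protobuf/ptypes/struct"
)

func main() {
    // Build the equivalent of the JSON object {"name":"buildx","workers":2,"debug":true}.
    s := &structpb.Struct{
        Fields: map[string]*structpb.Value{
            "name":    {Kind: &structpb.Value_StringValue{StringValue: "buildx"}},
            "workers": {Kind: &structpb.Value_NumberValue{NumberValue: 2}},
            "debug":   {Kind: &structpb.Value_BoolValue{BoolValue: true}},
        },
    }

    // The generated oneof accessors return the populated variant, or a zero value otherwise.
    fmt.Println(s.Fields["name"].GetStringValue())    // buildx
    fmt.Println(s.Fields["workers"].GetNumberValue()) // 2
}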
95 vendor/github.com/golang/protobuf/ptypes/struct/struct.proto generated vendored Normal file
@@ -0,0 +1,95 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

syntax = "proto3";

package google.protobuf;

option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option cc_enable_arenas = true;
option go_package = "github.com/golang/protobuf/ptypes/struct;structpb";
option java_package = "com.google.protobuf";
option java_outer_classname = "StructProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";

// `Struct` represents a structured data value, consisting of fields
// which map to dynamically typed values. In some languages, `Struct`
// might be supported by a native representation. For example, in
// scripting languages like JS a struct is represented as an
// object. The details of that representation are described together
// with the proto support for the language.
//
// The JSON representation for `Struct` is JSON object.
message Struct {
  // Unordered map of dynamically typed values.
  map<string, Value> fields = 1;
}

// `Value` represents a dynamically typed value which can be either
// null, a number, a string, a boolean, a recursive struct value, or a
// list of values. A producer of value is expected to set one of that
// variants, absence of any variant indicates an error.
//
// The JSON representation for `Value` is JSON value.
message Value {
  // The kind of value.
  oneof kind {
    // Represents a null value.
    NullValue null_value = 1;
    // Represents a double value.
    double number_value = 2;
    // Represents a string value.
    string string_value = 3;
    // Represents a boolean value.
    bool bool_value = 4;
    // Represents a structured value.
    Struct struct_value = 5;
    // Represents a repeated `Value`.
    ListValue list_value = 6;
  }
}

// `NullValue` is a singleton enumeration to represent the null value for the
// `Value` type union.
//
// The JSON representation for `NullValue` is JSON `null`.
enum NullValue {
  // Null value.
  NULL_VALUE = 0;
}

// `ListValue` is a wrapper around a repeated field of values.
//
// The JSON representation for `ListValue` is JSON array.
message ListValue {
  // Repeated field of dynamically typed values.
  repeated Value values = 1;
}
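To illustrate the JSON mapping documented above (Struct to JSON object, ListValue to JSON array, NullValue to JSON null), here is a small sketch that marshals a Value through the jsonpb package vendored in this same commit. The payload is invented for the example; the jsonpb.Marshaler API is the package's standard entry point.

package main

import (
    "fmt"

    "github.com/golang/protobuf/jsonpb"
    structpb "github.com/golang/protobuf/ptypes/struct"
)

func main() {
    // A Value holding a Struct, which in turn holds a list with a string and a null.
    v := &structpb.Value{
        Kind: &structpb.Value_StructValue{StructValue: &structpb.Struct{
            Fields: map[string]*structpb.Value{
                "tags": {Kind: &structpb.Value_ListValue{ListValue: &structpb.ListValue{
                    Values: []*structpb.Value{
                        {Kind: &structpb.Value_StringValue{StringValue: "linux/amd64"}},
                        {Kind: &structpb.Value_NullValue{NullValue: structpb.NullValue_NULL_VALUE}},
                    },
                }}},
            },
        }},
    }

    // jsonpb knows the well-known types, so the Value marshals as plain JSON.
    out, err := (&jsonpb.Marshaler{}).MarshalToString(v)
    if err != nil {
        panic(err)
    }
    fmt.Println(out) // {"tags":["linux/amd64",null]}
}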
Some files were not shown because too many files have changed in this diff.