test: add basic integration tests

Signed-off-by: Justin Chadwell <me@jedevc.com>
Justin Chadwell 2023-05-15 18:48:58 +01:00
parent e61a8cf637
commit 2d124e0ce9
73 changed files with 8537 additions and 2 deletions

@@ -3,6 +3,9 @@
ARG GO_VERSION=1.20
ARG XX_VERSION=1.1.2
ARG DOCKERD_VERSION=20.10.14
ARG GOTESTSUM_VERSION=v1.9.0
ARG REGISTRY_VERSION=2.8.0
ARG BUILDKIT_VERSION=v0.11.6
FROM docker:$DOCKERD_VERSION AS dockerd-release
@@ -18,6 +21,17 @@ ENV GOFLAGS=-mod=vendor
ENV CGO_ENABLED=0
WORKDIR /src
FROM registry:$REGISTRY_VERSION AS registry
FROM moby/buildkit:$BUILDKIT_VERSION AS buildkit
FROM gobase AS gotestsum
ARG GOTESTSUM_VERSION
ENV GOFLAGS=
RUN --mount=target=/root/.cache,type=cache \
GOBIN=/out/ go install "gotest.tools/gotestsum@${GOTESTSUM_VERSION}" && \
/out/gotestsum --version
FROM gobase AS buildx-version
RUN --mount=type=bind,target=. <<EOT
set -e
@@ -39,6 +53,7 @@ RUN --mount=type=bind,target=. \
EOT
FROM gobase AS test
ENV SKIP_INTEGRATION_TESTS=1
RUN --mount=type=bind,target=. \
--mount=type=cache,target=/root/.cache \
--mount=type=cache,target=/go/pkg/mod \
@@ -61,6 +76,17 @@ FROM binaries-$TARGETOS AS binaries
# enable scanning for this stage
ARG BUILDKIT_SBOM_SCAN_STAGE=true
FROM gobase AS integration-test-base
RUN apk add --no-cache docker runc containerd
COPY --link --from=gotestsum /out/gotestsum /usr/bin/
COPY --link --from=registry /bin/registry /usr/bin/
COPY --link --from=buildkit /usr/bin/buildkitd /usr/bin/
COPY --link --from=buildkit /usr/bin/buildctl /usr/bin/
COPY --link --from=binaries /buildx /usr/bin/
FROM integration-test-base AS integration-test
COPY . .
# Release
FROM --platform=$BUILDPLATFORM alpine AS releaser
WORKDIR /work

@@ -156,3 +156,29 @@ target "image-local" {
inherits = ["image"]
output = ["type=docker"]
}
variable "HTTP_PROXY" {
default = ""
}
variable "HTTPS_PROXY" {
default = ""
}
variable "NO_PROXY" {
default = ""
}
target "integration-test-base" {
inherits = ["_common"]
args = {
HTTP_PROXY = HTTP_PROXY
HTTPS_PROXY = HTTPS_PROXY
NO_PROXY = NO_PROXY
}
target = "integration-test-base"
output = ["type=cacheonly"]
}
target "integration-test" {
inherits = ["integration-test-base"]
target = "integration-test"
}

go.mod (4 lines changed)

@@ -8,7 +8,9 @@ require (
github.com/compose-spec/compose-go v1.13.4
github.com/containerd/console v1.0.3
github.com/containerd/containerd v1.7.0
github.com/containerd/continuity v0.3.0
github.com/containerd/typeurl/v2 v2.1.0
github.com/distribution/distribution/v3 v3.0.0-20230214150026-36d8c594d7aa
github.com/docker/cli v23.0.6+incompatible
github.com/docker/cli-docs-tool v0.5.1
github.com/docker/distribution v2.8.2+incompatible
@@ -82,11 +84,9 @@ require (
github.com/cenkalti/backoff/v4 v4.2.0 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cloudflare/cfssl v0.0.0-20181213083726-b94e044bb51e // indirect
github.com/containerd/continuity v0.3.0 // indirect
github.com/containerd/ttrpc v1.2.1 // indirect
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/distribution/distribution/v3 v3.0.0-20230214150026-36d8c594d7aa // indirect
github.com/docker/docker-credential-helpers v0.7.0 // indirect
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
github.com/docker/go-connections v0.4.0 // indirect

hack/test (new executable file, 62 lines)

@@ -0,0 +1,62 @@
#!/usr/bin/env bash
set -eu -o pipefail
: "${BUILDX_CMD=docker buildx}"
: "${CACHE_FROM=}"
: "${CACHE_TO=}"
: ${TEST_INTEGRATION=}
: ${TEST_REPORT_SUFFIX=}
: ${TEST_KEEP_CACHE=}
: ${TEST_DOCKERD=}
: ${TEST_BUILDKIT_IMAGE=}
if [ -n "$CACHE_FROM" ]; then
for cfrom in $CACHE_FROM; do
setFlags+=(--set "*.cache-from=$cfrom")
done
fi
if [ -n "$CACHE_TO" ]; then
for cto in $CACHE_TO; do
setFlags+=(--set "*.cache-to=$cto")
done
fi
if [ "$#" == 0 ]; then TEST_INTEGRATION=1; fi
while test $# -gt 0; do
case "$1" in
integration)
TEST_INTEGRATION=1
;;
*)
echo "unknown arg $1"
;;
esac
shift
done
iid="buildx-tests"
testReportsDir="$(pwd)/bin/testreports"
mkdir -p "$testReportsDir"
testReportsVol="-v $testReportsDir:/testreports"
gotestsumArgs="--format=standard-verbose --jsonfile=/testreports/go-test-report$TEST_REPORT_SUFFIX.json --junitfile=/testreports/junit-report$TEST_REPORT_SUFFIX.xml"
gotestArgs="-mod=vendor -coverprofile=/testreports/coverage-report$TEST_REPORT_SUFFIX.txt -covermode=atomic"
${BUILDX_CMD} bake integration-test "${setFlags[@]}" --set "*.output=type=docker,name=$iid"
cacheVolume="buildx-test-cache"
if ! docker container inspect "$cacheVolume" >/dev/null 2>/dev/null; then
docker create -v /root/.cache -v /root/.cache/registry -v /go/pkg/mod --name "$cacheVolume" alpine
fi
if [ "$TEST_INTEGRATION" == 1 ]; then
cid=$(docker create --rm -v /tmp $testReportsVol --volumes-from=$cacheVolume -e GITHUB_REF -e TEST_DOCKERD -e TEST_BUILDKIT_IMAGE -e SKIP_INTEGRATION_TESTS -e GOTESTSUM_FORMAT ${BUILDKIT_INTEGRATION_SNAPSHOTTER:+"-eBUILDKIT_INTEGRATION_SNAPSHOTTER"} -e BUILDKIT_REGISTRY_MIRROR_DIR=/root/.cache/registry --privileged $iid gotestsum $gotestsumArgs --packages="${TESTPKGS:-./...}" -- $gotestArgs ${TESTFLAGS:--v})
docker start -a -i $cid
fi
if [ "$TEST_KEEP_CACHE" != "1" ]; then
docker rm -v $cacheVolume
fi

tests/build.go (new file, 102 lines)

@@ -0,0 +1,102 @@
package tests
import (
"errors"
"fmt"
"os"
"testing"
"github.com/containerd/containerd/platforms"
"github.com/containerd/continuity/fs/fstest"
"github.com/moby/buildkit/util/contentutil"
"github.com/moby/buildkit/util/testutil"
"github.com/moby/buildkit/util/testutil/integration"
"github.com/stretchr/testify/require"
)
func buildCmd(sb integration.Sandbox, args ...string) (string, error) {
args = append([]string{"build", "--progress=quiet"}, args...)
cmd := buildxCmd(sb, args...)
out, err := cmd.CombinedOutput()
return string(out), err
}
var buildTests = []func(t *testing.T, sb integration.Sandbox){
testBuild,
testBuildLocalExport,
testBuildRegistryExport,
testBuildTarExport,
}
func testBuild(t *testing.T, sb integration.Sandbox) {
dir := createTestProject(t)
out, err := buildCmd(sb, dir)
require.NoError(t, err, string(out))
}
func testBuildLocalExport(t *testing.T, sb integration.Sandbox) {
dir := createTestProject(t)
out, err := buildCmd(sb, fmt.Sprintf("--output=type=local,dest=%s/result", dir), dir)
require.NoError(t, err, string(out))
dt, err := os.ReadFile(dir + "/result/bar")
require.NoError(t, err)
require.Equal(t, "foo", string(dt))
}
func testBuildTarExport(t *testing.T, sb integration.Sandbox) {
dir := createTestProject(t)
out, err := buildCmd(sb, fmt.Sprintf("--output=type=tar,dest=%s/result.tar", dir), dir)
require.NoError(t, err, string(out))
dt, err := os.ReadFile(fmt.Sprintf("%s/result.tar", dir))
require.NoError(t, err)
m, err := testutil.ReadTarToMap(dt, false)
require.NoError(t, err)
require.Contains(t, m, "bar")
require.Equal(t, "foo", string(m["bar"].Data))
}
func testBuildRegistryExport(t *testing.T, sb integration.Sandbox) {
dir := createTestProject(t)
registry, err := sb.NewRegistry()
if errors.Is(err, integration.ErrRequirements) {
t.Skip(err.Error())
}
require.NoError(t, err)
target := registry + "/buildx/registry:latest"
out, err := buildCmd(sb, fmt.Sprintf("--output=type=image,name=%s,push=true", target), dir)
require.NoError(t, err, string(out))
desc, provider, err := contentutil.ProviderFromRef(target)
require.NoError(t, err)
imgs, err := testutil.ReadImages(sb.Context(), provider, desc)
require.NoError(t, err)
pk := platforms.Format(platforms.Normalize(platforms.DefaultSpec()))
img := imgs.Find(pk)
require.NotNil(t, img)
require.Len(t, img.Layers, 1)
require.Equal(t, img.Layers[0]["bar"].Data, []byte("foo"))
}
func createTestProject(t *testing.T) string {
dockerfile := []byte(`
FROM busybox:latest AS base
COPY foo /etc/foo
RUN cp /etc/foo /etc/bar
FROM scratch
COPY --from=base /etc/bar /bar
`)
dir, err := tmpdir(
t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("foo", []byte("foo"), 0600),
)
require.NoError(t, err)
return dir
}
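
Each suite in the tests package follows the same shape: a thin wrapper around buildxCmd for one subcommand plus an exported slice of test functions that the top-level TestIntegration aggregates. As a hedged illustration of that pattern (the testBuildNoCache name and the flag choice are editorial, not part of this commit), a new case would look like the sketch below and then be added to the buildTests slice above:

```go
// Illustrative sketch only: a new build test reuses the shared helpers and is
// appended to buildTests so the top-level TestIntegration picks it up.
func testBuildNoCache(t *testing.T, sb integration.Sandbox) {
	dir := createTestProject(t)

	// --no-cache is a standard `buildx build` flag; the assertion mirrors testBuild.
	out, err := buildCmd(sb, "--no-cache", dir)
	require.NoError(t, err, string(out))
}
```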

tests/inspect.go (new file, 38 lines)

@@ -0,0 +1,38 @@
package tests
import (
"strings"
"testing"
"github.com/moby/buildkit/util/testutil/integration"
"github.com/stretchr/testify/require"
)
func inspectCmd(sb integration.Sandbox, args ...string) (string, error) {
args = append([]string{"inspect"}, args...)
cmd := buildxCmd(sb, args...)
out, err := cmd.CombinedOutput()
return string(out), err
}
var inspectTests = []func(t *testing.T, sb integration.Sandbox){
testInspect,
}
func testInspect(t *testing.T, sb integration.Sandbox) {
out, err := inspectCmd(sb)
require.NoError(t, err, string(out))
var name string
var driver string
for _, line := range strings.Split(out, "\n") {
if v, ok := strings.CutPrefix(line, "Name:"); ok && name == "" {
name = strings.TrimSpace(v)
}
if v, ok := strings.CutPrefix(line, "Driver:"); ok && driver == "" {
driver = strings.TrimSpace(v)
}
}
require.Equal(t, sb.Address(), name)
require.Equal(t, sb.Name(), driver)
}

tests/integration.go (new file, 30 lines)

@@ -0,0 +1,30 @@
package tests
import (
"os"
"os/exec"
"testing"
"github.com/containerd/continuity/fs/fstest"
"github.com/moby/buildkit/util/testutil/integration"
)
func tmpdir(t *testing.T, appliers ...fstest.Applier) (string, error) {
tmpdir := t.TempDir()
if err := fstest.Apply(appliers...).Apply(tmpdir); err != nil {
return "", err
}
return tmpdir, nil
}
func buildxCmd(sb integration.Sandbox, args ...string) *exec.Cmd {
if builder := sb.Address(); builder != "" {
args = append([]string{"--builder=" + builder}, args...)
}
cmd := exec.Command("buildx", args...)
if context := sb.DockerAddress(); context != "" {
cmd.Env = append(os.Environ(), "DOCKER_CONTEXT="+context)
}
return cmd
}

tests/integration_test.go (new file, 45 lines)

@@ -0,0 +1,45 @@
package tests
import (
"os"
"testing"
"github.com/distribution/distribution/v3/reference"
"github.com/docker/buildx/tests/workers"
"github.com/moby/buildkit/util/testutil/integration"
)
func init() {
if integration.IsTestDockerd() {
workers.InitDockerWorker()
workers.InitDockerContainerWorker()
} else {
workers.InitRemoteWorker()
}
}
func TestIntegration(t *testing.T) {
var tests []func(t *testing.T, sb integration.Sandbox)
tests = append(tests, buildTests...)
tests = append(tests, inspectTests...)
tests = append(tests, lsTests...)
testIntegration(t, tests...)
}
func testIntegration(t *testing.T, funcs ...func(t *testing.T, sb integration.Sandbox)) {
mirroredImages := integration.OfficialImages("busybox:latest", "alpine:latest")
buildkitImage := "docker.io/moby/buildkit:buildx-stable-1"
if integration.IsTestDockerd() {
if img, ok := os.LookupEnv("TEST_BUILDKIT_IMAGE"); ok {
ref, err := reference.ParseNormalizedNamed(img)
if err == nil {
buildkitImage = ref.String()
}
}
}
mirroredImages["moby/buildkit:buildx-stable-1"] = buildkitImage
mirrors := integration.WithMirroredImages(mirroredImages)
tests := integration.TestFuncs(funcs...)
integration.Run(t, tests, mirrors)
}
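
Note that a TEST_BUILDKIT_IMAGE override is only used once it parses as a valid reference, and parsing also normalizes short names to fully qualified ones. A small standalone sketch of that normalization (editorial; it only assumes the distribution/v3 reference package already imported above):

```go
package main

import (
	"fmt"

	"github.com/distribution/distribution/v3/reference"
)

func main() {
	// Short names are expanded to fully qualified references before being
	// mirrored in place of moby/buildkit:buildx-stable-1.
	ref, err := reference.ParseNormalizedNamed("moby/buildkit:v0.11.6")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.String()) // docker.io/moby/buildkit:v0.11.6
}
```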

tests/ls.go (new file, 33 lines)

@@ -0,0 +1,33 @@
package tests
import (
"strings"
"testing"
"github.com/moby/buildkit/util/testutil/integration"
"github.com/stretchr/testify/require"
)
func lsCmd(sb integration.Sandbox, args ...string) (string, error) {
args = append([]string{"ls"}, args...)
cmd := buildxCmd(sb, args...)
out, err := cmd.CombinedOutput()
return string(out), err
}
var lsTests = []func(t *testing.T, sb integration.Sandbox){
testLs,
}
func testLs(t *testing.T, sb integration.Sandbox) {
out, err := lsCmd(sb)
require.NoError(t, err, string(out))
for _, line := range strings.Split(out, "\n") {
if strings.Contains(line, sb.Address()) {
require.Contains(t, line, sb.Name())
return
}
}
require.Fail(t, out)
}

tests/workers/backend.go (new file, 26 lines)

@@ -0,0 +1,26 @@
package workers
type backend struct {
builder string
context string
}
func (s *backend) Address() string {
return s.builder
}
func (s *backend) DockerAddress() string {
return s.context
}
func (s *backend) ContainerdAddress() string {
return ""
}
func (s *backend) Snapshotter() string {
return ""
}
func (s *backend) Rootless() bool {
return false
}
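
All three workers hand this struct back as their integration.Backend, so the five methods above are exactly the surface the buildkit test harness consumes. An editorial compile-time assertion (it would require importing github.com/moby/buildkit/util/testutil/integration in this file) makes that contract explicit:

```go
// Editorial sketch: assert at compile time that backend satisfies the
// integration.Backend interface returned by the workers' New methods.
var _ integration.Backend = &backend{}
```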

@@ -0,0 +1,66 @@
package workers
import (
"context"
"os"
"os/exec"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/util/testutil/integration"
"github.com/pkg/errors"
)
func InitDockerContainerWorker() {
integration.Register(&containerWorker{
id: "docker-container",
})
}
type containerWorker struct {
id string
}
func (w *containerWorker) Name() string {
return w.id
}
func (w *containerWorker) Rootless() bool {
return false
}
func (w *containerWorker) New(ctx context.Context, cfg *integration.BackendConfig) (integration.Backend, func() error, error) {
bk, bkclose, err := dockerWorker{id: w.id}.New(ctx, cfg)
if err != nil {
return bk, bkclose, err
}
name := "integration-container-" + identity.NewID()
cmd := exec.Command("buildx", "create",
"--bootstrap",
"--name="+name,
"--config="+cfg.ConfigFile,
"--driver=docker-container",
"--driver-opt=network=host",
)
cmd.Env = append(os.Environ(), "DOCKER_CONTEXT="+bk.DockerAddress())
if err := cmd.Run(); err != nil {
return nil, nil, errors.Wrapf(err, "failed to create buildx instance %s", name)
}
cl := func() error {
var err error
if err1 := bkclose(); err == nil {
err = err1
}
cmd := exec.Command("buildx", "rm", "-f", name)
if err1 := cmd.Run(); err == nil {
err = err1
}
return err
}
return &backend{
context: bk.DockerAddress(),
builder: name,
}, cl, nil
}

tests/workers/docker.go (new file, 64 lines)

@@ -0,0 +1,64 @@
package workers
import (
"context"
"os/exec"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/util/testutil/integration"
"github.com/pkg/errors"
)
func InitDockerWorker() {
integration.Register(&dockerWorker{
id: "docker",
})
}
type dockerWorker struct {
id string
}
func (c dockerWorker) Name() string {
return c.id
}
func (c dockerWorker) Rootless() bool {
return false
}
func (c dockerWorker) New(ctx context.Context, cfg *integration.BackendConfig) (b integration.Backend, cl func() error, err error) {
moby := integration.Moby{
ID: c.id,
}
bk, bkclose, err := moby.New(ctx, cfg)
if err != nil {
return bk, cl, err
}
name := "integration-" + identity.NewID()
cmd := exec.Command("docker", "context", "create",
name,
"--docker", "host="+bk.DockerAddress(),
)
if err := cmd.Run(); err != nil {
return nil, cl, errors.Wrapf(err, "failed to create buildx instance %s", name)
}
cl = func() error {
var err error
if err1 := bkclose(); err == nil {
err = err1
}
cmd := exec.Command("docker", "context", "rm", "-f", name)
if err1 := cmd.Run(); err1 != nil {
err = errors.Wrapf(err1, "failed to remove buildx instance %s", name)
}
return err
}
return &backend{
builder: name,
context: name,
}, cl, nil
}

tests/workers/remote.go (new file, 63 lines)

@@ -0,0 +1,63 @@
package workers
import (
"context"
"os/exec"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/util/testutil/integration"
"github.com/pkg/errors"
)
func InitRemoteWorker() {
integration.Register(&remoteWorker{
id: "remote",
})
}
type remoteWorker struct {
id string
}
func (w remoteWorker) Name() string {
return w.id
}
func (w remoteWorker) Rootless() bool {
return false
}
func (w remoteWorker) New(ctx context.Context, cfg *integration.BackendConfig) (b integration.Backend, cl func() error, err error) {
oci := integration.OCI{ID: w.id}
bk, bkclose, err := oci.New(ctx, cfg)
if err != nil {
return bk, cl, err
}
name := "integration-remote-" + identity.NewID()
cmd := exec.Command("buildx", "create",
"--bootstrap",
"--name="+name,
"--driver=remote",
bk.Address(),
)
if err := cmd.Run(); err != nil {
return nil, nil, errors.Wrapf(err, "failed to create buildx instance %s", name)
}
cl = func() error {
var err error
if err1 := bkclose(); err == nil {
err = err1
}
cmd := exec.Command("buildx", "rm", "-f", name)
if err1 := cmd.Run(); err == nil {
err = err1
}
return err
}
return &backend{
builder: name,
}, cl, nil
}

@@ -0,0 +1,522 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package archive
import (
"archive/tar"
"context"
"encoding/json"
"fmt"
"io"
"path"
"sort"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/platforms"
digest "github.com/opencontainers/go-digest"
ocispecs "github.com/opencontainers/image-spec/specs-go"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
type exportOptions struct {
manifests []ocispec.Descriptor
platform platforms.MatchComparer
allPlatforms bool
skipDockerManifest bool
blobRecordOptions blobRecordOptions
}
// ExportOpt defines options for configuring exported descriptors
type ExportOpt func(context.Context, *exportOptions) error
// WithPlatform defines the platform that manifest lists are required to match
// when not exporting all platforms.
// Additionally, platform is used to resolve image configs for
// Docker v1.1, v1.2 format compatibility.
func WithPlatform(p platforms.MatchComparer) ExportOpt {
return func(ctx context.Context, o *exportOptions) error {
o.platform = p
return nil
}
}
// WithAllPlatforms exports all manifests from a manifest list.
// Missing content will fail the export.
func WithAllPlatforms() ExportOpt {
return func(ctx context.Context, o *exportOptions) error {
o.allPlatforms = true
return nil
}
}
// WithSkipDockerManifest skips creation of the Docker compatible
// manifest.json file.
func WithSkipDockerManifest() ExportOpt {
return func(ctx context.Context, o *exportOptions) error {
o.skipDockerManifest = true
return nil
}
}
// WithImage adds the provided images to the exported archive.
func WithImage(is images.Store, name string) ExportOpt {
return func(ctx context.Context, o *exportOptions) error {
img, err := is.Get(ctx, name)
if err != nil {
return err
}
img.Target.Annotations = addNameAnnotation(name, img.Target.Annotations)
o.manifests = append(o.manifests, img.Target)
return nil
}
}
// WithImages adds multiple images to the exported archive.
func WithImages(imgs []images.Image) ExportOpt {
return func(ctx context.Context, o *exportOptions) error {
for _, img := range imgs {
img.Target.Annotations = addNameAnnotation(img.Name, img.Target.Annotations)
o.manifests = append(o.manifests, img.Target)
}
return nil
}
}
// WithManifest adds a manifest to the exported archive.
// When names are given they will be set on the manifest in the
// exported archive, creating an index record for each name.
// When no names are provided, it is up to the caller to put a name annotation
// on the manifest descriptor if needed.
func WithManifest(manifest ocispec.Descriptor, names ...string) ExportOpt {
return func(ctx context.Context, o *exportOptions) error {
if len(names) == 0 {
o.manifests = append(o.manifests, manifest)
}
for _, name := range names {
mc := manifest
mc.Annotations = addNameAnnotation(name, manifest.Annotations)
o.manifests = append(o.manifests, mc)
}
return nil
}
}
// BlobFilter returns false if the blob should not be included in the archive.
type BlobFilter func(ocispec.Descriptor) bool
// WithBlobFilter specifies BlobFilter.
func WithBlobFilter(f BlobFilter) ExportOpt {
return func(ctx context.Context, o *exportOptions) error {
o.blobRecordOptions.blobFilter = f
return nil
}
}
// WithSkipNonDistributableBlobs excludes non-distributable blobs such as Windows base layers.
func WithSkipNonDistributableBlobs() ExportOpt {
f := func(desc ocispec.Descriptor) bool {
return !images.IsNonDistributable(desc.MediaType)
}
return WithBlobFilter(f)
}
func addNameAnnotation(name string, base map[string]string) map[string]string {
annotations := map[string]string{}
for k, v := range base {
annotations[k] = v
}
annotations[images.AnnotationImageName] = name
annotations[ocispec.AnnotationRefName] = ociReferenceName(name)
return annotations
}
// Export implements Exporter.
func Export(ctx context.Context, store content.Provider, writer io.Writer, opts ...ExportOpt) error {
var eo exportOptions
for _, opt := range opts {
if err := opt(ctx, &eo); err != nil {
return err
}
}
records := []tarRecord{
ociLayoutFile(""),
ociIndexRecord(eo.manifests),
}
algorithms := map[string]struct{}{}
dManifests := map[digest.Digest]*exportManifest{}
resolvedIndex := map[digest.Digest]digest.Digest{}
for _, desc := range eo.manifests {
switch desc.MediaType {
case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
mt, ok := dManifests[desc.Digest]
if !ok {
// TODO(containerd): Skip if already added
r, err := getRecords(ctx, store, desc, algorithms, &eo.blobRecordOptions)
if err != nil {
return err
}
records = append(records, r...)
mt = &exportManifest{
manifest: desc,
}
dManifests[desc.Digest] = mt
}
name := desc.Annotations[images.AnnotationImageName]
if name != "" && !eo.skipDockerManifest {
mt.names = append(mt.names, name)
}
case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
d, ok := resolvedIndex[desc.Digest]
if !ok {
if err := desc.Digest.Validate(); err != nil {
return err
}
records = append(records, blobRecord(store, desc, &eo.blobRecordOptions))
p, err := content.ReadBlob(ctx, store, desc)
if err != nil {
return err
}
var index ocispec.Index
if err := json.Unmarshal(p, &index); err != nil {
return err
}
var manifests []ocispec.Descriptor
for _, m := range index.Manifests {
if eo.platform != nil {
if m.Platform == nil || eo.platform.Match(*m.Platform) {
manifests = append(manifests, m)
} else if !eo.allPlatforms {
continue
}
}
r, err := getRecords(ctx, store, m, algorithms, &eo.blobRecordOptions)
if err != nil {
return err
}
records = append(records, r...)
}
if !eo.skipDockerManifest {
if len(manifests) >= 1 {
if len(manifests) > 1 {
sort.SliceStable(manifests, func(i, j int) bool {
if manifests[i].Platform == nil {
return false
}
if manifests[j].Platform == nil {
return true
}
return eo.platform.Less(*manifests[i].Platform, *manifests[j].Platform)
})
}
d = manifests[0].Digest
dManifests[d] = &exportManifest{
manifest: manifests[0],
}
} else if eo.platform != nil {
return fmt.Errorf("no manifest found for platform: %w", errdefs.ErrNotFound)
}
}
resolvedIndex[desc.Digest] = d
}
if d != "" {
if name := desc.Annotations[images.AnnotationImageName]; name != "" {
mt := dManifests[d]
mt.names = append(mt.names, name)
}
}
default:
return fmt.Errorf("only manifests may be exported: %w", errdefs.ErrInvalidArgument)
}
}
if len(dManifests) > 0 {
tr, err := manifestsRecord(ctx, store, dManifests)
if err != nil {
return fmt.Errorf("unable to create manifests file: %w", err)
}
records = append(records, tr)
}
if len(algorithms) > 0 {
records = append(records, directoryRecord("blobs/", 0755))
for alg := range algorithms {
records = append(records, directoryRecord("blobs/"+alg+"/", 0755))
}
}
tw := tar.NewWriter(writer)
defer tw.Close()
return writeTar(ctx, tw, records)
}
func getRecords(ctx context.Context, store content.Provider, desc ocispec.Descriptor, algorithms map[string]struct{}, brOpts *blobRecordOptions) ([]tarRecord, error) {
var records []tarRecord
exportHandler := func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
if err := desc.Digest.Validate(); err != nil {
return nil, err
}
records = append(records, blobRecord(store, desc, brOpts))
algorithms[desc.Digest.Algorithm().String()] = struct{}{}
return nil, nil
}
childrenHandler := images.ChildrenHandler(store)
handlers := images.Handlers(
childrenHandler,
images.HandlerFunc(exportHandler),
)
// Walk sequentially since the number of fetches is likely one and doing in
// parallel requires locking the export handler
if err := images.Walk(ctx, handlers, desc); err != nil {
return nil, err
}
return records, nil
}
type tarRecord struct {
Header *tar.Header
CopyTo func(context.Context, io.Writer) (int64, error)
}
type blobRecordOptions struct {
blobFilter BlobFilter
}
func blobRecord(cs content.Provider, desc ocispec.Descriptor, opts *blobRecordOptions) tarRecord {
if opts != nil && opts.blobFilter != nil && !opts.blobFilter(desc) {
return tarRecord{}
}
path := path.Join("blobs", desc.Digest.Algorithm().String(), desc.Digest.Encoded())
return tarRecord{
Header: &tar.Header{
Name: path,
Mode: 0444,
Size: desc.Size,
Typeflag: tar.TypeReg,
},
CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
r, err := cs.ReaderAt(ctx, desc)
if err != nil {
return 0, fmt.Errorf("failed to get reader: %w", err)
}
defer r.Close()
// Verify digest
dgstr := desc.Digest.Algorithm().Digester()
n, err := io.Copy(io.MultiWriter(w, dgstr.Hash()), content.NewReader(r))
if err != nil {
return 0, fmt.Errorf("failed to copy to tar: %w", err)
}
if dgstr.Digest() != desc.Digest {
return 0, fmt.Errorf("unexpected digest %s copied", dgstr.Digest())
}
return n, nil
},
}
}
func directoryRecord(name string, mode int64) tarRecord {
return tarRecord{
Header: &tar.Header{
Name: name,
Mode: mode,
Typeflag: tar.TypeDir,
},
}
}
func ociLayoutFile(version string) tarRecord {
if version == "" {
version = ocispec.ImageLayoutVersion
}
layout := ocispec.ImageLayout{
Version: version,
}
b, err := json.Marshal(layout)
if err != nil {
panic(err)
}
return tarRecord{
Header: &tar.Header{
Name: ocispec.ImageLayoutFile,
Mode: 0444,
Size: int64(len(b)),
Typeflag: tar.TypeReg,
},
CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
n, err := w.Write(b)
return int64(n), err
},
}
}
func ociIndexRecord(manifests []ocispec.Descriptor) tarRecord {
index := ocispec.Index{
Versioned: ocispecs.Versioned{
SchemaVersion: 2,
},
Manifests: manifests,
}
b, err := json.Marshal(index)
if err != nil {
panic(err)
}
return tarRecord{
Header: &tar.Header{
Name: "index.json",
Mode: 0644,
Size: int64(len(b)),
Typeflag: tar.TypeReg,
},
CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
n, err := w.Write(b)
return int64(n), err
},
}
}
type exportManifest struct {
manifest ocispec.Descriptor
names []string
}
func manifestsRecord(ctx context.Context, store content.Provider, manifests map[digest.Digest]*exportManifest) (tarRecord, error) {
mfsts := make([]struct {
Config string
RepoTags []string
Layers []string
}, len(manifests))
var i int
for _, m := range manifests {
p, err := content.ReadBlob(ctx, store, m.manifest)
if err != nil {
return tarRecord{}, err
}
var manifest ocispec.Manifest
if err := json.Unmarshal(p, &manifest); err != nil {
return tarRecord{}, err
}
if err := manifest.Config.Digest.Validate(); err != nil {
return tarRecord{}, fmt.Errorf("invalid manifest %q: %w", m.manifest.Digest, err)
}
dgst := manifest.Config.Digest
if err := dgst.Validate(); err != nil {
return tarRecord{}, err
}
mfsts[i].Config = path.Join("blobs", dgst.Algorithm().String(), dgst.Encoded())
for _, l := range manifest.Layers {
path := path.Join("blobs", l.Digest.Algorithm().String(), l.Digest.Encoded())
mfsts[i].Layers = append(mfsts[i].Layers, path)
}
for _, name := range m.names {
nname, err := familiarizeReference(name)
if err != nil {
return tarRecord{}, err
}
mfsts[i].RepoTags = append(mfsts[i].RepoTags, nname)
}
i++
}
b, err := json.Marshal(mfsts)
if err != nil {
return tarRecord{}, err
}
return tarRecord{
Header: &tar.Header{
Name: "manifest.json",
Mode: 0644,
Size: int64(len(b)),
Typeflag: tar.TypeReg,
},
CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
n, err := w.Write(b)
return int64(n), err
},
}, nil
}
func writeTar(ctx context.Context, tw *tar.Writer, recordsWithEmpty []tarRecord) error {
var records []tarRecord
for _, r := range recordsWithEmpty {
if r.Header != nil {
records = append(records, r)
}
}
sort.Slice(records, func(i, j int) bool {
return records[i].Header.Name < records[j].Header.Name
})
var last string
for _, record := range records {
if record.Header.Name == last {
continue
}
last = record.Header.Name
if err := tw.WriteHeader(record.Header); err != nil {
return err
}
if record.CopyTo != nil {
n, err := record.CopyTo(ctx, tw)
if err != nil {
return err
}
if n != record.Header.Size {
return fmt.Errorf("unexpected copy size for %s", record.Header.Name)
}
} else if record.Header.Size > 0 {
return fmt.Errorf("no content to write to record with non-zero size for %s", record.Header.Name)
}
}
return nil
}

@@ -0,0 +1,420 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package archive provides a Docker and OCI compatible importer
package archive
import (
"archive/tar"
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"path"
"github.com/containerd/containerd/archive/compression"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/labels"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/platforms"
digest "github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
type importOpts struct {
compress bool
}
// ImportOpt is an option for importing an OCI index
type ImportOpt func(*importOpts) error
// WithImportCompression compresses uncompressed layers on import.
// This is used for import formats which do not include the manifest.
func WithImportCompression() ImportOpt {
return func(io *importOpts) error {
io.compress = true
return nil
}
}
// ImportIndex imports an index from a tar archive image bundle
// - implements Docker v1.1, v1.2 and OCI v1.
// - prefers OCI v1 when provided
// - creates OCI index for Docker formats
// - normalizes Docker references and adds as OCI ref name
// e.g. alpine:latest -> docker.io/library/alpine:latest
// - existing OCI reference names are untouched
func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opts ...ImportOpt) (ocispec.Descriptor, error) {
var (
tr = tar.NewReader(reader)
ociLayout ocispec.ImageLayout
mfsts []struct {
Config string
RepoTags []string
Layers []string
}
symlinks = make(map[string]string)
blobs = make(map[string]ocispec.Descriptor)
iopts importOpts
)
for _, o := range opts {
if err := o(&iopts); err != nil {
return ocispec.Descriptor{}, err
}
}
for {
hdr, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return ocispec.Descriptor{}, err
}
if hdr.Typeflag == tar.TypeSymlink {
symlinks[hdr.Name] = path.Join(path.Dir(hdr.Name), hdr.Linkname)
}
//nolint:staticcheck // TypeRegA is deprecated but we may still receive an external tar with TypeRegA
if hdr.Typeflag != tar.TypeReg && hdr.Typeflag != tar.TypeRegA {
if hdr.Typeflag != tar.TypeDir {
log.G(ctx).WithField("file", hdr.Name).Debug("file type ignored")
}
continue
}
hdrName := path.Clean(hdr.Name)
if hdrName == ocispec.ImageLayoutFile {
if err = onUntarJSON(tr, &ociLayout); err != nil {
return ocispec.Descriptor{}, fmt.Errorf("untar oci layout %q: %w", hdr.Name, err)
}
} else if hdrName == "manifest.json" {
if err = onUntarJSON(tr, &mfsts); err != nil {
return ocispec.Descriptor{}, fmt.Errorf("untar manifest %q: %w", hdr.Name, err)
}
} else {
dgst, err := onUntarBlob(ctx, tr, store, hdr.Size, "tar-"+hdrName)
if err != nil {
return ocispec.Descriptor{}, fmt.Errorf("failed to ingest %q: %w", hdr.Name, err)
}
blobs[hdrName] = ocispec.Descriptor{
Digest: dgst,
Size: hdr.Size,
}
}
}
// If OCI layout was given, interpret the tar as an OCI layout.
// When not provided, the layout of the tar will be interpreted
// as Docker v1.1 or v1.2.
if ociLayout.Version != "" {
if ociLayout.Version != ocispec.ImageLayoutVersion {
return ocispec.Descriptor{}, fmt.Errorf("unsupported OCI version %s", ociLayout.Version)
}
idx, ok := blobs["index.json"]
if !ok {
return ocispec.Descriptor{}, fmt.Errorf("missing index.json in OCI layout %s", ocispec.ImageLayoutVersion)
}
idx.MediaType = ocispec.MediaTypeImageIndex
return idx, nil
}
if mfsts == nil {
return ocispec.Descriptor{}, errors.New("unrecognized image format")
}
for name, linkname := range symlinks {
desc, ok := blobs[linkname]
if !ok {
return ocispec.Descriptor{}, fmt.Errorf("no target for symlink layer from %q to %q", name, linkname)
}
blobs[name] = desc
}
idx := ocispec.Index{
Versioned: specs.Versioned{
SchemaVersion: 2,
},
}
for _, mfst := range mfsts {
config, ok := blobs[mfst.Config]
if !ok {
return ocispec.Descriptor{}, fmt.Errorf("image config %q not found", mfst.Config)
}
config.MediaType = images.MediaTypeDockerSchema2Config
layers, err := resolveLayers(ctx, store, mfst.Layers, blobs, iopts.compress)
if err != nil {
return ocispec.Descriptor{}, fmt.Errorf("failed to resolve layers: %w", err)
}
manifest := struct {
SchemaVersion int `json:"schemaVersion"`
MediaType string `json:"mediaType"`
Config ocispec.Descriptor `json:"config"`
Layers []ocispec.Descriptor `json:"layers"`
}{
SchemaVersion: 2,
MediaType: images.MediaTypeDockerSchema2Manifest,
Config: config,
Layers: layers,
}
desc, err := writeManifest(ctx, store, manifest, manifest.MediaType)
if err != nil {
return ocispec.Descriptor{}, fmt.Errorf("write docker manifest: %w", err)
}
imgPlatforms, err := images.Platforms(ctx, store, desc)
if err != nil {
return ocispec.Descriptor{}, fmt.Errorf("unable to resolve platform: %w", err)
}
if len(imgPlatforms) > 0 {
// Only one platform can be resolved from non-index manifest,
// The platform can only come from the config included above,
// if the config has no platform it can be safely omitted.
desc.Platform = &imgPlatforms[0]
// If the image we've just imported is a Windows image without the OSVersion set,
// we could just assume it matches this host's OS Version. Without this, the
// children labels might not be set on the image content, leading to it being
// garbage collected, breaking the image.
// See: https://github.com/containerd/containerd/issues/5690
if desc.Platform.OS == "windows" && desc.Platform.OSVersion == "" {
platform := platforms.DefaultSpec()
desc.Platform.OSVersion = platform.OSVersion
}
}
if len(mfst.RepoTags) == 0 {
idx.Manifests = append(idx.Manifests, desc)
} else {
// Add descriptor per tag
for _, ref := range mfst.RepoTags {
mfstdesc := desc
normalized, err := normalizeReference(ref)
if err != nil {
return ocispec.Descriptor{}, err
}
mfstdesc.Annotations = map[string]string{
images.AnnotationImageName: normalized,
ocispec.AnnotationRefName: ociReferenceName(normalized),
}
idx.Manifests = append(idx.Manifests, mfstdesc)
}
}
}
return writeManifest(ctx, store, idx, ocispec.MediaTypeImageIndex)
}
const (
kib = 1024
mib = 1024 * kib
jsonLimit = 20 * mib
)
func onUntarJSON(r io.Reader, j interface{}) error {
return json.NewDecoder(io.LimitReader(r, jsonLimit)).Decode(j)
}
func onUntarBlob(ctx context.Context, r io.Reader, store content.Ingester, size int64, ref string) (digest.Digest, error) {
dgstr := digest.Canonical.Digester()
if err := content.WriteBlob(ctx, store, ref, io.TeeReader(r, dgstr.Hash()), ocispec.Descriptor{Size: size}); err != nil {
return "", err
}
return dgstr.Digest(), nil
}
func resolveLayers(ctx context.Context, store content.Store, layerFiles []string, blobs map[string]ocispec.Descriptor, compress bool) ([]ocispec.Descriptor, error) {
layers := make([]ocispec.Descriptor, len(layerFiles))
descs := map[digest.Digest]*ocispec.Descriptor{}
filters := []string{}
for i, f := range layerFiles {
desc, ok := blobs[f]
if !ok {
return nil, fmt.Errorf("layer %q not found", f)
}
layers[i] = desc
descs[desc.Digest] = &layers[i]
filters = append(filters, fmt.Sprintf("labels.\"%s\"==%s", labels.LabelUncompressed, desc.Digest.String()))
}
err := store.Walk(ctx, func(info content.Info) error {
dgst, ok := info.Labels[labels.LabelUncompressed]
if ok {
desc := descs[digest.Digest(dgst)]
if desc != nil {
desc.Digest = info.Digest
desc.Size = info.Size
mediaType, err := detectLayerMediaType(ctx, store, *desc)
if err != nil {
return fmt.Errorf("failed to detect media type of layer: %w", err)
}
desc.MediaType = mediaType
}
}
return nil
}, filters...)
if err != nil {
return nil, fmt.Errorf("failure checking for compressed blobs: %w", err)
}
for i, desc := range layers {
if desc.MediaType != "" {
continue
}
// Open blob, resolve media type
ra, err := store.ReaderAt(ctx, desc)
if err != nil {
return nil, fmt.Errorf("failed to open %q (%s): %w", layerFiles[i], desc.Digest, err)
}
s, err := compression.DecompressStream(content.NewReader(ra))
if err != nil {
ra.Close()
return nil, fmt.Errorf("failed to detect compression for %q: %w", layerFiles[i], err)
}
if s.GetCompression() == compression.Uncompressed {
if compress {
if err := desc.Digest.Validate(); err != nil {
return nil, err
}
ref := fmt.Sprintf("compress-blob-%s-%s", desc.Digest.Algorithm().String(), desc.Digest.Encoded())
labels := map[string]string{
labels.LabelUncompressed: desc.Digest.String(),
}
layers[i], err = compressBlob(ctx, store, s, ref, content.WithLabels(labels))
if err != nil {
s.Close()
ra.Close()
return nil, err
}
layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip
} else {
layers[i].MediaType = images.MediaTypeDockerSchema2Layer
}
} else {
layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip
}
s.Close()
ra.Close()
}
return layers, nil
}
func compressBlob(ctx context.Context, cs content.Store, r io.Reader, ref string, opts ...content.Opt) (desc ocispec.Descriptor, err error) {
w, err := content.OpenWriter(ctx, cs, content.WithRef(ref))
if err != nil {
return ocispec.Descriptor{}, fmt.Errorf("failed to open writer: %w", err)
}
defer func() {
w.Close()
if err != nil {
cs.Abort(ctx, ref)
}
}()
if err := w.Truncate(0); err != nil {
return ocispec.Descriptor{}, fmt.Errorf("failed to truncate writer: %w", err)
}
cw, err := compression.CompressStream(w, compression.Gzip)
if err != nil {
return ocispec.Descriptor{}, err
}
if _, err := io.Copy(cw, r); err != nil {
return ocispec.Descriptor{}, err
}
if err := cw.Close(); err != nil {
return ocispec.Descriptor{}, err
}
cst, err := w.Status()
if err != nil {
return ocispec.Descriptor{}, fmt.Errorf("failed to get writer status: %w", err)
}
desc.Digest = w.Digest()
desc.Size = cst.Offset
if err := w.Commit(ctx, desc.Size, desc.Digest, opts...); err != nil {
if !errdefs.IsAlreadyExists(err) {
return ocispec.Descriptor{}, fmt.Errorf("failed to commit: %w", err)
}
}
return desc, nil
}
func writeManifest(ctx context.Context, cs content.Ingester, manifest interface{}, mediaType string) (ocispec.Descriptor, error) {
manifestBytes, err := json.Marshal(manifest)
if err != nil {
return ocispec.Descriptor{}, err
}
desc := ocispec.Descriptor{
MediaType: mediaType,
Digest: digest.FromBytes(manifestBytes),
Size: int64(len(manifestBytes)),
}
if err := content.WriteBlob(ctx, cs, "manifest-"+desc.Digest.String(), bytes.NewReader(manifestBytes), desc); err != nil {
return ocispec.Descriptor{}, err
}
return desc, nil
}
func detectLayerMediaType(ctx context.Context, store content.Store, desc ocispec.Descriptor) (string, error) {
var mediaType string
// need to parse existing blob to use the proper media type
bytes := make([]byte, 10)
ra, err := store.ReaderAt(ctx, desc)
if err != nil {
return "", fmt.Errorf("failed to read content store to detect layer media type: %w", err)
}
defer ra.Close()
_, err = ra.ReadAt(bytes, 0)
if err != nil && err != io.EOF {
return "", fmt.Errorf("failed to read header bytes from layer to detect media type: %w", err)
}
if err == io.EOF {
// in the case of an empty layer then the media type should be uncompressed
return images.MediaTypeDockerSchema2Layer, nil
}
switch c := compression.DetectCompression(bytes); c {
case compression.Uncompressed:
mediaType = images.MediaTypeDockerSchema2Layer
default:
mediaType = images.MediaTypeDockerSchema2LayerGzip
}
return mediaType, nil
}

@@ -0,0 +1,115 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package archive
import (
"fmt"
"strings"
"github.com/containerd/containerd/reference"
distref "github.com/containerd/containerd/reference/docker"
"github.com/opencontainers/go-digest"
)
// FilterRefPrefix restricts references to having the given image
// prefix. Tag-only references will have the prefix prepended.
func FilterRefPrefix(image string) func(string) string {
return refTranslator(image, true)
}
// AddRefPrefix prepends the given image prefix to tag-only references,
// while returning full references unmodified.
func AddRefPrefix(image string) func(string) string {
return refTranslator(image, false)
}
// refTranslator creates a reference which only has a tag or verifies
// a full reference.
func refTranslator(image string, checkPrefix bool) func(string) string {
return func(ref string) string {
if image == "" {
return ""
}
// Check if ref is full reference
if strings.ContainsAny(ref, "/:@") {
// If not prefixed, don't include image
if checkPrefix && !isImagePrefix(ref, image) {
return ""
}
return ref
}
return image + ":" + ref
}
}
func isImagePrefix(s, prefix string) bool {
if !strings.HasPrefix(s, prefix) {
return false
}
if len(s) > len(prefix) {
switch s[len(prefix)] {
case '/', ':', '@':
// Prevent matching partial namespaces
default:
return false
}
}
return true
}
func normalizeReference(ref string) (string, error) {
// TODO: Replace this function to not depend on reference package
normalized, err := distref.ParseDockerRef(ref)
if err != nil {
return "", fmt.Errorf("normalize image ref %q: %w", ref, err)
}
return normalized.String(), nil
}
func familiarizeReference(ref string) (string, error) {
named, err := distref.ParseNormalizedNamed(ref)
if err != nil {
return "", fmt.Errorf("failed to parse %q: %w", ref, err)
}
named = distref.TagNameOnly(named)
return distref.FamiliarString(named), nil
}
func ociReferenceName(name string) string {
// OCI defines the reference name as only a tag excluding the
// repository. The containerd annotation contains the full image name
// since the tag is insufficient for correctly naming and referring to an
// image
var ociRef string
if spec, err := reference.Parse(name); err == nil {
ociRef = spec.Object
} else {
ociRef = name
}
return ociRef
}
// DigestTranslator creates a digest reference by adding the
// digest to an image name
func DigestTranslator(prefix string) func(digest.Digest) string {
return func(dgst digest.Digest) string {
return prefix + "@" + dgst.String()
}
}

vendor/github.com/containerd/continuity/.gitignore (generated, vendored; new file, 25 lines)

@@ -0,0 +1,25 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
bin
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof

vendor/github.com/containerd/continuity/.golangci.yml (generated, vendored; new file, 18 lines)

@@ -0,0 +1,18 @@
linters:
enable:
- structcheck
- varcheck
- staticcheck
- unconvert
- gofmt
- goimports
- ineffassign
- revive
- vet
- unused
- misspell
disable:
- errcheck
run:
timeout: 3m

vendor/github.com/containerd/continuity/.mailmap (generated, vendored; new file, 10 lines)

@@ -0,0 +1,10 @@
Aaron Lehmann <alehmann@netflix.com> <aaron.lehmann@docker.com>
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> <suda.akihiro@lab.ntt.co.jp>
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> <suda.kyoto@gmail.com>
Derek McGowan <derek@mcg.dev> <derek@mcgstyle.net>
Michael Crosby <crosbymichael@gmail.com> <michael@thepasture.io>
Phil Estes <estesp@gmail.com> <estesp@linux.vnet.ibm.com>
Phil Estes <estesp@gmail.com> <estesp@amazon.com>
Stephen J Day <stevvooe@gmail.com> <stephen.day@getcruise.com>
Stephen J Day <stevvooe@gmail.com> <stevvooe@users.noreply.github.com>
Stephen J Day <stevvooe@gmail.com> <stephen.day@docker.com>

vendor/github.com/containerd/continuity/Makefile (generated, vendored; new file, 73 lines)

@@ -0,0 +1,73 @@
# Copyright The containerd Authors.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Set an output prefix, which is the local directory if not specified
PREFIX?=$(shell pwd)
PKG=github.com/containerd/continuity
PACKAGES=$(shell go list -mod=vendor ./... | grep -v /vendor/)
TEST_REQUIRES_ROOT_PACKAGES=$(filter \
${PACKAGES}, \
$(shell \
for f in $$(git grep -l testutil.RequiresRoot | grep -v Makefile); do \
d="$$(dirname $$f)"; \
[ "$$d" = "." ] && echo "${PKG}" && continue; \
echo "${PKG}/$$d"; \
done | sort -u) \
)
.PHONY: clean all lint build test binaries
.DEFAULT: default
all: AUTHORS clean lint build test binaries
AUTHORS: .mailmap .git/HEAD
git log --format='%aN <%aE>' | sort -fu > $@
${PREFIX}/bin/continuity:
@echo "+ $@"
@(cd cmd/continuity && go build -mod=mod -o $@ ${GO_GCFLAGS} .)
generate:
go generate -mod=vendor $(PACKAGES)
lint:
@echo "+ $@"
@golangci-lint run
build:
@echo "+ $@"
@go build -mod=vendor -v ${GO_LDFLAGS} $(PACKAGES)
test:
@echo "+ $@"
@go test -mod=vendor $(PACKAGES)
root-test:
@echo "+ $@"
@go test -exec sudo ${TEST_REQUIRES_ROOT_PACKAGES} -test.root
test-compile:
@echo "+ $@"
@for pkg in $(PACKAGES); do go test -mod=vendor -c $$pkg; done
binaries: ${PREFIX}/bin/continuity
@echo "+ $@"
@if [ x$$GOOS = xwindows ]; then echo "+ continuity -> continuity.exe"; mv ${PREFIX}/bin/continuity ${PREFIX}/bin/continuity.exe; fi
clean:
@echo "+ $@"
@rm -rf "${PREFIX}/bin/continuity" "${PREFIX}/bin/continuity.exe"

vendor/github.com/containerd/continuity/README.md (generated, vendored; new file, 89 lines)

@@ -0,0 +1,89 @@
# continuity
[![Go Reference](https://pkg.go.dev/badge/github.com/containerd/continuity.svg)](https://pkg.go.dev/github.com/containerd/continuity)
[![Build Status](https://github.com/containerd/continuity/workflows/Continuity/badge.svg)](https://github.com/containerd/continuity/actions?query=workflow%3AContinuity+branch%3Amain)
A transport-agnostic, filesystem metadata manifest system
This project is a staging area for experiments in providing transport agnostic
metadata storage.
See [opencontainers/runtime-spec#11](https://github.com/opencontainers/runtime-spec/issues/11)
for more details.
## Manifest Format
A continuity manifest encodes filesystem metadata in Protocol Buffers.
Refer to [proto/manifest.proto](proto/manifest.proto) for more details.
## Usage
Build:
```console
$ make
```
Create a manifest (of this repo itself):
```console
$ ./bin/continuity build . > /tmp/a.pb
```
Dump a manifest:
```console
$ ./bin/continuity ls /tmp/a.pb
...
-rw-rw-r-- 270 B /.gitignore
-rw-rw-r-- 88 B /.mailmap
-rw-rw-r-- 187 B /.travis.yml
-rw-rw-r-- 359 B /AUTHORS
-rw-rw-r-- 11 kB /LICENSE
-rw-rw-r-- 1.5 kB /Makefile
...
-rw-rw-r-- 986 B /testutil_test.go
drwxrwxr-x 0 B /version
-rw-rw-r-- 478 B /version/version.go
```
Verify a manifest:
```console
$ ./bin/continuity verify . /tmp/a.pb
```
Break the directory and restore using the manifest:
```console
$ chmod 777 Makefile
$ ./bin/continuity verify . /tmp/a.pb
2017/06/23 08:00:34 error verifying manifest: resource "/Makefile" has incorrect mode: -rwxrwxrwx != -rw-rw-r--
$ ./bin/continuity apply . /tmp/a.pb
$ stat -c %a Makefile
664
$ ./bin/continuity verify . /tmp/a.pb
```
## Platforms
continuity primarily targets Linux. Continuity may compile for and work on
other operating systems, but those platforms are not tested.
## Contribution Guide
### Building Proto Package
If you change the proto file you will need to rebuild the generated Go with `go generate`.
```console
$ go generate ./proto
```
## Project details
continuity is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
As a containerd sub-project, you will find the:
* [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md),
* [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS),
* and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md)
information in our [`containerd/project`](https://github.com/containerd/project) repository.

vendor/github.com/containerd/continuity/context.go (generated, vendored; new file, 660 lines)

@@ -0,0 +1,660 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package continuity
import (
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/containerd/continuity/devices"
driverpkg "github.com/containerd/continuity/driver"
"github.com/containerd/continuity/pathdriver"
"github.com/opencontainers/go-digest"
)
var (
// ErrNotFound represents the resource not found
ErrNotFound = fmt.Errorf("not found")
// ErrNotSupported represents the resource not supported
ErrNotSupported = fmt.Errorf("not supported")
)
// Context represents a file system context for accessing resources. The
// responsibility of the context is to convert system specific resources to
// generic Resource objects. Most of this is safe path manipulation, as well
// as extraction of resource details.
type Context interface {
Apply(Resource) error
Verify(Resource) error
Resource(string, os.FileInfo) (Resource, error)
Walk(filepath.WalkFunc) error
}
// SymlinkPath is intended to give the symlink target value
// in a root context. Target and linkname are absolute paths
// not under the given root.
type SymlinkPath func(root, linkname, target string) (string, error)
// ContextOptions represents options to create a new context.
type ContextOptions struct {
Digester Digester
Driver driverpkg.Driver
PathDriver pathdriver.PathDriver
Provider ContentProvider
}
// context represents a file system context for accessing resources.
// Generally, all path qualified access and system considerations should land
// here.
type context struct {
driver driverpkg.Driver
pathDriver pathdriver.PathDriver
root string
digester Digester
provider ContentProvider
}
// NewContext returns a Context associated with root. The default driver will
// be used, as returned by NewDriver.
func NewContext(root string) (Context, error) {
return NewContextWithOptions(root, ContextOptions{})
}
// NewContextWithOptions returns a Context associated with the root.
func NewContextWithOptions(root string, options ContextOptions) (Context, error) {
// normalize to absolute path
pathDriver := options.PathDriver
if pathDriver == nil {
pathDriver = pathdriver.LocalPathDriver
}
root = pathDriver.FromSlash(root)
root, err := pathDriver.Abs(pathDriver.Clean(root))
if err != nil {
return nil, err
}
driver := options.Driver
if driver == nil {
driver, err = driverpkg.NewSystemDriver()
if err != nil {
return nil, err
}
}
digester := options.Digester
if digester == nil {
digester = simpleDigester{digest.Canonical}
}
// Check the root directory. Need to be a little careful here. We are
// allowing a link for now, but this may have odd behavior when
// canonicalizing paths. As long as all files are opened through the link
// path, this should be okay.
fi, err := driver.Stat(root)
if err != nil {
return nil, err
}
if !fi.IsDir() {
return nil, &os.PathError{Op: "NewContext", Path: root, Err: os.ErrInvalid}
}
return &context{
root: root,
driver: driver,
pathDriver: pathDriver,
digester: digester,
provider: options.Provider,
}, nil
}
// Resource returns the resource as path p, populating the entry with info
// from fi. The path p should be the path of the resource in the context,
// typically obtained through Walk or from the value of Resource.Path(). If fi
// is nil, it will be resolved.
func (c *context) Resource(p string, fi os.FileInfo) (Resource, error) {
fp, err := c.fullpath(p)
if err != nil {
return nil, err
}
if fi == nil {
fi, err = c.driver.Lstat(fp)
if err != nil {
return nil, err
}
}
base, err := newBaseResource(p, fi)
if err != nil {
return nil, err
}
base.xattrs, err = c.resolveXAttrs(fp, fi, base)
if err != nil && err != ErrNotSupported {
return nil, err
}
// TODO(stevvooe): Handle windows alternate data streams.
if fi.Mode().IsRegular() {
dgst, err := c.digest(p)
if err != nil {
return nil, err
}
return newRegularFile(*base, base.paths, fi.Size(), dgst)
}
if fi.Mode().IsDir() {
return newDirectory(*base)
}
if fi.Mode()&os.ModeSymlink != 0 {
// We handle relative links vs absolute links by including a
// beginning slash for absolute links. Effectively, the bundle's
// root is treated as the absolute link anchor.
target, err := c.driver.Readlink(fp)
if err != nil {
return nil, err
}
return newSymLink(*base, target)
}
if fi.Mode()&os.ModeNamedPipe != 0 {
return newNamedPipe(*base, base.paths)
}
if fi.Mode()&os.ModeDevice != 0 {
deviceDriver, ok := c.driver.(driverpkg.DeviceInfoDriver)
if !ok {
return nil, fmt.Errorf("device extraction is not supported for %s: %w", fp, ErrNotSupported)
}
// character and block devices merely need to recover the
// major/minor device number.
major, minor, err := deviceDriver.DeviceInfo(fi)
if err != nil {
return nil, err
}
return newDevice(*base, base.paths, major, minor)
}
return nil, fmt.Errorf("%q (%v) is not supported: %w", fp, fi.Mode(), ErrNotFound)
}
func (c *context) verifyMetadata(resource, target Resource) error {
if target.Mode() != resource.Mode() {
return fmt.Errorf("resource %q has incorrect mode: %v != %v", target.Path(), target.Mode(), resource.Mode())
}
if target.UID() != resource.UID() {
return fmt.Errorf("unexpected uid for %q: %v != %v", target.Path(), target.UID(), resource.GID())
}
if target.GID() != resource.GID() {
return fmt.Errorf("unexpected gid for %q: %v != %v", target.Path(), target.GID(), target.GID())
}
if xattrer, ok := resource.(XAttrer); ok {
txattrer, tok := target.(XAttrer)
if !tok {
return fmt.Errorf("resource %q has xattrs but target does not support them", resource.Path())
}
// For xattrs, only ensure that we have those defined in the resource
// and their values match. We can ignore other xattrs. In other words,
// we only verify that target has the subset defined by resource.
txattrs := txattrer.XAttrs()
for attr, value := range xattrer.XAttrs() {
tvalue, ok := txattrs[attr]
if !ok {
return fmt.Errorf("resource %q target missing xattr %q", resource.Path(), attr)
}
if !bytes.Equal(value, tvalue) {
return fmt.Errorf("xattr %q value differs for resource %q", attr, resource.Path())
}
}
}
switch r := resource.(type) {
case RegularFile:
// TODO(stevvooe): Another reason to use a record-based approach. We
// have to do another type switch to get this to work. This could be
// fixed with an Equal function, but let's study this a little more to
// be sure.
t, ok := target.(RegularFile)
if !ok {
return fmt.Errorf("resource %q target not a regular file", r.Path())
}
if t.Size() != r.Size() {
return fmt.Errorf("resource %q target has incorrect size: %v != %v", t.Path(), t.Size(), r.Size())
}
case Directory:
t, ok := target.(Directory)
if !ok {
return fmt.Errorf("resource %q target not a directory", t.Path())
}
case SymLink:
t, ok := target.(SymLink)
if !ok {
return fmt.Errorf("resource %q target not a symlink", t.Path())
}
if t.Target() != r.Target() {
return fmt.Errorf("resource %q target has mismatched target: %q != %q", t.Path(), t.Target(), r.Target())
}
case Device:
t, ok := target.(Device)
if !ok {
return fmt.Errorf("resource %q is not a device", t.Path())
}
if t.Major() != r.Major() || t.Minor() != r.Minor() {
return fmt.Errorf("resource %q has mismatched major/minor numbers: %d,%d != %d,%d", t.Path(), t.Major(), t.Minor(), r.Major(), r.Minor())
}
case NamedPipe:
if _, ok := target.(NamedPipe); !ok {
return fmt.Errorf("resource %q is not a named pipe", r.Path())
}
default:
return fmt.Errorf("cannot verify resource: %v", resource)
}
return nil
}
// Verify the resource in the context. An error will be returned if a
// discrepancy is found.
func (c *context) Verify(resource Resource) error {
fp, err := c.fullpath(resource.Path())
if err != nil {
return err
}
fi, err := c.driver.Lstat(fp)
if err != nil {
return err
}
target, err := c.Resource(resource.Path(), fi)
if err != nil {
return err
}
if target.Path() != resource.Path() {
return fmt.Errorf("resource paths do not match: %q != %q", target.Path(), resource.Path())
}
if err := c.verifyMetadata(resource, target); err != nil {
return err
}
if h, isHardlinkable := resource.(Hardlinkable); isHardlinkable {
hardlinkKey, err := newHardlinkKey(fi)
if err == errNotAHardLink {
if len(h.Paths()) > 1 {
return fmt.Errorf("%q is not a hardlink to %q", h.Paths()[1], resource.Path())
}
} else if err != nil {
return err
}
for _, path := range h.Paths()[1:] {
fpLink, err := c.fullpath(path)
if err != nil {
return err
}
fiLink, err := c.driver.Lstat(fpLink)
if err != nil {
return err
}
targetLink, err := c.Resource(path, fiLink)
if err != nil {
return err
}
hardlinkKeyLink, err := newHardlinkKey(fiLink)
if err != nil {
return err
}
if hardlinkKeyLink != hardlinkKey {
return fmt.Errorf("%q is not a hardlink to %q", path, resource.Path())
}
if err := c.verifyMetadata(resource, targetLink); err != nil {
return err
}
}
}
switch r := resource.(type) {
case RegularFile:
t, ok := target.(RegularFile)
if !ok {
return fmt.Errorf("resource %q target not a regular file", r.Path())
}
// TODO(stevvooe): This may need to get a little more sophisticated
// for digest comparison. We may want to actually calculate the
// provided digests, rather than the implementations having an
// overlap.
if !digestsMatch(t.Digests(), r.Digests()) {
return fmt.Errorf("digests for resource %q do not match: %v != %v", t.Path(), t.Digests(), r.Digests())
}
}
return nil
}
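
For orientation, a minimal sketch of how Resource and Verify are typically driven from outside the package, assuming the value returned by continuity.NewContext (exercised later in fstest/compare.go) exposes the Resource and Verify methods defined here; the root and path below are purely illustrative.

package main

import (
    "log"

    "github.com/containerd/continuity"
)

func main() {
    ctx, err := continuity.NewContext("/tmp/root") // context rooted at an unpacked tree
    if err != nil {
        log.Fatal(err)
    }
    // Resolve the resource from disk (fi is nil, so it is Lstat'd), then
    // re-verify it; any drift in metadata or content surfaces as an error.
    r, err := ctx.Resource("/etc/hosts", nil)
    if err != nil {
        log.Fatal(err)
    }
    if err := ctx.Verify(r); err != nil {
        log.Fatalf("verification failed: %v", err)
    }
}
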
func (c *context) checkoutFile(fp string, rf RegularFile) error {
if c.provider == nil {
return fmt.Errorf("no file provider")
}
var (
r io.ReadCloser
err error
)
for _, dgst := range rf.Digests() {
r, err = c.provider.Reader(dgst)
if err == nil {
break
}
}
if err != nil {
return fmt.Errorf("file content could not be provided: %w", err)
}
defer r.Close()
return atomicWriteFile(fp, r, rf.Size(), rf.Mode())
}
// Apply the resource to the context. An error will be returned if the
// operation fails. Depending on the resource type, the resource may be
// created. For resources that cannot be resolved, an error will be returned.
func (c *context) Apply(resource Resource) error {
fp, err := c.fullpath(resource.Path())
if err != nil {
return err
}
if !strings.HasPrefix(fp, c.root) {
return fmt.Errorf("resource %v escapes root", resource)
}
var chmod = true
fi, err := c.driver.Lstat(fp)
if err != nil {
if !os.IsNotExist(err) {
return err
}
}
switch r := resource.(type) {
case RegularFile:
if fi == nil {
if err := c.checkoutFile(fp, r); err != nil {
return fmt.Errorf("error checking out file %q: %w", resource.Path(), err)
}
chmod = false
} else {
if !fi.Mode().IsRegular() {
return fmt.Errorf("file %q should be a regular file, but is not", resource.Path())
}
if fi.Size() != r.Size() {
if err := c.checkoutFile(fp, r); err != nil {
return fmt.Errorf("error checking out file %q: %w", resource.Path(), err)
}
} else {
for _, dgst := range r.Digests() {
f, err := os.Open(fp)
if err != nil {
return fmt.Errorf("failure opening file for read %q: %w", resource.Path(), err)
}
compared, err := dgst.Algorithm().FromReader(f)
if err == nil && dgst != compared {
if err := c.checkoutFile(fp, r); err != nil {
return fmt.Errorf("error checking out file %q: %w", resource.Path(), err)
}
break
}
if err1 := f.Close(); err == nil {
err = err1
}
if err != nil {
return fmt.Errorf("error checking digest for %q: %w", resource.Path(), err)
}
}
}
}
case Directory:
if fi == nil {
if err := c.driver.Mkdir(fp, resource.Mode()); err != nil {
return err
}
} else if !fi.Mode().IsDir() {
return fmt.Errorf("%q should be a directory, but is not", resource.Path())
}
case SymLink:
var target string // only possibly set if target resource is a symlink
if fi != nil {
if fi.Mode()&os.ModeSymlink != 0 {
target, err = c.driver.Readlink(fp)
if err != nil {
return err
}
}
}
if target != r.Target() {
if fi != nil {
if err := c.driver.Remove(fp); err != nil { // RemoveAll in case of directory?
return err
}
}
if err := c.driver.Symlink(r.Target(), fp); err != nil {
return err
}
}
case Device:
if fi == nil {
if err := c.driver.Mknod(fp, resource.Mode(), int(r.Major()), int(r.Minor())); err != nil {
return err
}
} else if (fi.Mode() & os.ModeDevice) == 0 {
return fmt.Errorf("%q should be a device, but is not", resource.Path())
} else {
major, minor, err := devices.DeviceInfo(fi)
if err != nil {
return err
}
if major != r.Major() || minor != r.Minor() {
if err := c.driver.Remove(fp); err != nil {
return err
}
if err := c.driver.Mknod(fp, resource.Mode(), int(r.Major()), int(r.Minor())); err != nil {
return err
}
}
}
case NamedPipe:
if fi == nil {
if err := c.driver.Mkfifo(fp, resource.Mode()); err != nil {
return err
}
} else if (fi.Mode() & os.ModeNamedPipe) == 0 {
return fmt.Errorf("%q should be a named pipe, but is not", resource.Path())
}
}
if h, isHardlinkable := resource.(Hardlinkable); isHardlinkable {
for _, path := range h.Paths() {
if path == resource.Path() {
continue
}
lp, err := c.fullpath(path)
if err != nil {
return err
}
// If something already exists at the link path, remove it so the hard
// link can be created; the Lstat error is nil when the path exists.
if _, err := c.driver.Lstat(lp); err == nil {
c.driver.Remove(lp)
}
if err := c.driver.Link(fp, lp); err != nil {
return err
}
}
}
// Update filemode if file was not created
if chmod {
if err := c.driver.Lchmod(fp, resource.Mode()); err != nil {
return err
}
}
if err := c.driver.Lchown(fp, resource.UID(), resource.GID()); err != nil {
return err
}
if xattrer, ok := resource.(XAttrer); ok {
// For xattrs, only ensure that we have those defined in the resource
// and their values are set. We can ignore other xattrs. In other words,
// we only set xattrs defined by the resource but never remove any.
if _, ok := resource.(SymLink); ok {
lxattrDriver, ok := c.driver.(driverpkg.LXAttrDriver)
if !ok {
return fmt.Errorf("unsupported symlink xattr for resource %q", resource.Path())
}
if err := lxattrDriver.LSetxattr(fp, xattrer.XAttrs()); err != nil {
return err
}
} else {
xattrDriver, ok := c.driver.(driverpkg.XAttrDriver)
if !ok {
return fmt.Errorf("unsupported xattr for resource %q", resource.Path())
}
if err := xattrDriver.Setxattr(fp, xattrer.XAttrs()); err != nil {
return err
}
}
}
return nil
}
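
A hedged sketch of Apply repairing drifted metadata in place, under the same assumption that the value returned by continuity.NewContext exposes Resource, Apply and Verify. Because the file's size and digests still match, Apply skips checkout and only resets mode, ownership and xattrs; the paths are illustrative and a unix host is assumed.

package main

import (
    "log"
    "os"

    "github.com/containerd/continuity"
)

func main() {
    ctx, err := continuity.NewContext("/tmp/root")
    if err != nil {
        log.Fatal(err)
    }
    // Snapshot the resource, drift its mode on disk, then Apply to restore it.
    r, err := ctx.Resource("/etc/hosts", nil)
    if err != nil {
        log.Fatal(err)
    }
    if err := os.Chmod("/tmp/root/etc/hosts", 0600); err != nil {
        log.Fatal(err)
    }
    // Size and digest still match, so Apply skips checkout and only resets
    // the metadata recorded on the resource.
    if err := ctx.Apply(r); err != nil {
        log.Fatal(err)
    }
    if err := ctx.Verify(r); err != nil {
        log.Fatal(err)
    }
}
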
// Walk provides a convenience function to call filepath.Walk correctly for
// the context. Otherwise identical to filepath.Walk, the path argument is
// corrected to be contained within the context.
func (c *context) Walk(fn filepath.WalkFunc) error {
root := c.root
fi, err := c.driver.Lstat(c.root)
if err == nil && fi.Mode()&os.ModeSymlink != 0 {
root, err = c.driver.Readlink(c.root)
if err != nil {
return err
}
}
return c.pathDriver.Walk(root, func(p string, fi os.FileInfo, _ error) error {
contained, err := c.containWithRoot(p, root)
return fn(contained, fi, err)
})
}
// fullpath returns the system path for the resource, joined with the context
// root. The path p must be a part of the context.
func (c *context) fullpath(p string) (string, error) {
p = c.pathDriver.Join(c.root, p)
if !strings.HasPrefix(p, c.root) {
return "", fmt.Errorf("invalid context path")
}
return p, nil
}
// containWithRoot cleans and sanitizes the filesystem path p to be an absolute
// path, effectively relative to the passed root. Extra care should be used
// when calling this instead of contain. This is needed for Walk because, if
// the context root is a symlink, it must be evaluated prior to the Walk.
func (c *context) containWithRoot(p string, root string) (string, error) {
sanitized, err := c.pathDriver.Rel(root, p)
if err != nil {
return "", err
}
// ZOMBIES(stevvooe): In certain cases, we may want to remap these to a
// "containment error", so the caller can decide what to do.
return c.pathDriver.Join("/", c.pathDriver.Clean(sanitized)), nil
}
// digest returns the digest of the file at path p, relative to the root.
func (c *context) digest(p string) (digest.Digest, error) {
f, err := c.driver.Open(c.pathDriver.Join(c.root, p))
if err != nil {
return "", err
}
defer f.Close()
return c.digester.Digest(f)
}
// resolveXAttrs attempts to resolve the extended attributes for the resource
// at the path fp, which is the full path to the resource. If the resource
// cannot have xattrs, nil will be returned.
func (c *context) resolveXAttrs(fp string, fi os.FileInfo, base *resource) (map[string][]byte, error) {
if fi.Mode().IsRegular() || fi.Mode().IsDir() {
xattrDriver, ok := c.driver.(driverpkg.XAttrDriver)
if !ok {
return nil, fmt.Errorf("xattr extraction is not supported: %w", ErrNotSupported)
}
return xattrDriver.Getxattr(fp)
}
if fi.Mode()&os.ModeSymlink != 0 {
lxattrDriver, ok := c.driver.(driverpkg.LXAttrDriver)
if !ok {
return nil, fmt.Errorf("xattr extraction for symlinks is not supported: %w", ErrNotSupported)
}
return lxattrDriver.LGetxattr(fp)
}
return nil, nil
}

View File

@ -0,0 +1,21 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package devices
import "fmt"
var ErrNotSupported = fmt.Errorf("not supported")

View File

@ -0,0 +1,76 @@
//go:build !windows
// +build !windows
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package devices
import (
"fmt"
"os"
"syscall"
"golang.org/x/sys/unix"
)
func DeviceInfo(fi os.FileInfo) (uint64, uint64, error) {
sys, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return 0, 0, fmt.Errorf("cannot extract device from os.FileInfo")
}
//nolint:unconvert
dev := uint64(sys.Rdev)
return uint64(unix.Major(dev)), uint64(unix.Minor(dev)), nil
}
// Mknod provides a shortcut for creating a device node or fifo, translating
// the portable file mode into the corresponding mknod mode bits.
func Mknod(p string, mode os.FileMode, maj, min int) error {
var (
m = syscallMode(mode.Perm())
dev uint64
)
if mode&os.ModeDevice != 0 {
dev = unix.Mkdev(uint32(maj), uint32(min))
if mode&os.ModeCharDevice != 0 {
m |= unix.S_IFCHR
} else {
m |= unix.S_IFBLK
}
} else if mode&os.ModeNamedPipe != 0 {
m |= unix.S_IFIFO
}
return mknod(p, m, dev)
}
// syscallMode returns the syscall-specific mode bits from Go's portable mode bits.
func syscallMode(i os.FileMode) (o uint32) {
o |= uint32(i.Perm())
if i&os.ModeSetuid != 0 {
o |= unix.S_ISUID
}
if i&os.ModeSetgid != 0 {
o |= unix.S_ISGID
}
if i&os.ModeSticky != 0 {
o |= unix.S_ISVTX
}
return
}
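
A unix-only sketch of the two exported helpers above; creating a fifo needs no privileges, and /dev/null shows the major/minor extraction. The paths are illustrative.

package main

import (
    "fmt"
    "log"
    "os"

    "github.com/containerd/continuity/devices"
)

func main() {
    // ModeNamedPipe (without ModeDevice) makes Mknod create a fifo.
    fifo := "/tmp/devices-demo.fifo"
    os.Remove(fifo)
    if err := devices.Mknod(fifo, os.ModeNamedPipe|0644, 0, 0); err != nil {
        log.Fatal(err)
    }

    fi, err := os.Lstat("/dev/null")
    if err != nil {
        log.Fatal(err)
    }
    major, minor, err := devices.DeviceInfo(fi)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("/dev/null is device %d:%d\n", major, minor)
}
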

View File

@ -0,0 +1,26 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package devices
import (
"fmt"
"os"
)
func DeviceInfo(fi os.FileInfo) (uint64, uint64, error) {
return 0, 0, fmt.Errorf("cannot get device info on windows: %w", ErrNotSupported)
}

View File

@ -0,0 +1,26 @@
//go:build freebsd
// +build freebsd
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package devices
import "golang.org/x/sys/unix"
func mknod(path string, mode uint32, dev uint64) (err error) {
return unix.Mknod(path, mode, dev)
}

View File

@ -0,0 +1,26 @@
//go:build !(freebsd || windows)
// +build !freebsd,!windows
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package devices
import "golang.org/x/sys/unix"
func mknod(path string, mode uint32, dev uint64) (err error) {
return unix.Mknod(path, mode, int(dev))
}

100
vendor/github.com/containerd/continuity/digests.go generated vendored Normal file
View File

@ -0,0 +1,100 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package continuity
import (
"fmt"
"io"
"sort"
"github.com/opencontainers/go-digest"
)
// Digester produces a digest for a given read stream
type Digester interface {
Digest(io.Reader) (digest.Digest, error)
}
// ContentProvider produces a read stream for a given digest
type ContentProvider interface {
Reader(digest.Digest) (io.ReadCloser, error)
}
type simpleDigester struct {
algorithm digest.Algorithm
}
func (sd simpleDigester) Digest(r io.Reader) (digest.Digest, error) {
digester := sd.algorithm.Digester()
if _, err := io.Copy(digester.Hash(), r); err != nil {
return "", err
}
return digester.Digest(), nil
}
// uniqifyDigests sorts and uniqifies the provided digests, ensuring that the
// digests are not repeated and no two digests with the same algorithm have
// different values. Because a stable sort is used, this has the effect of
// "zipping" digest collections from multiple resources.
func uniqifyDigests(digests ...digest.Digest) ([]digest.Digest, error) {
sort.Stable(digestSlice(digests)) // stable sort is important for the behavior here.
seen := map[digest.Digest]struct{}{}
algs := map[digest.Algorithm][]digest.Digest{} // detect different digests.
var out []digest.Digest
// uniqify the digests
for _, d := range digests {
if _, ok := seen[d]; ok {
continue
}
seen[d] = struct{}{}
algs[d.Algorithm()] = append(algs[d.Algorithm()], d)
if len(algs[d.Algorithm()]) > 1 {
return nil, fmt.Errorf("conflicting digests for %v found", d.Algorithm())
}
out = append(out, d)
}
return out, nil
}
// digestsMatch compares the two sets of digests to see if they match.
func digestsMatch(as, bs []digest.Digest) bool {
all := append(as, bs...)
uniqified, err := uniqifyDigests(all...)
if err != nil {
// the only error uniqifyDigests returns is when the digests disagree.
return false
}
disjoint := len(as) + len(bs)
// if the uniqified set is as large as both inputs combined, the two
// sides shared no digests and therefore do not match.
return len(uniqified) != disjoint
}
type digestSlice []digest.Digest
func (p digestSlice) Len() int { return len(p) }
func (p digestSlice) Less(i, j int) bool { return p[i] < p[j] }
func (p digestSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
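
simpleDigester is unexported, so for reference here is a standalone sketch that mirrors its Digest method using the go-digest primitives it builds on; the input string is arbitrary.

package main

import (
    "fmt"
    "io"
    "log"
    "strings"

    "github.com/opencontainers/go-digest"
)

// digestReader streams r through the algorithm's hash and returns the
// resulting digest, which is what simpleDigester.Digest does above.
func digestReader(alg digest.Algorithm, r io.Reader) (digest.Digest, error) {
    d := alg.Digester()
    if _, err := io.Copy(d.Hash(), r); err != nil {
        return "", err
    }
    return d.Digest(), nil
}

func main() {
    dgst, err := digestReader(digest.Canonical, strings.NewReader("hello continuity"))
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(dgst) // sha256:<64 hex chars>
}
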

View File

@ -0,0 +1,178 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package driver
import (
"fmt"
"io"
"os"
)
var ErrNotSupported = fmt.Errorf("not supported")
// Driver provides all of the system-level functions in a common interface.
// The context should call these with full paths and should never use the `os`
// package or any other package to access resources on the filesystem. This
// mechanism let's us carefully control access to the context and maintain
// path and resource integrity. It also gives us an interface to reason about
// direct resource access.
//
// Implementations don't need to do much other than meet the interface. For
// example, it is not required to wrap os.FileInfo to return correct paths for
// the call to Name().
type Driver interface {
// Note that Open() returns a File interface instead of *os.File. This
// is because os.File is a struct, so if Open were to return *os.File,
// the only way to fulfill the interface would be to call os.Open().
Open(path string) (File, error)
OpenFile(path string, flag int, perm os.FileMode) (File, error)
Stat(path string) (os.FileInfo, error)
Lstat(path string) (os.FileInfo, error)
Readlink(p string) (string, error)
Mkdir(path string, mode os.FileMode) error
Remove(path string) error
Link(oldname, newname string) error
Lchmod(path string, mode os.FileMode) error
Lchown(path string, uid, gid int64) error
Symlink(oldname, newname string) error
MkdirAll(path string, perm os.FileMode) error
RemoveAll(path string) error
// TODO(aaronl): These methods might move outside the main Driver
// interface in the future as more platforms are added.
Mknod(path string, mode os.FileMode, major int, minor int) error
Mkfifo(path string, mode os.FileMode) error
}
// File is the interface for interacting with files returned by continuity's Open.
// This is needed since os.File is a struct rather than an interface, so it
// cannot be used directly.
type File interface {
io.ReadWriteCloser
io.Seeker
Readdir(n int) ([]os.FileInfo, error)
}
func NewSystemDriver() (Driver, error) {
// TODO(stevvooe): Consider having this take a "hint" path argument, which
// would be the context root. The hint could be used to resolve required
// filesystem support when assembling the driver to use.
return &driver{}, nil
}
// XAttrDriver should be implemented on operating systems and filesystems that
// have xattr support for regular files and directories.
type XAttrDriver interface {
// Getxattr returns all of the extended attributes for the file at path.
// Typically, this takes a syscall call to Listxattr and Getxattr.
Getxattr(path string) (map[string][]byte, error)
// Setxattr sets all of the extended attributes on file at path, following
// any symbolic links, if necessary. All attributes on the target are
// replaced by the values from attr. If the operation fails to set any
// attribute, those already applied will not be rolled back.
Setxattr(path string, attr map[string][]byte) error
}
// LXAttrDriver should be implemented by drivers on operating systems and
// filesystems that support setting and getting extended attributes on
// symbolic links. If this is not implemented, extended attributes will be
// ignored on symbolic links.
type LXAttrDriver interface {
// LGetxattr returns all of the extended attributes for the file at path
// and does not follow symlinks. Typically, this takes a syscall call to
// Llistxattr and Lgetxattr.
LGetxattr(path string) (map[string][]byte, error)
// LSetxattr sets all of the extended attributes on file at path, without
// following symbolic links. All attributes on the target are replaced by
// the values from attr. If the operation fails to set any attribute,
// those already applied will not be rolled back.
LSetxattr(path string, attr map[string][]byte) error
}
type DeviceInfoDriver interface {
DeviceInfo(fi os.FileInfo) (maj uint64, min uint64, err error)
}
// driver is a simple default implementation that sends calls out to the "os"
// package. Extend the "driver" type in system-specific files to add support,
// such as xattrs, at compile time.
type driver struct{}
var _ File = &os.File{}
// LocalDriver is the exported Driver struct for convenience.
var LocalDriver Driver = &driver{}
func (d *driver) Open(p string) (File, error) {
return os.Open(p)
}
func (d *driver) OpenFile(path string, flag int, perm os.FileMode) (File, error) {
return os.OpenFile(path, flag, perm)
}
func (d *driver) Stat(p string) (os.FileInfo, error) {
return os.Stat(p)
}
func (d *driver) Lstat(p string) (os.FileInfo, error) {
return os.Lstat(p)
}
func (d *driver) Readlink(p string) (string, error) {
return os.Readlink(p)
}
func (d *driver) Mkdir(p string, mode os.FileMode) error {
return os.Mkdir(p, mode)
}
// Remove is used to unlink files and remove directories.
// This follows the Go os package API, which combines the
// operations into a higher-level Remove function. If
// explicit unlinking or directory removal mirroring the
// system calls is required, they should be split up at
// that time.
func (d *driver) Remove(path string) error {
return os.Remove(path)
}
func (d *driver) Link(oldname, newname string) error {
return os.Link(oldname, newname)
}
func (d *driver) Lchown(name string, uid, gid int64) error {
// TODO: error out if uid exceeds int bit width?
return os.Lchown(name, int(uid), int(gid))
}
func (d *driver) Symlink(oldname, newname string) error {
return os.Symlink(oldname, newname)
}
func (d *driver) MkdirAll(path string, perm os.FileMode) error {
return os.MkdirAll(path, perm)
}
func (d *driver) RemoveAll(path string) error {
return os.RemoveAll(path)
}
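
A brief sketch of the Driver abstraction in use, relying only on LocalDriver and the interfaces declared in this file; the /tmp path is illustrative, and the type assertion is how callers discover optional capabilities such as xattr support.

package main

import (
    "fmt"
    "log"

    "github.com/containerd/continuity/driver"
)

func main() {
    d := driver.LocalDriver // default implementation backed by the os package

    if err := d.MkdirAll("/tmp/driver-demo/sub", 0755); err != nil {
        log.Fatal(err)
    }
    fi, err := d.Lstat("/tmp/driver-demo/sub")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(fi.IsDir()) // true

    // Optional capabilities are modeled as separate interfaces.
    if _, ok := d.(driver.XAttrDriver); ok {
        fmt.Println("xattrs supported by this driver")
    }
}
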

View File

@ -0,0 +1,134 @@
//go:build !windows
// +build !windows
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package driver
import (
"errors"
"fmt"
"os"
"sort"
"github.com/containerd/continuity/devices"
"github.com/containerd/continuity/sysx"
)
func (d *driver) Mknod(path string, mode os.FileMode, major, minor int) error {
err := devices.Mknod(path, mode, major, minor)
if err != nil {
err = &os.PathError{Op: "mknod", Path: path, Err: err}
}
return err
}
func (d *driver) Mkfifo(path string, mode os.FileMode) error {
if mode&os.ModeNamedPipe == 0 {
return errors.New("mode passed to Mkfifo does not have the named pipe bit set")
}
// mknod with a mode that has ModeNamedPipe set creates a fifo, not a
// device.
err := devices.Mknod(path, mode, 0, 0)
if err != nil {
err = &os.PathError{Op: "mkfifo", Path: path, Err: err}
}
return err
}
// Getxattr returns all of the extended attributes for the file at path p.
func (d *driver) Getxattr(p string) (map[string][]byte, error) {
xattrs, err := sysx.Listxattr(p)
if err != nil {
return nil, fmt.Errorf("listing %s xattrs: %w", p, err)
}
sort.Strings(xattrs)
m := make(map[string][]byte, len(xattrs))
for _, attr := range xattrs {
value, err := sysx.Getxattr(p, attr)
if err != nil {
return nil, fmt.Errorf("getting %q xattr on %s: %w", attr, p, err)
}
// NOTE(stevvooe): This append/copy trick relies on unique
// xattrs. Break this out into an alloc/copy if xattrs are no
// longer unique.
m[attr] = append(m[attr], value...)
}
return m, nil
}
// Setxattr sets all of the extended attributes on file at path, following
// any symbolic links, if necessary. All attributes on the target are
// replaced by the values from attr. If the operation fails to set any
// attribute, those already applied will not be rolled back.
func (d *driver) Setxattr(path string, attrMap map[string][]byte) error {
for attr, value := range attrMap {
if err := sysx.Setxattr(path, attr, value, 0); err != nil {
return fmt.Errorf("error setting xattr %q on %s: %w", attr, path, err)
}
}
return nil
}
// LGetxattr returns all of the extended attributes for the file at path p
// not following symbolic links.
func (d *driver) LGetxattr(p string) (map[string][]byte, error) {
xattrs, err := sysx.LListxattr(p)
if err != nil {
return nil, fmt.Errorf("listing %s xattrs: %w", p, err)
}
sort.Strings(xattrs)
m := make(map[string][]byte, len(xattrs))
for _, attr := range xattrs {
value, err := sysx.LGetxattr(p, attr)
if err != nil {
return nil, fmt.Errorf("getting %q xattr on %s: %w", attr, p, err)
}
// NOTE(stevvooe): This append/copy trick relies on unique
// xattrs. Break this out into an alloc/copy if xattrs are no
// longer unique.
m[attr] = append(m[attr], value...)
}
return m, nil
}
// LSetxattr sets all of the extended attributes on file at path, not
// following any symbolic links. All attributes on the target are
// replaced by the values from attr. If the operation fails to set any
// attribute, those already applied will not be rolled back.
func (d *driver) LSetxattr(path string, attrMap map[string][]byte) error {
for attr, value := range attrMap {
if err := sysx.LSetxattr(path, attr, value, 0); err != nil {
return fmt.Errorf("error setting xattr %q on %s: %w", attr, path, err)
}
}
return nil
}
func (d *driver) DeviceInfo(fi os.FileInfo) (maj uint64, min uint64, err error) {
return devices.DeviceInfo(fi)
}
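
A Linux-oriented sketch of the xattr methods above, reached through the XAttrDriver capability interface; user.* attributes are settable without privileges on most local filesystems, though some return ENOTSUP. The attribute name and value are made up.

package main

import (
    "fmt"
    "log"
    "os"

    "github.com/containerd/continuity/driver"
)

func main() {
    f, err := os.CreateTemp("", "xattr-demo")
    if err != nil {
        log.Fatal(err)
    }
    f.Close()
    defer os.Remove(f.Name())

    xd, ok := driver.LocalDriver.(driver.XAttrDriver)
    if !ok {
        log.Fatal("driver does not implement XAttrDriver")
    }
    // May fail with ENOTSUP on filesystems without user xattr support.
    if err := xd.Setxattr(f.Name(), map[string][]byte{"user.demo": []byte("42")}); err != nil {
        log.Fatal(err)
    }
    attrs, err := xd.Getxattr(f.Name())
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("user.demo=%s\n", attrs["user.demo"])
}
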

View File

@ -0,0 +1,42 @@
//go:build go1.13
// +build go1.13
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Go 1.13 is the minimum supported version for Windows.
// Earlier Go releases have a bug in os.Readlink
// (see https://github.com/golang/go/issues/30463).
package driver
import (
"os"
)
func (d *driver) Mknod(path string, mode os.FileMode, major, minor int) error {
return &os.PathError{Op: "mknod", Path: path, Err: ErrNotSupported}
}
func (d *driver) Mkfifo(path string, mode os.FileMode) error {
return &os.PathError{Op: "mkfifo", Path: path, Err: ErrNotSupported}
}
// Lchmod changes the mode of a file, not following symlinks.
func (d *driver) Lchmod(path string, mode os.FileMode) (err error) {
// TODO: Use the Windows equivalent
return os.Chmod(path, mode)
}

View File

@ -0,0 +1,39 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package driver
import (
"os"
"golang.org/x/sys/unix"
)
// Lchmod changes the mode of a file not following symlinks.
func (d *driver) Lchmod(path string, mode os.FileMode) error {
// On Linux, file mode is not supported for symlinks,
// and fchmodat() does not support AT_SYMLINK_NOFOLLOW,
// so symlinks need to be skipped entirely.
if st, err := os.Stat(path); err == nil && st.Mode()&os.ModeSymlink != 0 {
return nil
}
err := unix.Fchmodat(unix.AT_FDCWD, path, uint32(mode), 0)
if err != nil {
err = &os.PathError{Op: "lchmod", Path: path, Err: err}
}
return err
}

View File

@ -0,0 +1,35 @@
//go:build darwin || freebsd || netbsd || openbsd || solaris
// +build darwin freebsd netbsd openbsd solaris
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package driver
import (
"os"
"golang.org/x/sys/unix"
)
// Lchmod changes the mode of a file not following symlinks.
func (d *driver) Lchmod(path string, mode os.FileMode) error {
err := unix.Fchmodat(unix.AT_FDCWD, path, uint32(mode), unix.AT_SYMLINK_NOFOLLOW)
if err != nil {
err = &os.PathError{Op: "lchmod", Path: path, Err: err}
}
return err
}

View File

@ -0,0 +1,89 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package driver
import (
"io"
"os"
"sort"
)
// ReadFile works the same as os.ReadFile with the Driver abstraction
func ReadFile(r Driver, filename string) ([]byte, error) {
f, err := r.Open(filename)
if err != nil {
return nil, err
}
defer f.Close()
data, err := io.ReadAll(f)
if err != nil {
return nil, err
}
return data, nil
}
// WriteFile works the same as os.WriteFile with the Driver abstraction
func WriteFile(r Driver, filename string, data []byte, perm os.FileMode) error {
f, err := r.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
if err != nil {
return err
}
defer f.Close()
n, err := f.Write(data)
if err != nil {
return err
} else if n != len(data) {
return io.ErrShortWrite
}
return nil
}
// ReadDir works the same as ioutil.ReadDir with the Driver abstraction
func ReadDir(r Driver, dirname string) ([]os.FileInfo, error) {
f, err := r.Open(dirname)
if err != nil {
return nil, err
}
defer f.Close()
dirs, err := f.Readdir(-1)
if err != nil {
return nil, err
}
sort.Sort(fileInfos(dirs))
return dirs, nil
}
// Simple implementation of the sort.Interface for os.FileInfo
type fileInfos []os.FileInfo
func (fis fileInfos) Len() int {
return len(fis)
}
func (fis fileInfos) Less(i, j int) bool {
return fis[i].Name() < fis[j].Name()
}
func (fis fileInfos) Swap(i, j int) {
fis[i], fis[j] = fis[j], fis[i]
}
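
A short sketch of these helpers routed through LocalDriver; the file paths are illustrative.

package main

import (
    "fmt"
    "log"

    "github.com/containerd/continuity/driver"
)

func main() {
    d := driver.LocalDriver

    if err := driver.WriteFile(d, "/tmp/util-demo.txt", []byte("hello\n"), 0644); err != nil {
        log.Fatal(err)
    }
    data, err := driver.ReadFile(d, "/tmp/util-demo.txt")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Print(string(data))

    infos, err := driver.ReadDir(d, "/tmp") // sorted by name
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(len(infos), "entries under /tmp")
}
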

View File

@ -0,0 +1,68 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fstest
import (
"fmt"
"os"
"github.com/containerd/continuity"
)
// CheckDirectoryEqual compares two directory paths to make sure that
// the content of the directories is the same.
func CheckDirectoryEqual(d1, d2 string) error {
c1, err := continuity.NewContext(d1)
if err != nil {
return fmt.Errorf("failed to build context: %w", err)
}
c2, err := continuity.NewContext(d2)
if err != nil {
return fmt.Errorf("failed to build context: %w", err)
}
m1, err := continuity.BuildManifest(c1)
if err != nil {
return fmt.Errorf("failed to build manifest: %w", err)
}
m2, err := continuity.BuildManifest(c2)
if err != nil {
return fmt.Errorf("failed to build manifest: %w", err)
}
diff := diffResourceList(m1.Resources, m2.Resources)
if diff.HasDiff() {
return fmt.Errorf("directory diff between %s and %s\n%s", d1, d2, diff.String())
}
return nil
}
// CheckDirectoryEqualWithApplier compares a directory against the tree produced by the applier
func CheckDirectoryEqualWithApplier(root string, a Applier) error {
applied, err := os.MkdirTemp("", "fstest")
if err != nil {
return err
}
defer os.RemoveAll(applied)
if err := a.Apply(applied); err != nil {
return err
}
return CheckDirectoryEqual(applied, root)
}
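
A minimal sketch of CheckDirectoryEqualWithApplier; since the directory under test is produced by the same applier, no diff is expected. Names and contents are illustrative.

package main

import (
    "log"
    "os"

    "github.com/containerd/continuity/fstest"
)

func main() {
    a := fstest.Apply(
        fstest.CreateDir("/etc", 0755),
        fstest.CreateFile("/etc/hosts", []byte("127.0.0.1 localhost\n"), 0644),
    )

    root, err := os.MkdirTemp("", "compare-demo")
    if err != nil {
        log.Fatal(err)
    }
    defer os.RemoveAll(root)
    if err := a.Apply(root); err != nil {
        log.Fatal(err)
    }

    // The tree was produced by the same applier, so no diff is expected.
    if err := fstest.CheckDirectoryEqualWithApplier(root, a); err != nil {
        log.Fatal(err)
    }
}
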

View File

@ -0,0 +1,22 @@
//go:build !windows
// +build !windows
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fstest
var metadataFiles map[string]bool

View File

@ -0,0 +1,24 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fstest
// TODO: Any more metadata files generated by Windows layers?
// TODO: Also skip Recycle Bin contents in Windows layers which is used to store deleted files in some cases
var metadataFiles = map[string]bool{
"\\System Volume Information": true,
"\\WcSandboxState": true,
}

View File

@ -0,0 +1,215 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fstest
import (
"bytes"
"fmt"
"github.com/containerd/continuity"
)
type resourceUpdate struct {
Original continuity.Resource
Updated continuity.Resource
}
func (u resourceUpdate) String() string {
return fmt.Sprintf("%s(mode: %o, uid: %d, gid: %d) -> %s(mode: %o, uid: %d, gid: %d)",
u.Original.Path(), u.Original.Mode(), u.Original.UID(), u.Original.GID(),
u.Updated.Path(), u.Updated.Mode(), u.Updated.UID(), u.Updated.GID(),
)
}
type resourceListDifference struct {
Additions []continuity.Resource
Deletions []continuity.Resource
Updates []resourceUpdate
}
func (l resourceListDifference) HasDiff() bool {
if len(l.Deletions) > 0 || len(l.Updates) > 0 || (len(metadataFiles) == 0 && len(l.Additions) > 0) {
return true
}
for _, add := range l.Additions {
if ok := metadataFiles[add.Path()]; !ok {
return true
}
}
return false
}
func (l resourceListDifference) String() string {
buf := bytes.NewBuffer(nil)
for _, add := range l.Additions {
fmt.Fprintf(buf, "+ %s\n", add.Path())
}
for _, del := range l.Deletions {
fmt.Fprintf(buf, "- %s\n", del.Path())
}
for _, upt := range l.Updates {
fmt.Fprintf(buf, "~ %s\n", upt.String())
}
return buf.String()
}
// diffResourceList compares two resource lists and returns the lists of
// additions, updates, and deletions; the resource lists are not reordered
// before the difference is computed.
func diffResourceList(r1, r2 []continuity.Resource) resourceListDifference {
i1 := 0
i2 := 0
var d resourceListDifference
for i1 < len(r1) && i2 < len(r2) {
p1 := r1[i1].Path()
p2 := r2[i2].Path()
switch {
case p1 < p2:
d.Deletions = append(d.Deletions, r1[i1])
i1++
case p1 == p2:
if !compareResource(r1[i1], r2[i2]) {
d.Updates = append(d.Updates, resourceUpdate{
Original: r1[i1],
Updated: r2[i2],
})
}
i1++
i2++
case p1 > p2:
d.Additions = append(d.Additions, r2[i2])
i2++
}
}
for i1 < len(r1) {
d.Deletions = append(d.Deletions, r1[i1])
i1++
}
for i2 < len(r2) {
d.Additions = append(d.Additions, r2[i2])
i2++
}
return d
}
func compareResource(r1, r2 continuity.Resource) bool {
if r1.Path() != r2.Path() {
return false
}
if r1.Mode() != r2.Mode() {
return false
}
if r1.UID() != r2.UID() {
return false
}
if r1.GID() != r2.GID() {
return false
}
// TODO(dmcgowan): Check if is XAttrer
return compareResourceTypes(r1, r2)
}
func compareResourceTypes(r1, r2 continuity.Resource) bool {
switch t1 := r1.(type) {
case continuity.RegularFile:
t2, ok := r2.(continuity.RegularFile)
if !ok {
return false
}
return compareRegularFile(t1, t2)
case continuity.Directory:
t2, ok := r2.(continuity.Directory)
if !ok {
return false
}
return compareDirectory(t1, t2)
case continuity.SymLink:
t2, ok := r2.(continuity.SymLink)
if !ok {
return false
}
return compareSymLink(t1, t2)
case continuity.NamedPipe:
t2, ok := r2.(continuity.NamedPipe)
if !ok {
return false
}
return compareNamedPipe(t1, t2)
case continuity.Device:
t2, ok := r2.(continuity.Device)
if !ok {
return false
}
return compareDevice(t1, t2)
default:
// TODO(dmcgowan): Should this panic?
return r1 == r2
}
}
func compareRegularFile(r1, r2 continuity.RegularFile) bool {
if r1.Size() != r2.Size() {
return false
}
p1 := r1.Paths()
p2 := r2.Paths()
if len(p1) != len(p2) {
return false
}
for i := range p1 {
if p1[i] != p2[i] {
return false
}
}
d1 := r1.Digests()
d2 := r2.Digests()
if len(d1) != len(d2) {
return false
}
for i := range d1 {
if d1[i] != d2[i] {
return false
}
}
return true
}
func compareSymLink(r1, r2 continuity.SymLink) bool {
return r1.Target() == r2.Target()
}
func compareDirectory(r1, r2 continuity.Directory) bool {
return true
}
func compareNamedPipe(r1, r2 continuity.NamedPipe) bool {
return true
}
func compareDevice(r1, r2 continuity.Device) bool {
return r1.Major() == r2.Major() && r1.Minor() == r2.Minor()
}
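
diffResourceList is unexported; to make the walk above easier to follow, here is a simplified restatement of the same two-pointer merge over sorted path strings only, with updates omitted.

package main

import "fmt"

// diffPaths mirrors the two-pointer walk used by diffResourceList above,
// operating on sorted path strings only.
func diffPaths(a, b []string) (deletions, additions []string) {
    i, j := 0, 0
    for i < len(a) && j < len(b) {
        switch {
        case a[i] < b[j]:
            deletions = append(deletions, a[i])
            i++
        case a[i] == b[j]:
            i++
            j++
        default:
            additions = append(additions, b[j])
            j++
        }
    }
    deletions = append(deletions, a[i:]...)
    additions = append(additions, b[j:]...)
    return deletions, additions
}

func main() {
    del, add := diffPaths([]string{"/a", "/b", "/c"}, []string{"/b", "/c", "/d"})
    fmt.Println(del, add) // [/a] [/d]
}
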

View File

@ -0,0 +1,184 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fstest
import (
"bytes"
"io"
"math/rand"
"os"
"path/filepath"
"syscall"
"time"
)
// Applier applies single file changes
type Applier interface {
Apply(root string) error
}
type applyFn func(root string) error
func (a applyFn) Apply(root string) error {
return a(root)
}
// CreateFile returns a file applier which creates a file with the
// provided name, content, and permission.
func CreateFile(name string, content []byte, perm os.FileMode) Applier {
f := func() io.Reader {
return bytes.NewReader(content)
}
return writeFileStream(name, f, perm)
}
// CreateRandomFile returns a file applier which creates a file with random
// content of the given size using the given seed and permission.
func CreateRandomFile(name string, seed, size int64, perm os.FileMode) Applier {
f := func() io.Reader {
return io.LimitReader(rand.New(rand.NewSource(seed)), size)
}
return writeFileStream(name, f, perm)
}
// writeFileStream returns a file applier which creates a file with the
// provided name and permission, writing its content from the provided I/O stream.
func writeFileStream(name string, stream func() io.Reader, perm os.FileMode) Applier {
return applyFn(func(root string) (retErr error) {
fullPath := filepath.Join(root, name)
f, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
if err != nil {
return err
}
defer func() {
err := f.Close()
if err != nil && retErr == nil {
retErr = err
}
}()
_, err = io.Copy(f, stream())
if err != nil {
return err
}
return os.Chmod(fullPath, perm)
})
}
// Remove returns a file applier which removes the provided file name
func Remove(name string) Applier {
return applyFn(func(root string) error {
return os.Remove(filepath.Join(root, name))
})
}
// RemoveAll returns a file applier which removes the provided file name
// as in os.RemoveAll
func RemoveAll(name string) Applier {
return applyFn(func(root string) error {
return os.RemoveAll(filepath.Join(root, name))
})
}
// CreateDir returns a file applier to create the directory with
// the provided name and permission
func CreateDir(name string, perm os.FileMode) Applier {
return applyFn(func(root string) error {
fullPath := filepath.Join(root, name)
if err := os.MkdirAll(fullPath, perm); err != nil {
return err
}
return os.Chmod(fullPath, perm)
})
}
// Rename returns a file applier which renames a file
func Rename(old, new string) Applier {
return applyFn(func(root string) error {
return os.Rename(filepath.Join(root, old), filepath.Join(root, new))
})
}
// Chown returns a file applier which changes the ownership of a file
func Chown(name string, uid, gid int) Applier {
return applyFn(func(root string) error {
return os.Chown(filepath.Join(root, name), uid, gid)
})
}
// Chtimes changes access and mod time of file.
// Use Lchtimes for symbolic links.
func Chtimes(name string, atime, mtime time.Time) Applier {
return applyFn(func(root string) error {
return os.Chtimes(filepath.Join(root, name), atime, mtime)
})
}
// Chmod returns a file applier which changes the file permission
func Chmod(name string, perm os.FileMode) Applier {
return applyFn(func(root string) error {
return os.Chmod(filepath.Join(root, name), perm)
})
}
// Symlink returns a file applier which creates a symbolic link
func Symlink(oldname, newname string) Applier {
return applyFn(func(root string) error {
return os.Symlink(oldname, filepath.Join(root, newname))
})
}
// Link returns a file applier which creates a hard link
func Link(oldname, newname string) Applier {
return applyFn(func(root string) error {
return os.Link(filepath.Join(root, oldname), filepath.Join(root, newname))
})
}
// TODO: Make platform specific, windows applier is always no-op
//func Mknod(name string, mode int32, dev int) Applier {
// return func(root string) error {
// return return syscall.Mknod(path, mode, dev)
// }
//}
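// CreateSocket returns a file applier which creates a unix domain socket with the provided name and permission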
func CreateSocket(name string, perm os.FileMode) Applier {
return applyFn(func(root string) error {
fullPath := filepath.Join(root, name)
fd, err := syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM, 0)
if err != nil {
return err
}
defer syscall.Close(fd)
sa := &syscall.SockaddrUnix{Name: fullPath}
if err := syscall.Bind(fd, sa); err != nil {
return err
}
return os.Chmod(fullPath, perm)
})
}
// Apply returns a new applier from the given appliers
func Apply(appliers ...Applier) Applier {
return applyFn(func(root string) error {
for _, a := range appliers {
if err := a.Apply(root); err != nil {
return err
}
}
return nil
})
}
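
A short sketch composing the appliers above into one change set and applying it to a scratch directory; the seed, sizes and paths are arbitrary.

package main

import (
    "log"
    "os"

    "github.com/containerd/continuity/fstest"
)

func main() {
    root, err := os.MkdirTemp("", "fstest-demo")
    if err != nil {
        log.Fatal(err)
    }
    defer os.RemoveAll(root)

    // Appliers compose: each step is applied to the same root in order.
    layer := fstest.Apply(
        fstest.CreateDir("/opt/app", 0755),
        fstest.CreateRandomFile("/opt/app/blob", 42, 1024, 0644),
        fstest.Symlink("blob", "/opt/app/blob.link"),
        fstest.Chmod("/opt/app/blob", 0600),
    )
    if err := layer.Apply(root); err != nil {
        log.Fatal(err)
    }
}
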

View File

@ -0,0 +1,54 @@
//go:build !windows
// +build !windows
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fstest
import (
"path/filepath"
"time"
"github.com/containerd/continuity/sysx"
"golang.org/x/sys/unix"
)
// SetXAttr sets the xattr for the file
func SetXAttr(name, key, value string) Applier {
return applyFn(func(root string) error {
path := filepath.Join(root, name)
return sysx.LSetxattr(path, key, []byte(value), 0)
})
}
// Lchtimes changes access and mod time of file without following symlink
func Lchtimes(name string, atime, mtime time.Time) Applier {
return applyFn(func(root string) error {
path := filepath.Join(root, name)
at := unix.NsecToTimespec(atime.UnixNano())
mt := unix.NsecToTimespec(mtime.UnixNano())
utimes := [2]unix.Timespec{at, mt}
return unix.UtimesNanoAt(unix.AT_FDCWD, path, utimes[0:], unix.AT_SYMLINK_NOFOLLOW)
})
}
func Base() Applier {
return applyFn(func(root string) error {
// do nothing, as the base is not special
return nil
})
}

View File

@ -0,0 +1,44 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fstest
import (
"errors"
"time"
)
// Lchtimes changes access and mod time of file without following symlink
func Lchtimes(name string, atime, mtime time.Time) Applier {
return applyFn(func(root string) error {
return errors.New("Not implemented")
})
}
// Base applies the files required to make a valid Windows container layer
// that the filter will mount. It is used for testing the snapshotter
func Base() Applier {
return Apply(
CreateDir("Windows", 0755),
CreateDir("Windows/System32", 0755),
CreateDir("Windows/System32/Config", 0755),
CreateFile("Windows/System32/Config/SYSTEM", []byte("foo\n"), 0777),
CreateFile("Windows/System32/Config/SOFTWARE", []byte("foo\n"), 0777),
CreateFile("Windows/System32/Config/SAM", []byte("foo\n"), 0777),
CreateFile("Windows/System32/Config/SECURITY", []byte("foo\n"), 0777),
CreateFile("Windows/System32/Config/DEFAULT", []byte("foo\n"), 0777),
)
}

View File

@ -0,0 +1,236 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fstest
import (
"context"
"os"
"testing"
)
// TestApplier applies the test context
type TestApplier interface {
TestContext(context.Context) (context.Context, func(), error)
Apply(context.Context, Applier) (string, func(), error)
}
// FSSuite runs the path test suite
func FSSuite(t *testing.T, a TestApplier) {
t.Run("Basic", makeTest(t, a, basicTest))
t.Run("Deletion", makeTest(t, a, deletionTest))
t.Run("Update", makeTest(t, a, updateTest))
t.Run("DirectoryPermission", makeTest(t, a, directoryPermissionsTest))
t.Run("ParentDirectoryPermission", makeTest(t, a, parentDirectoryPermissionsTest))
t.Run("HardlinkUnmodified", makeTest(t, a, hardlinkUnmodified))
t.Run("HardlinkBeforeUnmodified", makeTest(t, a, hardlinkBeforeUnmodified))
t.Run("HardlinkBeforeModified", makeTest(t, a, hardlinkBeforeModified))
}
func makeTest(t *testing.T, ta TestApplier, as []Applier) func(t *testing.T) {
return func(t *testing.T) {
ctx, cleanup, err := ta.TestContext(context.Background())
if err != nil {
t.Fatalf("Unable to get test context: %+v", err)
}
defer cleanup()
applyDir, err := os.MkdirTemp("", "test-expected-")
if err != nil {
t.Fatalf("Unable to make temp directory: %+v", err)
}
defer os.RemoveAll(applyDir)
for i, a := range as {
testDir, c, err := ta.Apply(ctx, a)
if err != nil {
t.Fatalf("Apply failed at %d: %+v", i, err)
}
if err := a.Apply(applyDir); err != nil {
if c != nil {
c()
}
t.Fatalf("Error applying change to apply directory: %+v", err)
}
err = CheckDirectoryEqual(applyDir, testDir)
if c != nil {
c()
}
if err != nil {
t.Fatalf("Directories not equal at %d (expected <> tested): %+v", i, err)
}
}
}
}
var (
// baseApplier creates a basic filesystem layout
// with multiple types of files for basic tests.
baseApplier = Apply(
CreateDir("/etc/", 0755),
CreateFile("/etc/hosts", []byte("127.0.0.1 localhost"), 0644),
Link("/etc/hosts", "/etc/hosts.allow"),
CreateDir("/usr/local/lib", 0755),
CreateFile("/usr/local/lib/libnothing.so", []byte{0x00, 0x00}, 0755),
Symlink("libnothing.so", "/usr/local/lib/libnothing.so.2"),
CreateDir("/home", 0755),
CreateDir("/home/derek", 0700),
// TODO: CreateSocket: how should Sockets be handled in continuity?
)
// basicTest covers basic operations
basicTest = []Applier{
baseApplier,
Apply(
CreateFile("/etc/hosts", []byte("127.0.0.1 localhost.localdomain"), 0644),
CreateFile("/etc/fstab", []byte("/dev/sda1\t/\text4\tdefaults 1 1\n"), 0600),
CreateFile("/etc/badfile", []byte(""), 0666),
CreateFile("/home/derek/.zshrc", []byte("#ZSH is just better\n"), 0640),
),
Apply(
Remove("/etc/badfile"),
Rename("/home/derek", "/home/notderek"),
),
Apply(
RemoveAll("/usr"),
Remove("/etc/hosts.allow"),
),
Apply(
RemoveAll("/home"),
CreateDir("/home/derek", 0700),
CreateFile("/home/derek/.bashrc", []byte("#not going away\n"), 0640),
Link("/etc/hosts", "/etc/hosts.allow"),
),
}
// deletionTest covers various deletion scenarios to ensure
// deletions are properly picked up and applied
deletionTest = []Applier{
Apply(
CreateDir("/test/somedir", 0755),
CreateDir("/lib", 0700),
CreateFile("/lib/hidden", []byte{}, 0644),
),
Apply(
CreateFile("/test/a", []byte{}, 0644),
CreateFile("/test/b", []byte{}, 0644),
CreateDir("/test/otherdir", 0755),
CreateFile("/test/otherdir/.empty", []byte{}, 0644),
RemoveAll("/lib"),
CreateDir("/lib", 0700),
CreateFile("/lib/not-hidden", []byte{}, 0644),
),
Apply(
Remove("/test/a"),
Remove("/test/b"),
RemoveAll("/test/otherdir"),
CreateFile("/lib/newfile", []byte{}, 0644),
),
}
// updateTest covers file updates for content and permission
updateTest = []Applier{
Apply(
CreateDir("/d1", 0755),
CreateDir("/d2", 0700),
CreateFile("/d1/f1", []byte("something..."), 0644),
CreateFile("/d1/f2", []byte("else..."), 0644),
CreateFile("/d1/f3", []byte("entirely..."), 0644),
),
Apply(
CreateFile("/d1/f1", []byte("file content of a different length"), 0664),
Remove("/d1/f3"),
CreateFile("/d1/f3", []byte("updated content"), 0664),
Chmod("/d1/f2", 0766),
Chmod("/d2", 0777),
),
}
// directoryPermissionsTest covers directory permissions on update
directoryPermissionsTest = []Applier{
Apply(
CreateDir("/d1", 0700),
CreateDir("/d2", 0751),
CreateDir("/d3", 0777),
),
Apply(
CreateFile("/d1/f", []byte("irrelevant"), 0644),
CreateDir("/d1/d", 0700),
CreateFile("/d1/d/f", []byte("irrelevant"), 0644),
CreateFile("/d2/f", []byte("irrelevant"), 0644),
CreateFile("/d3/f", []byte("irrelevant"), 0644),
),
}
// parentDirectoryPermissionsTest covers directory permissions for updated
// files
parentDirectoryPermissionsTest = []Applier{
Apply(
CreateDir("/d1", 0700),
CreateDir("/d1/a", 0700),
CreateDir("/d1/a/b", 0700),
CreateDir("/d1/a/b/c", 0700),
CreateFile("/d1/a/b/f", []byte("content1"), 0644),
CreateDir("/d2", 0751),
CreateDir("/d2/a/b", 0751),
CreateDir("/d2/a/b/c", 0751),
CreateFile("/d2/a/b/f", []byte("content1"), 0644),
),
Apply(
CreateFile("/d1/a/b/f", []byte("content1"), 0644),
Chmod("/d1/a/b/c", 0700),
CreateFile("/d2/a/b/f", []byte("content2"), 0644),
Chmod("/d2/a/b/c", 0751),
),
}
hardlinkUnmodified = []Applier{
baseApplier,
Apply(
CreateFile("/etc/hosts", []byte("127.0.0.1 localhost.localdomain"), 0644),
),
Apply(
Link("/etc/hosts", "/etc/hosts.deny"),
),
}
// Hardlink name before, without modification
// Tests link is created for unmodified files when a new hard linked file is seen first
hardlinkBeforeUnmodified = []Applier{
baseApplier,
Apply(
CreateFile("/etc/hosts", []byte("127.0.0.1 localhost.localdomain"), 0644),
),
Apply(
Link("/etc/hosts", "/etc/before-hosts"),
),
}
// Hardlink name before, with modification
// Tests link is created for a modified file with a new hardlink
hardlinkBeforeModified = []Applier{
baseApplier,
Apply(
CreateFile("/etc/hosts", []byte("127.0.0.1 localhost.localdomain"), 0644),
),
Apply(
Remove("/etc/hosts"),
CreateFile("/etc/hosts", []byte("127.0.0.1 localhost"), 0644),
Link("/etc/hosts", "/etc/before-hosts"),
),
}
)
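
For contrast with snapshotter-backed integrations, a deliberately naive TestApplier sketch that applies every change set to a single temporary directory, so FSSuite ends up comparing the appliers against themselves; it belongs in a _test.go file, and the package and type names are made up for illustration.

package naive

import (
    "context"
    "os"
    "testing"

    "github.com/containerd/continuity/fstest"
)

// naiveApplier applies every change set to one temporary directory, so
// FSSuite effectively checks the appliers against themselves.
type naiveApplier struct {
    root string
}

func (n *naiveApplier) TestContext(ctx context.Context) (context.Context, func(), error) {
    root, err := os.MkdirTemp("", "naive-applier-")
    if err != nil {
        return nil, nil, err
    }
    n.root = root
    return ctx, func() { os.RemoveAll(root) }, nil
}

func (n *naiveApplier) Apply(_ context.Context, a fstest.Applier) (string, func(), error) {
    if err := a.Apply(n.root); err != nil {
        return "", nil, err
    }
    return n.root, func() {}, nil
}

func TestNaiveApplier(t *testing.T) {
    fstest.FSSuite(t, &naiveApplier{})
}
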

130
vendor/github.com/containerd/continuity/groups_unix.go generated vendored Normal file
View File

@ -0,0 +1,130 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//nolint:unused,deadcode
package continuity
import (
"bufio"
"fmt"
"io"
"os"
"strconv"
"strings"
)
// TODO(stevvooe): This needs a lot of work before we can call it useful.
type groupIndex struct {
byName map[string]*group
byGID map[int]*group
}
func getGroupIndex() (*groupIndex, error) {
f, err := os.Open("/etc/group")
if err != nil {
return nil, err
}
defer f.Close()
groups, err := parseGroups(f)
if err != nil {
return nil, err
}
return newGroupIndex(groups), nil
}
func newGroupIndex(groups []group) *groupIndex {
gi := &groupIndex{
byName: make(map[string]*group),
byGID: make(map[int]*group),
}
for i, group := range groups {
gi.byGID[group.gid] = &groups[i]
gi.byName[group.name] = &groups[i]
}
return gi
}
type group struct {
name string
gid int
members []string
}
func getGroupName(gid int) (string, error) {
f, err := os.Open("/etc/group")
if err != nil {
return "", err
}
defer f.Close()
groups, err := parseGroups(f)
if err != nil {
return "", err
}
for _, group := range groups {
if group.gid == gid {
return group.name, nil
}
}
return "", fmt.Errorf("no group for gid")
}
// parseGroups parses an /etc/group file for group names, ids and membership.
// This is unix specific.
func parseGroups(rd io.Reader) ([]group, error) {
var groups []group
scanner := bufio.NewScanner(rd)
for scanner.Scan() {
if strings.HasPrefix(scanner.Text(), "#") {
continue // skip comment
}
parts := strings.SplitN(scanner.Text(), ":", 4)
if len(parts) != 4 {
return nil, fmt.Errorf("bad entry: %q", scanner.Text())
}
name, _, sgid, smembers := parts[0], parts[1], parts[2], parts[3]
gid, err := strconv.Atoi(sgid)
if err != nil {
return nil, fmt.Errorf("bad gid: %q", gid)
}
members := strings.Split(smembers, ",")
groups = append(groups, group{
name: name,
gid: gid,
members: members,
})
}
if scanner.Err() != nil {
return nil, scanner.Err()
}
return groups, nil
}
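As a standalone illustration (the functions above are unexported, so this sketch re-implements the same colon-delimited parsing rather than calling them), an /etc/group entry has the form name:password:gid:member1,member2:

package main

import (
	"bufio"
	"fmt"
	"strconv"
	"strings"
)

func main() {
	input := "wheel:x:10:root,alice\n# comments are skipped\n"
	scanner := bufio.NewScanner(strings.NewReader(input))
	for scanner.Scan() {
		line := scanner.Text()
		if strings.HasPrefix(line, "#") {
			continue // comment line, mirroring parseGroups
		}
		parts := strings.SplitN(line, ":", 4)
		if len(parts) != 4 {
			continue // malformed entry
		}
		gid, err := strconv.Atoi(parts[2])
		if err != nil {
			continue // non-numeric gid
		}
		fmt.Printf("group=%s gid=%d members=%v\n", parts[0], gid, strings.Split(parts[3], ","))
	}
}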

73
vendor/github.com/containerd/continuity/hardlinks.go generated vendored Normal file
View File

@ -0,0 +1,73 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package continuity
import (
"fmt"
"os"
)
var (
errNotAHardLink = fmt.Errorf("invalid hardlink")
)
type hardlinkManager struct {
hardlinks map[hardlinkKey][]Resource
}
func newHardlinkManager() *hardlinkManager {
return &hardlinkManager{
hardlinks: map[hardlinkKey][]Resource{},
}
}
// Add attempts to add the resource to the hardlink manager. If the resource
// cannot be considered as a hardlink candidate, errNotAHardLink is returned.
func (hlm *hardlinkManager) Add(fi os.FileInfo, resource Resource) error {
if _, ok := resource.(Hardlinkable); !ok {
return errNotAHardLink
}
key, err := newHardlinkKey(fi)
if err != nil {
return err
}
hlm.hardlinks[key] = append(hlm.hardlinks[key], resource)
return nil
}
// Merge processes the current state of the hardlink manager and merges any
// shared nodes into hard linked resources.
func (hlm *hardlinkManager) Merge() ([]Resource, error) {
var resources []Resource
for key, linked := range hlm.hardlinks {
if len(linked) < 1 {
return nil, fmt.Errorf("no hardlink entrys for dev, inode pair: %#v", key)
}
merged, err := Merge(linked...)
if err != nil {
return nil, fmt.Errorf("error merging hardlink: %w", err)
}
resources = append(resources, merged)
}
return resources, nil
}

View File

@ -0,0 +1,54 @@
//go:build !windows
// +build !windows
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package continuity
import (
"fmt"
"os"
"syscall"
)
// hardlinkKey provides a tuple-key for managing hardlinks. This is system-
// specific.
type hardlinkKey struct {
dev uint64
inode uint64
}
// newHardlinkKey returns a hardlink key for the provided file info. If the
// resource does not represent a possible hardlink, errNotAHardLink will be
// returned.
func newHardlinkKey(fi os.FileInfo) (hardlinkKey, error) {
sys, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return hardlinkKey{}, fmt.Errorf("cannot resolve (*syscall.Stat_t) from os.FileInfo")
}
if sys.Nlink < 2 {
// NOTE(stevvooe): This is not always true for all filesystems. We
// should somehow detect this and provide a slow "polyfill" that
// leverages os.SameFile if we detect a filesystem where link counts
// are not really supported.
return hardlinkKey{}, errNotAHardLink
}
//nolint:unconvert
return hardlinkKey{dev: uint64(sys.Dev), inode: uint64(sys.Ino)}, nil
}
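A standalone, unix-only sketch of the same (device, inode, link count) lookup, with an illustrative path; it is not part of this commit:

package main

import (
	"fmt"
	"log"
	"os"
	"syscall"
)

func main() {
	fi, err := os.Stat("/etc/hosts") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	st, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		log.Fatal("no *syscall.Stat_t available on this platform")
	}
	// Two paths are hardlinks of each other when they share the same
	// (device, inode) pair; an Nlink below 2 means no other names exist.
	fmt.Printf("dev=%d ino=%d nlink=%d\n", st.Dev, st.Ino, st.Nlink)
}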

View File

@ -0,0 +1,28 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package continuity
import "os"
type hardlinkKey struct{}
func newHardlinkKey(fi os.FileInfo) (hardlinkKey, error) {
// NOTE(stevvooe): Obviously, this is not yet implemented. However, the
// makings of an implementation are available in src/os/types_windows.go. More
// investigation needs to be done to figure out exactly how to do this.
return hardlinkKey{}, errNotAHardLink
}

62
vendor/github.com/containerd/continuity/ioutils.go generated vendored Normal file
View File

@ -0,0 +1,62 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package continuity
import (
"bytes"
"io"
"os"
"path/filepath"
)
// AtomicWriteFile atomically writes data to a file by first writing to a
// temp file and calling rename.
func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
buf := bytes.NewBuffer(data)
return atomicWriteFile(filename, buf, int64(len(data)), perm)
}
// atomicWriteFile writes data to a file by first writing to a temp
// file and calling rename.
func atomicWriteFile(filename string, r io.Reader, dataSize int64, perm os.FileMode) error {
f, err := os.CreateTemp(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
if err != nil {
return err
}
err = os.Chmod(f.Name(), perm)
if err != nil {
f.Close()
return err
}
n, err := io.Copy(f, r)
if err == nil && n < dataSize {
f.Close()
return io.ErrShortWrite
}
if err != nil {
f.Close()
return err
}
if err := f.Sync(); err != nil {
f.Close()
return err
}
if err := f.Close(); err != nil {
return err
}
return os.Rename(f.Name(), filename)
}
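A minimal usage sketch of AtomicWriteFile (not part of this commit; the target path is illustrative):

package main

import (
	"log"

	"github.com/containerd/continuity"
)

func main() {
	// The data lands in a temp file next to the target, is fsynced, and is
	// then renamed into place, so readers never observe a partial write.
	if err := continuity.AtomicWriteFile("/tmp/example.json", []byte(`{"ok":true}`), 0644); err != nil {
		log.Fatal(err)
	}
}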

164
vendor/github.com/containerd/continuity/manifest.go generated vendored Normal file
View File

@ -0,0 +1,164 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package continuity
import (
"fmt"
"io"
"os"
"sort"
pb "github.com/containerd/continuity/proto"
"google.golang.org/protobuf/encoding/prototext"
"google.golang.org/protobuf/proto"
)
// Manifest provides the contents of a manifest. Users of this struct should
// not typically modify any fields directly.
type Manifest struct {
// Resources specifies all the resources for a manifest in order by path.
Resources []Resource
}
func Unmarshal(p []byte) (*Manifest, error) {
var bm pb.Manifest
if err := proto.Unmarshal(p, &bm); err != nil {
return nil, err
}
var m Manifest
for _, b := range bm.Resource {
r, err := fromProto(b)
if err != nil {
return nil, err
}
m.Resources = append(m.Resources, r)
}
return &m, nil
}
func Marshal(m *Manifest) ([]byte, error) {
var bm pb.Manifest
for _, resource := range m.Resources {
bm.Resource = append(bm.Resource, toProto(resource))
}
return proto.Marshal(&bm)
}
func MarshalText(w io.Writer, m *Manifest) error {
var bm pb.Manifest
for _, resource := range m.Resources {
bm.Resource = append(bm.Resource, toProto(resource))
}
b, err := prototext.Marshal(&bm)
if err != nil {
return err
}
_, err = w.Write(b)
return err
}
// BuildManifest creates the manifest for the given context
func BuildManifest(ctx Context) (*Manifest, error) {
resourcesByPath := map[string]Resource{}
hardLinks := newHardlinkManager()
if err := ctx.Walk(func(p string, fi os.FileInfo, err error) error {
if err != nil {
return fmt.Errorf("error walking %s: %w", p, err)
}
if p == string(os.PathSeparator) {
// skip root
return nil
}
resource, err := ctx.Resource(p, fi)
if err != nil {
if err == ErrNotFound {
return nil
}
return fmt.Errorf("failed to get resource %q: %w", p, err)
}
// add to the hardlink manager
if err := hardLinks.Add(fi, resource); err == nil {
// Resource has been accepted by hardlink manager so we don't add
// it to the resourcesByPath until we merge at the end.
return nil
} else if err != errNotAHardLink {
// handle any other case where we have a proper error.
return fmt.Errorf("adding hardlink %s: %w", p, err)
}
resourcesByPath[p] = resource
return nil
}); err != nil {
return nil, err
}
// merge and post-process the hardlinks.
hardLinked, err := hardLinks.Merge()
if err != nil {
return nil, err
}
for _, resource := range hardLinked {
resourcesByPath[resource.Path()] = resource
}
var resources []Resource
for _, resource := range resourcesByPath {
resources = append(resources, resource)
}
sort.Stable(ByPath(resources))
return &Manifest{
Resources: resources,
}, nil
}
// VerifyManifest verifies all the resources in a manifest
// against files from the given context.
func VerifyManifest(ctx Context, manifest *Manifest) error {
for _, resource := range manifest.Resources {
if err := ctx.Verify(resource); err != nil {
return err
}
}
return nil
}
// ApplyManifest applies on the resources in a manifest to
// the given context.
func ApplyManifest(ctx Context, manifest *Manifest) error {
for _, resource := range manifest.Resources {
if err := ctx.Apply(resource); err != nil {
return err
}
}
return nil
}
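A hedged sketch of building and serializing a manifest with these helpers; it assumes the package's NewContext constructor (not shown in this diff) and uses an illustrative root path:

package main

import (
	"log"
	"os"

	"github.com/containerd/continuity"
)

func main() {
	// NewContext is assumed to exist in the continuity package; it is not part of this diff.
	ctx, err := continuity.NewContext("/path/to/bundle/root") // illustrative
	if err != nil {
		log.Fatal(err)
	}
	m, err := continuity.BuildManifest(ctx) // walk the root, collecting and merging resources
	if err != nil {
		log.Fatal(err)
	}
	p, err := continuity.Marshal(m) // protobuf-encode the sorted resource list
	if err != nil {
		log.Fatal(err)
	}
	if err := os.WriteFile("manifest.pb", p, 0644); err != nil {
		log.Fatal(err)
	}
}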

View File

@ -0,0 +1,101 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pathdriver
import (
"path/filepath"
)
// PathDriver provides all of the path manipulation functions in a common
// interface. The context should call these and never use the `filepath`
// package or any other package to manipulate paths.
type PathDriver interface {
Join(paths ...string) string
IsAbs(path string) bool
Rel(base, target string) (string, error)
Base(path string) string
Dir(path string) string
Clean(path string) string
Split(path string) (dir, file string)
Separator() byte
Abs(path string) (string, error)
Walk(string, filepath.WalkFunc) error
FromSlash(path string) string
ToSlash(path string) string
Match(pattern, name string) (matched bool, err error)
}
// pathDriver is a simple default implementation that calls the filepath package.
type pathDriver struct{}
// LocalPathDriver is the exported pathDriver struct for convenience.
var LocalPathDriver PathDriver = &pathDriver{}
func (*pathDriver) Join(paths ...string) string {
return filepath.Join(paths...)
}
func (*pathDriver) IsAbs(path string) bool {
return filepath.IsAbs(path)
}
func (*pathDriver) Rel(base, target string) (string, error) {
return filepath.Rel(base, target)
}
func (*pathDriver) Base(path string) string {
return filepath.Base(path)
}
func (*pathDriver) Dir(path string) string {
return filepath.Dir(path)
}
func (*pathDriver) Clean(path string) string {
return filepath.Clean(path)
}
func (*pathDriver) Split(path string) (dir, file string) {
return filepath.Split(path)
}
func (*pathDriver) Separator() byte {
return filepath.Separator
}
func (*pathDriver) Abs(path string) (string, error) {
return filepath.Abs(path)
}
// Note that filepath.Walk calls os.Stat, so if the context wants to
// call Driver.Stat() for Walk, it needs to create a new struct that
// overrides this method.
func (*pathDriver) Walk(root string, walkFn filepath.WalkFunc) error {
return filepath.Walk(root, walkFn)
}
func (*pathDriver) FromSlash(path string) string {
return filepath.FromSlash(path)
}
func (*pathDriver) ToSlash(path string) string {
return filepath.ToSlash(path)
}
func (*pathDriver) Match(pattern, name string) (bool, error) {
return filepath.Match(pattern, name)
}
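A brief usage sketch of the driver (not part of this commit):

package main

import (
	"fmt"

	"github.com/containerd/continuity/pathdriver"
)

func main() {
	var d pathdriver.PathDriver = pathdriver.LocalPathDriver
	// Contexts route all path manipulation through the driver instead of
	// calling the filepath package directly.
	p := d.Join("etc", "hosts")
	fmt.Println(p, d.IsAbs(p))
}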

21
vendor/github.com/containerd/continuity/proto/gen.go generated vendored Normal file
View File

@ -0,0 +1,21 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package proto
//go:generate protoc --go_out=. manifest.proto
//go:generate mv github.com/containerd/continuity/proto/manifest.pb.go .
//go:generate rmdir -p github.com/containerd/continuity/proto

View File

@ -0,0 +1,525 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
// protoc v3.12.4
// source: manifest.proto
package proto
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// Manifest specifies the entries in a container bundle, keyed and sorted by
// path.
type Manifest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Resource []*Resource `protobuf:"bytes,1,rep,name=resource,proto3" json:"resource,omitempty"`
}
func (x *Manifest) Reset() {
*x = Manifest{}
if protoimpl.UnsafeEnabled {
mi := &file_manifest_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Manifest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Manifest) ProtoMessage() {}
func (x *Manifest) ProtoReflect() protoreflect.Message {
mi := &file_manifest_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Manifest.ProtoReflect.Descriptor instead.
func (*Manifest) Descriptor() ([]byte, []int) {
return file_manifest_proto_rawDescGZIP(), []int{0}
}
func (x *Manifest) GetResource() []*Resource {
if x != nil {
return x.Resource
}
return nil
}
type Resource struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Path specifies the path from the bundle root. If more than one
// path is present, the entry may represent a hardlink, rather than using
// a link target. The path format is operating system specific.
Path []string `protobuf:"bytes,1,rep,name=path,proto3" json:"path,omitempty"`
// Uid specifies the user id for the resource.
Uid int64 `protobuf:"varint,2,opt,name=uid,proto3" json:"uid,omitempty"`
// Gid specifies the group id for the resource.
Gid int64 `protobuf:"varint,3,opt,name=gid,proto3" json:"gid,omitempty"`
// user and group are not currently used but their field numbers have been
// reserved for future use. As such, they are marked as deprecated.
//
// Deprecated: Do not use.
User string `protobuf:"bytes,4,opt,name=user,proto3" json:"user,omitempty"` // "deprecated" stands for "reserved" here
// Deprecated: Do not use.
Group string `protobuf:"bytes,5,opt,name=group,proto3" json:"group,omitempty"` // "deprecated" stands for "reserved" here
// Mode defines the file mode and permissions. We've used the same
// bit-packing from Go's os package,
// http://golang.org/pkg/os/#FileMode, since they've done the work of
// creating a cross-platform layout.
Mode uint32 `protobuf:"varint,6,opt,name=mode,proto3" json:"mode,omitempty"`
// Size specifies the size in bytes of the resource. This is only valid
// for regular files.
Size uint64 `protobuf:"varint,7,opt,name=size,proto3" json:"size,omitempty"`
// Digest specifies the content digest of the target file. Only valid for
// regular files. The strings are formatted in OCI style, i.e. <alg>:<encoded>.
// For detailed information about the format, please refer to OCI Image Spec:
// https://github.com/opencontainers/image-spec/blob/master/descriptor.md#digests-and-verification
// The digests are sorted in lexical order and implementations may choose
// which algorithms they prefer.
Digest []string `protobuf:"bytes,8,rep,name=digest,proto3" json:"digest,omitempty"`
// Target defines the target of a hard or soft link. Absolute links start
// with a slash and specify the resource relative to the bundle root.
// Relative links do not start with a slash and are relative to the
// resource path.
Target string `protobuf:"bytes,9,opt,name=target,proto3" json:"target,omitempty"`
// Major specifies the major device number for character and block devices.
Major uint64 `protobuf:"varint,10,opt,name=major,proto3" json:"major,omitempty"`
// Minor specifies the minor device number for character and block devices.
Minor uint64 `protobuf:"varint,11,opt,name=minor,proto3" json:"minor,omitempty"`
// Xattr provides storage for extended attributes for the target resource.
Xattr []*XAttr `protobuf:"bytes,12,rep,name=xattr,proto3" json:"xattr,omitempty"`
// Ads stores one or more alternate data streams for the target resource.
Ads []*ADSEntry `protobuf:"bytes,13,rep,name=ads,proto3" json:"ads,omitempty"`
}
func (x *Resource) Reset() {
*x = Resource{}
if protoimpl.UnsafeEnabled {
mi := &file_manifest_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Resource) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Resource) ProtoMessage() {}
func (x *Resource) ProtoReflect() protoreflect.Message {
mi := &file_manifest_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Resource.ProtoReflect.Descriptor instead.
func (*Resource) Descriptor() ([]byte, []int) {
return file_manifest_proto_rawDescGZIP(), []int{1}
}
func (x *Resource) GetPath() []string {
if x != nil {
return x.Path
}
return nil
}
func (x *Resource) GetUid() int64 {
if x != nil {
return x.Uid
}
return 0
}
func (x *Resource) GetGid() int64 {
if x != nil {
return x.Gid
}
return 0
}
// Deprecated: Do not use.
func (x *Resource) GetUser() string {
if x != nil {
return x.User
}
return ""
}
// Deprecated: Do not use.
func (x *Resource) GetGroup() string {
if x != nil {
return x.Group
}
return ""
}
func (x *Resource) GetMode() uint32 {
if x != nil {
return x.Mode
}
return 0
}
func (x *Resource) GetSize() uint64 {
if x != nil {
return x.Size
}
return 0
}
func (x *Resource) GetDigest() []string {
if x != nil {
return x.Digest
}
return nil
}
func (x *Resource) GetTarget() string {
if x != nil {
return x.Target
}
return ""
}
func (x *Resource) GetMajor() uint64 {
if x != nil {
return x.Major
}
return 0
}
func (x *Resource) GetMinor() uint64 {
if x != nil {
return x.Minor
}
return 0
}
func (x *Resource) GetXattr() []*XAttr {
if x != nil {
return x.Xattr
}
return nil
}
func (x *Resource) GetAds() []*ADSEntry {
if x != nil {
return x.Ads
}
return nil
}
// XAttr encodes extended attributes for a resource.
type XAttr struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Name specifies the attribute name.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Data specifies the associated data for the attribute.
Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
}
func (x *XAttr) Reset() {
*x = XAttr{}
if protoimpl.UnsafeEnabled {
mi := &file_manifest_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *XAttr) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*XAttr) ProtoMessage() {}
func (x *XAttr) ProtoReflect() protoreflect.Message {
mi := &file_manifest_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use XAttr.ProtoReflect.Descriptor instead.
func (*XAttr) Descriptor() ([]byte, []int) {
return file_manifest_proto_rawDescGZIP(), []int{2}
}
func (x *XAttr) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *XAttr) GetData() []byte {
if x != nil {
return x.Data
}
return nil
}
// ADSEntry encodes information for a Windows Alternate Data Stream.
type ADSEntry struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Name specifies the stream name.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Data specifies the stream data.
// See also the description about the digest below.
Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
// Digest is a CAS representation of the stream data.
//
// At least one of data or digest MUST be specified, and either one of them
// SHOULD be specified.
//
// How to access the actual data using the digest is implementation-specific,
// and implementations can choose not to implement digest.
// So, digest SHOULD be used only when the stream data is large.
Digest string `protobuf:"bytes,3,opt,name=digest,proto3" json:"digest,omitempty"`
}
func (x *ADSEntry) Reset() {
*x = ADSEntry{}
if protoimpl.UnsafeEnabled {
mi := &file_manifest_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ADSEntry) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ADSEntry) ProtoMessage() {}
func (x *ADSEntry) ProtoReflect() protoreflect.Message {
mi := &file_manifest_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ADSEntry.ProtoReflect.Descriptor instead.
func (*ADSEntry) Descriptor() ([]byte, []int) {
return file_manifest_proto_rawDescGZIP(), []int{3}
}
func (x *ADSEntry) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *ADSEntry) GetData() []byte {
if x != nil {
return x.Data
}
return nil
}
func (x *ADSEntry) GetDigest() string {
if x != nil {
return x.Digest
}
return ""
}
var File_manifest_proto protoreflect.FileDescriptor
var file_manifest_proto_rawDesc = []byte{
0x0a, 0x0e, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x12, 0x05, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x37, 0x0a, 0x08, 0x4d, 0x61, 0x6e, 0x69, 0x66,
0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18,
0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65,
0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
0x22, 0xbf, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x12, 0x0a,
0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74,
0x68, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03,
0x75, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03,
0x52, 0x03, 0x67, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x04, 0x20,
0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x12, 0x18, 0x0a,
0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01,
0x52, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18,
0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73,
0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12,
0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52,
0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65,
0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12,
0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05,
0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x0b,
0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x22, 0x0a, 0x05, 0x78,
0x61, 0x74, 0x74, 0x72, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x2e, 0x58, 0x41, 0x74, 0x74, 0x72, 0x52, 0x05, 0x78, 0x61, 0x74, 0x74, 0x72, 0x12,
0x21, 0x0a, 0x03, 0x61, 0x64, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x41, 0x44, 0x53, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x03, 0x61,
0x64, 0x73, 0x22, 0x2f, 0x0a, 0x05, 0x58, 0x41, 0x74, 0x74, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e,
0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64,
0x61, 0x74, 0x61, 0x22, 0x4a, 0x0a, 0x08, 0x41, 0x44, 0x53, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28,
0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73,
0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x42,
0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f,
0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75,
0x69, 0x74, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_manifest_proto_rawDescOnce sync.Once
file_manifest_proto_rawDescData = file_manifest_proto_rawDesc
)
func file_manifest_proto_rawDescGZIP() []byte {
file_manifest_proto_rawDescOnce.Do(func() {
file_manifest_proto_rawDescData = protoimpl.X.CompressGZIP(file_manifest_proto_rawDescData)
})
return file_manifest_proto_rawDescData
}
var file_manifest_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_manifest_proto_goTypes = []interface{}{
(*Manifest)(nil), // 0: proto.Manifest
(*Resource)(nil), // 1: proto.Resource
(*XAttr)(nil), // 2: proto.XAttr
(*ADSEntry)(nil), // 3: proto.ADSEntry
}
var file_manifest_proto_depIdxs = []int32{
1, // 0: proto.Manifest.resource:type_name -> proto.Resource
2, // 1: proto.Resource.xattr:type_name -> proto.XAttr
3, // 2: proto.Resource.ads:type_name -> proto.ADSEntry
3, // [3:3] is the sub-list for method output_type
3, // [3:3] is the sub-list for method input_type
3, // [3:3] is the sub-list for extension type_name
3, // [3:3] is the sub-list for extension extendee
0, // [0:3] is the sub-list for field type_name
}
func init() { file_manifest_proto_init() }
func file_manifest_proto_init() {
if File_manifest_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_manifest_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Manifest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_manifest_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Resource); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_manifest_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*XAttr); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_manifest_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ADSEntry); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_manifest_proto_rawDesc,
NumEnums: 0,
NumMessages: 4,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_manifest_proto_goTypes,
DependencyIndexes: file_manifest_proto_depIdxs,
MessageInfos: file_manifest_proto_msgTypes,
}.Build()
File_manifest_proto = out.File
file_manifest_proto_rawDesc = nil
file_manifest_proto_goTypes = nil
file_manifest_proto_depIdxs = nil
}

View File

@ -0,0 +1,98 @@
syntax = "proto3";
package proto;
option go_package = "github.com/containerd/continuity/proto;proto";
// Manifest specifies the entries in a container bundle, keyed and sorted by
// path.
message Manifest {
repeated Resource resource = 1;
}
message Resource {
// Path specifies the path from the bundle root. If more than one
// path is present, the entry may represent a hardlink, rather than using
// a link target. The path format is operating system specific.
repeated string path = 1;
// NOTE(stevvooe): Need to define clear precedence for user/group/uid/gid precedence.
// Uid specifies the user id for the resource.
int64 uid = 2;
// Gid specifies the group id for the resource.
int64 gid = 3;
// user and group are not currently used but their field numbers have been
// reserved for future use. As such, they are marked as deprecated.
string user = 4 [deprecated=true]; // "deprecated" stands for "reserved" here
string group = 5 [deprecated=true]; // "deprecated" stands for "reserved" here
// Mode defines the file mode and permissions. We've used the same
// bit-packing from Go's os package,
// http://golang.org/pkg/os/#FileMode, since they've done the work of
// creating a cross-platform layout.
uint32 mode = 6;
// NOTE(stevvooe): Beyond here, we start defining type specific fields.
// Size specifies the size in bytes of the resource. This is only valid
// for regular files.
uint64 size = 7;
// Digest specifies the content digest of the target file. Only valid for
// regular files. The strings are formatted in OCI style, i.e. <alg>:<encoded>.
// For detailed information about the format, please refer to OCI Image Spec:
// https://github.com/opencontainers/image-spec/blob/master/descriptor.md#digests-and-verification
// The digests are sorted in lexical order and implementations may choose
// which algorithms they prefer.
repeated string digest = 8;
// Target defines the target of a hard or soft link. Absolute links start
// with a slash and specify the resource relative to the bundle root.
// Relative links do not start with a slash and are relative to the
// resource path.
string target = 9;
// Major specifies the major device number for character and block devices.
uint64 major = 10;
// Minor specifies the minor device number for character and block devices.
uint64 minor = 11;
// Xattr provides storage for extended attributes for the target resource.
repeated XAttr xattr = 12;
// Ads stores one or more alternate data streams for the target resource.
repeated ADSEntry ads = 13;
}
// XAttr encodes extended attributes for a resource.
message XAttr {
// Name specifies the attribute name.
string name = 1;
// Data specifies the associated data for the attribute.
bytes data = 2;
}
// ADSEntry encodes information for a Windows Alternate Data Stream.
message ADSEntry {
// Name specifies the stream name.
string name = 1;
// Data specifies the stream data.
// See also the description about the digest below.
bytes data = 2;
// Digest is a CAS representation of the stream data.
//
// At least one of data or digest MUST be specified, and either one of them
// SHOULD be specified.
//
// How to access the actual data using the digest is implementation-specific,
// and implementations can choose not to implement digest.
// So, digest SHOULD be used only when the stream data is large.
string digest = 3;
}

590
vendor/github.com/containerd/continuity/resource.go generated vendored Normal file
View File

@ -0,0 +1,590 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package continuity
import (
"errors"
"fmt"
"os"
"reflect"
"sort"
pb "github.com/containerd/continuity/proto"
"github.com/opencontainers/go-digest"
)
// TODO(stevvooe): A record based model, somewhat sketched out at the bottom
// of this file, will be more flexible. Another possibility is to tie the package
// interface directly to the protobuf type. This will have efficiency
// advantages at the cost of coupling the nasty codegen types to the exported
// interface.
type Resource interface {
// Path provides the primary resource path relative to the bundle root. In
// cases where resources have more than one path, such as with hard links,
// this will return the primary path, which is often just the first entry.
Path() string
// Mode returns the file mode of the resource.
Mode() os.FileMode
UID() int64
GID() int64
}
// ByPath provides the canonical sort order for a set of resources. Use with
// sort.Stable for deterministic sorting.
type ByPath []Resource
func (bp ByPath) Len() int { return len(bp) }
func (bp ByPath) Swap(i, j int) { bp[i], bp[j] = bp[j], bp[i] }
func (bp ByPath) Less(i, j int) bool { return bp[i].Path() < bp[j].Path() }
type XAttrer interface {
XAttrs() map[string][]byte
}
// Hardlinkable is an interface that a resource type satisfies if it can be a
// hardlink target.
type Hardlinkable interface {
// Paths returns all paths of the resource, including the primary path
// returned by Resource.Path. If len(Paths()) > 1, the resource is a hard
// link.
Paths() []string
}
type RegularFile interface {
Resource
XAttrer
Hardlinkable
Size() int64
Digests() []digest.Digest
}
// Merge two or more Resources into a new file. Typically, this should be
// used to merge regular files as hardlinks. If the files are not identical,
// other than Paths and Digests, the merge will fail and an error will be
// returned.
func Merge(fs ...Resource) (Resource, error) {
if len(fs) < 1 {
return nil, fmt.Errorf("please provide a resource to merge")
}
if len(fs) == 1 {
return fs[0], nil
}
var paths []string
var digests []digest.Digest
bypath := map[string][]Resource{}
// The attributes are all compared against the first to make sure they
// agree before adding to the above collections. If any of these don't
// correctly validate, the merge fails.
prototype := fs[0]
xattrs := make(map[string][]byte)
// initialize xattrs for use below. All files must have same xattrs.
if prototypeXAttrer, ok := prototype.(XAttrer); ok {
for attr, value := range prototypeXAttrer.XAttrs() {
xattrs[attr] = value
}
}
for _, f := range fs {
h, isHardlinkable := f.(Hardlinkable)
if !isHardlinkable {
return nil, errNotAHardLink
}
if f.Mode() != prototype.Mode() {
return nil, fmt.Errorf("modes do not match: %v != %v", f.Mode(), prototype.Mode())
}
if f.UID() != prototype.UID() {
return nil, fmt.Errorf("uid does not match: %v != %v", f.UID(), prototype.UID())
}
if f.GID() != prototype.GID() {
return nil, fmt.Errorf("gid does not match: %v != %v", f.GID(), prototype.GID())
}
if xattrer, ok := f.(XAttrer); ok {
fxattrs := xattrer.XAttrs()
if !reflect.DeepEqual(fxattrs, xattrs) {
return nil, fmt.Errorf("resource %q xattrs do not match: %v != %v", f, fxattrs, xattrs)
}
}
for _, p := range h.Paths() {
pfs, ok := bypath[p]
if !ok {
// ensure paths are unique by only appending on a new path.
paths = append(paths, p)
}
bypath[p] = append(pfs, f)
}
if regFile, isRegFile := f.(RegularFile); isRegFile {
prototypeRegFile, prototypeIsRegFile := prototype.(RegularFile)
if !prototypeIsRegFile {
return nil, errors.New("prototype is not a regular file")
}
if regFile.Size() != prototypeRegFile.Size() {
return nil, fmt.Errorf("size does not match: %v != %v", regFile.Size(), prototypeRegFile.Size())
}
digests = append(digests, regFile.Digests()...)
} else if device, isDevice := f.(Device); isDevice {
prototypeDevice, prototypeIsDevice := prototype.(Device)
if !prototypeIsDevice {
return nil, errors.New("prototype is not a device")
}
if device.Major() != prototypeDevice.Major() {
return nil, fmt.Errorf("major number does not match: %v != %v", device.Major(), prototypeDevice.Major())
}
if device.Minor() != prototypeDevice.Minor() {
return nil, fmt.Errorf("minor number does not match: %v != %v", device.Minor(), prototypeDevice.Minor())
}
} else if _, isNamedPipe := f.(NamedPipe); isNamedPipe {
_, prototypeIsNamedPipe := prototype.(NamedPipe)
if !prototypeIsNamedPipe {
return nil, errors.New("prototype is not a named pipe")
}
} else {
return nil, errNotAHardLink
}
}
sort.Stable(sort.StringSlice(paths))
// Choose a "canonical" file. Really, it is just the first file to sort
// against. We also effectively select the very first digest as the
// "canonical" one for this file.
first := bypath[paths[0]][0]
resource := resource{
paths: paths,
mode: first.Mode(),
uid: first.UID(),
gid: first.GID(),
xattrs: xattrs,
}
switch typedF := first.(type) {
case RegularFile:
var err error
digests, err = uniqifyDigests(digests...)
if err != nil {
return nil, err
}
return &regularFile{
resource: resource,
size: typedF.Size(),
digests: digests,
}, nil
case Device:
return &device{
resource: resource,
major: typedF.Major(),
minor: typedF.Minor(),
}, nil
case NamedPipe:
return &namedPipe{
resource: resource,
}, nil
default:
return nil, errNotAHardLink
}
}
type Directory interface {
Resource
XAttrer
// Directory is a no-op method to identify directory objects by interface.
Directory()
}
type SymLink interface {
Resource
// Target returns the target of the symlink contained in the resource.
Target() string
}
type NamedPipe interface {
Resource
Hardlinkable
XAttrer
// Pipe is a no-op method to allow consistent resolution of NamedPipe
// interface.
Pipe()
}
type Device interface {
Resource
Hardlinkable
XAttrer
Major() uint64
Minor() uint64
}
type resource struct {
paths []string
mode os.FileMode
uid, gid int64
xattrs map[string][]byte
}
var _ Resource = &resource{}
func (r *resource) Path() string {
if len(r.paths) < 1 {
return ""
}
return r.paths[0]
}
func (r *resource) Mode() os.FileMode {
return r.mode
}
func (r *resource) UID() int64 {
return r.uid
}
func (r *resource) GID() int64 {
return r.gid
}
type regularFile struct {
resource
size int64
digests []digest.Digest
}
var _ RegularFile = &regularFile{}
// newRegularFile returns the RegularFile, using the populated base resource
// and one or more digests of the content.
func newRegularFile(base resource, paths []string, size int64, dgsts ...digest.Digest) (RegularFile, error) {
if !base.Mode().IsRegular() {
return nil, fmt.Errorf("not a regular file")
}
base.paths = make([]string, len(paths))
copy(base.paths, paths)
// make our own copy of digests
ds := make([]digest.Digest, len(dgsts))
copy(ds, dgsts)
return &regularFile{
resource: base,
size: size,
digests: ds,
}, nil
}
func (rf *regularFile) Paths() []string {
paths := make([]string, len(rf.paths))
copy(paths, rf.paths)
return paths
}
func (rf *regularFile) Size() int64 {
return rf.size
}
func (rf *regularFile) Digests() []digest.Digest {
digests := make([]digest.Digest, len(rf.digests))
copy(digests, rf.digests)
return digests
}
func (rf *regularFile) XAttrs() map[string][]byte {
xattrs := make(map[string][]byte, len(rf.xattrs))
for attr, value := range rf.xattrs {
xattrs[attr] = append(xattrs[attr], value...)
}
return xattrs
}
type directory struct {
resource
}
var _ Directory = &directory{}
func newDirectory(base resource) (Directory, error) {
if !base.Mode().IsDir() {
return nil, fmt.Errorf("not a directory")
}
return &directory{
resource: base,
}, nil
}
func (d *directory) Directory() {}
func (d *directory) XAttrs() map[string][]byte {
xattrs := make(map[string][]byte, len(d.xattrs))
for attr, value := range d.xattrs {
xattrs[attr] = append(xattrs[attr], value...)
}
return xattrs
}
type symLink struct {
resource
target string
}
var _ SymLink = &symLink{}
func newSymLink(base resource, target string) (SymLink, error) {
if base.Mode()&os.ModeSymlink == 0 {
return nil, fmt.Errorf("not a symlink")
}
return &symLink{
resource: base,
target: target,
}, nil
}
func (l *symLink) Target() string {
return l.target
}
type namedPipe struct {
resource
}
var _ NamedPipe = &namedPipe{}
func newNamedPipe(base resource, paths []string) (NamedPipe, error) {
if base.Mode()&os.ModeNamedPipe == 0 {
return nil, fmt.Errorf("not a namedpipe")
}
base.paths = make([]string, len(paths))
copy(base.paths, paths)
return &namedPipe{
resource: base,
}, nil
}
func (np *namedPipe) Pipe() {}
func (np *namedPipe) Paths() []string {
paths := make([]string, len(np.paths))
copy(paths, np.paths)
return paths
}
func (np *namedPipe) XAttrs() map[string][]byte {
xattrs := make(map[string][]byte, len(np.xattrs))
for attr, value := range np.xattrs {
xattrs[attr] = append(xattrs[attr], value...)
}
return xattrs
}
type device struct {
resource
major, minor uint64
}
var _ Device = &device{}
func newDevice(base resource, paths []string, major, minor uint64) (Device, error) {
if base.Mode()&os.ModeDevice == 0 {
return nil, fmt.Errorf("not a device")
}
base.paths = make([]string, len(paths))
copy(base.paths, paths)
return &device{
resource: base,
major: major,
minor: minor,
}, nil
}
func (d *device) Paths() []string {
paths := make([]string, len(d.paths))
copy(paths, d.paths)
return paths
}
func (d *device) XAttrs() map[string][]byte {
xattrs := make(map[string][]byte, len(d.xattrs))
for attr, value := range d.xattrs {
xattrs[attr] = append(xattrs[attr], value...)
}
return xattrs
}
func (d device) Major() uint64 {
return d.major
}
func (d device) Minor() uint64 {
return d.minor
}
// toProto converts a resource to a protobuf record. We'd like to push this
// to the individual types but we want to keep this all together during
// prototyping.
func toProto(resource Resource) *pb.Resource {
b := &pb.Resource{
Path: []string{resource.Path()},
Mode: uint32(resource.Mode()),
Uid: resource.UID(),
Gid: resource.GID(),
}
if xattrer, ok := resource.(XAttrer); ok {
// Sorts the XAttrs by name for consistent ordering.
keys := []string{}
xattrs := xattrer.XAttrs()
for k := range xattrs {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
b.Xattr = append(b.Xattr, &pb.XAttr{Name: k, Data: xattrs[k]})
}
}
switch r := resource.(type) {
case RegularFile:
b.Path = r.Paths()
b.Size = uint64(r.Size())
for _, dgst := range r.Digests() {
b.Digest = append(b.Digest, dgst.String())
}
case SymLink:
b.Target = r.Target()
case Device:
b.Major, b.Minor = r.Major(), r.Minor()
b.Path = r.Paths()
case NamedPipe:
b.Path = r.Paths()
}
// enforce a few stability guarantees that may not be provided by the
// resource implementation.
sort.Strings(b.Path)
return b
}
// fromProto converts from a protobuf Resource to a Resource interface.
func fromProto(b *pb.Resource) (Resource, error) {
base := &resource{
paths: b.Path,
mode: os.FileMode(b.Mode),
uid: b.Uid,
gid: b.Gid,
}
base.xattrs = make(map[string][]byte, len(b.Xattr))
for _, attr := range b.Xattr {
base.xattrs[attr.Name] = attr.Data
}
switch {
case base.Mode().IsRegular():
dgsts := make([]digest.Digest, len(b.Digest))
for i, dgst := range b.Digest {
// TODO(stevvooe): Should we be validating at this point?
dgsts[i] = digest.Digest(dgst)
}
return newRegularFile(*base, b.Path, int64(b.Size), dgsts...)
case base.Mode().IsDir():
return newDirectory(*base)
case base.Mode()&os.ModeSymlink != 0:
return newSymLink(*base, b.Target)
case base.Mode()&os.ModeNamedPipe != 0:
return newNamedPipe(*base, b.Path)
case base.Mode()&os.ModeDevice != 0:
return newDevice(*base, b.Path, b.Major, b.Minor)
}
return nil, fmt.Errorf("unknown resource record (%#v): %s", b, base.Mode())
}
// NOTE(stevvooe): An alternative model that supports inline declaration.
// Convenient for unit testing where inline declarations may be desirable but
// creates an awkward API for the standard use case.
// type ResourceKind int
// const (
// ResourceRegularFile = iota + 1
// ResourceDirectory
// ResourceSymLink
// Resource
// )
// type Resource struct {
// Kind ResourceKind
// Paths []string
// Mode os.FileMode
// UID string
// GID string
// Size int64
// Digests []digest.Digest
// Target string
// Major, Minor int
// XAttrs map[string][]byte
// }
// type RegularFile struct {
// Paths []string
// Size int64
// Digests []digest.Digest
// Perm os.FileMode // os.ModePerm + sticky, setuid, setgid
// }

View File

@ -0,0 +1,54 @@
//go:build !windows
// +build !windows
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package continuity
import (
"fmt"
"os"
"syscall"
)
// newBaseResource returns a *resource, populated with data from p and fi,
// where p will be populated directly.
func newBaseResource(p string, fi os.FileInfo) (*resource, error) {
// TODO(stevvooe): This needs to be resolved for the container's root,
// where here we are really getting the host OS's value. We need to allow
// this to be passed in and fixed up to make these uid/gid mappings portable.
// Either this can be part of the driver or we can achieve it through some
// other mechanism.
sys, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
// TODO(stevvooe): This may not be a hard error for all platforms. We
// may want to move this to the driver.
return nil, fmt.Errorf("unable to resolve syscall.Stat_t from (os.FileInfo).Sys(): %#v", fi)
}
return &resource{
paths: []string{p},
mode: fi.Mode(),
uid: int64(sys.Uid),
gid: int64(sys.Gid),
// NOTE(stevvooe): Population of shared xattrs field is deferred to
// the resource types that populate it. Since they are a property of
// the context, they must be set there.
}, nil
}

View File

@ -0,0 +1,28 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package continuity
import "os"
// newBaseResource returns a *resource, populated with data from p and fi,
// where p will be populated directly.
func newBaseResource(p string, fi os.FileInfo) (*resource, error) {
return &resource{
paths: []string{p},
mode: fi.Mode(),
}, nil
}

View File

@ -0,0 +1,16 @@
package dockerd
type Config struct {
Features map[string]bool `json:"features,omitempty"`
Mirrors []string `json:"registry-mirrors,omitempty"`
Builder BuilderConfig `json:"builder,omitempty"`
}
type BuilderEntitlements struct {
NetworkHost bool `json:"network-host,omitempty"`
SecurityInsecure bool `json:"security-insecure,omitempty"`
}
type BuilderConfig struct {
Entitlements BuilderEntitlements `json:",omitempty"`
}
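A short sketch of the daemon.json payload this struct produces; the import path and the chosen feature/entitlement values are assumptions for illustration only:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/docker/buildx/util/testutil/dockerd" // assumed import path
)

func main() {
	cfg := dockerd.Config{
		Features: map[string]bool{"containerd-snapshotter": true}, // illustrative feature flag
		Mirrors:  []string{"https://mirror.example.com"},          // illustrative mirror
		Builder: dockerd.BuilderConfig{
			Entitlements: dockerd.BuilderEntitlements{NetworkHost: true},
		},
	}
	b, err := json.MarshalIndent(cfg, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b)) // suitable as the daemon's config file contents
}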

View File

@ -0,0 +1,243 @@
package dockerd
import (
"bytes"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
"time"
"github.com/moby/buildkit/identity"
"github.com/pkg/errors"
)
type LogT interface {
Logf(string, ...interface{})
}
type nopLog struct{}
func (nopLog) Logf(string, ...interface{}) {}
const (
shortLen = 12
defaultDockerdBinary = "dockerd"
)
type Option func(*Daemon)
type Daemon struct {
root string
folder string
Wait chan error
id string
cmd *exec.Cmd
storageDriver string
execRoot string
dockerdBinary string
Log LogT
pidFile string
sockPath string
args []string
}
var sockRoot = filepath.Join(os.TempDir(), "docker-integration")
func NewDaemon(workingDir string, ops ...Option) (*Daemon, error) {
if err := os.MkdirAll(sockRoot, 0700); err != nil {
return nil, errors.Wrapf(err, "failed to create daemon socket root %q", sockRoot)
}
id := "d" + identity.NewID()[:shortLen]
daemonFolder, err := filepath.Abs(filepath.Join(workingDir, id))
if err != nil {
return nil, err
}
daemonRoot := filepath.Join(daemonFolder, "root")
if err := os.MkdirAll(daemonRoot, 0755); err != nil {
return nil, errors.Wrapf(err, "failed to create daemon root %q", daemonRoot)
}
d := &Daemon{
id: id,
folder: daemonFolder,
root: daemonRoot,
storageDriver: os.Getenv("DOCKER_GRAPHDRIVER"),
// dxr stands for docker-execroot (shortened for avoiding unix(7) path length limitation)
execRoot: filepath.Join(os.TempDir(), "dxr", id),
dockerdBinary: defaultDockerdBinary,
Log: nopLog{},
sockPath: filepath.Join(sockRoot, id+".sock"),
}
for _, op := range ops {
op(d)
}
return d, nil
}
func (d *Daemon) Sock() string {
return "unix://" + d.sockPath
}
func (d *Daemon) StartWithError(daemonLogs map[string]*bytes.Buffer, providedArgs ...string) error {
dockerdBinary, err := exec.LookPath(d.dockerdBinary)
if err != nil {
return errors.Wrapf(err, "[%s] could not find docker binary in $PATH", d.id)
}
if d.pidFile == "" {
d.pidFile = filepath.Join(d.folder, "docker.pid")
}
d.args = []string{
"--data-root", d.root,
"--exec-root", d.execRoot,
"--pidfile", d.pidFile,
"--containerd-namespace", d.id,
"--containerd-plugins-namespace", d.id + "p",
"--host", d.Sock(),
}
if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" {
d.args = append(d.args, "--userns-remap", root)
}
// If we don't explicitly set the log-level or debug flag(-D) then
// turn on debug mode
var foundLog, foundSd bool
for _, a := range providedArgs {
if strings.Contains(a, "--log-level") || strings.Contains(a, "-D") || strings.Contains(a, "--debug") {
foundLog = true
}
if strings.Contains(a, "--storage-driver") {
foundSd = true
}
}
if !foundLog {
d.args = append(d.args, "--debug")
}
if d.storageDriver != "" && !foundSd {
d.args = append(d.args, "--storage-driver", d.storageDriver)
}
d.args = append(d.args, providedArgs...)
d.cmd = exec.Command(dockerdBinary, d.args...)
d.cmd.Env = append(os.Environ(), "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE=1", "BUILDKIT_DEBUG_EXEC_OUTPUT=1", "BUILDKIT_DEBUG_PANIC_ON_ERROR=1")
if daemonLogs != nil {
b := new(bytes.Buffer)
daemonLogs["stdout: "+d.cmd.Path] = b
d.cmd.Stdout = &lockingWriter{Writer: b}
b = new(bytes.Buffer)
daemonLogs["stderr: "+d.cmd.Path] = b
d.cmd.Stderr = &lockingWriter{Writer: b}
}
fmt.Fprintf(d.cmd.Stderr, "> startCmd %v %+v\n", time.Now(), d.cmd.String())
if err := d.cmd.Start(); err != nil {
return errors.Wrapf(err, "[%s] could not start daemon container", d.id)
}
wait := make(chan error, 1)
go func() {
ret := d.cmd.Wait()
d.Log.Logf("[%s] exiting daemon", d.id)
// If we send before logging, we might accidentally log _after_ the test is done.
// As of Go 1.12, this incurs a panic instead of silently being dropped.
wait <- ret
close(wait)
}()
d.Wait = wait
d.Log.Logf("[%s] daemon started\n", d.id)
return nil
}
var errDaemonNotStarted = errors.New("daemon not started")
func (d *Daemon) StopWithError() (err error) {
if d.cmd == nil || d.Wait == nil {
return errDaemonNotStarted
}
defer func() {
if err != nil {
d.Log.Logf("[%s] error while stopping daemon: %v", d.id, err)
} else {
d.Log.Logf("[%s] daemon stopped", d.id)
if d.pidFile != "" {
_ = os.Remove(d.pidFile)
}
}
d.cmd = nil
}()
i := 1
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
tick := ticker.C
d.Log.Logf("[%s] stopping daemon", d.id)
if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
if strings.Contains(err.Error(), "os: process already finished") {
return errDaemonNotStarted
}
return errors.Wrapf(err, "[%s] could not send signal", d.id)
}
out1:
for {
select {
case err := <-d.Wait:
return err
case <-time.After(20 * time.Second):
// time for stopping jobs and run onShutdown hooks
d.Log.Logf("[%s] daemon stop timed out after 20 seconds", d.id)
break out1
}
}
out2:
for {
select {
case err := <-d.Wait:
return err
case <-tick:
i++
if i > 5 {
d.Log.Logf("[%s] tried to interrupt daemon for %d times, now try to kill it", d.id, i)
break out2
}
d.Log.Logf("[%d] attempt #%d/5: daemon is still running with pid %d", i, d.cmd.Process.Pid)
if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
return errors.Wrapf(err, "[%s] attempt #%d/5 could not send signal", d.id, i)
}
}
}
if err := d.cmd.Process.Kill(); err != nil {
d.Log.Logf("[%s] failed to kill daemon: %v", d.id, err)
return err
}
return nil
}
type lockingWriter struct {
mu sync.Mutex
io.Writer
}
func (w *lockingWriter) Write(dt []byte) (int, error) {
w.mu.Lock()
n, err := w.Writer.Write(dt)
w.mu.Unlock()
return n, err
}
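A hedged sketch of driving this helper from a test; the import path and working directory are assumptions:

package main

import (
	"bytes"
	"log"

	"github.com/docker/buildx/util/testutil/dockerd" // assumed import path
)

func main() {
	d, err := dockerd.NewDaemon("/tmp/dockerd-test") // illustrative working dir
	if err != nil {
		log.Fatal(err)
	}
	logs := map[string]*bytes.Buffer{} // captures the daemon's stdout/stderr
	if err := d.StartWithError(logs); err != nil {
		log.Fatal(err)
	}
	defer func() {
		if err := d.StopWithError(); err != nil {
			log.Printf("stop: %v", err)
		}
	}()
	log.Println("daemon listening on", d.Sock()) // unix:// address for a docker client
}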

View File

@ -0,0 +1,130 @@
package testutil
import (
"context"
"encoding/json"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/platforms"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
type ImageInfo struct {
Desc ocispecs.Descriptor
Manifest ocispecs.Manifest
Img ocispecs.Image
Layers []map[string]*TarItem
LayersRaw [][]byte
descPlatform string
}
type ImagesInfo struct {
Desc ocispecs.Descriptor
Index ocispecs.Index
Images []*ImageInfo
}
func (idx ImagesInfo) Find(platform string) *ImageInfo {
result := idx.Filter(platform)
if len(result.Images) == 0 {
return nil
}
return result.Images[0]
}
func (idx ImagesInfo) Filter(platform string) *ImagesInfo {
result := &ImagesInfo{Desc: idx.Desc}
for _, info := range idx.Images {
if info.descPlatform == platform {
result.Images = append(result.Images, info)
}
}
return result
}
func (idx ImagesInfo) FindAttestation(platform string) *ImageInfo {
img := idx.Find(platform)
if img == nil {
return nil
}
for _, info := range idx.Images {
if info.Desc.Annotations["vnd.docker.reference.digest"] == string(img.Desc.Digest) {
return info
}
}
return nil
}
func ReadImages(ctx context.Context, p content.Provider, desc ocispecs.Descriptor) (*ImagesInfo, error) {
idx := &ImagesInfo{Desc: desc}
dt, err := content.ReadBlob(ctx, p, desc)
if err != nil {
return nil, err
}
if err := json.Unmarshal(dt, &idx.Index); err != nil {
return nil, err
}
if !images.IsIndexType(idx.Index.MediaType) {
img, err := ReadImage(ctx, p, desc)
if err != nil {
return nil, err
}
img.descPlatform = platforms.Format(img.Img.Platform)
idx.Images = append(idx.Images, img)
return idx, nil
}
for _, m := range idx.Index.Manifests {
img, err := ReadImage(ctx, p, m)
if err != nil {
return nil, err
}
img.descPlatform = platforms.Format(*m.Platform)
idx.Images = append(idx.Images, img)
}
return idx, nil
}
func ReadImage(ctx context.Context, p content.Provider, desc ocispecs.Descriptor) (*ImageInfo, error) {
ii := &ImageInfo{Desc: desc}
dt, err := content.ReadBlob(ctx, p, desc)
if err != nil {
return nil, err
}
if err := json.Unmarshal(dt, &ii.Manifest); err != nil {
return nil, err
}
if !images.IsManifestType(ii.Manifest.MediaType) {
return nil, errors.Errorf("invalid manifest type %s", ii.Manifest.MediaType)
}
dt, err = content.ReadBlob(ctx, p, ii.Manifest.Config)
if err != nil {
return nil, err
}
if err := json.Unmarshal(dt, &ii.Img); err != nil {
return nil, err
}
ii.Layers = make([]map[string]*TarItem, len(ii.Manifest.Layers))
ii.LayersRaw = make([][]byte, len(ii.Manifest.Layers))
for i, l := range ii.Manifest.Layers {
dt, err := content.ReadBlob(ctx, p, l)
if err != nil {
return nil, err
}
ii.LayersRaw[i] = dt
if images.IsLayerType(l.MediaType) {
m, err := ReadTarToMap(dt, true)
if err != nil {
return nil, err
}
ii.Layers[i] = m
}
}
return ii, nil
}
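
For context, a minimal usage sketch of these helpers (hypothetical test code; the provider and descriptor are assumed to come from an exported OCI layout or a registry pull done elsewhere in the test):

package example

import (
	"context"
	"testing"

	"github.com/containerd/containerd/content"
	"github.com/moby/buildkit/util/testutil"
	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/stretchr/testify/require"
)

// checkIndex is a hypothetical helper: it reads a multi-platform index and
// inspects the linux/amd64 manifest plus its attestation, if present.
func checkIndex(t *testing.T, provider content.Provider, desc ocispecs.Descriptor) {
	imgs, err := testutil.ReadImages(context.TODO(), provider, desc)
	require.NoError(t, err)

	img := imgs.Find("linux/amd64")
	require.NotNil(t, img)
	require.Equal(t, len(img.Manifest.Layers), len(img.Layers))

	// attestation manifests are matched via the vnd.docker.reference.digest annotation
	att := imgs.FindAttestation("linux/amd64")
	_ = att // may be nil when the build did not produce provenance/SBOM
}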

View File

@ -0,0 +1,89 @@
package integration
import (
"fmt"
"net"
"net/http"
"os"
"os/exec"
"testing"
"time"
"github.com/pkg/errors"
)
const (
azuriteBin = "azurite-blob"
)
type AzuriteOpts struct {
AccountName string
AccountKey string
}
func NewAzuriteServer(t *testing.T, sb Sandbox, opts AzuriteOpts) (address string, cl func() error, err error) {
t.Helper()
if _, err := exec.LookPath(azuriteBin); err != nil {
return "", nil, errors.Wrapf(err, "failed to lookup %s binary", azuriteBin)
}
deferF := &multiCloser{}
cl = deferF.F()
defer func() {
if err != nil {
deferF.F()()
cl = nil
}
}()
l, err := net.Listen("tcp", "localhost:0")
if err != nil {
return "", nil, err
}
addr := l.Addr().String()
if err = l.Close(); err != nil {
return "", nil, err
}
host, port, err := net.SplitHostPort(addr)
if err != nil {
return "", nil, err
}
address = fmt.Sprintf("http://%s/%s", addr, opts.AccountName)
// start server
cmd := exec.Command(azuriteBin, "--disableProductStyleUrl", "--blobHost", host, "--blobPort", port, "--location", t.TempDir())
cmd.Env = append(os.Environ(), []string{
"AZURITE_ACCOUNTS=" + opts.AccountName + ":" + opts.AccountKey,
}...)
azuriteStop, err := startCmd(cmd, sb.Logs())
if err != nil {
return "", nil, err
}
if err = waitAzurite(address, 15*time.Second); err != nil {
azuriteStop()
return "", nil, errors.Wrapf(err, "azurite did not start up: %s", formatLogs(sb.Logs()))
}
deferF.append(azuriteStop)
return
}
func waitAzurite(address string, d time.Duration) error {
step := 1 * time.Second
i := 0
for {
if resp, err := http.Get(fmt.Sprintf("%s?comp=list", address)); err == nil {
resp.Body.Close()
break
}
i++
if time.Duration(i)*step > d {
return errors.Errorf("failed dialing: %s", address)
}
time.Sleep(step)
}
return nil
}
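
A hedged sketch of how a cache test would consume this helper; the account name and key are arbitrary emulator credentials (azurite accepts any base64-encoded key via AZURITE_ACCOUNTS), and the resulting address would feed buildkit's azblob cache attributes:

package example

import (
	"testing"

	"github.com/moby/buildkit/util/testutil/integration"
	"github.com/stretchr/testify/require"
)

func testAzblobCache(t *testing.T, sb integration.Sandbox) {
	opts := integration.AzuriteOpts{
		AccountName: "azblobcacheaccount",
		AccountKey:  "QXp1cml0ZVRlc3RLZXk=", // arbitrary base64 key accepted by azurite
	}
	addr, cleanup, err := integration.NewAzuriteServer(t, sb, opts)
	require.NoError(t, err)
	defer cleanup()

	// addr looks like http://127.0.0.1:<port>/<account>; it is what a cache
	// exporter/importer pointing at the emulator would be configured with.
	_ = addr
}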

View File

@ -0,0 +1,241 @@
package integration
import (
"bytes"
"context"
"fmt"
"log"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
"github.com/moby/buildkit/util/bklog"
"github.com/pkg/errors"
)
func InitContainerdWorker() {
Register(&Containerd{
ID: "containerd",
Containerd: "containerd",
})
// defined in Dockerfile
// e.g. `containerd-1.1=/opt/containerd-1.1/bin,containerd-42.0=/opt/containerd-42.0/bin`
if s := os.Getenv("BUILDKIT_INTEGRATION_CONTAINERD_EXTRA"); s != "" {
entries := strings.Split(s, ",")
for _, entry := range entries {
pair := strings.Split(strings.TrimSpace(entry), "=")
if len(pair) != 2 {
panic(errors.Errorf("unexpected BUILDKIT_INTEGRATION_CONTAINERD_EXTRA: %q", s))
}
name, bin := pair[0], pair[1]
Register(&Containerd{
ID: name,
Containerd: filepath.Join(bin, "containerd"),
// override PATH to make sure that the expected version of the shim binary is used
ExtraEnv: []string{fmt.Sprintf("PATH=%s:%s", bin, os.Getenv("PATH"))},
})
}
}
// the rootless uid is defined in Dockerfile
if s := os.Getenv("BUILDKIT_INTEGRATION_ROOTLESS_IDPAIR"); s != "" {
var uid, gid int
if _, err := fmt.Sscanf(s, "%d:%d", &uid, &gid); err != nil {
bklog.L.Fatalf("unexpected BUILDKIT_INTEGRATION_ROOTLESS_IDPAIR: %q", s)
}
if rootlessSupported(uid) {
Register(&Containerd{
ID: "containerd-rootless",
Containerd: "containerd",
UID: uid,
GID: gid,
Snapshotter: "native", // TODO: test with fuse-overlayfs as well, or automatically determine snapshotter
})
}
}
if s := os.Getenv("BUILDKIT_INTEGRATION_SNAPSHOTTER"); s != "" {
Register(&Containerd{
ID: fmt.Sprintf("containerd-snapshotter-%s", s),
Containerd: "containerd",
Snapshotter: s,
})
}
}
type Containerd struct {
ID string
Containerd string
Snapshotter string
UID int
GID int
ExtraEnv []string // e.g. "PATH=/opt/containerd-1.4/bin:/usr/bin:..."
}
func (c *Containerd) Name() string {
return c.ID
}
func (c *Containerd) Rootless() bool {
return c.UID != 0
}
func (c *Containerd) New(ctx context.Context, cfg *BackendConfig) (b Backend, cl func() error, err error) {
if err := lookupBinary(c.Containerd); err != nil {
return nil, nil, err
}
if err := lookupBinary("buildkitd"); err != nil {
return nil, nil, err
}
if err := requireRoot(); err != nil {
return nil, nil, err
}
deferF := &multiCloser{}
cl = deferF.F()
defer func() {
if err != nil {
deferF.F()()
cl = nil
}
}()
rootless := false
if c.UID != 0 {
if c.GID == 0 {
return nil, nil, errors.Errorf("unsupported id pair: uid=%d, gid=%d", c.UID, c.GID)
}
rootless = true
}
tmpdir, err := os.MkdirTemp("", "bktest_containerd")
if err != nil {
return nil, nil, err
}
if rootless {
if err := os.Chown(tmpdir, c.UID, c.GID); err != nil {
return nil, nil, err
}
}
deferF.append(func() error { return os.RemoveAll(tmpdir) })
address := filepath.Join(tmpdir, "containerd.sock")
config := fmt.Sprintf(`root = %q
state = %q
# The CRI plugin listens on 10010/tcp for its stream server.
# We disable the CRI plugin so that multiple instances can run simultaneously.
disabled_plugins = ["cri"]
[grpc]
address = %q
[debug]
level = "debug"
address = %q
`, filepath.Join(tmpdir, "root"), filepath.Join(tmpdir, "state"), address, filepath.Join(tmpdir, "debug.sock"))
var snBuildkitdArgs []string
if c.Snapshotter != "" {
snBuildkitdArgs = append(snBuildkitdArgs,
fmt.Sprintf("--containerd-worker-snapshotter=%s", c.Snapshotter))
if c.Snapshotter == "stargz" {
snPath, snCl, err := runStargzSnapshotter(cfg)
if err != nil {
return nil, nil, err
}
deferF.append(snCl)
config = fmt.Sprintf(`%s
[proxy_plugins]
[proxy_plugins.stargz]
type = "snapshot"
address = %q
`, config, snPath)
}
}
configFile := filepath.Join(tmpdir, "config.toml")
if err := os.WriteFile(configFile, []byte(config), 0644); err != nil {
return nil, nil, err
}
containerdArgs := []string{c.Containerd, "--config", configFile}
rootlessKitState := filepath.Join(tmpdir, "rootlesskit-containerd")
if rootless {
containerdArgs = append(append([]string{"sudo", "-u", fmt.Sprintf("#%d", c.UID), "-i",
fmt.Sprintf("CONTAINERD_ROOTLESS_ROOTLESSKIT_STATE_DIR=%s", rootlessKitState),
// Integration test requires the access to localhost of the host network namespace.
// TODO: remove these configurations
"CONTAINERD_ROOTLESS_ROOTLESSKIT_NET=host",
"CONTAINERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=none",
"CONTAINERD_ROOTLESS_ROOTLESSKIT_FLAGS=--mtu=0",
}, c.ExtraEnv...), "containerd-rootless.sh", "-c", configFile)
}
cmd := exec.Command(containerdArgs[0], containerdArgs[1:]...) //nolint:gosec // test utility
cmd.Env = append(os.Environ(), c.ExtraEnv...)
ctdStop, err := startCmd(cmd, cfg.Logs)
if err != nil {
return nil, nil, err
}
if err := waitUnix(address, 10*time.Second, cmd); err != nil {
ctdStop()
return nil, nil, errors.Wrapf(err, "containerd did not start up: %s", formatLogs(cfg.Logs))
}
deferF.append(ctdStop)
buildkitdArgs := append([]string{"buildkitd",
"--oci-worker=false",
"--containerd-worker-gc=false",
"--containerd-worker=true",
"--containerd-worker-addr", address,
"--containerd-worker-labels=org.mobyproject.buildkit.worker.sandbox=true", // Include use of --containerd-worker-labels to trigger https://github.com/moby/buildkit/pull/603
}, snBuildkitdArgs...)
if runtime.GOOS != "windows" && c.Snapshotter != "native" {
c.ExtraEnv = append(c.ExtraEnv, "BUILDKIT_DEBUG_FORCE_OVERLAY_DIFF=true")
}
if rootless {
pidStr, err := os.ReadFile(filepath.Join(rootlessKitState, "child_pid"))
if err != nil {
return nil, nil, err
}
pid, err := strconv.ParseInt(string(pidStr), 10, 64)
if err != nil {
return nil, nil, err
}
buildkitdArgs = append([]string{"sudo", "-u", fmt.Sprintf("#%d", c.UID), "-i", "--", "exec",
"nsenter", "-U", "--preserve-credentials", "-m", "-t", fmt.Sprintf("%d", pid)},
append(buildkitdArgs, "--containerd-worker-snapshotter=native")...)
}
buildkitdSock, stop, err := runBuildkitd(ctx, cfg, buildkitdArgs, cfg.Logs, c.UID, c.GID, c.ExtraEnv)
if err != nil {
printLogs(cfg.Logs, log.Println)
return nil, nil, err
}
deferF.append(stop)
return backend{
address: buildkitdSock,
containerdAddress: address,
rootless: rootless,
snapshotter: c.Snapshotter,
}, cl, nil
}
func formatLogs(m map[string]*bytes.Buffer) string {
var ss []string
for k, b := range m {
if b != nil {
ss = append(ss, fmt.Sprintf("%q:%q", k, b.String()))
}
}
return strings.Join(ss, ",")
}
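
The BUILDKIT_INTEGRATION_CONTAINERD_EXTRA hook above is what lets a single test image exercise several containerd releases. A purely illustrative registration sequence (the path is a placeholder; in practice the directories are baked into the test image):

package example

import (
	"os"
	"testing"

	"github.com/moby/buildkit/util/testutil/integration"
)

func TestMain(m *testing.M) {
	// Placeholder path: point at an extra containerd install before workers register.
	os.Setenv("BUILDKIT_INTEGRATION_CONTAINERD_EXTRA",
		"containerd-1.6=/opt/containerd-1.6/bin")
	integration.InitContainerdWorker() // registers "containerd" plus "containerd-1.6"
	os.Exit(m.Run())
}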

View File

@ -0,0 +1,248 @@
package integration
import (
"context"
"encoding/json"
"io"
"net"
"os"
"path/filepath"
"strings"
"time"
"github.com/docker/docker/client"
"github.com/moby/buildkit/cmd/buildkitd/config"
"github.com/moby/buildkit/util/testutil/dockerd"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
)
// InitDockerdWorker registers a dockerd worker with the global registry.
func InitDockerdWorker() {
Register(&Moby{
ID: "dockerd",
IsRootless: false,
Unsupported: []string{
FeatureCacheExport,
FeatureCacheImport,
FeatureCacheBackendAzblob,
FeatureCacheBackendGha,
FeatureCacheBackendLocal,
FeatureCacheBackendRegistry,
FeatureCacheBackendS3,
FeatureDirectPush,
FeatureImageExporter,
FeatureMultiCacheExport,
FeatureMultiPlatform,
FeatureOCIExporter,
FeatureOCILayout,
FeatureProvenance,
FeatureSBOM,
FeatureSecurityMode,
FeatureCNINetwork,
},
})
Register(&Moby{
ID: "dockerd-containerd",
IsRootless: false,
ContainerdSnapshotter: true,
Unsupported: []string{
FeatureSecurityMode,
FeatureCNINetwork,
},
})
}
type Moby struct {
ID string
IsRootless bool
ContainerdSnapshotter bool
Unsupported []string
}
func (c Moby) Name() string {
return c.ID
}
func (c Moby) Rootless() bool {
return c.IsRootless
}
func (c Moby) New(ctx context.Context, cfg *BackendConfig) (b Backend, cl func() error, err error) {
if err := requireRoot(); err != nil {
return nil, nil, err
}
bkcfg, err := config.LoadFile(cfg.ConfigFile)
if err != nil {
return nil, nil, errors.Wrapf(err, "failed to load buildkit config file %s", cfg.ConfigFile)
}
dcfg := dockerd.Config{
Features: map[string]bool{
"containerd-snapshotter": c.ContainerdSnapshotter,
},
}
if reg, ok := bkcfg.Registries["docker.io"]; ok && len(reg.Mirrors) > 0 {
for _, m := range reg.Mirrors {
dcfg.Mirrors = append(dcfg.Mirrors, "http://"+m)
}
}
if bkcfg.Entitlements != nil {
for _, e := range bkcfg.Entitlements {
switch e {
case "network.host":
dcfg.Builder.Entitlements.NetworkHost = true
case "security.insecure":
dcfg.Builder.Entitlements.SecurityInsecure = true
}
}
}
dcfgdt, err := json.Marshal(dcfg)
if err != nil {
return nil, nil, errors.Wrapf(err, "failed to marshal dockerd config")
}
deferF := &multiCloser{}
cl = deferF.F()
defer func() {
if err != nil {
deferF.F()()
cl = nil
}
}()
var proxyGroup errgroup.Group
deferF.append(proxyGroup.Wait)
workDir, err := os.MkdirTemp("", "integration")
if err != nil {
return nil, nil, err
}
d, err := dockerd.NewDaemon(workDir)
if err != nil {
return nil, nil, errors.Errorf("new daemon error: %q, %s", err, formatLogs(cfg.Logs))
}
dockerdConfigFile := filepath.Join(workDir, "daemon.json")
if err := os.WriteFile(dockerdConfigFile, dcfgdt, 0644); err != nil {
return nil, nil, err
}
dockerdFlags := []string{
"--config-file", dockerdConfigFile,
"--userland-proxy=false",
"--debug",
}
if s := os.Getenv("BUILDKIT_INTEGRATION_DOCKERD_FLAGS"); s != "" {
dockerdFlags = append(dockerdFlags, strings.Split(strings.TrimSpace(s), "\n")...)
}
err = d.StartWithError(cfg.Logs, dockerdFlags...)
if err != nil {
return nil, nil, err
}
deferF.append(d.StopWithError)
if err := waitUnix(d.Sock(), 5*time.Second, nil); err != nil {
return nil, nil, errors.Errorf("dockerd did not start up: %q, %s", err, formatLogs(cfg.Logs))
}
dockerAPI, err := client.NewClientWithOpts(client.WithHost(d.Sock()))
if err != nil {
return nil, nil, err
}
deferF.append(dockerAPI.Close)
err = waitForAPI(ctx, dockerAPI, 5*time.Second)
if err != nil {
return nil, nil, errors.Wrapf(err, "dockerd client api timed out: %s", formatLogs(cfg.Logs))
}
// Create a temporary file to reserve a name for the Unix domain socket.
// Remove it immediately (the name will still be valid for the socket) so that
// we don't leave files all over the user's tmp tree.
f, err := os.CreateTemp("", "buildkit-integration")
if err != nil {
return
}
localPath := f.Name()
f.Close()
os.Remove(localPath)
listener, err := net.Listen("unix", localPath)
if err != nil {
return nil, nil, errors.Wrapf(err, "dockerd listener error: %s", formatLogs(cfg.Logs))
}
deferF.append(listener.Close)
proxyGroup.Go(func() error {
for {
tmpConn, err := listener.Accept()
if err != nil {
// Ignore the error from Accept, which is always a system error.
return nil
}
conn, err := dockerAPI.DialHijack(ctx, "/grpc", "h2c", nil)
if err != nil {
return err
}
proxyGroup.Go(func() error {
_, err := io.Copy(conn, tmpConn)
if err != nil {
return err
}
return tmpConn.Close()
})
proxyGroup.Go(func() error {
_, err := io.Copy(tmpConn, conn)
if err != nil {
return err
}
return conn.Close()
})
}
})
return backend{
address: "unix://" + listener.Addr().String(),
dockerAddress: d.Sock(),
rootless: c.IsRootless,
isDockerd: true,
unsupportedFeatures: c.Unsupported,
}, cl, nil
}
func waitForAPI(ctx context.Context, apiClient *client.Client, d time.Duration) error {
step := 50 * time.Millisecond
i := 0
for {
if _, err := apiClient.Ping(ctx); err == nil {
break
}
i++
if time.Duration(i)*step > d {
return errors.New("failed to connect to /_ping endpoint")
}
time.Sleep(step)
}
return nil
}
func IsTestDockerd() bool {
return os.Getenv("TEST_DOCKERD") == "1"
}
func IsTestDockerdMoby(sb Sandbox) bool {
b, err := getBackend(sb)
if err != nil {
return false
}
return b.isDockerd && sb.Name() == "dockerd"
}
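
Tests that only make sense against the docker driver can gate on the TEST_DOCKERD switch exposed here; a hypothetical sketch:

package example

import (
	"testing"

	"github.com/moby/buildkit/util/testutil/integration"
)

func testDockerDriverOnly(t *testing.T, sb integration.Sandbox) {
	if !integration.IsTestDockerd() {
		t.Skip("only runs when TEST_DOCKERD=1")
	}
	// sb.DockerAddress() points at the per-sandbox dockerd socket started above.
	_ = sb.DockerAddress()
}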

View File

@ -0,0 +1,56 @@
package integration
import (
"context"
"encoding/json"
"os"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/content/local"
"github.com/containerd/containerd/images/archive"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)
func providerFromBinary(fn string) (_ ocispecs.Descriptor, _ content.Provider, _ func(), err error) {
ctx := context.TODO()
tmpDir, err := os.MkdirTemp("", "buildkit-state")
if err != nil {
return ocispecs.Descriptor{}, nil, nil, err
}
close := func() {
os.RemoveAll(tmpDir)
}
defer func() {
if err != nil {
close()
}
}()
// can't use contentutil.Buffer because ImportIndex takes a content.Store even though it only requires a Provider/Ingester
c, err := local.NewStore(tmpDir)
if err != nil {
return ocispecs.Descriptor{}, nil, nil, err
}
f, err := os.Open(fn)
if err != nil {
return ocispecs.Descriptor{}, nil, nil, err
}
defer f.Close()
desc, err := archive.ImportIndex(ctx, c, f)
if err != nil {
return ocispecs.Descriptor{}, nil, nil, err
}
var idx ocispecs.Index
dt, err := content.ReadBlob(ctx, c, desc)
if err != nil {
return ocispecs.Descriptor{}, nil, nil, err
}
if err := json.Unmarshal(dt, &idx); err != nil {
return ocispecs.Descriptor{}, nil, nil, err
}
return idx.Manifests[0], c, close, nil
}

View File

@ -0,0 +1,116 @@
package integration
import (
"fmt"
"net"
"net/http"
"os"
"os/exec"
"testing"
"time"
"github.com/pkg/errors"
)
const (
minioBin = "minio"
mcBin = "mc"
)
type MinioOpts struct {
Region string
AccessKeyID string
SecretAccessKey string
}
func NewMinioServer(t *testing.T, sb Sandbox, opts MinioOpts) (address string, bucket string, cl func() error, err error) {
t.Helper()
bucket = randomString(10)
if _, err := exec.LookPath(minioBin); err != nil {
return "", "", nil, errors.Wrapf(err, "failed to lookup %s binary", minioBin)
}
if _, err := exec.LookPath(mcBin); err != nil {
return "", "", nil, errors.Wrapf(err, "failed to lookup %s binary", mcBin)
}
deferF := &multiCloser{}
cl = deferF.F()
defer func() {
if err != nil {
deferF.F()()
cl = nil
}
}()
l, err := net.Listen("tcp", "localhost:0")
if err != nil {
return "", "", nil, err
}
addr := l.Addr().String()
if err = l.Close(); err != nil {
return "", "", nil, err
}
address = "http://" + addr
// start server
cmd := exec.Command(minioBin, "server", "--json", "--address", addr, t.TempDir())
cmd.Env = append(os.Environ(), []string{
"MINIO_ROOT_USER=" + opts.AccessKeyID,
"MINIO_ROOT_PASSWORD=" + opts.SecretAccessKey,
}...)
minioStop, err := startCmd(cmd, sb.Logs())
if err != nil {
return "", "", nil, err
}
if err = waitMinio(address, 15*time.Second); err != nil {
minioStop()
return "", "", nil, errors.Wrapf(err, "minio did not start up: %s", formatLogs(sb.Logs()))
}
deferF.append(minioStop)
// create alias config
alias := randomString(10)
cmd = exec.Command(mcBin, "alias", "set", alias, address, opts.AccessKeyID, opts.SecretAccessKey)
if err := runCmd(cmd, sb.Logs()); err != nil {
return "", "", nil, err
}
deferF.append(func() error {
return exec.Command(mcBin, "alias", "rm", alias).Run()
})
// create bucket
cmd = exec.Command(mcBin, "mb", "--region", opts.Region, fmt.Sprintf("%s/%s", alias, bucket)) // #nosec G204
if err := runCmd(cmd, sb.Logs()); err != nil {
return "", "", nil, err
}
// trace
cmd = exec.Command(mcBin, "admin", "trace", "--json", alias)
traceStop, err := startCmd(cmd, sb.Logs())
if err != nil {
return "", "", nil, err
}
deferF.append(traceStop)
return
}
func waitMinio(address string, d time.Duration) error {
step := 1 * time.Second
i := 0
for {
if resp, err := http.Get(fmt.Sprintf("%s/minio/health/live", address)); err == nil {
resp.Body.Close()
break
}
i++
if time.Duration(i)*step > d {
return errors.Errorf("failed dialing: %s", address)
}
time.Sleep(step)
}
return nil
}
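
A sketch of how an S3 cache test would use this helper; the credentials and region are arbitrary test values, and the returned address and bucket would feed the s3 cache exporter/importer attributes:

package example

import (
	"testing"

	"github.com/moby/buildkit/util/testutil/integration"
	"github.com/stretchr/testify/require"
)

func testS3Cache(t *testing.T, sb integration.Sandbox) {
	opts := integration.MinioOpts{
		Region:          "us-east-1",
		AccessKeyID:     "minioadmin",
		SecretAccessKey: "minioadmin",
	}
	addr, bucket, cleanup, err := integration.NewMinioServer(t, sb, opts)
	require.NoError(t, err)
	defer cleanup()

	// addr is http://127.0.0.1:<port>; bucket is a freshly created random bucket.
	_, _ = addr, bucket
}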

View File

@ -0,0 +1,86 @@
package integration
import (
"context"
"fmt"
"log"
"os"
"runtime"
"github.com/moby/buildkit/util/bklog"
"github.com/pkg/errors"
)
func InitOCIWorker() {
Register(&OCI{ID: "oci"})
// the rootless uid is defined in Dockerfile
if s := os.Getenv("BUILDKIT_INTEGRATION_ROOTLESS_IDPAIR"); s != "" {
var uid, gid int
if _, err := fmt.Sscanf(s, "%d:%d", &uid, &gid); err != nil {
bklog.L.Fatalf("unexpected BUILDKIT_INTEGRATION_ROOTLESS_IDPAIR: %q", s)
}
if rootlessSupported(uid) {
Register(&OCI{ID: "oci-rootless", UID: uid, GID: gid})
}
}
if s := os.Getenv("BUILDKIT_INTEGRATION_SNAPSHOTTER"); s != "" {
Register(&OCI{ID: "oci-snapshotter-" + s, Snapshotter: s})
}
}
type OCI struct {
ID string
UID int
GID int
Snapshotter string
}
func (s *OCI) Name() string {
return s.ID
}
func (s *OCI) Rootless() bool {
return s.UID != 0
}
func (s *OCI) New(ctx context.Context, cfg *BackendConfig) (Backend, func() error, error) {
if err := lookupBinary("buildkitd"); err != nil {
return nil, nil, err
}
if err := requireRoot(); err != nil {
return nil, nil, err
}
// Include use of --oci-worker-labels to trigger https://github.com/moby/buildkit/pull/603
buildkitdArgs := []string{"buildkitd", "--oci-worker=true", "--containerd-worker=false", "--oci-worker-gc=false", "--oci-worker-labels=org.mobyproject.buildkit.worker.sandbox=true"}
if s.Snapshotter != "" {
buildkitdArgs = append(buildkitdArgs,
fmt.Sprintf("--oci-worker-snapshotter=%s", s.Snapshotter))
}
if s.UID != 0 {
if s.GID == 0 {
return nil, nil, errors.Errorf("unsupported id pair: uid=%d, gid=%d", s.UID, s.GID)
}
// TODO: make sure the user exists and subuid/subgid are configured.
buildkitdArgs = append([]string{"sudo", "-u", fmt.Sprintf("#%d", s.UID), "-i", "--", "exec", "rootlesskit"}, buildkitdArgs...)
}
var extraEnv []string
if runtime.GOOS != "windows" && s.Snapshotter != "native" {
extraEnv = append(extraEnv, "BUILDKIT_DEBUG_FORCE_OVERLAY_DIFF=true")
}
buildkitdSock, stop, err := runBuildkitd(ctx, cfg, buildkitdArgs, cfg.Logs, s.UID, s.GID, extraEnv)
if err != nil {
printLogs(cfg.Logs, log.Println)
return nil, nil, err
}
return backend{
address: buildkitdSock,
rootless: s.UID != 0,
snapshotter: s.Snapshotter,
}, stop, nil
}

View File

@ -0,0 +1,15 @@
package integration
var pins = map[string]map[string]string{
// busybox is pinned to 1.35. Newer versions produce an "illegal instruction" panic in sha256sum on some of the GitHub infra
"busybox:latest": {
"amd64": "sha256:0d5a701f0ca53f38723108687add000e1922f812d4187dea7feaee85d2f5a6c5",
"arm64v8": "sha256:ffe38d75e44d8ffac4cd6d09777ffc31e94ea0ded6a0164e825a325dc17a3b68",
"library": "sha256:f4ed5f2163110c26d42741fdc92bd1710e118aed4edb19212548e8ca4e5fca22",
},
"alpine:latest": {
"amd64": "sha256:c0d488a800e4127c334ad20d61d7bc21b4097540327217dfab52262adc02380c",
"arm64v8": "sha256:af06af3514c44a964d3b905b498cf6493db8f1cde7c10e078213a89c87308ba0",
"library": "sha256:8914eb54f968791faf6a8638949e480fef81e697984fba772b3976835194c6d4",
},
}

View File

@ -0,0 +1,109 @@
package integration
import (
"bufio"
"context"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"regexp"
"time"
"github.com/pkg/errors"
)
func NewRegistry(dir string) (url string, cl func() error, err error) {
if err := lookupBinary("registry"); err != nil {
return "", nil, err
}
deferF := &multiCloser{}
cl = deferF.F()
defer func() {
if err != nil {
deferF.F()()
cl = nil
}
}()
if dir == "" {
tmpdir, err := os.MkdirTemp("", "test-registry")
if err != nil {
return "", nil, err
}
deferF.append(func() error { return os.RemoveAll(tmpdir) })
dir = tmpdir
}
if _, err := os.Stat(filepath.Join(dir, "config.yaml")); err != nil {
if !errors.Is(err, os.ErrNotExist) {
return "", nil, err
}
template := fmt.Sprintf(`version: 0.1
loglevel: debug
storage:
filesystem:
rootdirectory: %s
http:
addr: 127.0.0.1:0
`, filepath.Join(dir, "data"))
if err := os.WriteFile(filepath.Join(dir, "config.yaml"), []byte(template), 0600); err != nil {
return "", nil, err
}
}
cmd := exec.Command("registry", "serve", filepath.Join(dir, "config.yaml")) //nolint:gosec // test utility
rc, err := cmd.StderrPipe()
if err != nil {
return "", nil, err
}
stop, err := startCmd(cmd, nil)
if err != nil {
return "", nil, err
}
deferF.append(stop)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
url, err = detectPort(ctx, rc)
if err != nil {
return "", nil, err
}
return
}
func detectPort(ctx context.Context, rc io.ReadCloser) (string, error) {
r := regexp.MustCompile(`listening on 127\.0\.0\.1:(\d+)`)
s := bufio.NewScanner(rc)
found := make(chan struct{})
defer func() {
close(found)
go io.Copy(io.Discard, rc)
}()
go func() {
select {
case <-ctx.Done():
select {
case <-found:
return
default:
rc.Close()
}
case <-found:
}
}()
for s.Scan() {
res := r.FindSubmatch(s.Bytes())
if len(res) > 1 {
return "localhost:" + string(res[1]), nil
}
}
return "", errors.Errorf("no listening address found")
}
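
Tests normally reach this through the sandbox rather than calling NewRegistry directly; a minimal sketch (the image reference is illustrative):

package example

import (
	"testing"

	"github.com/moby/buildkit/util/testutil/integration"
	"github.com/pkg/errors"
	"github.com/stretchr/testify/require"
)

func testPushTarget(t *testing.T, sb integration.Sandbox) {
	addr, err := sb.NewRegistry()
	if errors.Is(err, integration.ErrRequirements) {
		t.Skip(err.Error())
	}
	require.NoError(t, err)

	target := addr + "/buildx/registry:latest" // illustrative reference
	_ = target                                 // e.g. an image output pushed to the throwaway registry
}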

View File

@ -0,0 +1,459 @@
package integration
import (
"bytes"
"context"
"fmt"
"math/rand"
"os"
"os/exec"
"path/filepath"
"reflect"
"runtime"
"sort"
"strings"
"testing"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/remotes/docker"
"github.com/gofrs/flock"
"github.com/moby/buildkit/util/appcontext"
"github.com/moby/buildkit/util/contentutil"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
"golang.org/x/sync/semaphore"
)
var sandboxLimiter *semaphore.Weighted
func init() {
sandboxLimiter = semaphore.NewWeighted(int64(runtime.GOMAXPROCS(0)))
}
// Backend is the minimal interface that describes a testing backend.
type Backend interface {
Address() string
DockerAddress() string
ContainerdAddress() string
Rootless() bool
Snapshotter() string
}
type Sandbox interface {
Backend
Context() context.Context
Cmd(...string) *exec.Cmd
Logs() map[string]*bytes.Buffer
PrintLogs(*testing.T)
ClearLogs()
NewRegistry() (string, error)
Value(string) interface{} // chosen matrix value
Name() string
}
// BackendConfig is used to configure backends created by a worker.
type BackendConfig struct {
Logs map[string]*bytes.Buffer
ConfigFile string
}
type Worker interface {
New(context.Context, *BackendConfig) (Backend, func() error, error)
Name() string
Rootless() bool
}
type ConfigUpdater interface {
UpdateConfigFile(string) string
}
type Test interface {
Name() string
Run(t *testing.T, sb Sandbox)
}
type testFunc struct {
name string
run func(t *testing.T, sb Sandbox)
}
func (f testFunc) Name() string {
return f.name
}
func (f testFunc) Run(t *testing.T, sb Sandbox) {
t.Helper()
f.run(t, sb)
}
func TestFuncs(funcs ...func(t *testing.T, sb Sandbox)) []Test {
var tests []Test
names := map[string]struct{}{}
for _, f := range funcs {
name := getFunctionName(f)
if _, ok := names[name]; ok {
panic("duplicate test: " + name)
}
names[name] = struct{}{}
tests = append(tests, testFunc{name: name, run: f})
}
return tests
}
var defaultWorkers []Worker
func Register(w Worker) {
defaultWorkers = append(defaultWorkers, w)
}
func List() []Worker {
return defaultWorkers
}
// TestOpt is an option that can be used to configure a set of integration
// tests.
type TestOpt func(*testConf)
func WithMatrix(key string, m map[string]interface{}) TestOpt {
return func(tc *testConf) {
if tc.matrix == nil {
tc.matrix = map[string]map[string]interface{}{}
}
tc.matrix[key] = m
}
}
func WithMirroredImages(m map[string]string) TestOpt {
return func(tc *testConf) {
if tc.mirroredImages == nil {
tc.mirroredImages = map[string]string{}
}
for k, v := range m {
tc.mirroredImages[k] = v
}
}
}
type testConf struct {
matrix map[string]map[string]interface{}
mirroredImages map[string]string
}
func Run(t *testing.T, testCases []Test, opt ...TestOpt) {
if testing.Short() {
t.Skip("skipping in short mode")
}
if os.Getenv("SKIP_INTEGRATION_TESTS") == "1" {
t.Skip("skipping integration tests")
}
var tc testConf
for _, o := range opt {
o(&tc)
}
mirror, cleanup, err := runMirror(t, tc.mirroredImages)
require.NoError(t, err)
t.Cleanup(func() { _ = cleanup() })
matrix := prepareValueMatrix(tc)
list := List()
if os.Getenv("BUILDKIT_WORKER_RANDOM") == "1" && len(list) > 0 {
rng := rand.New(rand.NewSource(time.Now().UnixNano())) //nolint:gosec // using math/rand is fine in a test utility
list = []Worker{list[rng.Intn(len(list))]}
}
for _, br := range list {
for _, tc := range testCases {
for _, mv := range matrix {
fn := tc.Name()
name := fn + "/worker=" + br.Name() + mv.functionSuffix()
func(fn, testName string, br Worker, tc Test, mv matrixValue) {
ok := t.Run(testName, func(t *testing.T) {
if strings.Contains(fn, "NoRootless") && br.Rootless() {
// skip sandbox setup
t.Skip("rootless")
}
ctx := appcontext.Context()
if !strings.HasSuffix(fn, "NoParallel") {
t.Parallel()
}
require.NoError(t, sandboxLimiter.Acquire(context.TODO(), 1))
defer sandboxLimiter.Release(1)
sb, closer, err := newSandbox(ctx, br, mirror, mv)
require.NoError(t, err)
t.Cleanup(func() { _ = closer() })
defer func() {
if t.Failed() {
sb.PrintLogs(t)
}
}()
tc.Run(t, sb)
})
require.True(t, ok)
}(fn, name, br, tc, mv)
}
}
}
}
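
Putting the pieces together, a hedged sketch of how a consumer of this package wires up an entry point (worker choice, image names, and the matrix key/values are illustrative):

package example

import (
	"testing"

	"github.com/moby/buildkit/util/testutil/integration"
	"github.com/stretchr/testify/require"
)

func init() {
	integration.InitOCIWorker()
	integration.InitContainerdWorker()
}

func TestIntegration(t *testing.T) {
	integration.Run(t, integration.TestFuncs(
		testDiskUsage,
	),
		integration.WithMirroredImages(integration.OfficialImages("busybox:latest", "alpine:latest")),
		integration.WithMatrix("frontend", map[string]interface{}{
			"builtin": "builtin",
			"gateway": "gateway",
		}),
	)
}

func testDiskUsage(t *testing.T, sb integration.Sandbox) {
	// sb.Cmd builds a buildctl invocation with BUILDKIT_HOST pointing at the sandbox.
	require.NoError(t, sb.Cmd("du").Run())
	_ = sb.Value("frontend") // chosen matrix value for this run
}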
func getFunctionName(i interface{}) string {
fullname := runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name()
dot := strings.LastIndex(fullname, ".") + 1
return strings.Title(fullname[dot:]) //nolint:staticcheck // ignoring "SA1019: strings.Title is deprecated", as for our use we don't need full unicode support
}
var localImageCache map[string]map[string]struct{}
func copyImagesLocal(t *testing.T, host string, images map[string]string) error {
for to, from := range images {
if localImageCache == nil {
localImageCache = map[string]map[string]struct{}{}
}
if _, ok := localImageCache[host]; !ok {
localImageCache[host] = map[string]struct{}{}
}
if _, ok := localImageCache[host][to]; ok {
continue
}
localImageCache[host][to] = struct{}{}
var desc ocispecs.Descriptor
var provider content.Provider
var err error
if strings.HasPrefix(from, "local:") {
var closer func()
desc, provider, closer, err = providerFromBinary(strings.TrimPrefix(from, "local:"))
if err != nil {
return err
}
if closer != nil {
defer closer()
}
} else {
desc, provider, err = contentutil.ProviderFromRef(from)
if err != nil {
return err
}
}
// skip the copy if the image already exists in the mirror
_, _, err = docker.NewResolver(docker.ResolverOptions{}).Resolve(context.TODO(), host+"/"+to)
if err == nil {
continue
}
ingester, err := contentutil.IngesterFromRef(host + "/" + to)
if err != nil {
return err
}
if err := contentutil.CopyChain(context.TODO(), ingester, provider, desc); err != nil {
return err
}
t.Logf("copied %s to local mirror %s", from, host+"/"+to)
}
return nil
}
func OfficialImages(names ...string) map[string]string {
ns := runtime.GOARCH
if ns == "arm64" {
ns = "arm64v8"
} else if ns != "amd64" {
ns = "library"
}
m := map[string]string{}
for _, name := range names {
ref := "docker.io/" + ns + "/" + name
if pns, ok := pins[name]; ok {
if dgst, ok := pns[ns]; ok {
ref += "@" + dgst
}
}
m["library/"+name] = ref
}
return m
}
func withMirrorConfig(mirror string) ConfigUpdater {
return mirrorConfig(mirror)
}
type mirrorConfig string
func (mc mirrorConfig) UpdateConfigFile(in string) string {
return fmt.Sprintf(`%s
[registry."docker.io"]
mirrors=["%s"]
`, in, mc)
}
func writeConfig(updaters []ConfigUpdater) (string, error) {
tmpdir, err := os.MkdirTemp("", "bktest_config")
if err != nil {
return "", err
}
if err := os.Chmod(tmpdir, 0711); err != nil {
return "", err
}
s := ""
for _, upt := range updaters {
s = upt.UpdateConfigFile(s)
}
if err := os.WriteFile(filepath.Join(tmpdir, buildkitdConfigFile), []byte(s), 0644); err != nil {
return "", err
}
return tmpdir, nil
}
func runMirror(t *testing.T, mirroredImages map[string]string) (host string, _ func() error, err error) {
mirrorDir := os.Getenv("BUILDKIT_REGISTRY_MIRROR_DIR")
var lock *flock.Flock
if mirrorDir != "" {
if err := os.MkdirAll(mirrorDir, 0700); err != nil {
return "", nil, err
}
lock = flock.New(filepath.Join(mirrorDir, "lock"))
if err := lock.Lock(); err != nil {
return "", nil, err
}
defer func() {
if err != nil {
lock.Unlock()
}
}()
}
mirror, cleanup, err := NewRegistry(mirrorDir)
if err != nil {
return "", nil, err
}
defer func() {
if err != nil {
cleanup()
}
}()
if err := copyImagesLocal(t, mirror, mirroredImages); err != nil {
return "", nil, err
}
if mirrorDir != "" {
if err := lock.Unlock(); err != nil {
return "", nil, err
}
}
return mirror, cleanup, err
}
type matrixValue struct {
fn []string
values map[string]matrixValueChoice
}
func (mv matrixValue) functionSuffix() string {
if len(mv.fn) == 0 {
return ""
}
sort.Strings(mv.fn)
sb := &strings.Builder{}
for _, f := range mv.fn {
sb.Write([]byte("/" + f + "=" + mv.values[f].name))
}
return sb.String()
}
type matrixValueChoice struct {
name string
value interface{}
}
func newMatrixValue(key, name string, v interface{}) matrixValue {
return matrixValue{
fn: []string{key},
values: map[string]matrixValueChoice{
key: {
name: name,
value: v,
},
},
}
}
func prepareValueMatrix(tc testConf) []matrixValue {
m := []matrixValue{}
for featureName, values := range tc.matrix {
current := m
m = []matrixValue{}
for featureValue, v := range values {
if len(current) == 0 {
m = append(m, newMatrixValue(featureName, featureValue, v))
}
for _, c := range current {
vv := newMatrixValue(featureName, featureValue, v)
vv.fn = append(vv.fn, c.fn...)
for k, v := range c.values {
vv.values[k] = v
}
m = append(m, vv)
}
}
}
if len(m) == 0 {
m = append(m, matrixValue{})
}
return m
}
func runStargzSnapshotter(cfg *BackendConfig) (address string, cl func() error, err error) {
binary := "containerd-stargz-grpc"
if err := lookupBinary(binary); err != nil {
return "", nil, err
}
deferF := &multiCloser{}
cl = deferF.F()
defer func() {
if err != nil {
deferF.F()()
cl = nil
}
}()
tmpStargzDir, err := os.MkdirTemp("", "bktest_containerd_stargz_grpc")
if err != nil {
return "", nil, err
}
deferF.append(func() error { return os.RemoveAll(tmpStargzDir) })
address = filepath.Join(tmpStargzDir, "containerd-stargz-grpc.sock")
stargzRootDir := filepath.Join(tmpStargzDir, "root")
cmd := exec.Command(binary,
"--log-level", "debug",
"--address", address,
"--root", stargzRootDir)
snStop, err := startCmd(cmd, cfg.Logs)
if err != nil {
return "", nil, err
}
if err = waitUnix(address, 10*time.Second, cmd); err != nil {
snStop()
return "", nil, errors.Wrapf(err, "containerd-stargz-grpc did not start up: %s", formatLogs(cfg.Logs))
}
deferF.append(snStop)
return
}

View File

@ -0,0 +1,369 @@
package integration
import (
"bufio"
"bytes"
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"testing"
"time"
"github.com/google/shlex"
"github.com/moby/buildkit/util/bklog"
"github.com/pkg/errors"
)
const buildkitdConfigFile = "buildkitd.toml"
type backend struct {
address string
dockerAddress string
containerdAddress string
rootless bool
snapshotter string
unsupportedFeatures []string
isDockerd bool
}
func (b backend) Address() string {
return b.address
}
func (b backend) DockerAddress() string {
return b.dockerAddress
}
func (b backend) ContainerdAddress() string {
return b.containerdAddress
}
func (b backend) Rootless() bool {
return b.rootless
}
func (b backend) Snapshotter() string {
return b.snapshotter
}
func (b backend) isUnsupportedFeature(feature string) bool {
if enabledFeatures := os.Getenv("BUILDKIT_TEST_ENABLE_FEATURES"); enabledFeatures != "" {
for _, enabledFeature := range strings.Split(enabledFeatures, ",") {
if feature == enabledFeature {
return false
}
}
}
if disabledFeatures := os.Getenv("BUILDKIT_TEST_DISABLE_FEATURES"); disabledFeatures != "" {
for _, disabledFeature := range strings.Split(disabledFeatures, ",") {
if feature == disabledFeature {
return true
}
}
}
for _, unsupportedFeature := range b.unsupportedFeatures {
if feature == unsupportedFeature {
return true
}
}
return false
}
type sandbox struct {
Backend
logs map[string]*bytes.Buffer
cleanup *multiCloser
mv matrixValue
ctx context.Context
name string
}
func (sb *sandbox) Name() string {
return sb.name
}
func (sb *sandbox) Context() context.Context {
return sb.ctx
}
func (sb *sandbox) Logs() map[string]*bytes.Buffer {
return sb.logs
}
func (sb *sandbox) PrintLogs(t *testing.T) {
printLogs(sb.logs, t.Log)
}
func (sb *sandbox) ClearLogs() {
sb.logs = make(map[string]*bytes.Buffer)
}
func (sb *sandbox) NewRegistry() (string, error) {
url, cl, err := NewRegistry("")
if err != nil {
return "", err
}
sb.cleanup.append(cl)
return url, nil
}
func (sb *sandbox) Cmd(args ...string) *exec.Cmd {
if len(args) == 1 {
if split, err := shlex.Split(args[0]); err == nil {
args = split
}
}
cmd := exec.Command("buildctl", args...)
cmd.Env = append(cmd.Env, os.Environ()...)
cmd.Env = append(cmd.Env, "BUILDKIT_HOST="+sb.Address())
return cmd
}
func (sb *sandbox) Value(k string) interface{} {
return sb.mv.values[k].value
}
func newSandbox(ctx context.Context, w Worker, mirror string, mv matrixValue) (s Sandbox, cl func() error, err error) {
cfg := &BackendConfig{
Logs: make(map[string]*bytes.Buffer),
}
var upt []ConfigUpdater
for _, v := range mv.values {
if u, ok := v.value.(ConfigUpdater); ok {
upt = append(upt, u)
}
}
if mirror != "" {
upt = append(upt, withMirrorConfig(mirror))
}
deferF := &multiCloser{}
cl = deferF.F()
defer func() {
if err != nil {
deferF.F()()
cl = nil
}
}()
if len(upt) > 0 {
dir, err := writeConfig(upt)
if err != nil {
return nil, nil, err
}
deferF.append(func() error {
return os.RemoveAll(dir)
})
cfg.ConfigFile = filepath.Join(dir, buildkitdConfigFile)
}
b, closer, err := w.New(ctx, cfg)
if err != nil {
return nil, nil, err
}
deferF.append(closer)
return &sandbox{
Backend: b,
logs: cfg.Logs,
cleanup: deferF,
mv: mv,
ctx: ctx,
name: w.Name(),
}, cl, nil
}
func getBuildkitdAddr(tmpdir string) string {
address := "unix://" + filepath.Join(tmpdir, "buildkitd.sock")
if runtime.GOOS == "windows" {
address = "//./pipe/buildkitd-" + filepath.Base(tmpdir)
}
return address
}
func runBuildkitd(ctx context.Context, conf *BackendConfig, args []string, logs map[string]*bytes.Buffer, uid, gid int, extraEnv []string) (address string, cl func() error, err error) {
deferF := &multiCloser{}
cl = deferF.F()
defer func() {
if err != nil {
deferF.F()()
cl = nil
}
}()
if conf.ConfigFile != "" {
args = append(args, "--config="+conf.ConfigFile)
}
tmpdir, err := os.MkdirTemp("", "bktest_buildkitd")
if err != nil {
return "", nil, err
}
if err := os.Chown(tmpdir, uid, gid); err != nil {
return "", nil, err
}
if err := os.MkdirAll(filepath.Join(tmpdir, "tmp"), 0711); err != nil {
return "", nil, err
}
if err := os.Chown(filepath.Join(tmpdir, "tmp"), uid, gid); err != nil {
return "", nil, err
}
deferF.append(func() error { return os.RemoveAll(tmpdir) })
address = getBuildkitdAddr(tmpdir)
args = append(args, "--root", tmpdir, "--addr", address, "--debug")
cmd := exec.Command(args[0], args[1:]...) //nolint:gosec // test utility
cmd.Env = append(os.Environ(), "BUILDKIT_DEBUG_EXEC_OUTPUT=1", "BUILDKIT_DEBUG_PANIC_ON_ERROR=1", "TMPDIR="+filepath.Join(tmpdir, "tmp"))
cmd.Env = append(cmd.Env, extraEnv...)
cmd.SysProcAttr = getSysProcAttr()
stop, err := startCmd(cmd, logs)
if err != nil {
return "", nil, err
}
deferF.append(stop)
if err := waitUnix(address, 15*time.Second, cmd); err != nil {
return "", nil, err
}
deferF.append(func() error {
f, err := os.Open("/proc/self/mountinfo")
if err != nil {
return errors.Wrap(err, "failed to open mountinfo")
}
defer f.Close()
s := bufio.NewScanner(f)
for s.Scan() {
if strings.Contains(s.Text(), tmpdir) {
return errors.Errorf("leaked mountpoint for %s", tmpdir)
}
}
return s.Err()
})
return address, cl, err
}
func getBackend(sb Sandbox) (*backend, error) {
sbx, ok := sb.(*sandbox)
if !ok {
return nil, errors.Errorf("invalid sandbox type %T", sb)
}
b, ok := sbx.Backend.(backend)
if !ok {
return nil, errors.Errorf("invalid backend type %T", b)
}
return &b, nil
}
func rootlessSupported(uid int) bool {
cmd := exec.Command("sudo", "-u", fmt.Sprintf("#%d", uid), "-i", "--", "exec", "unshare", "-U", "true") //nolint:gosec // test utility
b, err := cmd.CombinedOutput()
if err != nil {
bklog.L.Warnf("rootless mode is not supported on this host: %v (%s)", err, string(b))
return false
}
return true
}
func printLogs(logs map[string]*bytes.Buffer, f func(args ...interface{})) {
for name, l := range logs {
f(name)
s := bufio.NewScanner(l)
for s.Scan() {
f(s.Text())
}
}
}
const (
FeatureCacheExport = "cache_export"
FeatureCacheImport = "cache_import"
FeatureCacheBackendAzblob = "cache_backend_azblob"
FeatureCacheBackendGha = "cache_backend_gha"
FeatureCacheBackendInline = "cache_backend_inline"
FeatureCacheBackendLocal = "cache_backend_local"
FeatureCacheBackendRegistry = "cache_backend_registry"
FeatureCacheBackendS3 = "cache_backend_s3"
FeatureDirectPush = "direct_push"
FeatureFrontendOutline = "frontend_outline"
FeatureFrontendTargets = "frontend_targets"
FeatureImageExporter = "image_exporter"
FeatureInfo = "info"
FeatureMergeDiff = "merge_diff"
FeatureMultiCacheExport = "multi_cache_export"
FeatureMultiPlatform = "multi_platform"
FeatureOCIExporter = "oci_exporter"
FeatureOCILayout = "oci_layout"
FeatureProvenance = "provenance"
FeatureSBOM = "sbom"
FeatureSecurityMode = "security_mode"
FeatureSourceDateEpoch = "source_date_epoch"
FeatureCNINetwork = "cni_network"
)
var features = map[string]struct{}{
FeatureCacheExport: {},
FeatureCacheImport: {},
FeatureCacheBackendAzblob: {},
FeatureCacheBackendGha: {},
FeatureCacheBackendInline: {},
FeatureCacheBackendLocal: {},
FeatureCacheBackendRegistry: {},
FeatureCacheBackendS3: {},
FeatureDirectPush: {},
FeatureFrontendOutline: {},
FeatureFrontendTargets: {},
FeatureImageExporter: {},
FeatureInfo: {},
FeatureMergeDiff: {},
FeatureMultiCacheExport: {},
FeatureMultiPlatform: {},
FeatureOCIExporter: {},
FeatureOCILayout: {},
FeatureProvenance: {},
FeatureSBOM: {},
FeatureSecurityMode: {},
FeatureSourceDateEpoch: {},
FeatureCNINetwork: {},
}
func CheckFeatureCompat(t *testing.T, sb Sandbox, reason ...string) {
t.Helper()
if len(reason) == 0 {
t.Fatal("no reason provided")
}
b, err := getBackend(sb)
if err != nil {
t.Fatal(err)
}
if len(b.unsupportedFeatures) == 0 {
return
}
var ereasons []string
for _, r := range reason {
if _, ok := features[r]; ok {
if b.isUnsupportedFeature(r) {
ereasons = append(ereasons, r)
}
} else {
sb.ClearLogs()
t.Fatalf("unknown reason %q to skip test", r)
}
}
if len(ereasons) > 0 {
t.Skipf("%s worker can not currently run this test due to missing features (%s)", sb.Name(), strings.Join(ereasons, ", "))
}
}
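
A sketch of the intended call pattern at the top of a test, using the feature constants defined above:

package example

import (
	"testing"

	"github.com/moby/buildkit/util/testutil/integration"
)

func testDirectPush(t *testing.T, sb integration.Sandbox) {
	integration.CheckFeatureCompat(t, sb,
		integration.FeatureDirectPush,
		integration.FeatureCacheBackendRegistry,
	)
	// reached only on workers that support both features
}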

View File

@ -0,0 +1,12 @@
//go:build !windows
// +build !windows
package integration
import "syscall"
func getSysProcAttr() *syscall.SysProcAttr {
return &syscall.SysProcAttr{
Setsid: true, // sudo on Debian stretch needs this for SIGTERM handling
}
}

View File

@ -0,0 +1,10 @@
//go:build windows
// +build windows
package integration
import "syscall"
func getSysProcAttr() *syscall.SysProcAttr {
return &syscall.SysProcAttr{}
}

View File

@ -0,0 +1,196 @@
package integration
import (
"bytes"
"context"
"crypto/rand"
"fmt"
"io"
"net"
"os"
"os/exec"
"strings"
"sync"
"syscall"
"testing"
"time"
"github.com/containerd/continuity/fs/fstest"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
)
func runCmd(cmd *exec.Cmd, logs map[string]*bytes.Buffer) error {
if logs != nil {
setCmdLogs(cmd, logs)
}
fmt.Fprintf(cmd.Stderr, "> runCmd %v %+v\n", time.Now(), cmd.String())
return cmd.Run()
}
func startCmd(cmd *exec.Cmd, logs map[string]*bytes.Buffer) (func() error, error) {
if logs != nil {
setCmdLogs(cmd, logs)
}
fmt.Fprintf(cmd.Stderr, "> startCmd %v %+v\n", time.Now(), cmd.String())
if err := cmd.Start(); err != nil {
return nil, err
}
eg, ctx := errgroup.WithContext(context.TODO())
stopped := make(chan struct{})
stop := make(chan struct{})
eg.Go(func() error {
err := cmd.Wait()
fmt.Fprintf(cmd.Stderr, "> stopped %v %+v %v\n", time.Now(), cmd.ProcessState, cmd.ProcessState.ExitCode())
close(stopped)
select {
case <-stop:
return nil
default:
return err
}
})
eg.Go(func() error {
select {
case <-ctx.Done():
case <-stopped:
case <-stop:
fmt.Fprintf(cmd.Stderr, "> sending sigterm %v\n", time.Now())
cmd.Process.Signal(syscall.SIGTERM)
go func() {
select {
case <-stopped:
case <-time.After(20 * time.Second):
cmd.Process.Kill()
}
}()
}
return nil
})
return func() error {
close(stop)
return eg.Wait()
}, nil
}
func setCmdLogs(cmd *exec.Cmd, logs map[string]*bytes.Buffer) {
b := new(bytes.Buffer)
logs["stdout: "+cmd.String()] = b
cmd.Stdout = &lockingWriter{Writer: b}
b = new(bytes.Buffer)
logs["stderr: "+cmd.String()] = b
cmd.Stderr = &lockingWriter{Writer: b}
}
func waitUnix(address string, d time.Duration, cmd *exec.Cmd) error {
address = strings.TrimPrefix(address, "unix://")
addr, err := net.ResolveUnixAddr("unix", address)
if err != nil {
return errors.Wrapf(err, "failed resolving unix addr: %s", address)
}
step := 50 * time.Millisecond
i := 0
for {
if cmd != nil && cmd.ProcessState != nil {
return errors.Errorf("process exited: %s", cmd.String())
}
if conn, err := net.DialUnix("unix", nil, addr); err == nil {
conn.Close()
break
}
i++
if time.Duration(i)*step > d {
return errors.Errorf("failed dialing: %s", address)
}
time.Sleep(step)
}
return nil
}
type multiCloser struct {
fns []func() error
}
func (mc *multiCloser) F() func() error {
return func() error {
var err error
for i := range mc.fns {
if err1 := mc.fns[len(mc.fns)-1-i](); err == nil {
err = err1
}
}
mc.fns = nil
return err
}
}
func (mc *multiCloser) append(f func() error) {
mc.fns = append(mc.fns, f)
}
var ErrRequirements = errors.Errorf("missing requirements")
func lookupBinary(name string) error {
_, err := exec.LookPath(name)
if err != nil {
return errors.Wrapf(ErrRequirements, "failed to lookup %s binary", name)
}
return nil
}
func requireRoot() error {
if os.Getuid() != 0 {
return errors.Wrap(ErrRequirements, "requires root")
}
return nil
}
type lockingWriter struct {
mu sync.Mutex
io.Writer
}
func (w *lockingWriter) Write(dt []byte) (int, error) {
w.mu.Lock()
n, err := w.Writer.Write(dt)
w.mu.Unlock()
return n, err
}
func Tmpdir(t *testing.T, appliers ...fstest.Applier) (string, error) {
// We cannot use t.TempDir() to create a temporary directory here because
// appliers might contain fstest.CreateSocket. If the test name is too long,
// t.TempDir() could return a path that is longer than 108 characters. This
// would result in "bind: invalid argument" when we listen on the socket.
tmpdir, err := os.MkdirTemp("", "buildkit")
if err != nil {
return "", err
}
t.Cleanup(func() {
require.NoError(t, os.RemoveAll(tmpdir))
})
if err := fstest.Apply(appliers...).Apply(tmpdir); err != nil {
return "", err
}
return tmpdir, nil
}
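
A minimal sketch of building a build-context directory with appliers (file names and contents are placeholders):

package example

import (
	"testing"

	"github.com/containerd/continuity/fs/fstest"
	"github.com/moby/buildkit/util/testutil/integration"
	"github.com/stretchr/testify/require"
)

func testContextDir(t *testing.T) {
	dir, err := integration.Tmpdir(t,
		fstest.CreateFile("Dockerfile", []byte("FROM scratch\nCOPY foo /foo\n"), 0600),
		fstest.CreateFile("foo", []byte("contents"), 0600),
	)
	require.NoError(t, err)
	_ = dir // passed as the local context/dockerfile directory to a build
}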
func randomString(n int) string {
chars := "abcdefghijklmnopqrstuvwxyz"
var b = make([]byte, n)
_, _ = rand.Read(b)
for k, v := range b {
b[k] = chars[v%byte(len(chars))]
}
return string(b)
}

50
vendor/github.com/moby/buildkit/util/testutil/tar.go generated vendored Normal file
View File

@ -0,0 +1,50 @@
package testutil
import (
"archive/tar"
"bytes"
"compress/gzip"
"io"
"github.com/pkg/errors"
)
type TarItem struct {
Header *tar.Header
Data []byte
}
func ReadTarToMap(dt []byte, compressed bool) (map[string]*TarItem, error) {
m := map[string]*TarItem{}
var r io.Reader = bytes.NewBuffer(dt)
if compressed {
gz, err := gzip.NewReader(r)
if err != nil {
return nil, errors.Wrapf(err, "error creating gzip reader")
}
defer gz.Close()
r = gz
}
tr := tar.NewReader(r)
for {
h, err := tr.Next()
if err != nil {
if err == io.EOF {
return m, nil
}
return nil, errors.Wrap(err, "error reading tar")
}
if _, ok := m[h.Name]; ok {
return nil, errors.Errorf("duplicate entries for %s", h.Name)
}
var dt []byte
if h.Typeflag == tar.TypeReg {
dt, err = io.ReadAll(tr)
if err != nil {
return nil, errors.Wrapf(err, "error reading file")
}
}
m[h.Name] = &TarItem{Header: h, Data: dt}
}
}
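
A sketch of asserting on layer contents with this helper (the blob and the path being checked are placeholders):

package example

import (
	"testing"

	"github.com/moby/buildkit/util/testutil"
	"github.com/stretchr/testify/require"
)

func checkLayer(t *testing.T, layerBlob []byte) {
	// layerBlob is assumed to be a gzip-compressed layer, e.g. ImageInfo.LayersRaw[0].
	m, err := testutil.ReadTarToMap(layerBlob, true)
	require.NoError(t, err)

	item, ok := m["foo"]
	require.True(t, ok)
	require.Equal(t, item.Header.Size, int64(len(item.Data)))
}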

10
vendor/modules.txt vendored
View File

@ -181,6 +181,7 @@ github.com/containerd/containerd/errdefs
github.com/containerd/containerd/filters
github.com/containerd/containerd/identifiers
github.com/containerd/containerd/images
github.com/containerd/containerd/images/archive
github.com/containerd/containerd/labels
github.com/containerd/containerd/leases
github.com/containerd/containerd/log
@ -204,7 +205,13 @@ github.com/containerd/containerd/tracing
github.com/containerd/containerd/version
# github.com/containerd/continuity v0.3.0
## explicit; go 1.17
github.com/containerd/continuity
github.com/containerd/continuity/devices
github.com/containerd/continuity/driver
github.com/containerd/continuity/fs
github.com/containerd/continuity/fs/fstest
github.com/containerd/continuity/pathdriver
github.com/containerd/continuity/proto
github.com/containerd/continuity/sysx
# github.com/containerd/ttrpc v1.2.1
## explicit; go 1.13
@ -576,6 +583,9 @@ github.com/moby/buildkit/util/resolver/retryhandler
github.com/moby/buildkit/util/sshutil
github.com/moby/buildkit/util/stack
github.com/moby/buildkit/util/system
github.com/moby/buildkit/util/testutil
github.com/moby/buildkit/util/testutil/dockerd
github.com/moby/buildkit/util/testutil/integration
github.com/moby/buildkit/util/tracing
github.com/moby/buildkit/util/tracing/detect
github.com/moby/buildkit/util/tracing/detect/delegated