Merge pull request #787 from crazy-max/inject-certs

container driver: copy CA and user TLS registry certs
Tõnis Tiigi 2021-10-28 15:11:57 -07:00 committed by GitHub
commit 49342dd54d
69 changed files with 11391 additions and 33 deletions


@@ -84,6 +84,11 @@ Specifies the configuration file for the buildkitd daemon to use. The configuration
can be overridden by [`--buildkitd-flags`](#buildkitd-flags).
See an [example buildkitd configuration file](https://github.com/moby/buildkit/blob/master/docs/buildkitd.toml.md).
Note that if you create a `docker-container` builder and have specified
certificates for registries in the `buildkitd.toml` configuration, the files
will be copied into the container under `/etc/buildkit/certs`, and the
configuration will be updated to point at the copied files.
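For example, following the path translation documented in `util/confutil/container.go` later in this diff, a configuration such as:

```toml
[registry."myregistry.io"]
  ca = ["/etc/config/myca.pem"]
  [[registry."myregistry.io".keypair]]
    key = "/etc/config/key.pem"
    cert = "/etc/config/cert.pem"
```

is rewritten inside the container as:

```toml
[registry."myregistry.io"]
  ca = ["/etc/buildkit/certs/myregistry.io/myca.pem"]
  [[registry."myregistry.io".keypair]]
    key = "/etc/buildkit/certs/myregistry.io/key.pem"
    cert = "/etc/buildkit/certs/myregistry.io/cert.pem"
```

(`myregistry.io` and the `/etc/config/*.pem` paths are illustrative values, not defaults.)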
### <a name="driver"></a> Set the builder driver to use (--driver)
```


@@ -1,7 +1,6 @@
package docker
import (
"archive/tar"
"bytes"
"context"
"io"
@@ -12,6 +11,7 @@ import (
"github.com/docker/buildx/driver"
"github.com/docker/buildx/driver/bkimage"
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/imagetools"
"github.com/docker/buildx/util/progress"
"github.com/docker/docker/api/types"
@@ -20,6 +20,8 @@ import (
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/network"
dockerclient "github.com/docker/docker/client"
dockerarchive "github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/stdcopy"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/util/tracing/detect"
@@ -28,12 +30,6 @@ import (
const (
volumeStateSuffix = "_state"
// containerStateDir is the location where buildkitd inside the container
// stores its state. The container driver creates a Linux container, so
// this should match the location for Linux, as defined in:
// https://github.com/moby/buildkit/blob/v0.9.0/util/appdefaults/appdefaults_unix.go#L11-L15
containerBuildKitRootDir = "/var/lib/buildkit"
)
type Driver struct {
@@ -119,7 +115,7 @@ func (d *Driver) create(ctx context.Context, l progress.SubLogger) error {
{
Type: mount.TypeVolume,
Source: d.Name + volumeStateSuffix,
Target: containerBuildKitRootDir,
Target: confutil.DefaultBuildKitStateDir,
},
},
}
@@ -139,11 +135,12 @@ func (d *Driver) create(ctx context.Context, l progress.SubLogger) error {
return err
}
if f := d.InitConfig.ConfigFile; f != "" {
buf, err := readFileToTar(f)
configFiles, err := confutil.LoadConfigFiles(f)
if err != nil {
return err
}
if err := d.DockerAPI.CopyToContainer(ctx, d.Name, "/", buf, dockertypes.CopyToContainerOptions{}); err != nil {
defer os.RemoveAll(configFiles)
if err := d.copyToContainer(ctx, configFiles, "/"); err != nil {
return err
}
}
@@ -205,6 +202,17 @@ func (d *Driver) copyLogs(ctx context.Context, l progress.SubLogger) error {
return rc.Close()
}
func (d *Driver) copyToContainer(ctx context.Context, srcPath string, dstDir string) error {
srcArchive, err := dockerarchive.TarWithOptions(srcPath, &dockerarchive.TarOptions{
ChownOpts: &idtools.Identity{UID: 0, GID: 0},
})
if err != nil {
return err
}
defer srcArchive.Close()
return d.DockerAPI.CopyToContainer(ctx, d.Name, dstDir, srcArchive, dockertypes.CopyToContainerOptions{})
}
func (d *Driver) exec(ctx context.Context, cmd []string) (string, net.Conn, error) {
execConfig := types.ExecConfig{
Cmd: cmd,
@@ -366,29 +374,6 @@ func (d *demux) Read(dt []byte) (int, error) {
return d.Reader.Read(dt)
}
func readFileToTar(fn string) (*bytes.Buffer, error) {
buf := bytes.NewBuffer(nil)
tw := tar.NewWriter(buf)
dt, err := ioutil.ReadFile(fn)
if err != nil {
return nil, err
}
if err := tw.WriteHeader(&tar.Header{
Name: "/etc/buildkit/buildkitd.toml",
Size: int64(len(dt)),
Mode: 0644,
}); err != nil {
return nil, err
}
if _, err := tw.Write(dt); err != nil {
return nil, err
}
if err := tw.Close(); err != nil {
return nil, err
}
return buf, nil
}
type logWriter struct {
logger progress.SubLogger
stream int

go.mod (1 change)

@@ -34,6 +34,7 @@ require (
github.com/moby/buildkit v0.9.1-0.20211019185819-8778943ac3da
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.2-0.20210819154149-5ad6f50d6283
github.com/pelletier/go-toml v1.9.4
github.com/pkg/errors v0.9.1
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002
github.com/sirupsen/logrus v1.8.1

util/confutil/config.go (new file, +25)

@@ -0,0 +1,25 @@
package confutil
import (
"os"
"github.com/pelletier/go-toml"
"github.com/pkg/errors"
)
// loadConfigTree loads BuildKit config toml tree
func loadConfigTree(fp string) (*toml.Tree, error) {
f, err := os.Open(fp)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return nil, nil
}
return nil, errors.Wrapf(err, "failed to load config from %s", fp)
}
defer f.Close()
t, err := toml.LoadReader(f)
if err != nil {
return t, errors.Wrap(err, "failed to parse config")
}
return t, nil
}

util/confutil/container.go (new file, +144)

@@ -0,0 +1,144 @@
package confutil
import (
"io"
"os"
"path"
"github.com/pelletier/go-toml"
"github.com/pkg/errors"
)
const (
// DefaultBuildKitStateDir and DefaultBuildKitConfigDir are the location
// where buildkitd inside the container stores its state. Some drivers
// create a Linux container, so this should match the location for Linux,
// as defined in: https://github.com/moby/buildkit/blob/v0.9.0/util/appdefaults/appdefaults_unix.go#L11-L15
DefaultBuildKitStateDir = "/var/lib/buildkit"
DefaultBuildKitConfigDir = "/etc/buildkit"
)
// LoadConfigFiles creates a temp directory with BuildKit config and
// registry certificates ready to be copied to a container.
func LoadConfigFiles(bkconfig string) (string, error) {
if _, err := os.Stat(bkconfig); errors.Is(err, os.ErrNotExist) {
return "", errors.Wrapf(err, "buildkit configuration file not found: %s", bkconfig)
} else if err != nil {
return "", errors.Wrapf(err, "invalid buildkit configuration file: %s", bkconfig)
}
// Load config tree
btoml, err := loadConfigTree(bkconfig)
if err != nil {
return "", err
}
// Temp dir that will be copied to the container
tmpDir, err := os.MkdirTemp("", "buildkitd-config")
if err != nil {
return "", err
}
// Create BuildKit config folders
tmpBuildKitConfigDir := path.Join(tmpDir, DefaultBuildKitConfigDir)
tmpBuildKitCertsDir := path.Join(tmpBuildKitConfigDir, "certs")
if err := os.MkdirAll(tmpBuildKitCertsDir, 0700); err != nil {
return "", err
}
// Iterate through registry config to copy certs and update
// BuildKit config with the underlying certs' path in the container.
//
// The following BuildKit config:
//
// [registry."myregistry.io"]
// ca=["/etc/config/myca.pem"]
// [[registry."myregistry.io".keypair]]
// key="/etc/config/key.pem"
// cert="/etc/config/cert.pem"
//
// will be translated in the container as:
//
// [registry."myregistry.io"]
// ca=["/etc/buildkit/certs/myregistry.io/myca.pem"]
// [[registry."myregistry.io".keypair]]
// key="/etc/buildkit/certs/myregistry.io/key.pem"
// cert="/etc/buildkit/certs/myregistry.io/cert.pem"
if btoml.Has("registry") {
for regName := range btoml.GetArray("registry").(*toml.Tree).Values() {
regConf := btoml.GetPath([]string{"registry", regName}).(*toml.Tree)
if regConf == nil {
continue
}
regCertsDir := path.Join(tmpBuildKitCertsDir, regName)
if err := os.Mkdir(regCertsDir, 0755); err != nil {
return "", err
}
if regConf.Has("ca") {
regCAs := regConf.GetArray("ca").([]string)
if len(regCAs) > 0 {
var cas []string
for _, ca := range regCAs {
cas = append(cas, path.Join(DefaultBuildKitConfigDir, "certs", regName, path.Base(ca)))
if err := copyfile(ca, path.Join(regCertsDir, path.Base(ca))); err != nil {
return "", err
}
}
regConf.Set("ca", cas)
}
}
if regConf.Has("keypair") {
regKeyPairs := regConf.GetArray("keypair").([]*toml.Tree)
if len(regKeyPairs) == 0 {
continue
}
for _, kp := range regKeyPairs {
if kp == nil {
continue
}
key := kp.Get("key").(string)
if len(key) > 0 {
kp.Set("key", path.Join(DefaultBuildKitConfigDir, "certs", regName, path.Base(key)))
if err := copyfile(key, path.Join(regCertsDir, path.Base(key))); err != nil {
return "", err
}
}
cert := kp.Get("cert").(string)
if len(cert) > 0 {
kp.Set("cert", path.Join(DefaultBuildKitConfigDir, "certs", regName, path.Base(cert)))
if err := copyfile(cert, path.Join(regCertsDir, path.Base(cert))); err != nil {
return "", err
}
}
}
}
}
}
// Write BuildKit config
bkfile, err := os.OpenFile(path.Join(tmpBuildKitConfigDir, "buildkitd.toml"), os.O_CREATE|os.O_WRONLY, 0600)
if err != nil {
return "", err
}
_, err = btoml.WriteTo(bkfile)
if err != nil {
return "", err
}
return tmpDir, nil
}
func copyfile(src string, dst string) error {
sf, err := os.Open(src)
if err != nil {
return err
}
defer sf.Close()
df, err := os.OpenFile(dst, os.O_CREATE|os.O_WRONLY, 0600)
if err != nil {
return err
}
defer df.Close()
_, err = io.Copy(df, sf)
return err
}
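Given the example registry configuration above, the temp directory returned by `LoadConfigFiles` ends up with roughly the following layout (a sketch inferred from the code above; `myregistry.io` and the `*.pem` names are the illustrative values from the comment, not defaults):

```
<tmpdir>/etc/buildkit/buildkitd.toml            # ca/key/cert paths rewritten
<tmpdir>/etc/buildkit/certs/myregistry.io/myca.pem
<tmpdir>/etc/buildkit/certs/myregistry.io/key.pem
<tmpdir>/etc/buildkit/certs/myregistry.io/cert.pem
```

Copying this tree to `/` in the container, as `copyToContainer` does in the driver hunk earlier in this diff, places every file exactly where the rewritten `buildkitd.toml` expects it.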


@@ -0,0 +1,62 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package userns
import (
"bufio"
"fmt"
"os"
"sync"
)
var (
inUserNS bool
nsOnce sync.Once
)
// RunningInUserNS detects whether we are currently running in a user namespace.
// Originally copied from github.com/lxc/lxd/shared/util.go
func RunningInUserNS() bool {
nsOnce.Do(func() {
file, err := os.Open("/proc/self/uid_map")
if err != nil {
// This kernel-provided file only exists if user namespaces are supported
return
}
defer file.Close()
buf := bufio.NewReader(file)
l, _, err := buf.ReadLine()
if err != nil {
return
}
line := string(l)
var a, b, c int64
fmt.Sscanf(line, "%d %d %d", &a, &b, &c)
/*
* We assume we are in the initial user namespace if we have a full
* range - 4294967295 uids starting at uid 0.
*/
if a == 0 && b == 0 && c == 4294967295 {
return
}
inUserNS = true
})
return inUserNS
}


@@ -0,0 +1,25 @@
// +build !linux
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package userns
// RunningInUserNS is a stub for non-Linux systems
// Always returns false
func RunningInUserNS() bool {
return false
}


@@ -0,0 +1 @@
This code provides helper functions for dealing with archive files.

vendor/github.com/docker/docker/pkg/archive/archive.go (generated, vendored, new file, +1334): file diff suppressed because it is too large


@@ -0,0 +1,100 @@
package archive // import "github.com/docker/docker/pkg/archive"
import (
"archive/tar"
"os"
"path/filepath"
"strings"
"github.com/docker/docker/pkg/system"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
)
func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) (tarWhiteoutConverter, error) {
if format == OverlayWhiteoutFormat {
if inUserNS {
return nil, errors.New("specifying OverlayWhiteoutFormat is not allowed in userns")
}
return overlayWhiteoutConverter{}, nil
}
return nil, nil
}
type overlayWhiteoutConverter struct {
}
func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) {
// convert whiteouts to AUFS format
if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
// we just rename the file and make it normal
dir, filename := filepath.Split(hdr.Name)
hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename)
hdr.Mode = 0600
hdr.Typeflag = tar.TypeReg
hdr.Size = 0
}
if fi.Mode()&os.ModeDir != 0 {
// convert opaque dirs to AUFS format by writing an empty file with the prefix
opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
if err != nil {
return nil, err
}
if len(opaque) == 1 && opaque[0] == 'y' {
if hdr.Xattrs != nil {
delete(hdr.Xattrs, "trusted.overlay.opaque")
}
// create a header for the whiteout file
// it should inherit some properties from the parent, but be a regular file
wo = &tar.Header{
Typeflag: tar.TypeReg,
Mode: hdr.Mode & int64(os.ModePerm),
Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir),
Size: 0,
Uid: hdr.Uid,
Uname: hdr.Uname,
Gid: hdr.Gid,
Gname: hdr.Gname,
AccessTime: hdr.AccessTime,
ChangeTime: hdr.ChangeTime,
}
}
}
return
}
func (c overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
base := filepath.Base(path)
dir := filepath.Dir(path)
// if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
if base == WhiteoutOpaqueDir {
err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0)
if err != nil {
return false, errors.Wrapf(err, "setxattr(%q, trusted.overlay.opaque=y)", dir)
}
// don't write the file itself
return false, err
}
// if a file was deleted and we are using overlay, we need to create a character device
if strings.HasPrefix(base, WhiteoutPrefix) {
originalBase := base[len(WhiteoutPrefix):]
originalPath := filepath.Join(dir, originalBase)
if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
return false, errors.Wrapf(err, "failed to mknod(%q, S_IFCHR, 0)", originalPath)
}
if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
return false, err
}
// don't write the file itself
return false, nil
}
return true, nil
}


@@ -0,0 +1,7 @@
// +build !linux
package archive // import "github.com/docker/docker/pkg/archive"
func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) (tarWhiteoutConverter, error) {
return nil, nil
}


@@ -0,0 +1,115 @@
// +build !windows
package archive // import "github.com/docker/docker/pkg/archive"
import (
"archive/tar"
"errors"
"os"
"path/filepath"
"strings"
"syscall"
"github.com/containerd/containerd/pkg/userns"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/system"
"golang.org/x/sys/unix"
)
// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
return srcPath
}
// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific. On Linux, we
// can't use filepath.Join(srcPath,include) because this will clean away
// a trailing "." or "/" which may be important.
func getWalkRoot(srcPath string, include string) string {
return strings.TrimSuffix(srcPath, string(filepath.Separator)) + string(filepath.Separator) + include
}
// CanonicalTarNameForPath returns platform-specific filepath
// to canonical posix-style path for tar archival. p is relative
// path.
func CanonicalTarNameForPath(p string) string {
return p // already unix-style
}
// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
return perm // noop for unix as golang APIs provide perm bits correctly
}
func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
s, ok := stat.(*syscall.Stat_t)
if ok {
// Currently go does not fill in the major/minors
if s.Mode&unix.S_IFBLK != 0 ||
s.Mode&unix.S_IFCHR != 0 {
hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert
hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert
}
}
return
}
func getInodeFromStat(stat interface{}) (inode uint64, err error) {
s, ok := stat.(*syscall.Stat_t)
if ok {
inode = s.Ino
}
return
}
func getFileUIDGID(stat interface{}) (idtools.Identity, error) {
s, ok := stat.(*syscall.Stat_t)
if !ok {
return idtools.Identity{}, errors.New("cannot convert stat value to syscall.Stat_t")
}
return idtools.Identity{UID: int(s.Uid), GID: int(s.Gid)}, nil
}
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
mode := uint32(hdr.Mode & 07777)
switch hdr.Typeflag {
case tar.TypeBlock:
mode |= unix.S_IFBLK
case tar.TypeChar:
mode |= unix.S_IFCHR
case tar.TypeFifo:
mode |= unix.S_IFIFO
}
err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor)))
if errors.Is(err, syscall.EPERM) && userns.RunningInUserNS() {
// In most cases, cannot create a device if running in user namespace
err = nil
}
return err
}
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
if hdr.Typeflag == tar.TypeLink {
if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
return err
}
}
} else if hdr.Typeflag != tar.TypeSymlink {
if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
return err
}
}
return nil
}


@@ -0,0 +1,67 @@
package archive // import "github.com/docker/docker/pkg/archive"
import (
"archive/tar"
"os"
"path/filepath"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/longpath"
)
// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
return longpath.AddPrefix(srcPath)
}
// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific.
func getWalkRoot(srcPath string, include string) string {
return filepath.Join(srcPath, include)
}
// CanonicalTarNameForPath returns platform-specific filepath
// to canonical posix-style path for tar archival. p is relative
// path.
func CanonicalTarNameForPath(p string) string {
return filepath.ToSlash(p)
}
// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
// perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
permPart := perm & os.ModePerm
noPermPart := perm &^ os.ModePerm
// Add the x bit: make everything +x from windows
permPart |= 0111
permPart &= 0755
return noPermPart | permPart
}
func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
// do nothing. no notion of Rdev, Nlink in stat on Windows
return
}
func getInodeFromStat(stat interface{}) (inode uint64, err error) {
// do nothing. no notion of Inode in stat on Windows
return
}
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
return nil
}
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
return nil
}
func getFileUIDGID(stat interface{}) (idtools.Identity, error) {
// no notion of file ownership mapping yet on Windows
return idtools.Identity{UID: 0, GID: 0}, nil
}

vendor/github.com/docker/docker/pkg/archive/changes.go (generated, vendored, new file, +445)

@@ -0,0 +1,445 @@
package archive // import "github.com/docker/docker/pkg/archive"
import (
"archive/tar"
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"syscall"
"time"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/system"
"github.com/sirupsen/logrus"
)
// ChangeType represents the change type.
type ChangeType int
const (
// ChangeModify represents the modify operation.
ChangeModify = iota
// ChangeAdd represents the add operation.
ChangeAdd
// ChangeDelete represents the delete operation.
ChangeDelete
)
func (c ChangeType) String() string {
switch c {
case ChangeModify:
return "C"
case ChangeAdd:
return "A"
case ChangeDelete:
return "D"
}
return ""
}
// Change represents a change, it wraps the change type and path.
// It describes changes of the files in the path respect to the
// parent layers. The change could be modify, add, delete.
// This is used for layer diff.
type Change struct {
Path string
Kind ChangeType
}
func (change *Change) String() string {
return fmt.Sprintf("%s %s", change.Kind, change.Path)
}
// for sort.Sort
type changesByPath []Change
func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
func (c changesByPath) Len() int { return len(c) }
func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] }
// Gnu tar doesn't have sub-second mtime precision. The go tar
// writer (1.10+) does when using PAX format, but we round times to seconds
// to ensure archives have the same hashes for backwards compatibility.
// See https://github.com/moby/moby/pull/35739/commits/fb170206ba12752214630b269a40ac7be6115ed4.
//
// Non-sub-second is problematic when we apply changes via tar
// files. We handle this by comparing for exact times, *or* same
// second count and either a or b having exactly 0 nanoseconds
func sameFsTime(a, b time.Time) bool {
return a.Equal(b) ||
(a.Unix() == b.Unix() &&
(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}
func sameFsTimeSpec(a, b syscall.Timespec) bool {
return a.Sec == b.Sec &&
(a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
}
// Changes walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
func Changes(layers []string, rw string) ([]Change, error) {
return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
}
func aufsMetadataSkip(path string) (skip bool, err error) {
skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
if err != nil {
skip = true
}
return
}
func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
f := filepath.Base(path)
// If there is a whiteout, then the file was removed
if strings.HasPrefix(f, WhiteoutPrefix) {
originalFile := f[len(WhiteoutPrefix):]
return filepath.Join(filepath.Dir(path), originalFile), nil
}
return "", nil
}
type skipChange func(string) (bool, error)
type deleteChange func(string, string, os.FileInfo) (string, error)
func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
var (
changes []Change
changedDirs = make(map[string]struct{})
)
err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
// Rebase path
path, err = filepath.Rel(rw, path)
if err != nil {
return err
}
// As this runs on the daemon side, file paths are OS specific.
path = filepath.Join(string(os.PathSeparator), path)
// Skip root
if path == string(os.PathSeparator) {
return nil
}
if sc != nil {
if skip, err := sc(path); skip {
return err
}
}
change := Change{
Path: path,
}
deletedFile, err := dc(rw, path, f)
if err != nil {
return err
}
// Find out what kind of modification happened
if deletedFile != "" {
change.Path = deletedFile
change.Kind = ChangeDelete
} else {
// Otherwise, the file was added
change.Kind = ChangeAdd
// ...Unless it already existed in a top layer, in which case, it's a modification
for _, layer := range layers {
stat, err := os.Stat(filepath.Join(layer, path))
if err != nil && !os.IsNotExist(err) {
return err
}
if err == nil {
// The file existed in the top layer, so that's a modification
// However, if it's a directory, maybe it wasn't actually modified.
// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
if stat.IsDir() && f.IsDir() {
if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
// Both directories are the same, don't record the change
return nil
}
}
change.Kind = ChangeModify
break
}
}
}
// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
// This block is here to ensure the change is recorded even if the
// modify time, mode and size of the parent directory in the rw and ro layers are all equal.
// Check https://github.com/docker/docker/pull/13590 for details.
if f.IsDir() {
changedDirs[path] = struct{}{}
}
if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
parent := filepath.Dir(path)
if _, ok := changedDirs[parent]; !ok && parent != "/" {
changes = append(changes, Change{Path: parent, Kind: ChangeModify})
changedDirs[parent] = struct{}{}
}
}
// Record change
changes = append(changes, change)
return nil
})
if err != nil && !os.IsNotExist(err) {
return nil, err
}
return changes, nil
}
// FileInfo describes the information of a file.
type FileInfo struct {
parent *FileInfo
name string
stat *system.StatT
children map[string]*FileInfo
capability []byte
added bool
}
// LookUp looks up the file information of a file.
func (info *FileInfo) LookUp(path string) *FileInfo {
// As this runs on the daemon side, file paths are OS specific.
parent := info
if path == string(os.PathSeparator) {
return info
}
pathElements := strings.Split(path, string(os.PathSeparator))
for _, elem := range pathElements {
if elem != "" {
child := parent.children[elem]
if child == nil {
return nil
}
parent = child
}
}
return parent
}
func (info *FileInfo) path() string {
if info.parent == nil {
// As this runs on the daemon side, file paths are OS specific.
return string(os.PathSeparator)
}
return filepath.Join(info.parent.path(), info.name)
}
func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
sizeAtEntry := len(*changes)
if oldInfo == nil {
// add
change := Change{
Path: info.path(),
Kind: ChangeAdd,
}
*changes = append(*changes, change)
info.added = true
}
// We make a copy so we can modify it to detect additions
// also, we only recurse on the old dir if the new info is a directory
// otherwise any previous delete/change is considered recursive
oldChildren := make(map[string]*FileInfo)
if oldInfo != nil && info.isDir() {
for k, v := range oldInfo.children {
oldChildren[k] = v
}
}
for name, newChild := range info.children {
oldChild := oldChildren[name]
if oldChild != nil {
// change?
oldStat := oldChild.stat
newStat := newChild.stat
// Note: We can't compare inode or ctime or blocksize here, because these change
// when copying a file into a container. However, that is not generally a problem
// because any content change will change mtime, and any status change should
// be visible when actually comparing the stat fields. The only time this
// breaks down is if some code intentionally hides a change by setting
// back mtime
if statDifferent(oldStat, newStat) ||
!bytes.Equal(oldChild.capability, newChild.capability) {
change := Change{
Path: newChild.path(),
Kind: ChangeModify,
}
*changes = append(*changes, change)
newChild.added = true
}
// Remove from copy so we can detect deletions
delete(oldChildren, name)
}
newChild.addChanges(oldChild, changes)
}
for _, oldChild := range oldChildren {
// delete
change := Change{
Path: oldChild.path(),
Kind: ChangeDelete,
}
*changes = append(*changes, change)
}
// If there were changes inside this directory, we need to add it, even if the directory
// itself wasn't changed. This is needed to properly save and restore filesystem permissions.
// As this runs on the daemon side, file paths are OS specific.
if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
change := Change{
Path: info.path(),
Kind: ChangeModify,
}
// Let's insert the directory entry before the recently added entries located inside this dir
*changes = append(*changes, change) // just to resize the slice, will be overwritten
copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
(*changes)[sizeAtEntry] = change
}
}
// Changes add changes to file information.
func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
var changes []Change
info.addChanges(oldInfo, &changes)
return changes
}
func newRootFileInfo() *FileInfo {
// As this runs on the daemon side, file paths are OS specific.
root := &FileInfo{
name: string(os.PathSeparator),
children: make(map[string]*FileInfo),
}
return root
}
// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
// If oldDir is "", then all files in newDir will be Add-Changes.
func ChangesDirs(newDir, oldDir string) ([]Change, error) {
var (
oldRoot, newRoot *FileInfo
)
if oldDir == "" {
emptyDir, err := ioutil.TempDir("", "empty")
if err != nil {
return nil, err
}
defer os.Remove(emptyDir)
oldDir = emptyDir
}
oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
if err != nil {
return nil, err
}
return newRoot.Changes(oldRoot), nil
}
// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
func ChangesSize(newDir string, changes []Change) int64 {
var (
size int64
sf = make(map[uint64]struct{})
)
for _, change := range changes {
if change.Kind == ChangeModify || change.Kind == ChangeAdd {
file := filepath.Join(newDir, change.Path)
fileInfo, err := os.Lstat(file)
if err != nil {
logrus.Errorf("Can not stat %q: %s", file, err)
continue
}
if fileInfo != nil && !fileInfo.IsDir() {
if hasHardlinks(fileInfo) {
inode := getIno(fileInfo)
if _, ok := sf[inode]; !ok {
size += fileInfo.Size()
sf[inode] = struct{}{}
}
} else {
size += fileInfo.Size()
}
}
}
}
return size
}
// ExportChanges produces an Archive from the provided changes, relative to dir.
func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) {
reader, writer := io.Pipe()
go func() {
ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil)
// this buffer is needed for the duration of this piped stream
defer pools.BufioWriter32KPool.Put(ta.Buffer)
sort.Sort(changesByPath(changes))
// In general we log errors here but ignore them because
// during e.g. a diff operation the container can continue
// mutating the filesystem and we can see transient errors
// from this
for _, change := range changes {
if change.Kind == ChangeDelete {
whiteOutDir := filepath.Dir(change.Path)
whiteOutBase := filepath.Base(change.Path)
whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
timestamp := time.Now()
hdr := &tar.Header{
Name: whiteOut[1:],
Size: 0,
ModTime: timestamp,
AccessTime: timestamp,
ChangeTime: timestamp,
}
if err := ta.TarWriter.WriteHeader(hdr); err != nil {
logrus.Debugf("Can't write whiteout header: %s", err)
}
} else {
path := filepath.Join(dir, change.Path)
if err := ta.addTarFile(path, change.Path[1:]); err != nil {
logrus.Debugf("Can't add file %s to tar: %s", path, err)
}
}
}
// Make sure to check the error on Close.
if err := ta.TarWriter.Close(); err != nil {
logrus.Debugf("Can't close layer: %s", err)
}
if err := writer.Close(); err != nil {
logrus.Debugf("failed close Changes writer: %s", err)
}
}()
return reader, nil
}


@@ -0,0 +1,286 @@
package archive // import "github.com/docker/docker/pkg/archive"
import (
"bytes"
"fmt"
"os"
"path/filepath"
"sort"
"syscall"
"unsafe"
"github.com/docker/docker/pkg/system"
"golang.org/x/sys/unix"
)
// walker is used to implement collectFileInfoForChanges on linux. Where this
// method in general returns the entire contents of two directory trees, we
// optimize some FS calls out on linux. In particular, we take advantage of the
// fact that getdents(2) returns the inode of each file in the directory being
// walked, which, when walking two trees in parallel to generate a list of
// changes, can be used to prune subtrees without ever having to lstat(2) them
// directly. Eliminating stat calls in this way can save up to seconds on large
// images.
type walker struct {
dir1 string
dir2 string
root1 *FileInfo
root2 *FileInfo
}
// collectFileInfoForChanges returns a complete representation of the trees
// rooted at dir1 and dir2, with one important exception: any subtree or
// leaf where the inode and device numbers are an exact match between dir1
// and dir2 will be pruned from the results. This method is *only* to be used
// to generating a list of changes between the two directories, as it does not
// reflect the full contents.
func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
w := &walker{
dir1: dir1,
dir2: dir2,
root1: newRootFileInfo(),
root2: newRootFileInfo(),
}
i1, err := os.Lstat(w.dir1)
if err != nil {
return nil, nil, err
}
i2, err := os.Lstat(w.dir2)
if err != nil {
return nil, nil, err
}
if err := w.walk("/", i1, i2); err != nil {
return nil, nil, err
}
return w.root1, w.root2, nil
}
// Given a FileInfo, its path info, and a reference to the root of the tree
// being constructed, register this file with the tree.
func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
if fi == nil {
return nil
}
parent := root.LookUp(filepath.Dir(path))
if parent == nil {
return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path)
}
info := &FileInfo{
name: filepath.Base(path),
children: make(map[string]*FileInfo),
parent: parent,
}
cpath := filepath.Join(dir, path)
stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
if err != nil {
return err
}
info.stat = stat
info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
parent.children[info.name] = info
return nil
}
// Walk a subtree rooted at the same path in both trees being iterated. For
// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
// Register these nodes with the return trees, unless we're still at the
// (already-created) roots:
if path != "/" {
if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
return err
}
if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
return err
}
}
is1Dir := i1 != nil && i1.IsDir()
is2Dir := i2 != nil && i2.IsDir()
sameDevice := false
if i1 != nil && i2 != nil {
si1 := i1.Sys().(*syscall.Stat_t)
si2 := i2.Sys().(*syscall.Stat_t)
if si1.Dev == si2.Dev {
sameDevice = true
}
}
// If these files are both non-existent, or leaves (non-dirs), we are done.
if !is1Dir && !is2Dir {
return nil
}
// Fetch the names of all the files contained in both directories being walked:
var names1, names2 []nameIno
if is1Dir {
names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
if err != nil {
return err
}
}
if is2Dir {
names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
if err != nil {
return err
}
}
// We have lists of the files contained in both parallel directories, sorted
// in the same order. Walk them in parallel, generating a unique merged list
// of all items present in either or both directories.
var names []string
ix1 := 0
ix2 := 0
for {
if ix1 >= len(names1) {
break
}
if ix2 >= len(names2) {
break
}
ni1 := names1[ix1]
ni2 := names2[ix2]
switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) {
case -1: // ni1 < ni2 -- advance ni1
// we will not encounter ni1 in names2
names = append(names, ni1.name)
ix1++
case 0: // ni1 == ni2
if ni1.ino != ni2.ino || !sameDevice {
names = append(names, ni1.name)
}
ix1++
ix2++
case 1: // ni1 > ni2 -- advance ni2
// we will not encounter ni2 in names1
names = append(names, ni2.name)
ix2++
}
}
for ix1 < len(names1) {
names = append(names, names1[ix1].name)
ix1++
}
for ix2 < len(names2) {
names = append(names, names2[ix2].name)
ix2++
}
// For each of the names present in either or both of the directories being
// iterated, stat the name under each root, and recurse the pair of them:
for _, name := range names {
fname := filepath.Join(path, name)
var cInfo1, cInfo2 os.FileInfo
if is1Dir {
cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
if err != nil && !os.IsNotExist(err) {
return err
}
}
if is2Dir {
cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
if err != nil && !os.IsNotExist(err) {
return err
}
}
if err = w.walk(fname, cInfo1, cInfo2); err != nil {
return err
}
}
return nil
}
// {name,inode} pairs used to support the early-pruning logic of the walker type
type nameIno struct {
name string
ino uint64
}
type nameInoSlice []nameIno
func (s nameInoSlice) Len() int { return len(s) }
func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }
// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
// numbers further up the stack when reading directory contents. Unlike
// os.Readdirnames, which returns a list of filenames, this function returns a
// list of {filename,inode} pairs.
func readdirnames(dirname string) (names []nameIno, err error) {
var (
size = 100
buf = make([]byte, 4096)
nbuf int
bufp int
nb int
)
f, err := os.Open(dirname)
if err != nil {
return nil, err
}
defer f.Close()
names = make([]nameIno, 0, size) // Empty with room to grow.
for {
// Refill the buffer if necessary
if bufp >= nbuf {
bufp = 0
nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux
if nbuf < 0 {
nbuf = 0
}
if err != nil {
return nil, os.NewSyscallError("readdirent", err)
}
if nbuf <= 0 {
break // EOF
}
}
// Drain the buffer
nb, names = parseDirent(buf[bufp:nbuf], names)
bufp += nb
}
sl := nameInoSlice(names)
sort.Sort(sl)
return sl, nil
}
// parseDirent is a minor modification of unix.ParseDirent (linux version)
// which returns {name,inode} pairs instead of just names.
func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
origlen := len(buf)
for len(buf) > 0 {
dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0]))
buf = buf[dirent.Reclen:]
if dirent.Ino == 0 { // File absent in directory.
continue
}
bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
var name = string(bytes[0:clen(bytes[:])])
if name == "." || name == ".." { // Useless names
continue
}
names = append(names, nameIno{name, dirent.Ino})
}
return origlen - len(buf), names
}
func clen(n []byte) int {
for i := 0; i < len(n); i++ {
if n[i] == 0 {
return i
}
}
return len(n)
}


@@ -0,0 +1,97 @@
// +build !linux
package archive // import "github.com/docker/docker/pkg/archive"
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/docker/docker/pkg/system"
)
func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
var (
oldRoot, newRoot *FileInfo
err1, err2 error
errs = make(chan error, 2)
)
go func() {
oldRoot, err1 = collectFileInfo(oldDir)
errs <- err1
}()
go func() {
newRoot, err2 = collectFileInfo(newDir)
errs <- err2
}()
// block until both routines have returned
for i := 0; i < 2; i++ {
if err := <-errs; err != nil {
return nil, nil, err
}
}
return oldRoot, newRoot, nil
}
func collectFileInfo(sourceDir string) (*FileInfo, error) {
root := newRootFileInfo()
err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
// Rebase path
relPath, err := filepath.Rel(sourceDir, path)
if err != nil {
return err
}
// As this runs on the daemon side, file paths are OS specific.
relPath = filepath.Join(string(os.PathSeparator), relPath)
// See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
// Temporary workaround. If the returned path starts with two backslashes,
// trim it down to a single backslash. Only relevant on Windows.
if runtime.GOOS == "windows" {
if strings.HasPrefix(relPath, `\\`) {
relPath = relPath[1:]
}
}
if relPath == string(os.PathSeparator) {
return nil
}
parent := root.LookUp(filepath.Dir(relPath))
if parent == nil {
return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
}
info := &FileInfo{
name: filepath.Base(relPath),
children: make(map[string]*FileInfo),
parent: parent,
}
s, err := system.Lstat(path)
if err != nil {
return err
}
info.stat = s
info.capability, _ = system.Lgetxattr(path, "security.capability")
parent.children[info.name] = info
return nil
})
if err != nil {
return nil, err
}
return root, nil
}


@@ -0,0 +1,43 @@
// +build !windows
package archive // import "github.com/docker/docker/pkg/archive"
import (
"os"
"syscall"
"github.com/docker/docker/pkg/system"
"golang.org/x/sys/unix"
)
func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
// Don't look at size for dirs, its not a good measure of change
if oldStat.Mode() != newStat.Mode() ||
oldStat.UID() != newStat.UID() ||
oldStat.GID() != newStat.GID() ||
oldStat.Rdev() != newStat.Rdev() ||
// Don't look at size or modification time for dirs, its not a good
// measure of change. See https://github.com/moby/moby/issues/9874
// for a description of the issue with modification time, and
// https://github.com/moby/moby/pull/11422 for the change.
// (Note that in the Windows implementation of this function,
// modification time IS taken as a change). See
// https://github.com/moby/moby/pull/37982 for more information.
(oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR &&
(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
return true
}
return false
}
func (info *FileInfo) isDir() bool {
return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0
}
func getIno(fi os.FileInfo) uint64 {
return fi.Sys().(*syscall.Stat_t).Ino
}
func hasHardlinks(fi os.FileInfo) bool {
return fi.Sys().(*syscall.Stat_t).Nlink > 1
}


@@ -0,0 +1,34 @@
package archive // import "github.com/docker/docker/pkg/archive"
import (
"os"
"github.com/docker/docker/pkg/system"
)
func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
// Note there is slight difference between the Linux and Windows
// implementations here. Due to https://github.com/moby/moby/issues/9874,
// and the fix at https://github.com/moby/moby/pull/11422, Linux does not
// consider a change to the directory time as a change. Windows on NTFS
// does. See https://github.com/moby/moby/pull/37982 for more information.
if !sameFsTime(oldStat.Mtim(), newStat.Mtim()) ||
oldStat.Mode() != newStat.Mode() ||
oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() {
return true
}
return false
}
func (info *FileInfo) isDir() bool {
return info.parent == nil || info.stat.Mode().IsDir()
}
func getIno(fi os.FileInfo) (inode uint64) {
return
}
func hasHardlinks(fi os.FileInfo) bool {
return false
}

vendor/github.com/docker/docker/pkg/archive/copy.go (generated, vendored, new file, +487)

@@ -0,0 +1,487 @@
package archive // import "github.com/docker/docker/pkg/archive"
import (
"archive/tar"
"errors"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/docker/docker/pkg/system"
"github.com/sirupsen/logrus"
)
// Errors used or returned by this file.
var (
ErrNotDirectory = errors.New("not a directory")
ErrDirNotExists = errors.New("no such directory")
ErrCannotCopyDir = errors.New("cannot copy directory")
ErrInvalidCopySource = errors.New("invalid copy source content")
)
// PreserveTrailingDotOrSeparator returns the given cleaned path (after
// processing using any utility functions from the path or filepath stdlib
// packages) and appends a trailing `/.` or `/` if its corresponding original
// path (from before being processed by utility functions from the path or
// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
// path already ends in a `.` path segment, then another is not added. If the
// clean path already ends in the separator, then another is not added.
func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string {
// Ensure paths are in platform semantics
cleanedPath = strings.Replace(cleanedPath, "/", string(sep), -1)
originalPath = strings.Replace(originalPath, "/", string(sep), -1)
if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
if !hasTrailingPathSeparator(cleanedPath, sep) {
// Add a separator if it doesn't already end with one (a cleaned
// path would only end in a separator if it is the root).
cleanedPath += string(sep)
}
cleanedPath += "."
}
if !hasTrailingPathSeparator(cleanedPath, sep) && hasTrailingPathSeparator(originalPath, sep) {
cleanedPath += string(sep)
}
return cleanedPath
}
// assertsDirectory returns whether the given path is
// asserted to be a directory, i.e., the path ends with
// a trailing '/' or `/.`, assuming a path separator of `/`.
func assertsDirectory(path string, sep byte) bool {
return hasTrailingPathSeparator(path, sep) || specifiesCurrentDir(path)
}
// hasTrailingPathSeparator returns whether the given
// path ends with the system's path separator character.
func hasTrailingPathSeparator(path string, sep byte) bool {
return len(path) > 0 && path[len(path)-1] == sep
}
// specifiesCurrentDir returns whether the given path specifies
// a "current directory", i.e., the last path segment is `.`.
func specifiesCurrentDir(path string) bool {
return filepath.Base(path) == "."
}
// SplitPathDirEntry splits the given path between its directory name and its
// basename by first cleaning the path but preserves a trailing "." if the
// original path specified the current directory.
func SplitPathDirEntry(path string) (dir, base string) {
cleanedPath := filepath.Clean(filepath.FromSlash(path))
if specifiesCurrentDir(path) {
cleanedPath += string(os.PathSeparator) + "."
}
return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
}
// TarResource archives the resource described by the given CopyInfo to a Tar
// archive. A non-nil error is returned if sourcePath does not exist or is
// asserted to be a directory but exists as another type of file.
//
// This function acts as a convenient wrapper around TarWithOptions, which
// requires a directory as the source path. TarResource accepts either a
// directory or a file path and correctly sets the Tar options.
func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) {
return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
}
// TarResourceRebase is like TarResource but renames the first path element of
// items in the resulting tar archive to match the given rebaseName if not "".
func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) {
sourcePath = normalizePath(sourcePath)
if _, err = os.Lstat(sourcePath); err != nil {
// Catches the case where the source does not exist or is not a
// directory if asserted to be a directory, as this also causes an
// error.
return
}
// Separate the source path between its directory and
// the entry in that directory which we are archiving.
sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
opts := TarResourceRebaseOpts(sourceBase, rebaseName)
logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
return TarWithOptions(sourceDir, opts)
}
// TarResourceRebaseOpts does not preform the Tar, but instead just creates the rebase
// parameters to be sent to TarWithOptions (the TarOptions struct)
func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions {
filter := []string{sourceBase}
return &TarOptions{
Compression: Uncompressed,
IncludeFiles: filter,
IncludeSourceDir: true,
RebaseNames: map[string]string{
sourceBase: rebaseName,
},
}
}
// CopyInfo holds basic info about the source
// or destination path of a copy operation.
type CopyInfo struct {
Path string
Exists bool
IsDir bool
RebaseName string
}
// CopyInfoSourcePath stats the given path to create a CopyInfo
// struct representing that resource for the source of an archive copy
// operation. The given path should be an absolute local path. A source path
// has all symlinks evaluated that appear before the last path separator ("/"
// on Unix). As it is to be a copy source, the path must exist.
func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
// normalize the file path and then evaluate the symbol link
// we will use the target file instead of the symbol link if
// followLink is set
path = normalizePath(path)
resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
if err != nil {
return CopyInfo{}, err
}
stat, err := os.Lstat(resolvedPath)
if err != nil {
return CopyInfo{}, err
}
return CopyInfo{
Path: resolvedPath,
Exists: true,
IsDir: stat.IsDir(),
RebaseName: rebaseName,
}, nil
}
// CopyInfoDestinationPath stats the given path to create a CopyInfo
// struct representing that resource for the destination of an archive copy
// operation. The given path should be an absolute local path.
func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
path = normalizePath(path)
originalPath := path
stat, err := os.Lstat(path)
if err == nil && stat.Mode()&os.ModeSymlink == 0 {
// The path exists and is not a symlink.
return CopyInfo{
Path: path,
Exists: true,
IsDir: stat.IsDir(),
}, nil
}
// While the path is a symlink.
for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
if n > maxSymlinkIter {
// Don't follow symlinks more than this arbitrary number of times.
return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
}
// The path is a symbolic link. We need to evaluate it so that the
// destination of the copy operation is the link target and not the
// link itself. This is notably different than CopyInfoSourcePath which
// only evaluates symlinks before the last appearing path separator.
// Also note that it is okay if the last path element is a broken
// symlink as the copy operation should create the target.
var linkTarget string
linkTarget, err = os.Readlink(path)
if err != nil {
return CopyInfo{}, err
}
if !system.IsAbs(linkTarget) {
// Join with the parent directory.
dstParent, _ := SplitPathDirEntry(path)
linkTarget = filepath.Join(dstParent, linkTarget)
}
path = linkTarget
stat, err = os.Lstat(path)
}
if err != nil {
// It's okay if the destination path doesn't exist. We can still
// continue the copy operation if the parent directory exists.
if !os.IsNotExist(err) {
return CopyInfo{}, err
}
// Ensure destination parent dir exists.
dstParent, _ := SplitPathDirEntry(path)
parentDirStat, err := os.Stat(dstParent)
if err != nil {
return CopyInfo{}, err
}
if !parentDirStat.IsDir() {
return CopyInfo{}, ErrNotDirectory
}
return CopyInfo{Path: path}, nil
}
// The path exists after resolving symlinks.
return CopyInfo{
Path: path,
Exists: true,
IsDir: stat.IsDir(),
}, nil
}
// PrepareArchiveCopy prepares the given srcContent archive, which should
// contain the archived resource described by srcInfo, to the destination
// described by dstInfo. Returns the possibly modified content archive along
// with the path to the destination directory which it should be extracted to.
func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) {
// Ensure in platform semantics
srcInfo.Path = normalizePath(srcInfo.Path)
dstInfo.Path = normalizePath(dstInfo.Path)
// Separate the destination path between its directory and base
// components in case the source archive contents need to be rebased.
dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
_, srcBase := SplitPathDirEntry(srcInfo.Path)
switch {
case dstInfo.Exists && dstInfo.IsDir:
// The destination exists as a directory. No alteration
// to srcContent is needed as its contents can be
// simply extracted to the destination directory.
return dstInfo.Path, ioutil.NopCloser(srcContent), nil
case dstInfo.Exists && srcInfo.IsDir:
// The destination exists as some type of file and the source
// content is a directory. This is an error condition since
// you cannot copy a directory to an existing file location.
return "", nil, ErrCannotCopyDir
case dstInfo.Exists:
// The destination exists as some type of file and the source content
// is also a file. The source content entry will have to be renamed to
// have a basename which matches the destination path's basename.
if len(srcInfo.RebaseName) != 0 {
srcBase = srcInfo.RebaseName
}
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
case srcInfo.IsDir:
// The destination does not exist and the source content is an archive
// of a directory. The archive should be extracted to the parent of
// the destination path instead, and when it is, the directory that is
// created as a result should take the name of the destination path.
// The source content entries will have to be renamed to have a
// basename which matches the destination path's basename.
if len(srcInfo.RebaseName) != 0 {
srcBase = srcInfo.RebaseName
}
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
case assertsDirectory(dstInfo.Path, os.PathSeparator):
// The destination does not exist and is asserted to be created as a
// directory, but the source content is not a directory. This is an
// error condition since you cannot create a directory from a file
// source.
return "", nil, ErrDirNotExists
default:
// The last remaining case is when the destination does not exist, is
// not asserted to be a directory, and the source content is not an
// archive of a directory. It this case, the destination file will need
// to be created when the archive is extracted and the source content
// entry will have to be renamed to have a basename which matches the
// destination path's basename.
if len(srcInfo.RebaseName) != 0 {
srcBase = srcInfo.RebaseName
}
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
}
}
// RebaseArchiveEntries rewrites the given srcContent archive replacing
// an occurrence of oldBase with newBase at the beginning of entry names.
func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
if oldBase == string(os.PathSeparator) {
// If oldBase specifies the root directory, use an empty string as
// oldBase instead so that newBase doesn't replace the path separator
// that all paths will start with.
oldBase = ""
}
rebased, w := io.Pipe()
go func() {
srcTar := tar.NewReader(srcContent)
rebasedTar := tar.NewWriter(w)
for {
hdr, err := srcTar.Next()
if err == io.EOF {
// Signals end of archive.
rebasedTar.Close()
w.Close()
return
}
if err != nil {
w.CloseWithError(err)
return
}
// srcContent tar stream, as served by TarWithOptions(), is
// definitely in PAX format, but tar.Next() mistakenly guesses it
// as USTAR, which creates a problem: if the newBase is >100
// characters long, WriteHeader() returns an error like
// "archive/tar: cannot encode header: Format specifies USTAR; and USTAR cannot encode Name=...".
//
// To fix, set the format to PAX here. See docker/for-linux issue #484.
hdr.Format = tar.FormatPAX
hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
if hdr.Typeflag == tar.TypeLink {
hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1)
}
if err = rebasedTar.WriteHeader(hdr); err != nil {
w.CloseWithError(err)
return
}
// Ignoring GoSec G110. See https://github.com/securego/gosec/pull/433
// and https://cure53.de/pentest-report_opa.pdf, which recommends to
// replace io.Copy with io.CopyN7. The latter allows to specify the
// maximum number of bytes that should be read. By properly defining
// the limit, it can be assured that a GZip compression bomb cannot
// easily cause a Denial-of-Service.
// After reviewing with @tonistiigi and @cpuguy83, this should not
// affect us, because here we do not read into memory, hence should
// not be vulnerable to this code consuming memory.
//nolint:gosec // G110: Potential DoS vulnerability via decompression bomb (gosec)
if _, err = io.Copy(rebasedTar, srcTar); err != nil {
w.CloseWithError(err)
return
}
}
}()
return rebased
}
// CopyResource performs an archive copy from the given source path to the
// given destination path. The source path MUST exist and the destination
// path's parent directory must exist.
func CopyResource(srcPath, dstPath string, followLink bool) error {
var (
srcInfo CopyInfo
err error
)
// Ensure in platform semantics
srcPath = normalizePath(srcPath)
dstPath = normalizePath(dstPath)
// Clean the source and destination paths.
srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath, os.PathSeparator)
dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath, os.PathSeparator)
if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
return err
}
content, err := TarResource(srcInfo)
if err != nil {
return err
}
defer content.Close()
return CopyTo(content, srcInfo, dstPath)
}
// CopyTo handles extracting the given content whose
// entries should be sourced from srcInfo to dstPath.
func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
// The destination path need not exist, but CopyInfoDestinationPath will
// ensure that at least the parent directory exists.
dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
if err != nil {
return err
}
dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
if err != nil {
return err
}
defer copyArchive.Close()
options := &TarOptions{
NoLchown: true,
NoOverwriteDirNonDir: true,
}
return Untar(copyArchive, dstDir, options)
}
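A short usage sketch for CopyResource follows; the source and destination paths are hypothetical, and, as the doc comment states, the destination's parent directory must already exist:

package main

import (
	"log"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Copy /tmp/src/certs into /tmp/dst, resolving symlinks in the
	// source path first (followLink = true). Paths are hypothetical.
	if err := archive.CopyResource("/tmp/src/certs", "/tmp/dst", true); err != nil {
		log.Fatal(err)
	}
}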
// ResolveHostSourcePath decides the real path to be copied, given parameters
// such as whether to follow a symlink. If followLink is true, resolvedPath
// will be the link target of any symlink file; otherwise only symlinks in the
// parent directory are resolved, and a symlink file itself is returned
// without being resolved.
func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
if followLink {
resolvedPath, err = filepath.EvalSymlinks(path)
if err != nil {
return
}
resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
} else {
dirPath, basePath := filepath.Split(path)
// if not following the symlink, resolve the symlink of the parent dir
var resolvedDirPath string
resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
if err != nil {
return
}
// resolvedDirPath will have been cleaned (no trailing path separators) so
// we can manually join it with the base path element.
resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
if hasTrailingPathSeparator(path, os.PathSeparator) &&
filepath.Base(path) != filepath.Base(resolvedPath) {
rebaseName = filepath.Base(path)
}
}
return resolvedPath, rebaseName, nil
}
// GetRebaseName normalizes and compares path and resolvedPath, returning the
// completed resolved path and the rebased file name.
func GetRebaseName(path, resolvedPath string) (string, string) {
// resolvedPath will have been cleaned (no trailing path separator or dot), so
// we manually re-append them below when the requested path had them
var rebaseName string
if specifiesCurrentDir(path) &&
!specifiesCurrentDir(resolvedPath) {
resolvedPath += string(filepath.Separator) + "."
}
if hasTrailingPathSeparator(path, os.PathSeparator) &&
!hasTrailingPathSeparator(resolvedPath, os.PathSeparator) {
resolvedPath += string(filepath.Separator)
}
if filepath.Base(path) != filepath.Base(resolvedPath) {
// In the case where the path had a trailing separator and a symlink
// evaluation has changed the last path component, we will need to
// rebase the name in the archive that is being copied to match the
// originally requested name.
rebaseName = filepath.Base(path)
}
return resolvedPath, rebaseName
}
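The rebase-name logic is easiest to see with a concrete, hypothetical symlink: assuming /data/latest is a symlink that EvalSymlinks resolved to /data/v2, this sketch shows what GetRebaseName returns on Linux:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// The trailing separator from the requested path is preserved, and
	// "latest" is returned so archive entries can be renamed back to the
	// originally requested name. Paths are hypothetical.
	resolved, rebase := archive.GetRebaseName("/data/latest/", "/data/v2")
	fmt.Println(resolved) // /data/v2/
	fmt.Println(rebase)   // latest
}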

@@ -0,0 +1,11 @@
// +build !windows
package archive // import "github.com/docker/docker/pkg/archive"
import (
"path/filepath"
)
func normalizePath(path string) string {
return filepath.ToSlash(path)
}

@@ -0,0 +1,9 @@
package archive // import "github.com/docker/docker/pkg/archive"
import (
"path/filepath"
)
func normalizePath(path string) string {
return filepath.FromSlash(path)
}

vendor/github.com/docker/docker/pkg/archive/diff.go generated vendored Normal file
@@ -0,0 +1,260 @@
package archive // import "github.com/docker/docker/pkg/archive"
import (
"archive/tar"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/system"
"github.com/sirupsen/logrus"
)
// UnpackLayer unpacks `layer` to `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) {
tr := tar.NewReader(layer)
trBuf := pools.BufioReader32KPool.Get(tr)
defer pools.BufioReader32KPool.Put(trBuf)
var dirs []*tar.Header
unpackedPaths := make(map[string]struct{})
if options == nil {
options = &TarOptions{}
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
}
idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
aufsTempdir := ""
aufsHardlinks := make(map[string]*tar.Header)
// Iterate through the files in the archive.
for {
hdr, err := tr.Next()
if err == io.EOF {
// end of tar archive
break
}
if err != nil {
return 0, err
}
size += hdr.Size
// Normalize name, for safety and for a simple is-root check
hdr.Name = filepath.Clean(hdr.Name)
// Windows does not support filenames with colons in them. Ignore
// these files. This is not a problem though (although it might
// appear that it is). Let's suppose a client is running docker pull.
// The daemon it points to is Windows. Would it make sense for the
// client to be doing a docker pull Ubuntu for example (which has files
// with colons in the name under /usr/share/man/man3)? No, absolutely
// not as it would really only make sense that they were pulling a
// Windows image. However, for development, it is necessary to be able
// to pull Linux images which are in the repository.
//
// TODO Windows. Once the registry is aware of what images are Windows-
// specific or Linux-specific, this warning should be changed to an error
// to cater for the situation where someone does manage to upload a Linux
// image but have it tagged as Windows inadvertently.
if runtime.GOOS == "windows" {
if strings.Contains(hdr.Name, ":") {
logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
continue
}
}
// Note: as these operations are platform-specific, so must the slash be.
if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
// Not the root directory, ensure that the parent directory exists.
// This happened in some tests where an image had a tarfile without any
// parent directories.
parent := filepath.Dir(hdr.Name)
parentPath := filepath.Join(dest, parent)
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
err = system.MkdirAll(parentPath, 0600)
if err != nil {
return 0, err
}
}
}
// Skip AUFS metadata dirs
if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
// Regular files inside /.wh..wh.plnk can be used as hardlink targets
// We don't want this directory, but we need the files in it so that
// such hardlinks can be resolved.
if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
basename := filepath.Base(hdr.Name)
aufsHardlinks[basename] = hdr
if aufsTempdir == "" {
if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
return 0, err
}
defer os.RemoveAll(aufsTempdir)
}
if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil {
return 0, err
}
}
if hdr.Name != WhiteoutOpaqueDir {
continue
}
}
path := filepath.Join(dest, hdr.Name)
rel, err := filepath.Rel(dest, path)
if err != nil {
return 0, err
}
// Note: as these operations are platform-specific, so must the slash be.
if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
}
base := filepath.Base(path)
if strings.HasPrefix(base, WhiteoutPrefix) {
dir := filepath.Dir(path)
if base == WhiteoutOpaqueDir {
_, err := os.Lstat(dir)
if err != nil {
return 0, err
}
err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
if os.IsNotExist(err) {
err = nil // parent was deleted
}
return err
}
if path == dir {
return nil
}
if _, exists := unpackedPaths[path]; !exists {
err := os.RemoveAll(path)
return err
}
return nil
})
if err != nil {
return 0, err
}
} else {
originalBase := base[len(WhiteoutPrefix):]
originalPath := filepath.Join(dir, originalBase)
if err := os.RemoveAll(originalPath); err != nil {
return 0, err
}
}
} else {
// If path exists we almost always just want to remove and replace it.
// The only exception is when it is a directory *and* the file from
// the layer is also a directory. Then we want to merge them (i.e.
// just apply the metadata from the layer).
if fi, err := os.Lstat(path); err == nil {
if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
if err := os.RemoveAll(path); err != nil {
return 0, err
}
}
}
trBuf.Reset(tr)
srcData := io.Reader(trBuf)
srcHdr := hdr
// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
// we manually retarget these into the temporary files we extracted them into
if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
linkBasename := filepath.Base(hdr.Linkname)
srcHdr = aufsHardlinks[linkBasename]
if srcHdr == nil {
return 0, fmt.Errorf("Invalid aufs hardlink")
}
tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
if err != nil {
return 0, err
}
defer tmpFile.Close()
srcData = tmpFile
}
if err := remapIDs(idMapping, srcHdr); err != nil {
return 0, err
}
if err := createTarFile(path, dest, srcHdr, srcData, !options.NoLchown, nil, options.InUserNS); err != nil {
return 0, err
}
// Directory mtimes must be handled at the end to avoid further
// file creation in them to modify the directory mtime
if hdr.Typeflag == tar.TypeDir {
dirs = append(dirs, hdr)
}
unpackedPaths[path] = struct{}{}
}
}
for _, hdr := range dirs {
path := filepath.Join(dest, hdr.Name)
if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
return 0, err
}
}
return size, nil
}
// ApplyLayer parses a diff in the standard layer format from `layer`,
// and applies it to the directory `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyLayer(dest string, layer io.Reader) (int64, error) {
return applyLayerHandler(dest, layer, &TarOptions{}, true)
}
// ApplyUncompressedLayer parses a diff in the standard layer format from
// `layer`, and applies it to the directory `dest`. The stream `layer`
// can only be uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) {
return applyLayerHandler(dest, layer, options, false)
}
// do the bulk load of ApplyLayer, but allow for not calling DecompressStream
func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) {
dest = filepath.Clean(dest)
// We need to be able to set any perms
if runtime.GOOS != "windows" {
oldmask, err := system.Umask(0)
if err != nil {
return 0, err
}
defer system.Umask(oldmask)
}
if decompress {
decompLayer, err := DecompressStream(layer)
if err != nil {
return 0, err
}
defer decompLayer.Close()
layer = decompLayer
}
return UnpackLayer(dest, layer, options)
}
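A hedged usage sketch for ApplyLayer; the layer tarball and rootfs paths are hypothetical, and the stream may be compressed since applyLayerHandler decompresses it before unpacking:

package main

import (
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Apply a (possibly gzipped) layer tarball to a root directory.
	f, err := os.Open("/tmp/layer.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	size, err := archive.ApplyLayer("/tmp/rootfs", f)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("unpacked %d bytes", size)
}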

@@ -0,0 +1,16 @@
package archive // import "github.com/docker/docker/pkg/archive"
import (
"syscall"
"time"
)
func timeToTimespec(time time.Time) (ts syscall.Timespec) {
if time.IsZero() {
// Return UTIME_OMIT special value
ts.Sec = 0
ts.Nsec = (1 << 30) - 2
return
}
return syscall.NsecToTimespec(time.UnixNano())
}

@@ -0,0 +1,16 @@
// +build !linux
package archive // import "github.com/docker/docker/pkg/archive"
import (
"syscall"
"time"
)
func timeToTimespec(time time.Time) (ts syscall.Timespec) {
nsec := int64(0)
if !time.IsZero() {
nsec = time.UnixNano()
}
return syscall.NsecToTimespec(nsec)
}

@@ -0,0 +1,23 @@
package archive // import "github.com/docker/docker/pkg/archive"
// Whiteouts are files with a special meaning for the layered filesystem.
// Docker uses AUFS whiteout files inside exported archives. In other
// filesystems these files are generated/handled on tar creation/extraction.
// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a
// filename this means that file has been removed from the base layer.
const WhiteoutPrefix = ".wh."
// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
// for removing an actual file. Normally these files are excluded from exported
// archives.
const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
// layers. Normally these should not go into exported archives and all changed
// hardlinks should be copied to the top layer.
const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"
// WhiteoutOpaqueDir file means directory has been made opaque - meaning
// readdir calls to this directory do not follow to lower layers.
const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
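To make the whiteout conventions concrete, a small sketch that interprets two hypothetical layer entries using the constants above:

package main

import (
	"fmt"
	"path/filepath"
	"strings"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Hypothetical layer entries: the first deletes etc/foo.txt from the
	// lower layer, the second marks etc/ as opaque.
	entries := []string{"etc/.wh.foo.txt", "etc/" + archive.WhiteoutOpaqueDir}
	for _, name := range entries {
		base := filepath.Base(name)
		switch {
		case base == archive.WhiteoutOpaqueDir:
			fmt.Println(name, "-> directory is opaque")
		case strings.HasPrefix(base, archive.WhiteoutPrefix):
			orig := filepath.Join(filepath.Dir(name), base[len(archive.WhiteoutPrefix):])
			fmt.Println(name, "-> deletes", orig)
		}
	}
}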

vendor/github.com/docker/docker/pkg/archive/wrap.go generated vendored Normal file
@@ -0,0 +1,59 @@
package archive // import "github.com/docker/docker/pkg/archive"
import (
"archive/tar"
"bytes"
"io"
)
// Generate generates a new archive from the content provided
// as input.
//
// `files` is a sequence of path/content pairs. A new file is
// added to the archive for each pair.
// If the last pair is incomplete, the file is created with empty
// content. For example:
//
// Generate("foo.txt", "hello world", "emptyfile")
//
// The above call will return an archive with 2 files:
//  * ./foo.txt with content "hello world"
//  * ./emptyfile with empty content
//
// FIXME: stream content instead of buffering
// FIXME: specify permissions and other archive metadata
func Generate(input ...string) (io.Reader, error) {
files := parseStringPairs(input...)
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
for _, file := range files {
name, content := file[0], file[1]
hdr := &tar.Header{
Name: name,
Size: int64(len(content)),
}
if err := tw.WriteHeader(hdr); err != nil {
return nil, err
}
if _, err := tw.Write([]byte(content)); err != nil {
return nil, err
}
}
if err := tw.Close(); err != nil {
return nil, err
}
return buf, nil
}
func parseStringPairs(input ...string) (output [][2]string) {
output = make([][2]string, 0, len(input)/2+1)
for i := 0; i < len(input); i += 2 {
var pair [2]string
pair[0] = input[i]
if i+1 < len(input) {
pair[1] = input[i+1]
}
output = append(output, pair)
}
return
}
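A brief usage sketch for Generate; the file names and contents are arbitrary examples:

package main

import (
	"fmt"
	"io"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Two files: foo.txt with content, emptyfile with empty content.
	rdr, err := archive.Generate("foo.txt", "hello world", "emptyfile")
	if err != nil {
		panic(err)
	}
	data, err := io.ReadAll(rdr)
	if err != nil {
		panic(err)
	}
	fmt.Printf("archive is %d bytes\n", len(data))
}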

vendor/github.com/docker/docker/pkg/idtools/idtools.go generated vendored Normal file
@@ -0,0 +1,241 @@
package idtools // import "github.com/docker/docker/pkg/idtools"
import (
"bufio"
"fmt"
"os"
"strconv"
"strings"
)
// IDMap contains a single entry for user namespace range remapping. An array
// of IDMap entries represents the structure that will be provided to the Linux
// kernel for creating a user namespace.
type IDMap struct {
ContainerID int `json:"container_id"`
HostID int `json:"host_id"`
Size int `json:"size"`
}
type subIDRange struct {
Start int
Length int
}
type ranges []subIDRange
func (e ranges) Len() int { return len(e) }
func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start }
const (
subuidFileName = "/etc/subuid"
subgidFileName = "/etc/subgid"
)
// MkdirAllAndChown creates a directory (including any along the path) and then
// modifies ownership to the requested uid/gid. If the directory already
// exists, this function will still change ownership and permissions.
func MkdirAllAndChown(path string, mode os.FileMode, owner Identity) error {
return mkdirAs(path, mode, owner, true, true)
}
// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid.
// If the directory already exists, this function still changes ownership and permissions.
// Note that unlike os.Mkdir(), this function does not return IsExist error
// in case path already exists.
func MkdirAndChown(path string, mode os.FileMode, owner Identity) error {
return mkdirAs(path, mode, owner, false, true)
}
// MkdirAllAndChownNew creates a directory (including any along the path) and
// then modifies ownership ONLY of newly created directories to the requested
// uid/gid. If the directories along the path exist, no change of ownership or
// permissions will be performed.
func MkdirAllAndChownNew(path string, mode os.FileMode, owner Identity) error {
return mkdirAs(path, mode, owner, true, false)
}
// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
// If the maps are empty, then the root uid/gid will default to "real" 0/0
func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
uid, err := toHost(0, uidMap)
if err != nil {
return -1, -1, err
}
gid, err := toHost(0, gidMap)
if err != nil {
return -1, -1, err
}
return uid, gid, nil
}
// toContainer takes an id mapping, and uses it to translate a
// host ID to the remapped ID. If no map is provided, then the translation
// assumes a 1-to-1 mapping and returns the passed in id
func toContainer(hostID int, idMap []IDMap) (int, error) {
if idMap == nil {
return hostID, nil
}
for _, m := range idMap {
if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) {
contID := m.ContainerID + (hostID - m.HostID)
return contID, nil
}
}
return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
}
// toHost takes an id mapping and a remapped ID, and translates the
// ID to the mapped host ID. If no map is provided, then the translation
// assumes a 1-to-1 mapping and returns the passed in id
func toHost(contID int, idMap []IDMap) (int, error) {
if idMap == nil {
return contID, nil
}
for _, m := range idMap {
if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) {
hostID := m.HostID + (contID - m.ContainerID)
return hostID, nil
}
}
return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
}
// Identity is either a UID and GID pair or a SID (but not both)
type Identity struct {
UID int
GID int
SID string
}
// IdentityMapping contains the mappings of UIDs and GIDs
type IdentityMapping struct {
uids []IDMap
gids []IDMap
}
// NewIDMappingsFromMaps creates a new mapping from two slices
// Deprecated: this is a temporary shim while transitioning to IDMapping
func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IdentityMapping {
return &IdentityMapping{uids: uids, gids: gids}
}
// RootPair returns a uid and gid pair for the root user. The error is ignored
// because a root user always exists, and the defaults are correct when the uid
// and gid maps are empty.
func (i *IdentityMapping) RootPair() Identity {
uid, gid, _ := GetRootUIDGID(i.uids, i.gids)
return Identity{UID: uid, GID: gid}
}
// ToHost returns the host UID and GID for the container uid, gid.
// Remapping is only performed if the ids aren't already the remapped root ids
func (i *IdentityMapping) ToHost(pair Identity) (Identity, error) {
var err error
target := i.RootPair()
if pair.UID != target.UID {
target.UID, err = toHost(pair.UID, i.uids)
if err != nil {
return target, err
}
}
if pair.GID != target.GID {
target.GID, err = toHost(pair.GID, i.gids)
}
return target, err
}
// ToContainer returns the container UID and GID for the host uid and gid
func (i *IdentityMapping) ToContainer(pair Identity) (int, int, error) {
uid, err := toContainer(pair.UID, i.uids)
if err != nil {
return -1, -1, err
}
gid, err := toContainer(pair.GID, i.gids)
return uid, gid, err
}
// Empty returns true if there are no id mappings
func (i *IdentityMapping) Empty() bool {
return len(i.uids) == 0 && len(i.gids) == 0
}
// UIDs return the UID mapping
// TODO: remove this once everything has been refactored to use pairs
func (i *IdentityMapping) UIDs() []IDMap {
return i.uids
}
// GIDs return the GID mapping
// TODO: remove this once everything has been refactored to use pairs
func (i *IdentityMapping) GIDs() []IDMap {
return i.gids
}
func createIDMap(subidRanges ranges) []IDMap {
idMap := []IDMap{}
containerID := 0
for _, idrange := range subidRanges {
idMap = append(idMap, IDMap{
ContainerID: containerID,
HostID: idrange.Start,
Size: idrange.Length,
})
containerID = containerID + idrange.Length
}
return idMap
}
func parseSubuid(username string) (ranges, error) {
return parseSubidFile(subuidFileName, username)
}
func parseSubgid(username string) (ranges, error) {
return parseSubidFile(subgidFileName, username)
}
// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid)
// and return all found ranges for a specified username. If the special value
// "ALL" is supplied for username, then all ranges in the file will be returned
func parseSubidFile(path, username string) (ranges, error) {
var rangeList ranges
subidFile, err := os.Open(path)
if err != nil {
return rangeList, err
}
defer subidFile.Close()
s := bufio.NewScanner(subidFile)
for s.Scan() {
text := strings.TrimSpace(s.Text())
if text == "" || strings.HasPrefix(text, "#") {
continue
}
parts := strings.Split(text, ":")
if len(parts) != 3 {
return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path)
}
if parts[0] == username || username == "ALL" {
startid, err := strconv.Atoi(parts[1])
if err != nil {
return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
}
length, err := strconv.Atoi(parts[2])
if err != nil {
return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
}
rangeList = append(rangeList, subIDRange{startid, length})
}
}
return rangeList, s.Err()
}
// CurrentIdentity returns the identity of the current process
func CurrentIdentity() Identity {
return Identity{UID: os.Getuid(), GID: os.Getegid()}
}
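A minimal sketch of how these translations compose, assuming a single hypothetical remapping of container IDs 0-65535 onto host IDs starting at 100000:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/idtools"
)

func main() {
	// One remapping: container IDs 0-65535 live at host IDs 100000+.
	m := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	mapping := idtools.NewIDMappingsFromMaps(m, m)

	// Remapped root: container 0/0 is host 100000/100000.
	root := mapping.RootPair()
	fmt.Println(root.UID, root.GID) // 100000 100000

	// Container uid/gid 1000 translates to host 101000.
	host, err := mapping.ToHost(idtools.Identity{UID: 1000, GID: 1000})
	if err != nil {
		panic(err)
	}
	fmt.Println(host.UID, host.GID) // 101000 101000
}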

@@ -0,0 +1,295 @@
// +build !windows
package idtools // import "github.com/docker/docker/pkg/idtools"
import (
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"strconv"
"sync"
"syscall"
"github.com/docker/docker/pkg/system"
"github.com/opencontainers/runc/libcontainer/user"
"github.com/pkg/errors"
)
var (
entOnce sync.Once
getentCmd string
)
func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error {
// make an array containing the original path asked for, plus (for mkAll == true)
// all path components leading up to the complete path that don't exist before we MkdirAll
// so that we can chown all of them properly at the end. If chownExisting is false, we won't
// chown the full directory path if it exists
var paths []string
stat, err := system.Stat(path)
if err == nil {
if !stat.IsDir() {
return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
}
if !chownExisting {
return nil
}
// short-circuit--we were called with an existing directory and chown was requested
return setPermissions(path, mode, owner.UID, owner.GID, stat)
}
if os.IsNotExist(err) {
paths = []string{path}
}
if mkAll {
// walk back to "/" looking for directories which do not exist
// and add them to the paths array for chown after creation
dirPath := path
for {
dirPath = filepath.Dir(dirPath)
if dirPath == "/" {
break
}
if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) {
paths = append(paths, dirPath)
}
}
if err := system.MkdirAll(path, mode); err != nil {
return err
}
} else {
if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) {
return err
}
}
// even if it existed, we will chown the requested path + any subpaths that
// didn't exist when we called MkdirAll
for _, pathComponent := range paths {
if err := setPermissions(pathComponent, mode, owner.UID, owner.GID, nil); err != nil {
return err
}
}
return nil
}
// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
// if that uid, gid pair has access (execute bit) to the directory
func CanAccess(path string, pair Identity) bool {
statInfo, err := system.Stat(path)
if err != nil {
return false
}
fileMode := os.FileMode(statInfo.Mode())
permBits := fileMode.Perm()
return accessible(statInfo.UID() == uint32(pair.UID),
statInfo.GID() == uint32(pair.GID), permBits)
}
func accessible(isOwner, isGroup bool, perms os.FileMode) bool {
if isOwner && (perms&0100 == 0100) {
return true
}
if isGroup && (perms&0010 == 0010) {
return true
}
if perms&0001 == 0001 {
return true
}
return false
}
// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username,
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
func LookupUser(name string) (user.User, error) {
// first try a local system files lookup using existing capabilities
usr, err := user.LookupUser(name)
if err == nil {
return usr, nil
}
// local files lookup failed; attempt to call `getent` to query configured passwd dbs
usr, err = getentUser(name)
if err != nil {
return user.User{}, err
}
return usr, nil
}
// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid,
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
func LookupUID(uid int) (user.User, error) {
// first try a local system files lookup using existing capabilities
usr, err := user.LookupUid(uid)
if err == nil {
return usr, nil
}
// local files lookup failed; attempt to call `getent` to query configured passwd dbs
return getentUser(strconv.Itoa(uid))
}
func getentUser(name string) (user.User, error) {
reader, err := callGetent("passwd", name)
if err != nil {
return user.User{}, err
}
users, err := user.ParsePasswd(reader)
if err != nil {
return user.User{}, err
}
if len(users) == 0 {
return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", name)
}
return users[0], nil
}
// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name,
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
func LookupGroup(name string) (user.Group, error) {
// first try a local system files lookup using existing capabilities
group, err := user.LookupGroup(name)
if err == nil {
return group, nil
}
// local files lookup failed; attempt to call `getent` to query configured group dbs
return getentGroup(name)
}
// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID,
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
func LookupGID(gid int) (user.Group, error) {
// first try a local system files lookup using existing capabilities
group, err := user.LookupGid(gid)
if err == nil {
return group, nil
}
// local files lookup failed; attempt to call `getent` to query configured group dbs
return getentGroup(strconv.Itoa(gid))
}
func getentGroup(name string) (user.Group, error) {
reader, err := callGetent("group", name)
if err != nil {
return user.Group{}, err
}
groups, err := user.ParseGroup(reader)
if err != nil {
return user.Group{}, err
}
if len(groups) == 0 {
return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", name)
}
return groups[0], nil
}
func callGetent(database, key string) (io.Reader, error) {
entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") })
// if no `getent` command on host, can't do anything else
if getentCmd == "" {
return nil, fmt.Errorf("unable to find getent command")
}
out, err := execCmd(getentCmd, database, key)
if err != nil {
exitCode, errC := system.GetExitCode(err)
if errC != nil {
return nil, err
}
switch exitCode {
case 1:
return nil, fmt.Errorf("getent reported invalid parameters/database unknown")
case 2:
return nil, fmt.Errorf("getent unable to find entry %q in %s database", key, database)
case 3:
return nil, fmt.Errorf("getent database doesn't support enumeration")
default:
return nil, err
}
}
return bytes.NewReader(out), nil
}
// setPermissions performs a chown/chmod only if the uid/gid don't match what's requested
// Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the
// dir is on an NFS share, so don't call chown unless we absolutely must.
// Likewise for setting permissions.
func setPermissions(p string, mode os.FileMode, uid, gid int, stat *system.StatT) error {
if stat == nil {
var err error
stat, err = system.Stat(p)
if err != nil {
return err
}
}
if os.FileMode(stat.Mode()).Perm() != mode.Perm() {
if err := os.Chmod(p, mode.Perm()); err != nil {
return err
}
}
if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) {
return nil
}
return os.Chown(p, uid, gid)
}
// NewIdentityMapping takes a requested username and
// using the data from /etc/sub{uid,gid} ranges, creates the
// proper uid and gid remapping ranges for that user/group pair
func NewIdentityMapping(name string) (*IdentityMapping, error) {
usr, err := LookupUser(name)
if err != nil {
return nil, fmt.Errorf("Could not get user for username %s: %v", name, err)
}
subuidRanges, err := lookupSubUIDRanges(usr)
if err != nil {
return nil, err
}
subgidRanges, err := lookupSubGIDRanges(usr)
if err != nil {
return nil, err
}
return &IdentityMapping{
uids: subuidRanges,
gids: subgidRanges,
}, nil
}
func lookupSubUIDRanges(usr user.User) ([]IDMap, error) {
rangeList, err := parseSubuid(strconv.Itoa(usr.Uid))
if err != nil {
return nil, err
}
if len(rangeList) == 0 {
rangeList, err = parseSubuid(usr.Name)
if err != nil {
return nil, err
}
}
if len(rangeList) == 0 {
return nil, errors.Errorf("no subuid ranges found for user %q", usr.Name)
}
return createIDMap(rangeList), nil
}
func lookupSubGIDRanges(usr user.User) ([]IDMap, error) {
rangeList, err := parseSubgid(strconv.Itoa(usr.Uid))
if err != nil {
return nil, err
}
if len(rangeList) == 0 {
rangeList, err = parseSubgid(usr.Name)
if err != nil {
return nil, err
}
}
if len(rangeList) == 0 {
return nil, errors.Errorf("no subgid ranges found for user %q", usr.Name)
}
return createIDMap(rangeList), nil
}
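A short usage sketch for CanAccess on a Unix host; the path and identity are hypothetical, and the directory must exist for the underlying stat to succeed:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/idtools"
)

func main() {
	// Check whether uid/gid 1000 can traverse (execute bit) the directory.
	ok := idtools.CanAccess("/var/lib/docker", idtools.Identity{UID: 1000, GID: 1000})
	fmt.Println("accessible:", ok)
}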

@@ -0,0 +1,34 @@
package idtools // import "github.com/docker/docker/pkg/idtools"
import (
"os"
"github.com/docker/docker/pkg/system"
)
const (
SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege"
)
const (
ContainerAdministratorSidString = "S-1-5-93-2-1"
ContainerUserSidString = "S-1-5-93-2-2"
)
// This is currently a wrapper around MkdirAll; since permissions aren't set
// through this path, the identity isn't utilized. Ownership is handled
// elsewhere, but in the future could be supported here too.
func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error {
if err := system.MkdirAll(path, mode); err != nil {
return err
}
return nil
}
// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
// if that uid, gid pair has access (execute bit) to the directory
// Windows does not require/support this function, so always return true
func CanAccess(path string, identity Identity) bool {
return true
}

@@ -0,0 +1,164 @@
package idtools // import "github.com/docker/docker/pkg/idtools"
import (
"fmt"
"regexp"
"sort"
"strconv"
"strings"
"sync"
)
// add a user and/or group to Linux /etc/passwd, /etc/group using standard
// Linux distribution commands:
// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group <username>
// useradd -r -s /bin/false <username>
var (
once sync.Once
userCommand string
idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`)
)
const (
// default length for a UID/GID subordinate range
defaultRangeLen = 65536
defaultRangeStart = 100000
)
// AddNamespaceRangesUser takes a username and uses the standard system
// utility to create a system user/group pair used to hold the
// /etc/sub{uid,gid} ranges which will be used for user namespace
// mapping ranges in containers.
func AddNamespaceRangesUser(name string) (int, int, error) {
if err := addUser(name); err != nil {
return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err)
}
// Query the system for the created uid and gid pair
out, err := execCmd("id", name)
if err != nil {
return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err)
}
matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out)))
if len(matches) != 3 {
return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out))
}
uid, err := strconv.Atoi(matches[1])
if err != nil {
return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err)
}
gid, err := strconv.Atoi(matches[2])
if err != nil {
return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err)
}
// Now we need to create the subuid/subgid ranges for our new user/group (system users
// do not get auto-created ranges in subuid/subgid)
if err := createSubordinateRanges(name); err != nil {
return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err)
}
return uid, gid, nil
}
func addUser(name string) error {
once.Do(func() {
// set up which commands are used for adding users/groups dependent on distro
if _, err := resolveBinary("adduser"); err == nil {
userCommand = "adduser"
} else if _, err := resolveBinary("useradd"); err == nil {
userCommand = "useradd"
}
})
var args []string
switch userCommand {
case "adduser":
args = []string{"--system", "--shell", "/bin/false", "--no-create-home", "--disabled-login", "--disabled-password", "--group", name}
case "useradd":
args = []string{"-r", "-s", "/bin/false", name}
default:
return fmt.Errorf("cannot add user; no useradd/adduser binary found")
}
if out, err := execCmd(userCommand, args...); err != nil {
return fmt.Errorf("failed to add user with error: %v; output: %q", err, string(out))
}
return nil
}
func createSubordinateRanges(name string) error {
// first, we should verify that ranges weren't automatically created
// by the distro tooling
ranges, err := parseSubuid(name)
if err != nil {
return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err)
}
if len(ranges) == 0 {
// no UID ranges; let's create one
startID, err := findNextUIDRange()
if err != nil {
return fmt.Errorf("Can't find available subuid range: %v", err)
}
out, err := execCmd("usermod", "-v", fmt.Sprintf("%d-%d", startID, startID+defaultRangeLen-1), name)
if err != nil {
return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err)
}
}
ranges, err = parseSubgid(name)
if err != nil {
return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err)
}
if len(ranges) == 0 {
// no GID ranges; let's create one
startID, err := findNextGIDRange()
if err != nil {
return fmt.Errorf("Can't find available subgid range: %v", err)
}
out, err := execCmd("usermod", "-w", fmt.Sprintf("%d-%d", startID, startID+defaultRangeLen-1), name)
if err != nil {
return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err)
}
}
return nil
}
func findNextUIDRange() (int, error) {
ranges, err := parseSubuid("ALL")
if err != nil {
return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err)
}
sort.Sort(ranges)
return findNextRangeStart(ranges)
}
func findNextGIDRange() (int, error) {
ranges, err := parseSubgid("ALL")
if err != nil {
return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err)
}
sort.Sort(ranges)
return findNextRangeStart(ranges)
}
func findNextRangeStart(rangeList ranges) (int, error) {
startID := defaultRangeStart
for _, arange := range rangeList {
if wouldOverlap(arange, startID) {
startID = arange.Start + arange.Length
}
}
return startID, nil
}
func wouldOverlap(arange subIDRange, ID int) bool {
low := ID
high := ID + defaultRangeLen
if (low >= arange.Start && low <= arange.Start+arange.Length) ||
(high <= arange.Start+arange.Length && high >= arange.Start) {
return true
}
return false
}
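Since findNextUIDRange and its helpers are unexported, here is a standalone sketch of the same scan: given sorted ranges already present in /etc/subuid (hypothetical values), the candidate start slides past every range that would overlap a 65536-wide block.

package main

import "fmt"

func main() {
	type r struct{ start, length int }
	const rangeLen = 65536
	// e.g. two users already hold subordinate ranges
	existing := []r{{100000, 65536}, {165536, 65536}}

	// Same overlap test as wouldOverlap above, inlined.
	startID := 100000
	for _, a := range existing {
		low, high := startID, startID+rangeLen
		if (low >= a.start && low <= a.start+a.length) ||
			(high <= a.start+a.length && high >= a.start) {
			startID = a.start + a.length
		}
	}
	fmt.Println(startID) // 231072: the next free 65536-wide range
}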

@@ -0,0 +1,12 @@
// +build !linux
package idtools // import "github.com/docker/docker/pkg/idtools"
import "fmt"
// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair
// and calls the appropriate helper function to add the group and then
// the user to the group in /etc/group and /etc/passwd respectively.
func AddNamespaceRangesUser(name string) (int, int, error) {
return -1, -1, fmt.Errorf("No support for adding users or groups on this OS")
}

@@ -0,0 +1,31 @@
// +build !windows
package idtools // import "github.com/docker/docker/pkg/idtools"
import (
"fmt"
"os/exec"
"path/filepath"
)
func resolveBinary(binname string) (string, error) {
binaryPath, err := exec.LookPath(binname)
if err != nil {
return "", err
}
resolvedPath, err := filepath.EvalSymlinks(binaryPath)
if err != nil {
return "", err
}
// only return no error if the final resolved binary basename
// matches what was searched for
if filepath.Base(resolvedPath) == binname {
return resolvedPath, nil
}
return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
}
func execCmd(cmd string, arg ...string) ([]byte, error) {
execCmd := exec.Command(cmd, arg...)
return execCmd.CombinedOutput()
}

vendor/github.com/docker/docker/pkg/pools/pools.go generated vendored Normal file
@@ -0,0 +1,137 @@
// Package pools provides a collection of pools which provide various
// data types with buffers. These can be used to lower the number of
// memory allocations and reuse buffers.
//
// New pools should be added to this package to allow them to be
// shared across packages.
//
// Utility functions which operate on pools should be added to this
// package to allow them to be reused.
package pools // import "github.com/docker/docker/pkg/pools"
import (
"bufio"
"io"
"sync"
"github.com/docker/docker/pkg/ioutils"
)
const buffer32K = 32 * 1024
var (
// BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer.
BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K)
// BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer.
BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K)
buffer32KPool = newBufferPoolWithSize(buffer32K)
)
// BufioReaderPool is a pool of bufio.Readers backed by sync.Pool.
type BufioReaderPool struct {
pool sync.Pool
}
// newBufioReaderPoolWithSize is unexported because new pools should be
// added here to be shared where required.
func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
return &BufioReaderPool{
pool: sync.Pool{
New: func() interface{} { return bufio.NewReaderSize(nil, size) },
},
}
}
// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool.
func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader {
buf := bufPool.pool.Get().(*bufio.Reader)
buf.Reset(r)
return buf
}
// Put puts the bufio.Reader back into the pool.
func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
b.Reset(nil)
bufPool.pool.Put(b)
}
type bufferPool struct {
pool sync.Pool
}
func newBufferPoolWithSize(size int) *bufferPool {
return &bufferPool{
pool: sync.Pool{
New: func() interface{} { s := make([]byte, size); return &s },
},
}
}
func (bp *bufferPool) Get() *[]byte {
return bp.pool.Get().(*[]byte)
}
func (bp *bufferPool) Put(b *[]byte) {
bp.pool.Put(b)
}
// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy.
func Copy(dst io.Writer, src io.Reader) (written int64, err error) {
buf := buffer32KPool.Get()
written, err = io.CopyBuffer(dst, src, *buf)
buffer32KPool.Put(buf)
return
}
// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back
// into the pool and closes the reader if it's an io.ReadCloser.
func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
return ioutils.NewReadCloserWrapper(r, func() error {
if readCloser, ok := r.(io.ReadCloser); ok {
readCloser.Close()
}
bufPool.Put(buf)
return nil
})
}
// BufioWriterPool is a pool of bufio.Writers backed by sync.Pool.
type BufioWriterPool struct {
pool sync.Pool
}
// newBufioWriterPoolWithSize is unexported because new pools should be
// added here to be shared where required.
func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
return &BufioWriterPool{
pool: sync.Pool{
New: func() interface{} { return bufio.NewWriterSize(nil, size) },
},
}
}
// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool.
func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
buf := bufPool.pool.Get().(*bufio.Writer)
buf.Reset(w)
return buf
}
// Put puts the bufio.Writer back into the pool.
func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
b.Reset(nil)
bufPool.pool.Put(b)
}
// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
// into the pool and closes the writer if it's an io.WriteCloser.
func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
return ioutils.NewWriteCloserWrapper(w, func() error {
buf.Flush()
if writeCloser, ok := w.(io.WriteCloser); ok {
writeCloser.Close()
}
bufPool.Put(buf)
return nil
})
}
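A usage sketch for the pool helpers; the input file is an arbitrary example:

package main

import (
	"os"

	"github.com/docker/docker/pkg/pools"
)

func main() {
	src, err := os.Open("/etc/hostname") // any readable file
	if err != nil {
		panic(err)
	}
	defer src.Close()

	// Reuse a pooled 32K buffer for the copy instead of allocating one.
	if _, err := pools.Copy(os.Stdout, src); err != nil {
		panic(err)
	}

	// Or borrow a pooled bufio.Reader around src and return it when done.
	br := pools.BufioReader32KPool.Get(src)
	defer pools.BufioReader32KPool.Put(br)
}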

vendor/github.com/opencontainers/runc/LICENSE generated vendored Normal file
@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2014 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/opencontainers/runc/NOTICE generated vendored Normal file
@@ -0,0 +1,17 @@
runc
Copyright 2012-2015 Docker, Inc.
This product includes software developed at Docker, Inc. (http://www.docker.com).
The following is courtesy of our legal counsel:
Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.
For more information, please see http://www.bis.doc.gov
See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.

@@ -0,0 +1,156 @@
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package user
import (
"io"
"os"
"strconv"
"golang.org/x/sys/unix"
)
// Unix-specific path to the passwd and group formatted files.
const (
unixPasswdPath = "/etc/passwd"
unixGroupPath = "/etc/group"
)
// LookupUser looks up a user by their username in /etc/passwd. If the user
// cannot be found (or there is no /etc/passwd file on the filesystem), then
// LookupUser returns an error.
func LookupUser(username string) (User, error) {
return lookupUserFunc(func(u User) bool {
return u.Name == username
})
}
// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot
// be found (or there is no /etc/passwd file on the filesystem), then LookupUid
// returns an error.
func LookupUid(uid int) (User, error) {
return lookupUserFunc(func(u User) bool {
return u.Uid == uid
})
}
func lookupUserFunc(filter func(u User) bool) (User, error) {
// Get operating system-specific passwd reader-closer.
passwd, err := GetPasswd()
if err != nil {
return User{}, err
}
defer passwd.Close()
// Get the users.
users, err := ParsePasswdFilter(passwd, filter)
if err != nil {
return User{}, err
}
// No user entries found.
if len(users) == 0 {
return User{}, ErrNoPasswdEntries
}
// Assume the first entry is the "correct" one.
return users[0], nil
}
// LookupGroup looks up a group by its name in /etc/group. If the group cannot
// be found (or there is no /etc/group file on the filesystem), then LookupGroup
// returns an error.
func LookupGroup(groupname string) (Group, error) {
return lookupGroupFunc(func(g Group) bool {
return g.Name == groupname
})
}
// LookupGid looks up a group by its group id in /etc/group. If the group cannot
// be found (or there is no /etc/group file on the filesystem), then LookupGid
// returns an error.
func LookupGid(gid int) (Group, error) {
return lookupGroupFunc(func(g Group) bool {
return g.Gid == gid
})
}
func lookupGroupFunc(filter func(g Group) bool) (Group, error) {
// Get operating system-specific group reader-closer.
group, err := GetGroup()
if err != nil {
return Group{}, err
}
defer group.Close()
// Get the groups.
groups, err := ParseGroupFilter(group, filter)
if err != nil {
return Group{}, err
}
// No group entries found.
if len(groups) == 0 {
return Group{}, ErrNoGroupEntries
}
// Assume the first entry is the "correct" one.
return groups[0], nil
}
func GetPasswdPath() (string, error) {
return unixPasswdPath, nil
}
func GetPasswd() (io.ReadCloser, error) {
return os.Open(unixPasswdPath)
}
func GetGroupPath() (string, error) {
return unixGroupPath, nil
}
func GetGroup() (io.ReadCloser, error) {
return os.Open(unixGroupPath)
}
// CurrentUser looks up the current user by their user id in /etc/passwd. If the
// user cannot be found (or there is no /etc/passwd file on the filesystem),
// then CurrentUser returns an error.
func CurrentUser() (User, error) {
return LookupUid(unix.Getuid())
}
// CurrentGroup looks up the current process's primary group (by its group id)
// in /etc/group. If the group cannot be found (or there is no /etc/group file
// on the filesystem), then CurrentGroup returns an error.
func CurrentGroup() (Group, error) {
return LookupGid(unix.Getgid())
}
func currentUserSubIDs(fileName string) ([]SubID, error) {
u, err := CurrentUser()
if err != nil {
return nil, err
}
filter := func(entry SubID) bool {
return entry.Name == u.Name || entry.Name == strconv.Itoa(u.Uid)
}
return ParseSubIDFileFilter(fileName, filter)
}
func CurrentUserSubUIDs() ([]SubID, error) {
return currentUserSubIDs("/etc/subuid")
}
func CurrentUserSubGIDs() ([]SubID, error) {
return currentUserSubIDs("/etc/subgid")
}
func CurrentProcessUIDMap() ([]IDMap, error) {
return ParseIDMapFile("/proc/self/uid_map")
}
func CurrentProcessGIDMap() ([]IDMap, error) {
return ParseIDMapFile("/proc/self/gid_map")
}
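A minimal usage sketch, assuming a Unix host with a readable /etc/passwd:

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/user"
)

func main() {
	// Look up the current process's uid in /etc/passwd.
	u, err := user.CurrentUser()
	if err != nil {
		panic(err)
	}
	fmt.Printf("uid=%d name=%s home=%s\n", u.Uid, u.Name, u.Home)
}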

@@ -0,0 +1,604 @@
package user
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"os"
"strconv"
"strings"
)
const (
minID = 0
maxID = 1<<31 - 1 // for 32-bit systems compatibility
)
var (
// ErrNoPasswdEntries is returned if no matching entries were found in /etc/passwd.
ErrNoPasswdEntries = errors.New("no matching entries in passwd file")
// ErrNoGroupEntries is returned if no matching entries were found in /etc/group.
ErrNoGroupEntries = errors.New("no matching entries in group file")
// ErrRange is returned if a UID or GID is outside of the valid range.
ErrRange = fmt.Errorf("uids and gids must be in range %d-%d", minID, maxID)
)
type User struct {
Name string
Pass string
Uid int
Gid int
Gecos string
Home string
Shell string
}
type Group struct {
Name string
Pass string
Gid int
List []string
}
// SubID represents an entry in /etc/sub{u,g}id
type SubID struct {
Name string
SubID int64
Count int64
}
// IDMap represents an entry in /proc/PID/{u,g}id_map
type IDMap struct {
ID int64
ParentID int64
Count int64
}
func parseLine(line []byte, v ...interface{}) {
parseParts(bytes.Split(line, []byte(":")), v...)
}
func parseParts(parts [][]byte, v ...interface{}) {
if len(parts) == 0 {
return
}
for i, p := range parts {
// Ignore cases where we don't have enough fields to populate the arguments.
// Some configuration files like to misbehave.
if len(v) <= i {
break
}
// Use the type of the argument to figure out how to parse it, scanf() style.
// This is legit.
switch e := v[i].(type) {
case *string:
*e = string(p)
case *int:
// "numbers", with conversion errors ignored because of some misbehaving configuration files.
*e, _ = strconv.Atoi(string(p))
case *int64:
*e, _ = strconv.ParseInt(string(p), 10, 64)
case *[]string:
// Comma-separated lists.
if len(p) != 0 {
*e = strings.Split(string(p), ",")
} else {
*e = []string{}
}
default:
// Someone goof'd when writing code using this function. Scream so they can hear us.
panic(fmt.Sprintf("parseLine only accepts {*string, *int, *int64, *[]string} as arguments! %#v is not a pointer!", e))
}
}
}
func ParsePasswdFile(path string) ([]User, error) {
passwd, err := os.Open(path)
if err != nil {
return nil, err
}
defer passwd.Close()
return ParsePasswd(passwd)
}
func ParsePasswd(passwd io.Reader) ([]User, error) {
return ParsePasswdFilter(passwd, nil)
}
func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) {
passwd, err := os.Open(path)
if err != nil {
return nil, err
}
defer passwd.Close()
return ParsePasswdFilter(passwd, filter)
}
func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) {
if r == nil {
return nil, fmt.Errorf("nil source for passwd-formatted data")
}
var (
s = bufio.NewScanner(r)
out = []User{}
)
for s.Scan() {
line := bytes.TrimSpace(s.Bytes())
if len(line) == 0 {
continue
}
// see: man 5 passwd
// name:password:UID:GID:GECOS:directory:shell
// Name:Pass:Uid:Gid:Gecos:Home:Shell
// root:x:0:0:root:/root:/bin/bash
// adm:x:3:4:adm:/var/adm:/bin/false
p := User{}
parseLine(line, &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell)
if filter == nil || filter(p) {
out = append(out, p)
}
}
if err := s.Err(); err != nil {
return nil, err
}
return out, nil
}
func ParseGroupFile(path string) ([]Group, error) {
group, err := os.Open(path)
if err != nil {
return nil, err
}
defer group.Close()
return ParseGroup(group)
}
func ParseGroup(group io.Reader) ([]Group, error) {
return ParseGroupFilter(group, nil)
}
func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) {
group, err := os.Open(path)
if err != nil {
return nil, err
}
defer group.Close()
return ParseGroupFilter(group, filter)
}
func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) {
if r == nil {
return nil, fmt.Errorf("nil source for group-formatted data")
}
rd := bufio.NewReader(r)
out := []Group{}
// Read the file line-by-line.
for {
var (
isPrefix bool
wholeLine []byte
err error
)
// Read the next line. We do so in chunks (as much as the reader's
// buffer can hold), check on each step whether the whole line has
// been read yet, and store the final result in wholeLine.
for {
var line []byte
line, isPrefix, err = rd.ReadLine()
if err != nil {
// We should return no error if EOF is reached
// without a match.
if err == io.EOF { //nolint:errorlint // comparison with io.EOF is legit, https://github.com/polyfloyd/go-errorlint/pull/12
err = nil
}
return out, err
}
// Simple common case: line is short enough to fit in a
// single reader's buffer.
if !isPrefix && len(wholeLine) == 0 {
wholeLine = line
break
}
wholeLine = append(wholeLine, line...)
// Check if we read the whole line already.
if !isPrefix {
break
}
}
// There's no spec for /etc/passwd or /etc/group, but we try to follow
// the same rules as the glibc parser, which allows comments and blank
// space at the beginning of a line.
wholeLine = bytes.TrimSpace(wholeLine)
if len(wholeLine) == 0 || wholeLine[0] == '#' {
continue
}
// see: man 5 group
// group_name:password:GID:user_list
// Name:Pass:Gid:List
// root:x:0:root
// adm:x:4:root,adm,daemon
p := Group{}
parseLine(wholeLine, &p.Name, &p.Pass, &p.Gid, &p.List)
if filter == nil || filter(p) {
out = append(out, p)
}
}
}
type ExecUser struct {
Uid int
Gid int
Sgids []int
Home string
}
// GetExecUserPath is a wrapper for GetExecUser. It reads data from each of the
// given file paths and uses that data as the arguments to GetExecUser. If the
// files cannot be opened for any reason, the error is ignored and a nil
// io.Reader is passed instead.
func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) {
var passwd, group io.Reader
if passwdFile, err := os.Open(passwdPath); err == nil {
passwd = passwdFile
defer passwdFile.Close()
}
if groupFile, err := os.Open(groupPath); err == nil {
group = groupFile
defer groupFile.Close()
}
return GetExecUser(userSpec, defaults, passwd, group)
}
// GetExecUser parses a user specification string (using the passwd and group
// readers as sources for /etc/passwd and /etc/group data, respectively). In
// the case of blank fields or missing data from the sources, the values in
// defaults are used.
//
// GetExecUser will return an error if a user or group literal could not be
// found in any entry in passwd and group respectively.
//
// Examples of valid user specifications are:
// * ""
// * "user"
// * "uid"
// * "user:group"
// * "uid:gid
// * "user:gid"
// * "uid:group"
//
// It should be noted that if you specify a numeric user or group id, it will
// not be evaluated as a username (only the metadata will be filled). So attempting
// to parse a user with user.Name = "1337" will produce a user with a UID of
// 1337.
func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) {
if defaults == nil {
defaults = new(ExecUser)
}
// Copy over defaults.
user := &ExecUser{
Uid: defaults.Uid,
Gid: defaults.Gid,
Sgids: defaults.Sgids,
Home: defaults.Home,
}
// Sgids slice *cannot* be nil.
if user.Sgids == nil {
user.Sgids = []int{}
}
// Allow for userArg to have either "user" syntax, or optionally "user:group" syntax
var userArg, groupArg string
parseLine([]byte(userSpec), &userArg, &groupArg)
// Convert userArg and groupArg to be numeric, so we don't have to execute
// Atoi *twice* for each iteration over lines.
uidArg, uidErr := strconv.Atoi(userArg)
gidArg, gidErr := strconv.Atoi(groupArg)
// Find the matching user.
users, err := ParsePasswdFilter(passwd, func(u User) bool {
if userArg == "" {
// Default to current state of the user.
return u.Uid == user.Uid
}
if uidErr == nil {
// If the userArg is numeric, always treat it as a UID.
return uidArg == u.Uid
}
return u.Name == userArg
})
// If we can't find the user, we have to bail.
if err != nil && passwd != nil {
if userArg == "" {
userArg = strconv.Itoa(user.Uid)
}
return nil, fmt.Errorf("unable to find user %s: %v", userArg, err)
}
var matchedUserName string
if len(users) > 0 {
// First match wins, even if there's more than one matching entry.
matchedUserName = users[0].Name
user.Uid = users[0].Uid
user.Gid = users[0].Gid
user.Home = users[0].Home
} else if userArg != "" {
// If we can't find a user with the given username, the only other valid
// option is if it's a numeric username with no associated entry in passwd.
if uidErr != nil {
// Not numeric.
return nil, fmt.Errorf("unable to find user %s: %v", userArg, ErrNoPasswdEntries)
}
user.Uid = uidArg
// Must be inside valid uid range.
if user.Uid < minID || user.Uid > maxID {
return nil, ErrRange
}
// Okay, so it's numeric. We can just roll with this.
}
// On to the groups. If we matched a username, we need to do this because of
// the supplementary group IDs.
if groupArg != "" || matchedUserName != "" {
groups, err := ParseGroupFilter(group, func(g Group) bool {
// If the group argument isn't explicit, we'll just search for it.
if groupArg == "" {
// Check if user is a member of this group.
for _, u := range g.List {
if u == matchedUserName {
return true
}
}
return false
}
if gidErr == nil {
// If the groupArg is numeric, always treat it as a GID.
return gidArg == g.Gid
}
return g.Name == groupArg
})
if err != nil && group != nil {
return nil, fmt.Errorf("unable to find groups for spec %v: %v", matchedUserName, err)
}
// Only start modifying user.Gid if it is in explicit form.
if groupArg != "" {
if len(groups) > 0 {
// First match wins, even if there's more than one matching entry.
user.Gid = groups[0].Gid
} else {
// If we can't find a group with the given name, the only other valid
// option is if it's a numeric group name with no associated entry in group.
if gidErr != nil {
// Not numeric.
return nil, fmt.Errorf("unable to find group %s: %v", groupArg, ErrNoGroupEntries)
}
user.Gid = gidArg
// Must be inside valid gid range.
if user.Gid < minID || user.Gid > maxID {
return nil, ErrRange
}
// Okay, so it's numeric. We can just roll with this.
}
} else if len(groups) > 0 {
// Supplementary group ids only make sense if in the implicit form.
user.Sgids = make([]int, len(groups))
for i, group := range groups {
user.Sgids[i] = group.Gid
}
}
}
return user, nil
}
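// For illustration only (not part of the upstream file): a minimal sketch of
// resolving a "user:group" spec against in-memory passwd/group data, assuming
// this package is imported as "user" and strings.NewReader supplies the readers:
//
//	passwd := strings.NewReader("www:x:33:33:www:/var/www:/bin/false")
//	group := strings.NewReader("root:x:0:root")
//	execUser, err := user.GetExecUser("www:root", nil, passwd, group)
//	// execUser.Uid == 33, execUser.Gid == 0 (explicit group), execUser.Home == "/var/www"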
// GetAdditionalGroups looks up a list of groups by name or group id
// against the given /etc/group formatted data. If a group name cannot
// be found, an error will be returned. If a group id cannot be found,
// or the given group data is nil, the id will be returned as-is
// provided it is in the legal range.
func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) {
groups := []Group{}
if group != nil {
var err error
groups, err = ParseGroupFilter(group, func(g Group) bool {
for _, ag := range additionalGroups {
if g.Name == ag || strconv.Itoa(g.Gid) == ag {
return true
}
}
return false
})
if err != nil {
return nil, fmt.Errorf("Unable to find additional groups %v: %v", additionalGroups, err)
}
}
gidMap := make(map[int]struct{})
for _, ag := range additionalGroups {
var found bool
for _, g := range groups {
// if we found a matched group either by name or gid, take the
// first matched as correct
if g.Name == ag || strconv.Itoa(g.Gid) == ag {
if _, ok := gidMap[g.Gid]; !ok {
gidMap[g.Gid] = struct{}{}
found = true
break
}
}
}
// we asked for a group but didn't find it. let's check to see
// if we wanted a numeric group
if !found {
gid, err := strconv.ParseInt(ag, 10, 64)
if err != nil {
return nil, fmt.Errorf("Unable to find group %s", ag)
}
// Ensure gid is inside gid range.
if gid < minID || gid > maxID {
return nil, ErrRange
}
gidMap[int(gid)] = struct{}{}
}
}
gids := []int{}
for gid := range gidMap {
gids = append(gids, gid)
}
return gids, nil
}
// GetAdditionalGroupsPath is a wrapper around GetAdditionalGroups
// that opens the groupPath given and gives it as an argument to
// GetAdditionalGroups.
func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) {
var group io.Reader
if groupFile, err := os.Open(groupPath); err == nil {
group = groupFile
defer groupFile.Close()
}
return GetAdditionalGroups(additionalGroups, group)
}
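// For illustration only (not part of the upstream file): supplementary groups
// may be given by name or by numeric id; ids without a group-file entry are
// kept as-is after the range check:
//
//	groupData := strings.NewReader("audio:x:29:\nvideo:x:44:")
//	gids, err := user.GetAdditionalGroups([]string{"audio", "1000"}, groupData)
//	// gids contains 29 (resolved by name) and 1000 (kept as a raw id)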
func ParseSubIDFile(path string) ([]SubID, error) {
subid, err := os.Open(path)
if err != nil {
return nil, err
}
defer subid.Close()
return ParseSubID(subid)
}
func ParseSubID(subid io.Reader) ([]SubID, error) {
return ParseSubIDFilter(subid, nil)
}
func ParseSubIDFileFilter(path string, filter func(SubID) bool) ([]SubID, error) {
subid, err := os.Open(path)
if err != nil {
return nil, err
}
defer subid.Close()
return ParseSubIDFilter(subid, filter)
}
func ParseSubIDFilter(r io.Reader, filter func(SubID) bool) ([]SubID, error) {
if r == nil {
return nil, fmt.Errorf("nil source for subid-formatted data")
}
var (
s = bufio.NewScanner(r)
out = []SubID{}
)
for s.Scan() {
line := bytes.TrimSpace(s.Bytes())
if len(line) == 0 {
continue
}
// see: man 5 subuid
p := SubID{}
parseLine(line, &p.Name, &p.SubID, &p.Count)
if filter == nil || filter(p) {
out = append(out, p)
}
}
if err := s.Err(); err != nil {
return nil, err
}
return out, nil
}
func ParseIDMapFile(path string) ([]IDMap, error) {
r, err := os.Open(path)
if err != nil {
return nil, err
}
defer r.Close()
return ParseIDMap(r)
}
func ParseIDMap(r io.Reader) ([]IDMap, error) {
return ParseIDMapFilter(r, nil)
}
func ParseIDMapFileFilter(path string, filter func(IDMap) bool) ([]IDMap, error) {
r, err := os.Open(path)
if err != nil {
return nil, err
}
defer r.Close()
return ParseIDMapFilter(r, filter)
}
func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) {
if r == nil {
return nil, fmt.Errorf("nil source for idmap-formatted data")
}
var (
s = bufio.NewScanner(r)
out = []IDMap{}
)
for s.Scan() {
line := bytes.TrimSpace(s.Bytes())
if len(line) == 0 {
continue
}
// see: man 7 user_namespaces
p := IDMap{}
parseParts(bytes.Fields(line), &p.ID, &p.ParentID, &p.Count)
if filter == nil || filter(p) {
out = append(out, p)
}
}
if err := s.Err(); err != nil {
return nil, err
}
return out, nil
}
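// For illustration only (not part of the upstream file): a typical /etc/subuid
// entry has the form name:start:count and can be filtered by name:
//
//	subuid := strings.NewReader("alice:100000:65536")
//	entries, err := user.ParseSubIDFilter(subuid, func(e user.SubID) bool {
//		return e.Name == "alice"
//	})
//	// entries[0].SubID == 100000, entries[0].Count == 65536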

42
vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go generated vendored Normal file
View File

@ -0,0 +1,42 @@
// +build gofuzz
package user
import (
"io"
"strings"
)
func IsDivisibleBy(n int, divisibleby int) bool {
return (n % divisibleby) == 0
}
func FuzzUser(data []byte) int {
if len(data) == 0 {
return -1
}
if !IsDivisibleBy(len(data), 5) {
return -1
}
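// Split the input into five equal chunks: raw passwd data, group data for
// GetAdditionalGroups, a group name or id, passwd data for GetExecUser,
// and the user spec itself.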
var divided [][]byte
chunkSize := len(data) / 5
for i := 0; i < len(data); i += chunkSize {
end := i + chunkSize
divided = append(divided, data[i:end])
}
_, _ = ParsePasswdFilter(strings.NewReader(string(divided[0])), nil)
var passwd, group io.Reader
group = strings.NewReader(string(divided[1]))
_, _ = GetAdditionalGroups([]string{string(divided[2])}, group)
passwd = strings.NewReader(string(divided[3]))
_, _ = GetExecUser(string(divided[4]), nil, passwd, group)
return 1
}

2
vendor/github.com/pelletier/go-toml/.dockerignore generated vendored Normal file
View File

@ -0,0 +1,2 @@
cmd/tomll/tomll
cmd/tomljson/tomljson

5
vendor/github.com/pelletier/go-toml/.gitignore generated vendored Normal file
View File

@ -0,0 +1,5 @@
test_program/test_program_bin
fuzz/
cmd/tomll/tomll
cmd/tomljson/tomljson
cmd/tomltestgen/tomltestgen

132
vendor/github.com/pelletier/go-toml/CONTRIBUTING.md generated vendored Normal file
View File

@ -0,0 +1,132 @@
## Contributing
Thank you for your interest in go-toml! We appreciate you considering
contributing to go-toml!
The main goal of the project is to provide an easy-to-use TOML
implementation for Go that gets the job done and gets out of your way;
dealing with TOML is probably not the central piece of your project.
As the single maintainer of go-toml, time is scarce. All help, big or
small, is more than welcomed!
### Ask questions
Any question you may have, somebody else might have it too. Always feel
free to ask them on the [issues tracker][issues-tracker]. We will try to
answer them as clearly and quickly as possible, time permitting.
Asking questions also helps us identify areas where the documentation needs
improvement, or new features that weren't envisioned before. Sometimes, a
seemingly innocent question leads to the fix of a bug. Don't hesitate and
ask away!
### Improve the documentation
The best way to share your knowledge and experience with go-toml is to
improve the documentation. Fix a typo, clarify an interface, add an
example, anything goes!
The documentation is present in the [README][readme] and throughout the
source code. On release, it gets updated on [pkg.go.dev][pkg.go.dev]. To make a
change to the documentation, create a pull request with your proposed
changes. For simple changes like that, the easiest way to go is probably
the "Fork this project and edit the file" button on Github, displayed at
the top right of the file. Unless it's a trivial change (for example a
typo), provide a little bit of context in your pull request description or
commit message.
### Report a bug
Found a bug! Sorry to hear that :(. Help us and others track them down and
fix them by reporting it. [File a new bug report][bug-report] on the [issues
tracker][issues-tracker]. The template should provide enough guidance on
what to include. When in doubt: add more details! By reducing ambiguity and
providing more information, it decreases back and forth and saves everyone
time.
### Code changes
Want to contribute a patch? Very happy to hear that!
First, some high-level rules:
* A short proposal with some POC code is better than a lengthy piece of
text with no code. Code speaks louder than words.
* No backward-incompatible patch will be accepted unless discussed.
Sometimes it's hard, and Go's lack of versioning by default does not
help, but we try not to break people's programs unless we absolutely have
to.
* If you are writing a new feature or extending an existing one, make sure
to write some documentation.
* Bug fixes need to be accompanied with regression tests.
* New code needs to be tested.
* Your commit messages need to explain why the change is needed, even if
already included in the PR description.
It does sound like a lot, but those best practices are here to save time
overall and continuously improve the quality of the project, which is
something everyone benefits from.
#### Get started
The fairly standard code contribution process looks like this:
1. [Fork the project][fork].
2. Make your changes, commit on any branch you like.
3. [Open up a pull request][pull-request].
4. Review, potentially ask for changes.
5. Merge. You're in!
Feel free to ask for help! You can create draft pull requests to gather
some early feedback!
#### Run the tests
You can run tests for go-toml using Go's test tool: `go test ./...`.
When creating a pull request, all tests will be run on Linux on a few Go
versions (Travis CI), and on Windows using the latest Go version
(AppVeyor).
#### Style
Try to look around and follow the same format and structure as the rest of
the code. We enforce using `go fmt` on the whole code base.
---
### Maintainers-only
#### Merge pull request
Checklist:
* Passing CI.
* Does not introduce backward-incompatible changes (unless discussed).
* Has relevant doc changes.
* Has relevant unit tests.
1. Merge using "squash and merge".
2. Make sure to edit the commit message to keep all the useful information
nice and clean.
3. Make sure the commit title is clear and contains the PR number (#123).
#### New release
1. Go to [releases][releases]. Click on "X commits to master since this
release".
2. Make note of all the changes. Look for backward incompatible changes,
new features, and bug fixes.
3. Pick the new version using the above and semver.
4. Create a [new release][new-release].
5. Follow the same format as [1.1.0][release-110].
[issues-tracker]: https://github.com/pelletier/go-toml/issues
[bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md
[pkg.go.dev]: https://pkg.go.dev/github.com/pelletier/go-toml
[readme]: ./README.md
[fork]: https://help.github.com/articles/fork-a-repo
[pull-request]: https://help.github.com/en/articles/creating-a-pull-request
[releases]: https://github.com/pelletier/go-toml/releases
[new-release]: https://github.com/pelletier/go-toml/releases/new
[release-110]: https://github.com/pelletier/go-toml/releases/tag/v1.1.0

11
vendor/github.com/pelletier/go-toml/Dockerfile generated vendored Normal file
View File

@ -0,0 +1,11 @@
FROM golang:1.12-alpine3.9 as builder
WORKDIR /go/src/github.com/pelletier/go-toml
COPY . .
ENV CGO_ENABLED=0
ENV GOOS=linux
RUN go install ./...
FROM scratch
COPY --from=builder /go/bin/tomll /usr/bin/tomll
COPY --from=builder /go/bin/tomljson /usr/bin/tomljson
COPY --from=builder /go/bin/jsontoml /usr/bin/jsontoml

247
vendor/github.com/pelletier/go-toml/LICENSE generated vendored Normal file
View File

@ -0,0 +1,247 @@
The bulk of github.com/pelletier/go-toml is distributed under the MIT license
(see below), with the exception of localtime.go and localtime_test.go.
Those two files have been copied over from Google's civil library at revision
ed46f5086358513cf8c25f8e3f022cb838a49d66, and are distributed under the Apache
2.0 license (see below).
github.com/pelletier/go-toml:
The MIT License (MIT)
Copyright (c) 2013 - 2021 Thomas Pelletier, Eric Anderton
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
localtime.go, localtime_test.go:
Originals:
https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil.go
https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil_test.go
Changes:
* Renamed files from civil* to localtime*.
* Package changed from civil to toml.
* 'Local' prefix added to all structs.
License:
https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/LICENSE
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

29
vendor/github.com/pelletier/go-toml/Makefile generated vendored Normal file
View File

@ -0,0 +1,29 @@
export CGO_ENABLED=0
go := go
go.goos ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f1)
go.goarch ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f2)
out.tools := tomll tomljson jsontoml
out.dist := $(out.tools:=_$(go.goos)_$(go.goarch).tar.xz)
sources := $(wildcard **/*.go)
.PHONY:
tools: $(out.tools)
$(out.tools): $(sources)
GOOS=$(go.goos) GOARCH=$(go.goarch) $(go) build ./cmd/$@
.PHONY:
dist: $(out.dist)
$(out.dist):%_$(go.goos)_$(go.goarch).tar.xz: %
if [ "$(go.goos)" = "windows" ]; then \
tar -cJf $@ $^.exe; \
else \
tar -cJf $@ $^; \
fi
.PHONY:
clean:
rm -rf $(out.tools) $(out.dist)

5
vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md generated vendored Normal file
View File

@ -0,0 +1,5 @@
**Issue:** add link to pelletier/go-toml issue here
Explanation of what this pull request does.
More detailed description of the decisions being made and the reasons why (if the patch is non-trivial).

176
vendor/github.com/pelletier/go-toml/README.md generated vendored Normal file
View File

@ -0,0 +1,176 @@
# go-toml
Go library for the [TOML](https://toml.io/) format.
This library supports TOML version
[v1.0.0-rc.3](https://toml.io/en/v1.0.0-rc.3)
[![Go Reference](https://pkg.go.dev/badge/github.com/pelletier/go-toml.svg)](https://pkg.go.dev/github.com/pelletier/go-toml)
[![license](https://img.shields.io/github/license/pelletier/go-toml.svg)](https://github.com/pelletier/go-toml/blob/master/LICENSE)
[![Build Status](https://dev.azure.com/pelletierthomas/go-toml-ci/_apis/build/status/pelletier.go-toml?branchName=master)](https://dev.azure.com/pelletierthomas/go-toml-ci/_build/latest?definitionId=1&branchName=master)
[![codecov](https://codecov.io/gh/pelletier/go-toml/branch/master/graph/badge.svg)](https://codecov.io/gh/pelletier/go-toml)
[![Go Report Card](https://goreportcard.com/badge/github.com/pelletier/go-toml)](https://goreportcard.com/report/github.com/pelletier/go-toml)
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml?ref=badge_shield)
## Development status
**Consider go-toml v2!**
The next version of go-toml is in [active development][v2-dev], and
[nearing completion][v2-map].
Though technically in beta, v2 is already better tested, [fixes bugs][v1-bugs],
and is [much faster][v2-bench]. If you only need to read and write TOML documents
(the majority of cases), those features are implemented and the API is unlikely to
change.
The remaining features (Document structure editing and tooling) will be added
shortly. While pull-requests are welcome on v1, no active development is
expected on it. When v2.0.0 is released, v1 will be deprecated.
👉 [go-toml v2][v2]
[v2]: https://github.com/pelletier/go-toml/tree/v2
[v2-map]: https://github.com/pelletier/go-toml/discussions/506
[v2-dev]: https://github.com/pelletier/go-toml/tree/v2
[v1-bugs]: https://github.com/pelletier/go-toml/issues?q=is%3Aissue+is%3Aopen+label%3Av2-fixed
[v2-bench]: https://github.com/pelletier/go-toml/tree/v2#benchmarks
## Features
Go-toml provides the following features for using data parsed from TOML documents:
* Load TOML documents from files and string data
* Easily navigate TOML structure using Tree
* Marshaling and unmarshaling to and from data structures
* Line & column position data for all parsed elements
* [Query support similar to JSON-Path](query/)
* Syntax errors contain line and column numbers
## Import
```go
import "github.com/pelletier/go-toml"
```
## Usage example
Read a TOML document:
```go
config, _ := toml.Load(`
[postgres]
user = "pelletier"
password = "mypassword"`)
// retrieve data directly
user := config.Get("postgres.user").(string)
// or using an intermediate object
postgresConfig := config.Get("postgres").(*toml.Tree)
password := postgresConfig.Get("password").(string)
```
Or use Unmarshal:
```go
type Postgres struct {
User string
Password string
}
type Config struct {
Postgres Postgres
}
doc := []byte(`
[Postgres]
User = "pelletier"
Password = "mypassword"`)
config := Config{}
toml.Unmarshal(doc, &config)
fmt.Println("user=", config.Postgres.User)
```
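Marshaling back to TOML works the same way in reverse; a minimal sketch,
reusing the `Config` type from the example above:
```go
out, err := toml.Marshal(Config{Postgres: Postgres{User: "pelletier", Password: "mypassword"}})
if err != nil {
	panic(err)
}
fmt.Println(string(out))
```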
Or use a query:
```go
// use a query to gather elements without walking the tree
q, _ := query.Compile("$..[user,password]")
results := q.Execute(config)
for ii, item := range results.Values() {
fmt.Printf("Query result %d: %v\n", ii, item)
}
```
## Documentation
The documentation and additional examples are available at
[pkg.go.dev](https://pkg.go.dev/github.com/pelletier/go-toml).
## Tools
Go-toml provides three handy command line tools:
* `tomll`: Reads TOML files and lints them.
```
go install github.com/pelletier/go-toml/cmd/tomll
tomll --help
```
* `tomljson`: Reads a TOML file and outputs its JSON representation.
```
go install github.com/pelletier/go-toml/cmd/tomljson
tomljson --help
```
* `jsontoml`: Reads a JSON file and outputs a TOML representation.
```
go install github.com/pelletier/go-toml/cmd/jsontoml
jsontoml --help
```
### Docker image
Those tools are also available as a Docker image from
[dockerhub](https://hub.docker.com/r/pelletier/go-toml). For example, to
use `tomljson`:
```
docker run -v $PWD:/workdir pelletier/go-toml tomljson /workdir/example.toml
```
Only master (`latest`) and tagged versions are published to dockerhub. You
can build your own image as usual:
```
docker build -t go-toml .
```
## Contribute
Feel free to report bugs and patches using GitHub's pull requests system on
[pelletier/go-toml](https://github.com/pelletier/go-toml). Any feedback would be
much appreciated!
### Run tests
`go test ./...`
### Fuzzing
The script `./fuzz.sh` is available to
run [go-fuzz](https://github.com/dvyukov/go-fuzz) on go-toml.
## Versioning
Go-toml follows [Semantic Versioning](http://semver.org/). The supported version
of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of
this document. The last two major versions of Go are supported
(see [Go Release Policy](https://golang.org/doc/devel/release.html#policy)).
## License
The MIT License (MIT) + Apache 2.0. Read [LICENSE](LICENSE).

188
vendor/github.com/pelletier/go-toml/azure-pipelines.yml generated vendored Normal file
View File

@ -0,0 +1,188 @@
trigger:
- master
stages:
- stage: run_checks
displayName: "Check"
dependsOn: []
jobs:
- job: fmt
displayName: "fmt"
pool:
vmImage: ubuntu-latest
steps:
- task: GoTool@0
displayName: "Install Go 1.16"
inputs:
version: "1.16"
- task: Go@0
displayName: "go fmt ./..."
inputs:
command: 'custom'
customCommand: 'fmt'
arguments: './...'
- job: coverage
displayName: "coverage"
pool:
vmImage: ubuntu-latest
steps:
- task: GoTool@0
displayName: "Install Go 1.16"
inputs:
version: "1.16"
- task: Go@0
displayName: "Generate coverage"
inputs:
command: 'test'
arguments: "-race -coverprofile=coverage.txt -covermode=atomic"
- task: Bash@3
inputs:
targetType: 'inline'
script: 'bash <(curl -s https://codecov.io/bash) -t ${CODECOV_TOKEN}'
env:
CODECOV_TOKEN: $(CODECOV_TOKEN)
- job: benchmark
displayName: "benchmark"
pool:
vmImage: ubuntu-latest
steps:
- task: GoTool@0
displayName: "Install Go 1.16"
inputs:
version: "1.16"
- script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/"
- task: Bash@3
inputs:
filePath: './benchmark.sh'
arguments: "master $(Build.Repository.Uri)"
- job: go_unit_tests
displayName: "unit tests"
strategy:
matrix:
linux 1.16:
goVersion: '1.16'
imageName: 'ubuntu-latest'
mac 1.16:
goVersion: '1.16'
imageName: 'macOS-latest'
windows 1.16:
goVersion: '1.16'
imageName: 'windows-latest'
linux 1.15:
goVersion: '1.15'
imageName: 'ubuntu-latest'
mac 1.15:
goVersion: '1.15'
imageName: 'macOS-latest'
windows 1.15:
goVersion: '1.15'
imageName: 'windows-latest'
pool:
vmImage: $(imageName)
steps:
- task: GoTool@0
displayName: "Install Go $(goVersion)"
inputs:
version: $(goVersion)
- task: Go@0
displayName: "go test ./..."
inputs:
command: 'test'
arguments: './...'
- stage: build_binaries
displayName: "Build binaries"
dependsOn: run_checks
jobs:
- job: build_binary
displayName: "Build binary"
strategy:
matrix:
linux_amd64:
GOOS: linux
GOARCH: amd64
darwin_amd64:
GOOS: darwin
GOARCH: amd64
windows_amd64:
GOOS: windows
GOARCH: amd64
pool:
vmImage: ubuntu-latest
steps:
- task: GoTool@0
displayName: "Install Go"
inputs:
version: 1.16
- task: Bash@3
inputs:
targetType: inline
script: "make dist"
env:
go.goos: $(GOOS)
go.goarch: $(GOARCH)
- task: CopyFiles@2
inputs:
sourceFolder: '$(Build.SourcesDirectory)'
contents: '*.tar.xz'
TargetFolder: '$(Build.ArtifactStagingDirectory)'
- task: PublishBuildArtifacts@1
inputs:
pathtoPublish: '$(Build.ArtifactStagingDirectory)'
artifactName: binaries
- stage: build_binaries_manifest
displayName: "Build binaries manifest"
dependsOn: build_binaries
jobs:
- job: build_manifest
displayName: "Build binaries manifest"
steps:
- task: DownloadBuildArtifacts@0
inputs:
buildType: 'current'
downloadType: 'single'
artifactName: 'binaries'
downloadPath: '$(Build.SourcesDirectory)'
- task: Bash@3
inputs:
targetType: inline
script: "cd binaries && sha256sum --binary *.tar.xz | tee $(Build.ArtifactStagingDirectory)/sha256sums.txt"
- task: PublishBuildArtifacts@1
inputs:
pathtoPublish: '$(Build.ArtifactStagingDirectory)'
artifactName: manifest
- stage: build_docker_image
displayName: "Build Docker image"
dependsOn: run_checks
jobs:
- job: build
displayName: "Build"
pool:
vmImage: ubuntu-latest
steps:
- task: Docker@2
inputs:
command: 'build'
Dockerfile: 'Dockerfile'
buildContext: '.'
addPipelineData: false
- stage: publish_docker_image
displayName: "Publish Docker image"
dependsOn: build_docker_image
condition: and(succeeded(), eq(variables['Build.SourceBranchName'], 'master'))
jobs:
- job: publish
displayName: "Publish"
pool:
vmImage: ubuntu-latest
steps:
- task: Docker@2
inputs:
containerRegistry: 'DockerHub'
repository: 'pelletier/go-toml'
command: 'buildAndPush'
Dockerfile: 'Dockerfile'
buildContext: '.'
tags: 'latest'

35
vendor/github.com/pelletier/go-toml/benchmark.sh generated vendored Normal file
View File

@ -0,0 +1,35 @@
#!/bin/bash
set -ex
reference_ref=${1:-master}
reference_git=${2:-.}
if ! `hash benchstat 2>/dev/null`; then
echo "Installing benchstat"
go get golang.org/x/perf/cmd/benchstat
fi
tempdir=`mktemp -d /tmp/go-toml-benchmark-XXXXXX`
ref_tempdir="${tempdir}/ref"
ref_benchmark="${ref_tempdir}/benchmark-`echo -n ${reference_ref}|tr -s '/' '-'`.txt"
local_benchmark="`pwd`/benchmark-local.txt"
echo "=== ${reference_ref} (${ref_tempdir})"
git clone ${reference_git} ${ref_tempdir} >/dev/null 2>/dev/null
pushd ${ref_tempdir} >/dev/null
git checkout ${reference_ref} >/dev/null 2>/dev/null
go test -bench=. -benchmem | tee ${ref_benchmark}
cd benchmark
go test -bench=. -benchmem | tee -a ${ref_benchmark}
popd >/dev/null
echo ""
echo "=== local"
go test -bench=. -benchmem | tee ${local_benchmark}
cd benchmark
go test -bench=. -benchmem | tee -a ${local_benchmark}
echo ""
echo "=== diff"
benchstat -delta-test=none ${ref_benchmark} ${local_benchmark}

23
vendor/github.com/pelletier/go-toml/doc.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
// Package toml is a TOML parser and manipulation library.
//
// This version supports the specification as described in
// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.5.0.md
//
// Marshaling
//
// Go-toml can marshal and unmarshal TOML documents from and to data
// structures.
//
// TOML document as a tree
//
// Go-toml can operate on a TOML document as a tree. Use one of the Load*
// functions to parse TOML data and obtain a Tree instance, then one of its
// methods to manipulate the tree.
//
// JSONPath-like queries
//
// The package github.com/pelletier/go-toml/query implements a system
// similar to JSONPath to quickly retrieve elements of a TOML document using a
// single expression. See the package documentation for more information.
//
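// For illustration only (not part of the upstream file), the tree workflow
// described above in a minimal sketch:
//
//	tree, err := toml.Load("[server]\nhost = \"example.com\"")
//	if err != nil {
//		panic(err)
//	}
//	host := tree.Get("server.host").(string) // "example.com"
//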
package toml

30
vendor/github.com/pelletier/go-toml/example-crlf.toml generated vendored Normal file
View File

@ -0,0 +1,30 @@
# This is a TOML document. Boom.
title = "TOML Example"
[owner]
name = "Tom Preston-Werner"
organization = "GitHub"
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
dob = 1979-05-27T07:32:00Z # First class dates? Why not?
[database]
server = "192.168.1.1"
ports = [ 8001, 8001, 8002 ]
connection_max = 5000
enabled = true
[servers]
# You can indent as you please. Tabs or spaces. TOML don't care.
[servers.alpha]
ip = "10.0.0.1"
dc = "eqdc10"
[servers.beta]
ip = "10.0.0.2"
dc = "eqdc10"
[clients]
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported

30
vendor/github.com/pelletier/go-toml/example.toml generated vendored Normal file
View File

@ -0,0 +1,30 @@
# This is a TOML document. Boom.
title = "TOML Example"
[owner]
name = "Tom Preston-Werner"
organization = "GitHub"
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
dob = 1979-05-27T07:32:00Z # First class dates? Why not?
[database]
server = "192.168.1.1"
ports = [ 8001, 8001, 8002 ]
connection_max = 5000
enabled = true
[servers]
# You can indent as you please. Tabs or spaces. TOML don't care.
[servers.alpha]
ip = "10.0.0.1"
dc = "eqdc10"
[servers.beta]
ip = "10.0.0.2"
dc = "eqdc10"
[clients]
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported

31
vendor/github.com/pelletier/go-toml/fuzz.go generated vendored Normal file
View File

@ -0,0 +1,31 @@
// +build gofuzz
package toml
func Fuzz(data []byte) int {
tree, err := LoadBytes(data)
if err != nil {
if tree != nil {
panic("tree must be nil if there is an error")
}
return 0
}
str, err := tree.ToTomlString()
if err != nil {
if str != "" {
panic(`str must be "" if there is an error`)
}
panic(err)
}
tree, err = Load(str)
if err != nil {
if tree != nil {
panic("tree must be nil if there is an error")
}
return 0
}
return 1
}

15
vendor/github.com/pelletier/go-toml/fuzz.sh generated vendored Normal file
View File

@ -0,0 +1,15 @@
#! /bin/sh
set -eu
go get github.com/dvyukov/go-fuzz/go-fuzz
go get github.com/dvyukov/go-fuzz/go-fuzz-build
if [ ! -e toml-fuzz.zip ]; then
go-fuzz-build github.com/pelletier/go-toml
fi
rm -fr fuzz
mkdir -p fuzz/corpus
cp *.toml fuzz/corpus
go-fuzz -bin=toml-fuzz.zip -workdir=fuzz

3
vendor/github.com/pelletier/go-toml/go.mod generated vendored Normal file
View File

@ -0,0 +1,3 @@
module github.com/pelletier/go-toml
go 1.12

112
vendor/github.com/pelletier/go-toml/keysparsing.go generated vendored Normal file
View File

@ -0,0 +1,112 @@
// Parsing keys handling both bare and quoted keys.
package toml
import (
"errors"
"fmt"
)
// Convert the bare key group string to an array.
// The input supports double quotation and single quotation,
// but escape sequences are not supported. Lexers must unescape them beforehand.
func parseKey(key string) ([]string, error) {
runes := []rune(key)
var groups []string
if len(key) == 0 {
return nil, errors.New("empty key")
}
idx := 0
for idx < len(runes) {
for ; idx < len(runes) && isSpace(runes[idx]); idx++ {
// skip leading whitespace
}
if idx >= len(runes) {
break
}
r := runes[idx]
if isValidBareChar(r) {
// parse bare key
startIdx := idx
endIdx := -1
idx++
for idx < len(runes) {
r = runes[idx]
if isValidBareChar(r) {
idx++
} else if r == '.' {
endIdx = idx
break
} else if isSpace(r) {
endIdx = idx
for ; idx < len(runes) && isSpace(runes[idx]); idx++ {
// skip trailing whitespace
}
if idx < len(runes) && runes[idx] != '.' {
return nil, fmt.Errorf("invalid key character after whitespace: %c", runes[idx])
}
break
} else {
return nil, fmt.Errorf("invalid bare key character: %c", r)
}
}
if endIdx == -1 {
endIdx = idx
}
groups = append(groups, string(runes[startIdx:endIdx]))
} else if r == '\'' {
// parse single quoted key
idx++
startIdx := idx
for {
if idx >= len(runes) {
return nil, fmt.Errorf("unclosed single-quoted key")
}
r = runes[idx]
if r == '\'' {
groups = append(groups, string(runes[startIdx:idx]))
idx++
break
}
idx++
}
} else if r == '"' {
// parse double quoted key
idx++
startIdx := idx
for {
if idx >= len(runes) {
return nil, fmt.Errorf("unclosed double-quoted key")
}
r = runes[idx]
if r == '"' {
groups = append(groups, string(runes[startIdx:idx]))
idx++
break
}
idx++
}
} else if r == '.' {
idx++
if idx >= len(runes) {
return nil, fmt.Errorf("unexpected end of key")
}
r = runes[idx]
if !isValidBareChar(r) && r != '\'' && r != '"' && r != ' ' {
return nil, fmt.Errorf("expecting key part after dot")
}
} else {
return nil, fmt.Errorf("invalid key character: %c", r)
}
}
if len(groups) == 0 {
return nil, fmt.Errorf("empty key")
}
return groups, nil
}
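// For illustration only (not part of the upstream file): parseKey splits a
// dotted key, honoring quoted parts:
//
//	parseKey(`a."b.c".d`) // -> []string{"a", "b.c", "d"}, nil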
func isValidBareChar(r rune) bool {
return isAlphanumeric(r) || r == '-' || isDigit(r)
}

1031
vendor/github.com/pelletier/go-toml/lexer.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

287
vendor/github.com/pelletier/go-toml/localtime.go generated vendored Normal file
View File

@ -0,0 +1,287 @@
// Implementation of TOML's local date/time.
//
// Copied over from Google's civil to avoid pulling all the Google dependencies.
// Originals:
// https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil.go
// Changes:
// * Renamed files from civil* to localtime*.
// * Package changed from civil to toml.
// * 'Local' prefix added to all structs.
//
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package civil implements types for civil time, a time-zone-independent
// representation of time that follows the rules of the proleptic
// Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second
// minutes.
//
// Because they lack location information, these types do not represent unique
// moments or intervals of time. Use time.Time for that purpose.
package toml
import (
"fmt"
"time"
)
// A LocalDate represents a date (year, month, day).
//
// This type does not include location information, and therefore does not
// describe a unique 24-hour timespan.
type LocalDate struct {
Year int // Year (e.g., 2014).
Month time.Month // Month of the year (January = 1, ...).
Day int // Day of the month, starting at 1.
}
// LocalDateOf returns the LocalDate in which a time occurs in that time's location.
func LocalDateOf(t time.Time) LocalDate {
var d LocalDate
d.Year, d.Month, d.Day = t.Date()
return d
}
// ParseLocalDate parses a string in RFC3339 full-date format and returns the date value it represents.
func ParseLocalDate(s string) (LocalDate, error) {
t, err := time.Parse("2006-01-02", s)
if err != nil {
return LocalDate{}, err
}
return LocalDateOf(t), nil
}
// String returns the date in RFC3339 full-date format.
func (d LocalDate) String() string {
return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day)
}
// IsValid reports whether the date is valid.
func (d LocalDate) IsValid() bool {
return LocalDateOf(d.In(time.UTC)) == d
}
// In returns the time corresponding to time 00:00:00 of the date in the location.
//
// In is always consistent with time.Date, even when time.Date returns a time
// on a different day. For example, if loc is America/Indiana/Vincennes, then both
// time.Date(1955, time.May, 1, 0, 0, 0, 0, loc)
// and
// LocalDate{Year: 1955, Month: time.May, Day: 1}.In(loc)
// return 23:00:00 on April 30, 1955.
//
// In panics if loc is nil.
func (d LocalDate) In(loc *time.Location) time.Time {
return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc)
}
// AddDays returns the date that is n days in the future.
// n can also be negative to go into the past.
func (d LocalDate) AddDays(n int) LocalDate {
return LocalDateOf(d.In(time.UTC).AddDate(0, 0, n))
}
// DaysSince returns the signed number of days between the date and s, not including the end day.
// This is the inverse operation to AddDays.
func (d LocalDate) DaysSince(s LocalDate) (days int) {
// We convert to Unix time so we do not have to worry about leap seconds:
// Unix time increases by exactly 86400 seconds per day.
deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix()
return int(deltaUnix / 86400)
}
// Before reports whether d1 occurs before d2.
func (d1 LocalDate) Before(d2 LocalDate) bool {
if d1.Year != d2.Year {
return d1.Year < d2.Year
}
if d1.Month != d2.Month {
return d1.Month < d2.Month
}
return d1.Day < d2.Day
}
// After reports whether d1 occurs after d2.
func (d1 LocalDate) After(d2 LocalDate) bool {
return d2.Before(d1)
}
// MarshalText implements the encoding.TextMarshaler interface.
// The output is the result of d.String().
func (d LocalDate) MarshalText() ([]byte, error) {
return []byte(d.String()), nil
}
// UnmarshalText implements the encoding.TextUnmarshaler interface.
// The date is expected to be a string in a format accepted by ParseLocalDate.
func (d *LocalDate) UnmarshalText(data []byte) error {
var err error
*d, err = ParseLocalDate(string(data))
return err
}
// A LocalTime represents a time with nanosecond precision.
//
// This type does not include location information, and therefore does not
// describe a unique moment in time.
//
// This type exists to represent the TIME type in storage-based APIs like BigQuery.
// Most operations on Times are unlikely to be meaningful. Prefer the LocalDateTime type.
type LocalTime struct {
Hour int // The hour of the day in 24-hour format; range [0-23]
Minute int // The minute of the hour; range [0-59]
Second int // The second of the minute; range [0-59]
Nanosecond int // The nanosecond of the second; range [0-999999999]
}
// LocalTimeOf returns the LocalTime representing the time of day in which a time occurs
// in that time's location. It ignores the date.
func LocalTimeOf(t time.Time) LocalTime {
var tm LocalTime
tm.Hour, tm.Minute, tm.Second = t.Clock()
tm.Nanosecond = t.Nanosecond()
return tm
}
// ParseLocalTime parses a string and returns the time value it represents.
// ParseLocalTime accepts an extended form of the RFC3339 partial-time format. After
// the HH:MM:SS part of the string, an optional fractional part may appear,
// consisting of a decimal point followed by one to nine decimal digits.
// (RFC3339 admits only one digit after the decimal point).
func ParseLocalTime(s string) (LocalTime, error) {
t, err := time.Parse("15:04:05.999999999", s)
if err != nil {
return LocalTime{}, err
}
return LocalTimeOf(t), nil
}
// String returns the time in the format described in ParseLocalTime. If Nanosecond
// is zero, no fractional part will be generated. Otherwise, the result will
// end with a fractional part consisting of a decimal point and nine digits.
func (t LocalTime) String() string {
s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second)
if t.Nanosecond == 0 {
return s
}
return s + fmt.Sprintf(".%09d", t.Nanosecond)
}
// IsValid reports whether the time is valid.
func (t LocalTime) IsValid() bool {
// Construct a non-zero time.
tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC)
return LocalTimeOf(tm) == t
}
// MarshalText implements the encoding.TextMarshaler interface.
// The output is the result of t.String().
func (t LocalTime) MarshalText() ([]byte, error) {
return []byte(t.String()), nil
}
// UnmarshalText implements the encoding.TextUnmarshaler interface.
// The time is expected to be a string in a format accepted by ParseLocalTime.
func (t *LocalTime) UnmarshalText(data []byte) error {
var err error
*t, err = ParseLocalTime(string(data))
return err
}
// A LocalDateTime represents a date and time.
//
// This type does not include location information, and therefore does not
// describe a unique moment in time.
type LocalDateTime struct {
Date LocalDate
Time LocalTime
}
// Note: We deliberately do not embed LocalDate into LocalDateTime, to avoid promoting AddDays and Sub.
// LocalDateTimeOf returns the LocalDateTime in which a time occurs in that time's location.
func LocalDateTimeOf(t time.Time) LocalDateTime {
return LocalDateTime{
Date: LocalDateOf(t),
Time: LocalTimeOf(t),
}
}
// ParseLocalDateTime parses a string and returns the LocalDateTime it represents.
// ParseLocalDateTime accepts a variant of the RFC3339 date-time format that omits
// the time offset but includes an optional fractional time, as described in
// ParseLocalTime. Informally, the accepted format is
// YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF]
// where the 'T' may be a lower-case 't'.
func ParseLocalDateTime(s string) (LocalDateTime, error) {
t, err := time.Parse("2006-01-02T15:04:05.999999999", s)
if err != nil {
t, err = time.Parse("2006-01-02t15:04:05.999999999", s)
if err != nil {
return LocalDateTime{}, err
}
}
return LocalDateTimeOf(t), nil
}
// String returns the datetime in the format described in ParseLocalDateTime.
func (dt LocalDateTime) String() string {
return dt.Date.String() + "T" + dt.Time.String()
}
// IsValid reports whether the datetime is valid.
func (dt LocalDateTime) IsValid() bool {
return dt.Date.IsValid() && dt.Time.IsValid()
}
// In returns the time corresponding to the LocalDateTime in the given location.
//
// If the time is missing or ambiguous at the location, In returns the same
// result as time.Date. For example, if loc is America/Indiana/Vincennes, then
// both
// time.Date(1955, time.May, 1, 0, 30, 0, 0, loc)
// and
// LocalDateTime{
// Date: LocalDate{Year: 1955, Month: time.May, Day: 1},
// Time: LocalTime{Minute: 30},
// }.In(loc)
// return 23:30:00 on April 30, 1955.
//
// In panics if loc is nil.
func (dt LocalDateTime) In(loc *time.Location) time.Time {
return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc)
}
// Before reports whether dt1 occurs before dt2.
func (dt1 LocalDateTime) Before(dt2 LocalDateTime) bool {
return dt1.In(time.UTC).Before(dt2.In(time.UTC))
}
// After reports whether dt1 occurs after dt2.
func (dt1 LocalDateTime) After(dt2 LocalDateTime) bool {
return dt2.Before(dt1)
}
// MarshalText implements the encoding.TextMarshaler interface.
// The output is the result of dt.String().
func (dt LocalDateTime) MarshalText() ([]byte, error) {
return []byte(dt.String()), nil
}
// UnmarshalText implements the encoding.TextUnmarshaler interface.
// The datetime is expected to be a string in a format accepted by ParseLocalDateTime.
func (dt *LocalDateTime) UnmarshalText(data []byte) error {
var err error
*dt, err = ParseLocalDateTime(string(data))
return err
}
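Taken together, these civil types parse, print, and localize wall-clock values. A minimal sketch of their use, assuming the vendored import path github.com/pelletier/go-toml (the printed values are illustrative):

package main

import (
    "fmt"
    "time"

    toml "github.com/pelletier/go-toml"
)

func main() {
    // LocalTime.String pads any fractional part to nine digits.
    lt, err := toml.ParseLocalTime("07:32:00.5")
    if err != nil {
        panic(err)
    }
    fmt.Println(lt) // 07:32:00.500000000

    // A LocalDateTime carries no location until In anchors it to one.
    dt, err := toml.ParseLocalDateTime("1979-05-27T07:32:00")
    if err != nil {
        panic(err)
    }
    loc, err := time.LoadLocation("Europe/Paris")
    if err != nil {
        panic(err)
    }
    fmt.Println(dt.In(loc)) // e.g. 1979-05-27 07:32:00 +0200 CEST
}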

1308
vendor/github.com/pelletier/go-toml/marshal.go generated vendored Normal file

File diff suppressed because it is too large

39
vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml generated vendored Normal file

@@ -0,0 +1,39 @@
title = "TOML Marshal Testing"
[basic_lists]
floats = [12.3,45.6,78.9]
bools = [true,false,true]
dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z]
ints = [8001,8001,8002]
uints = [5002,5003]
strings = ["One","Two","Three"]
[[subdocptrs]]
name = "Second"
[basic_map]
one = "one"
two = "two"
[subdoc]
[subdoc.second]
name = "Second"
[subdoc.first]
name = "First"
[basic]
uint = 5001
bool = true
float = 123.4
float64 = 123.456782132399
int = 5000
string = "Bite me"
date = 1979-05-27T07:32:00Z
[[subdoclist]]
name = "List.First"
[[subdoclist]]
name = "List.Second"

39
vendor/github.com/pelletier/go-toml/marshal_test.toml generated vendored Normal file

@@ -0,0 +1,39 @@
title = "TOML Marshal Testing"
[basic]
bool = true
date = 1979-05-27T07:32:00Z
float = 123.4
float64 = 123.456782132399
int = 5000
string = "Bite me"
uint = 5001
[basic_lists]
bools = [true,false,true]
dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z]
floats = [12.3,45.6,78.9]
ints = [8001,8001,8002]
strings = ["One","Two","Three"]
uints = [5002,5003]
[basic_map]
one = "one"
two = "two"
[subdoc]
[subdoc.first]
name = "First"
[subdoc.second]
name = "Second"
[[subdoclist]]
name = "List.First"
[[subdoclist]]
name = "List.Second"
[[subdocptrs]]
name = "Second"

508
vendor/github.com/pelletier/go-toml/parser.go generated vendored Normal file

@@ -0,0 +1,508 @@
// TOML Parser.
package toml
import (
"errors"
"fmt"
"math"
"reflect"
"strconv"
"strings"
"time"
)
type tomlParser struct {
flowIdx int
flow []token
tree *Tree
currentTable []string
seenTableKeys []string
}
type tomlParserStateFn func() tomlParserStateFn
// raiseError formats an error message based on a token and panics.
func (p *tomlParser) raiseError(tok *token, msg string, args ...interface{}) {
panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...))
}
func (p *tomlParser) run() {
for state := p.parseStart; state != nil; {
state = state()
}
}
func (p *tomlParser) peek() *token {
if p.flowIdx >= len(p.flow) {
return nil
}
return &p.flow[p.flowIdx]
}
func (p *tomlParser) assume(typ tokenType) {
tok := p.getToken()
if tok == nil {
p.raiseError(tok, "was expecting token %s, but token stream is empty", tok)
}
if tok.typ != typ {
p.raiseError(tok, "was expecting token %s, but got %s instead", typ, tok)
}
}
func (p *tomlParser) getToken() *token {
tok := p.peek()
if tok == nil {
return nil
}
p.flowIdx++
return tok
}
func (p *tomlParser) parseStart() tomlParserStateFn {
tok := p.peek()
// end of stream, parsing is finished
if tok == nil {
return nil
}
switch tok.typ {
case tokenDoubleLeftBracket:
return p.parseGroupArray
case tokenLeftBracket:
return p.parseGroup
case tokenKey:
return p.parseAssign
case tokenEOF:
return nil
case tokenError:
p.raiseError(tok, "parsing error: %s", tok.String())
default:
p.raiseError(tok, "unexpected token %s", tok.typ)
}
return nil
}
func (p *tomlParser) parseGroupArray() tomlParserStateFn {
startToken := p.getToken() // discard the [[
key := p.getToken()
if key.typ != tokenKeyGroupArray {
p.raiseError(key, "unexpected token %s, was expecting a table array key", key)
}
// get or create table array element at the indicated part in the path
keys, err := parseKey(key.val)
if err != nil {
p.raiseError(key, "invalid table array key: %s", err)
}
p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries
destTree := p.tree.GetPath(keys)
var array []*Tree
if destTree == nil {
array = make([]*Tree, 0)
} else if target, ok := destTree.([]*Tree); ok && target != nil {
array = destTree.([]*Tree)
} else {
p.raiseError(key, "key %s is already assigned and not of type table array", key)
}
p.currentTable = keys
// add a new tree to the end of the table array
newTree := newTree()
newTree.position = startToken.Position
array = append(array, newTree)
p.tree.SetPath(p.currentTable, array)
// remove all keys that were children of this table array
prefix := key.val + "."
found := false
for ii := 0; ii < len(p.seenTableKeys); {
tableKey := p.seenTableKeys[ii]
if strings.HasPrefix(tableKey, prefix) {
p.seenTableKeys = append(p.seenTableKeys[:ii], p.seenTableKeys[ii+1:]...)
} else {
found = (tableKey == key.val)
ii++
}
}
// keep this key name from use by other kinds of assignments
if !found {
p.seenTableKeys = append(p.seenTableKeys, key.val)
}
// move to next parser state
p.assume(tokenDoubleRightBracket)
return p.parseStart
}
func (p *tomlParser) parseGroup() tomlParserStateFn {
startToken := p.getToken() // discard the [
key := p.getToken()
if key.typ != tokenKeyGroup {
p.raiseError(key, "unexpected token %s, was expecting a table key", key)
}
for _, item := range p.seenTableKeys {
if item == key.val {
p.raiseError(key, "duplicated tables")
}
}
p.seenTableKeys = append(p.seenTableKeys, key.val)
keys, err := parseKey(key.val)
if err != nil {
p.raiseError(key, "invalid table array key: %s", err)
}
if err := p.tree.createSubTree(keys, startToken.Position); err != nil {
p.raiseError(key, "%s", err)
}
destTree := p.tree.GetPath(keys)
if target, ok := destTree.(*Tree); ok && target != nil && target.inline {
p.raiseError(key, "could not re-define exist inline table or its sub-table : %s",
strings.Join(keys, "."))
}
p.assume(tokenRightBracket)
p.currentTable = keys
return p.parseStart
}
func (p *tomlParser) parseAssign() tomlParserStateFn {
key := p.getToken()
p.assume(tokenEqual)
parsedKey, err := parseKey(key.val)
if err != nil {
p.raiseError(key, "invalid key: %s", err.Error())
}
value := p.parseRvalue()
var tableKey []string
if len(p.currentTable) > 0 {
tableKey = p.currentTable
} else {
tableKey = []string{}
}
prefixKey := parsedKey[0 : len(parsedKey)-1]
tableKey = append(tableKey, prefixKey...)
// find the table to assign, looking out for arrays of tables
var targetNode *Tree
switch node := p.tree.GetPath(tableKey).(type) {
case []*Tree:
targetNode = node[len(node)-1]
case *Tree:
targetNode = node
case nil:
// create intermediate
if err := p.tree.createSubTree(tableKey, key.Position); err != nil {
p.raiseError(key, "could not create intermediate group: %s", err)
}
targetNode = p.tree.GetPath(tableKey).(*Tree)
default:
p.raiseError(key, "Unknown table type for path: %s",
strings.Join(tableKey, "."))
}
if targetNode.inline {
p.raiseError(key, "could not add key or sub-table to exist inline table or its sub-table : %s",
strings.Join(tableKey, "."))
}
// assign value to the found table
keyVal := parsedKey[len(parsedKey)-1]
localKey := []string{keyVal}
finalKey := append(tableKey, keyVal)
if targetNode.GetPath(localKey) != nil {
p.raiseError(key, "The following key was defined twice: %s",
strings.Join(finalKey, "."))
}
var toInsert interface{}
switch value.(type) {
case *Tree, []*Tree:
toInsert = value
default:
toInsert = &tomlValue{value: value, position: key.Position}
}
targetNode.values[keyVal] = toInsert
return p.parseStart
}
var errInvalidUnderscore = errors.New("invalid use of _ in number")
func numberContainsInvalidUnderscore(value string) error {
// For large numbers, you may use underscores between digits to enhance
// readability. Each underscore must be surrounded by at least one digit on
// each side.
hasBefore := false
for idx, r := range value {
if r == '_' {
if !hasBefore || idx+1 >= len(value) {
// can't end with an underscore
return errInvalidUnderscore
}
}
hasBefore = isDigit(r)
}
return nil
}
var errInvalidUnderscoreHex = errors.New("invalid use of _ in hex number")
func hexNumberContainsInvalidUnderscore(value string) error {
hasBefore := false
for idx, r := range value {
if r == '_' {
if !hasBefore || idx+1 >= len(value) {
// can't end with an underscore
return errInvalidUnderscoreHex
}
}
hasBefore = isHexDigit(r)
}
return nil
}
func cleanupNumberToken(value string) string {
cleanedVal := strings.Replace(value, "_", "", -1)
return cleanedVal
}
func (p *tomlParser) parseRvalue() interface{} {
tok := p.getToken()
if tok == nil || tok.typ == tokenEOF {
p.raiseError(tok, "expecting a value")
}
switch tok.typ {
case tokenString:
return tok.val
case tokenTrue:
return true
case tokenFalse:
return false
case tokenInf:
if tok.val[0] == '-' {
return math.Inf(-1)
}
return math.Inf(1)
case tokenNan:
return math.NaN()
case tokenInteger:
cleanedVal := cleanupNumberToken(tok.val)
var err error
var val int64
if len(cleanedVal) >= 3 && cleanedVal[0] == '0' {
switch cleanedVal[1] {
case 'x':
err = hexNumberContainsInvalidUnderscore(tok.val)
if err != nil {
p.raiseError(tok, "%s", err)
}
val, err = strconv.ParseInt(cleanedVal[2:], 16, 64)
case 'o':
err = numberContainsInvalidUnderscore(tok.val)
if err != nil {
p.raiseError(tok, "%s", err)
}
val, err = strconv.ParseInt(cleanedVal[2:], 8, 64)
case 'b':
err = numberContainsInvalidUnderscore(tok.val)
if err != nil {
p.raiseError(tok, "%s", err)
}
val, err = strconv.ParseInt(cleanedVal[2:], 2, 64)
default:
panic("invalid base") // the lexer should catch this first
}
} else {
err = numberContainsInvalidUnderscore(tok.val)
if err != nil {
p.raiseError(tok, "%s", err)
}
val, err = strconv.ParseInt(cleanedVal, 10, 64)
}
if err != nil {
p.raiseError(tok, "%s", err)
}
return val
case tokenFloat:
err := numberContainsInvalidUnderscore(tok.val)
if err != nil {
p.raiseError(tok, "%s", err)
}
cleanedVal := cleanupNumberToken(tok.val)
val, err := strconv.ParseFloat(cleanedVal, 64)
if err != nil {
p.raiseError(tok, "%s", err)
}
return val
case tokenLocalTime:
val, err := ParseLocalTime(tok.val)
if err != nil {
p.raiseError(tok, "%s", err)
}
return val
case tokenLocalDate:
// a local date may be followed by:
// * nothing: this is a local date
// * a local time: this is a local date-time
next := p.peek()
if next == nil || next.typ != tokenLocalTime {
val, err := ParseLocalDate(tok.val)
if err != nil {
p.raiseError(tok, "%s", err)
}
return val
}
localDate := tok
localTime := p.getToken()
next = p.peek()
if next == nil || next.typ != tokenTimeOffset {
v := localDate.val + "T" + localTime.val
val, err := ParseLocalDateTime(v)
if err != nil {
p.raiseError(tok, "%s", err)
}
return val
}
offset := p.getToken()
layout := time.RFC3339Nano
v := localDate.val + "T" + localTime.val + offset.val
val, err := time.ParseInLocation(layout, v, time.UTC)
if err != nil {
p.raiseError(tok, "%s", err)
}
return val
case tokenLeftBracket:
return p.parseArray()
case tokenLeftCurlyBrace:
return p.parseInlineTable()
case tokenEqual:
p.raiseError(tok, "cannot have multiple equals for the same key")
case tokenError:
p.raiseError(tok, "%s", tok)
default:
panic(fmt.Errorf("unhandled token: %v", tok))
}
return nil
}
func tokenIsComma(t *token) bool {
return t != nil && t.typ == tokenComma
}
func (p *tomlParser) parseInlineTable() *Tree {
tree := newTree()
var previous *token
Loop:
for {
follow := p.peek()
if follow == nil || follow.typ == tokenEOF {
p.raiseError(follow, "unterminated inline table")
}
switch follow.typ {
case tokenRightCurlyBrace:
p.getToken()
break Loop
case tokenKey, tokenInteger, tokenString:
if !tokenIsComma(previous) && previous != nil {
p.raiseError(follow, "comma expected between fields in inline table")
}
key := p.getToken()
p.assume(tokenEqual)
parsedKey, err := parseKey(key.val)
if err != nil {
p.raiseError(key, "invalid key: %s", err)
}
value := p.parseRvalue()
tree.SetPath(parsedKey, value)
case tokenComma:
if tokenIsComma(previous) {
p.raiseError(follow, "need field between two commas in inline table")
}
p.getToken()
default:
p.raiseError(follow, "unexpected token type in inline table: %s", follow.String())
}
previous = follow
}
if tokenIsComma(previous) {
p.raiseError(previous, "trailing comma at the end of inline table")
}
tree.inline = true
return tree
}
func (p *tomlParser) parseArray() interface{} {
var array []interface{}
arrayType := reflect.TypeOf(newTree())
for {
follow := p.peek()
if follow == nil || follow.typ == tokenEOF {
p.raiseError(follow, "unterminated array")
}
if follow.typ == tokenRightBracket {
p.getToken()
break
}
val := p.parseRvalue()
if reflect.TypeOf(val) != arrayType {
arrayType = nil
}
array = append(array, val)
follow = p.peek()
if follow == nil || follow.typ == tokenEOF {
p.raiseError(follow, "unterminated array")
}
if follow.typ != tokenRightBracket && follow.typ != tokenComma {
p.raiseError(follow, "missing comma")
}
if follow.typ == tokenComma {
p.getToken()
}
}
// if the array is a mixed-type array or its length is 0,
// don't convert it to a table array
if len(array) <= 0 {
arrayType = nil
}
// An array of Trees is actually an array of inline
// tables, which is a shorthand for a table array. If the
// array was not converted from []interface{} to []*Tree,
// the two notations would not be equivalent.
if arrayType == reflect.TypeOf(newTree()) {
tomlArray := make([]*Tree, len(array))
for i, v := range array {
tomlArray[i] = v.(*Tree)
}
return tomlArray
}
return array
}
func parseToml(flow []token) *Tree {
result := newTree()
result.position = Position{1, 1}
parser := &tomlParser{
flowIdx: 0,
flow: flow,
tree: result,
currentTable: make([]string, 0),
seenTableKeys: make([]string, 0),
}
parser.run()
return result
}
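The parser is not used directly; documents reach it through the Load helpers defined in toml.go below. A small sketch (hypothetical input) showing how bare dates and table arrays come out the other side:

package main

import (
    "fmt"

    toml "github.com/pelletier/go-toml"
)

func main() {
    tree, err := toml.Load(`
released = 1979-05-27

[[fruit]]
name = "apple"

[[fruit]]
name = "banana"
`)
    if err != nil {
        panic(err)
    }
    // A date without a time or offset is parsed as a LocalDate.
    fmt.Printf("%T\n", tree.Get("released")) // toml.LocalDate

    // [[fruit]] builds a table array, surfaced as []*toml.Tree.
    for _, f := range tree.Get("fruit").([]*toml.Tree) {
        fmt.Println(f.Get("name")) // apple, then banana
    }
}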

29
vendor/github.com/pelletier/go-toml/position.go generated vendored Normal file

@@ -0,0 +1,29 @@
// Position support for go-toml
package toml
import (
"fmt"
)
// Position of a document element within a TOML document.
//
// Line and Col are both 1-indexed positions for the element's line number and
// column number, respectively. Values of zero or less will cause Invalid()
// to return true.
type Position struct {
Line int // line within the document
Col int // column within the line
}
// String representation of the position.
// Displays 1-indexed line and column numbers.
func (p Position) String() string {
return fmt.Sprintf("(%d, %d)", p.Line, p.Col)
}
// Invalid reports whether the position is invalid, i.e. whether Line or Col
// is zero or negative.
func (p Position) Invalid() bool {
return p.Line <= 0 || p.Col <= 0
}
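Positions surface through the public API via GetPosition, defined in toml.go below; a sketch, with illustrative line/column values:

package main

import (
    "fmt"

    toml "github.com/pelletier/go-toml"
)

func main() {
    tree, _ := toml.Load("a = 1\n[table]\nb = 2\n")
    fmt.Println(tree.GetPosition("table.b"))           // e.g. (3, 1)
    fmt.Println(tree.GetPosition("missing").Invalid()) // true: zero-value Position
}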

136
vendor/github.com/pelletier/go-toml/token.go generated vendored Normal file

@@ -0,0 +1,136 @@
package toml
import "fmt"
// Define tokens
type tokenType int
const (
eof = -(iota + 1)
)
const (
tokenError tokenType = iota
tokenEOF
tokenComment
tokenKey
tokenString
tokenInteger
tokenTrue
tokenFalse
tokenFloat
tokenInf
tokenNan
tokenEqual
tokenLeftBracket
tokenRightBracket
tokenLeftCurlyBrace
tokenRightCurlyBrace
tokenLeftParen
tokenRightParen
tokenDoubleLeftBracket
tokenDoubleRightBracket
tokenLocalDate
tokenLocalTime
tokenTimeOffset
tokenKeyGroup
tokenKeyGroupArray
tokenComma
tokenColon
tokenDollar
tokenStar
tokenQuestion
tokenDot
tokenDotDot
tokenEOL
)
var tokenTypeNames = []string{
"Error",
"EOF",
"Comment",
"Key",
"String",
"Integer",
"True",
"False",
"Float",
"Inf",
"NaN",
"=",
"[",
"]",
"{",
"}",
"(",
")",
"]]",
"[[",
"LocalDate",
"LocalTime",
"TimeOffset",
"KeyGroup",
"KeyGroupArray",
",",
":",
"$",
"*",
"?",
".",
"..",
"EOL",
}
type token struct {
Position
typ tokenType
val string
}
func (tt tokenType) String() string {
idx := int(tt)
if idx < len(tokenTypeNames) {
return tokenTypeNames[idx]
}
return "Unknown"
}
func (t token) String() string {
switch t.typ {
case tokenEOF:
return "EOF"
case tokenError:
return t.val
}
return fmt.Sprintf("%q", t.val)
}
func isSpace(r rune) bool {
return r == ' ' || r == '\t'
}
func isAlphanumeric(r rune) bool {
return 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || r == '_'
}
func isKeyChar(r rune) bool {
// Keys start with the first character that isn't whitespace or [ and end
// with the last non-whitespace character before the equals sign. Keys
// cannot contain a # character.
return !(r == '\r' || r == '\n' || r == eof || r == '=')
}
func isKeyStartChar(r rune) bool {
return !(isSpace(r) || r == '\r' || r == '\n' || r == eof || r == '[')
}
func isDigit(r rune) bool {
return '0' <= r && r <= '9'
}
func isHexDigit(r rune) bool {
return isDigit(r) ||
(r >= 'a' && r <= 'f') ||
(r >= 'A' && r <= 'F')
}

533
vendor/github.com/pelletier/go-toml/toml.go generated vendored Normal file

@@ -0,0 +1,533 @@
package toml
import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"runtime"
"strings"
)
type tomlValue struct {
value interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list
comment string
commented bool
multiline bool
literal bool
position Position
}
// Tree is the result of the parsing of a TOML file.
type Tree struct {
values map[string]interface{} // string -> *tomlValue, *Tree, []*Tree
comment string
commented bool
inline bool
position Position
}
func newTree() *Tree {
return newTreeWithPosition(Position{})
}
func newTreeWithPosition(pos Position) *Tree {
return &Tree{
values: make(map[string]interface{}),
position: pos,
}
}
// TreeFromMap initializes a new Tree object using the given map.
func TreeFromMap(m map[string]interface{}) (*Tree, error) {
result, err := toTree(m)
if err != nil {
return nil, err
}
return result.(*Tree), nil
}
// Position returns the position of the tree.
func (t *Tree) Position() Position {
return t.position
}
// Has returns a boolean indicating if the given key exists.
func (t *Tree) Has(key string) bool {
if key == "" {
return false
}
return t.HasPath(strings.Split(key, "."))
}
// HasPath returns true if the given path of keys exists, false otherwise.
func (t *Tree) HasPath(keys []string) bool {
return t.GetPath(keys) != nil
}
// Keys returns the keys of the toplevel tree (does not recurse).
func (t *Tree) Keys() []string {
keys := make([]string, len(t.values))
i := 0
for k := range t.values {
keys[i] = k
i++
}
return keys
}
// Get the value at key in the Tree.
// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings.
// If you need to retrieve non-bare keys, use GetPath.
// Returns nil if the path does not exist in the tree.
// If keys is of length zero, the current tree is returned.
func (t *Tree) Get(key string) interface{} {
if key == "" {
return t
}
return t.GetPath(strings.Split(key, "."))
}
// GetPath returns the element in the tree indicated by 'keys'.
// If keys is of length zero, the current tree is returned.
func (t *Tree) GetPath(keys []string) interface{} {
if len(keys) == 0 {
return t
}
subtree := t
for _, intermediateKey := range keys[:len(keys)-1] {
value, exists := subtree.values[intermediateKey]
if !exists {
return nil
}
switch node := value.(type) {
case *Tree:
subtree = node
case []*Tree:
// go to most recent element
if len(node) == 0 {
return nil
}
subtree = node[len(node)-1]
default:
return nil // cannot navigate through other node types
}
}
// branch based on final node type
switch node := subtree.values[keys[len(keys)-1]].(type) {
case *tomlValue:
return node.value
default:
return node
}
}
// GetArray returns the value at key in the Tree.
// It returns a typed slice ([]string, []int64, etc.) if the key holds a homogeneous list.
// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings.
// Returns nil if the path does not exist in the tree.
// If keys is of length zero, the current tree is returned.
func (t *Tree) GetArray(key string) interface{} {
if key == "" {
return t
}
return t.GetArrayPath(strings.Split(key, "."))
}
// GetArrayPath returns the element in the tree indicated by 'keys'.
// If keys is of length zero, the current tree is returned.
func (t *Tree) GetArrayPath(keys []string) interface{} {
if len(keys) == 0 {
return t
}
subtree := t
for _, intermediateKey := range keys[:len(keys)-1] {
value, exists := subtree.values[intermediateKey]
if !exists {
return nil
}
switch node := value.(type) {
case *Tree:
subtree = node
case []*Tree:
// go to most recent element
if len(node) == 0 {
return nil
}
subtree = node[len(node)-1]
default:
return nil // cannot navigate through other node types
}
}
// branch based on final node type
switch node := subtree.values[keys[len(keys)-1]].(type) {
case *tomlValue:
switch n := node.value.(type) {
case []interface{}:
return getArray(n)
default:
return node.value
}
default:
return node
}
}
// getArray returns a typed slice ([]string, []int64, ...) when the array is
// homogeneous, otherwise the original []interface{}.
func getArray(n []interface{}) interface{} {
var s []string
var i64 []int64
var f64 []float64
var bl []bool
for _, value := range n {
switch v := value.(type) {
case string:
s = append(s, v)
case int64:
i64 = append(i64, v)
case float64:
f64 = append(f64, v)
case bool:
bl = append(bl, v)
default:
return n
}
}
if len(s) == len(n) {
return s
} else if len(i64) == len(n) {
return i64
} else if len(f64) == len(n) {
return f64
} else if len(bl) == len(n) {
return bl
}
return n
}
// GetPosition returns the position of the given key.
func (t *Tree) GetPosition(key string) Position {
if key == "" {
return t.position
}
return t.GetPositionPath(strings.Split(key, "."))
}
// SetPositionPath sets the position of element in the tree indicated by 'keys'.
// If keys is of length zero, the current tree position is set.
func (t *Tree) SetPositionPath(keys []string, pos Position) {
if len(keys) == 0 {
t.position = pos
return
}
subtree := t
for _, intermediateKey := range keys[:len(keys)-1] {
value, exists := subtree.values[intermediateKey]
if !exists {
return
}
switch node := value.(type) {
case *Tree:
subtree = node
case []*Tree:
// go to most recent element
if len(node) == 0 {
return
}
subtree = node[len(node)-1]
default:
return
}
}
// branch based on final node type
switch node := subtree.values[keys[len(keys)-1]].(type) {
case *tomlValue:
node.position = pos
return
case *Tree:
node.position = pos
return
case []*Tree:
// go to most recent element
if len(node) == 0 {
return
}
node[len(node)-1].position = pos
return
}
}
// GetPositionPath returns the element in the tree indicated by 'keys'.
// If keys is of length zero, the current tree is returned.
func (t *Tree) GetPositionPath(keys []string) Position {
if len(keys) == 0 {
return t.position
}
subtree := t
for _, intermediateKey := range keys[:len(keys)-1] {
value, exists := subtree.values[intermediateKey]
if !exists {
return Position{0, 0}
}
switch node := value.(type) {
case *Tree:
subtree = node
case []*Tree:
// go to most recent element
if len(node) == 0 {
return Position{0, 0}
}
subtree = node[len(node)-1]
default:
return Position{0, 0}
}
}
// branch based on final node type
switch node := subtree.values[keys[len(keys)-1]].(type) {
case *tomlValue:
return node.position
case *Tree:
return node.position
case []*Tree:
// go to most recent element
if len(node) == 0 {
return Position{0, 0}
}
return node[len(node)-1].position
default:
return Position{0, 0}
}
}
// GetDefault works like Get but with a default value
func (t *Tree) GetDefault(key string, def interface{}) interface{} {
val := t.Get(key)
if val == nil {
return def
}
return val
}
// SetOptions arguments are supplied to the SetWithOptions and SetPathWithOptions functions to modify marshalling behaviour.
// The default values within the struct are valid default options.
type SetOptions struct {
Comment string
Commented bool
Multiline bool
Literal bool
}
// SetWithOptions is the same as Set, but allows you to provide formatting
// instructions to the key, that will be used by Marshal().
func (t *Tree) SetWithOptions(key string, opts SetOptions, value interface{}) {
t.SetPathWithOptions(strings.Split(key, "."), opts, value)
}
// SetPathWithOptions is the same as SetPath, but allows you to provide
// formatting instructions to the key, that will be reused by Marshal().
func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interface{}) {
subtree := t
for i, intermediateKey := range keys[:len(keys)-1] {
nextTree, exists := subtree.values[intermediateKey]
if !exists {
nextTree = newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col})
subtree.values[intermediateKey] = nextTree // add new element here
}
switch node := nextTree.(type) {
case *Tree:
subtree = node
case []*Tree:
// go to most recent element
if len(node) == 0 {
// create element if it does not exist
node = append(node, newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}))
subtree.values[intermediateKey] = node
}
subtree = node[len(node)-1]
}
}
var toInsert interface{}
switch v := value.(type) {
case *Tree:
v.comment = opts.Comment
v.commented = opts.Commented
toInsert = value
case []*Tree:
for i := range v {
v[i].commented = opts.Commented
}
toInsert = value
case *tomlValue:
v.comment = opts.Comment
v.commented = opts.Commented
v.multiline = opts.Multiline
v.literal = opts.Literal
toInsert = v
default:
toInsert = &tomlValue{value: value,
comment: opts.Comment,
commented: opts.Commented,
multiline: opts.Multiline,
literal: opts.Literal,
position: Position{Line: subtree.position.Line + len(subtree.values) + 1, Col: subtree.position.Col}}
}
subtree.values[keys[len(keys)-1]] = toInsert
}
// Set an element in the tree.
// Key is a dot-separated path (e.g. a.b.c).
// Creates all necessary intermediate trees, if needed.
func (t *Tree) Set(key string, value interface{}) {
t.SetWithComment(key, "", false, value)
}
// SetWithComment is the same as Set, but allows you to provide comment
// information to the key, that will be reused by Marshal().
func (t *Tree) SetWithComment(key string, comment string, commented bool, value interface{}) {
t.SetPathWithComment(strings.Split(key, "."), comment, commented, value)
}
// SetPath sets an element in the tree.
// Keys is an array of path elements (e.g. {"a","b","c"}).
// Creates all necessary intermediate trees, if needed.
func (t *Tree) SetPath(keys []string, value interface{}) {
t.SetPathWithComment(keys, "", false, value)
}
// SetPathWithComment is the same as SetPath, but allows you to provide comment
// information to the key, that will be reused by Marshal().
func (t *Tree) SetPathWithComment(keys []string, comment string, commented bool, value interface{}) {
t.SetPathWithOptions(keys, SetOptions{Comment: comment, Commented: commented}, value)
}
// Delete removes a key from the tree.
// Key is a dot-separated path (e.g. a.b.c).
func (t *Tree) Delete(key string) error {
keys, err := parseKey(key)
if err != nil {
return err
}
return t.DeletePath(keys)
}
// DeletePath removes a key from the tree.
// Keys is an array of path elements (e.g. {"a","b","c"}).
func (t *Tree) DeletePath(keys []string) error {
keyLen := len(keys)
if keyLen == 1 {
delete(t.values, keys[0])
return nil
}
tree := t.GetPath(keys[:keyLen-1])
item := keys[keyLen-1]
switch node := tree.(type) {
case *Tree:
delete(node.values, item)
return nil
}
return errors.New("no such key to delete")
}
// createSubTree takes a tree and a key and creates the necessary intermediate
// subtrees to create a subtree at that point. In-place.
//
// e.g. passing a.b.c will create (assuming tree is empty) tree[a], tree[a][b]
// and tree[a][b][c]
//
// Returns nil on success, error object on failure
func (t *Tree) createSubTree(keys []string, pos Position) error {
subtree := t
for i, intermediateKey := range keys {
nextTree, exists := subtree.values[intermediateKey]
if !exists {
tree := newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col})
tree.position = pos
tree.inline = subtree.inline
subtree.values[intermediateKey] = tree
nextTree = tree
}
switch node := nextTree.(type) {
case []*Tree:
subtree = node[len(node)-1]
case *Tree:
subtree = node
default:
return fmt.Errorf("unknown type for path %s (%s): %T (%#v)",
strings.Join(keys, "."), intermediateKey, nextTree, nextTree)
}
}
return nil
}
// LoadBytes creates a Tree from a []byte.
func LoadBytes(b []byte) (tree *Tree, err error) {
defer func() {
if r := recover(); r != nil {
if _, ok := r.(runtime.Error); ok {
panic(r)
}
err = errors.New(r.(string))
}
}()
if len(b) >= 4 && (hasUTF32BigEndianBOM4(b) || hasUTF32LittleEndianBOM4(b)) {
b = b[4:]
} else if len(b) >= 3 && hasUTF8BOM3(b) {
b = b[3:]
} else if len(b) >= 2 && (hasUTF16BigEndianBOM2(b) || hasUTF16LittleEndianBOM2(b)) {
b = b[2:]
}
tree = parseToml(lexToml(b))
return
}
func hasUTF16BigEndianBOM2(b []byte) bool {
return b[0] == 0xFE && b[1] == 0xFF
}
func hasUTF16LittleEndianBOM2(b []byte) bool {
return b[0] == 0xFF && b[1] == 0xFE
}
func hasUTF8BOM3(b []byte) bool {
return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
}
func hasUTF32BigEndianBOM4(b []byte) bool {
return b[0] == 0x00 && b[1] == 0x00 && b[2] == 0xFE && b[3] == 0xFF
}
func hasUTF32LittleEndianBOM4(b []byte) bool {
return b[0] == 0xFF && b[1] == 0xFE && b[2] == 0x00 && b[3] == 0x00
}
// LoadReader creates a Tree from any io.Reader.
func LoadReader(reader io.Reader) (tree *Tree, err error) {
inputBytes, err := ioutil.ReadAll(reader)
if err != nil {
return
}
tree, err = LoadBytes(inputBytes)
return
}
// Load creates a Tree from a string.
func Load(content string) (tree *Tree, err error) {
return LoadBytes([]byte(content))
}
// LoadFile creates a Tree from a file.
func LoadFile(path string) (tree *Tree, err error) {
file, err := os.Open(path)
if err != nil {
return nil, err
}
defer file.Close()
return LoadReader(file)
}
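To tie the accessors above together, a minimal sketch (not part of this changeset) of loading, mutating, and re-serializing a tree:

package main

import (
    "fmt"

    toml "github.com/pelletier/go-toml"
)

func main() {
    tree, err := toml.Load(`title = "TOML Marshal Testing"`)
    if err != nil {
        panic(err)
    }

    tree.Set("owner.name", "Tom")               // creates the intermediate [owner] table
    fmt.Println(tree.Has("owner.name"))         // true
    fmt.Println(tree.GetDefault("missing", 42)) // 42

    if err := tree.Delete("owner.name"); err != nil {
        panic(err)
    }
    out, _ := tree.ToTomlString() // relies on Marshal from the suppressed marshal.go
    fmt.Print(out)
}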

71
vendor/github.com/pelletier/go-toml/tomlpub.go generated vendored Normal file

@@ -0,0 +1,71 @@
package toml
// PubTOMLValue wraps tomlValue so that all of its properties can be accessed from outside the package.
type PubTOMLValue = tomlValue
func (ptv *PubTOMLValue) Value() interface{} {
return ptv.value
}
func (ptv *PubTOMLValue) Comment() string {
return ptv.comment
}
func (ptv *PubTOMLValue) Commented() bool {
return ptv.commented
}
func (ptv *PubTOMLValue) Multiline() bool {
return ptv.multiline
}
func (ptv *PubTOMLValue) Position() Position {
return ptv.position
}
func (ptv *PubTOMLValue) SetValue(v interface{}) {
ptv.value = v
}
func (ptv *PubTOMLValue) SetComment(s string) {
ptv.comment = s
}
func (ptv *PubTOMLValue) SetCommented(c bool) {
ptv.commented = c
}
func (ptv *PubTOMLValue) SetMultiline(m bool) {
ptv.multiline = m
}
func (ptv *PubTOMLValue) SetPosition(p Position) {
ptv.position = p
}
// PubTree wraps Tree so that all of its properties can be accessed from outside the package.
type PubTree = Tree
func (pt *PubTree) Values() map[string]interface{} {
return pt.values
}
func (pt *PubTree) Comment() string {
return pt.comment
}
func (pt *PubTree) Commented() bool {
return pt.commented
}
func (pt *PubTree) Inline() bool {
return pt.inline
}
func (pt *PubTree) SetValues(v map[string]interface{}) {
pt.values = v
}
func (pt *PubTree) SetComment(c string) {
pt.comment = c
}
func (pt *PubTree) SetCommented(c bool) {
pt.commented = c
}
func (pt *PubTree) SetInline(i bool) {
pt.inline = i
}
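Because PubTree and PubTOMLValue are type aliases rather than wrappers, external code can read back metadata such as comments. A hedged sketch:

package main

import (
    "fmt"

    toml "github.com/pelletier/go-toml"
)

func main() {
    tree, _ := toml.Load("a = 1")
    tree.SetWithComment("b", "the answer", false, int64(42))

    // Values() and the accessors below come from the aliases in tomlpub.go.
    for k, v := range tree.Values() {
        if tv, ok := v.(*toml.PubTOMLValue); ok {
            fmt.Printf("%s = %v (comment: %q)\n", k, tv.Value(), tv.Comment())
        }
    }
}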

155
vendor/github.com/pelletier/go-toml/tomltree_create.go generated vendored Normal file

@@ -0,0 +1,155 @@
package toml
import (
"fmt"
"reflect"
"time"
)
var kindToType = [reflect.String + 1]reflect.Type{
reflect.Bool: reflect.TypeOf(true),
reflect.String: reflect.TypeOf(""),
reflect.Float32: reflect.TypeOf(float64(1)),
reflect.Float64: reflect.TypeOf(float64(1)),
reflect.Int: reflect.TypeOf(int64(1)),
reflect.Int8: reflect.TypeOf(int64(1)),
reflect.Int16: reflect.TypeOf(int64(1)),
reflect.Int32: reflect.TypeOf(int64(1)),
reflect.Int64: reflect.TypeOf(int64(1)),
reflect.Uint: reflect.TypeOf(uint64(1)),
reflect.Uint8: reflect.TypeOf(uint64(1)),
reflect.Uint16: reflect.TypeOf(uint64(1)),
reflect.Uint32: reflect.TypeOf(uint64(1)),
reflect.Uint64: reflect.TypeOf(uint64(1)),
}
// typeFor returns a reflect.Type for a reflect.Kind, or nil if none is found.
// supported values:
// string, bool, int64, uint64, float64, time.Time, int, int8, int16, int32, uint, uint8, uint16, uint32, float32
func typeFor(k reflect.Kind) reflect.Type {
if k > 0 && int(k) < len(kindToType) {
return kindToType[k]
}
return nil
}
func simpleValueCoercion(object interface{}) (interface{}, error) {
switch original := object.(type) {
case string, bool, int64, uint64, float64, time.Time:
return original, nil
case int:
return int64(original), nil
case int8:
return int64(original), nil
case int16:
return int64(original), nil
case int32:
return int64(original), nil
case uint:
return uint64(original), nil
case uint8:
return uint64(original), nil
case uint16:
return uint64(original), nil
case uint32:
return uint64(original), nil
case float32:
return float64(original), nil
case fmt.Stringer:
return original.String(), nil
case []interface{}:
value := reflect.ValueOf(original)
length := value.Len()
arrayValue := reflect.MakeSlice(value.Type(), 0, length)
for i := 0; i < length; i++ {
val := value.Index(i).Interface()
simpleValue, err := simpleValueCoercion(val)
if err != nil {
return nil, err
}
arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue))
}
return arrayValue.Interface(), nil
default:
return nil, fmt.Errorf("cannot convert type %T to Tree", object)
}
}
func sliceToTree(object interface{}) (interface{}, error) {
// arrays are a bit tricky, since they can represent either a
// collection of simple values, which is represented by one
// *tomlValue, or an array of tables, which is represented by an
// array of *Tree.
// holding the assumption that this function is called from toTree only when value.Kind() is Array or Slice
value := reflect.ValueOf(object)
insideType := value.Type().Elem()
length := value.Len()
if length > 0 {
insideType = reflect.ValueOf(value.Index(0).Interface()).Type()
}
if insideType.Kind() == reflect.Map {
// this is considered as an array of tables
tablesArray := make([]*Tree, 0, length)
for i := 0; i < length; i++ {
table := value.Index(i)
tree, err := toTree(table.Interface())
if err != nil {
return nil, err
}
tablesArray = append(tablesArray, tree.(*Tree))
}
return tablesArray, nil
}
sliceType := typeFor(insideType.Kind())
if sliceType == nil {
sliceType = insideType
}
arrayValue := reflect.MakeSlice(reflect.SliceOf(sliceType), 0, length)
for i := 0; i < length; i++ {
val := value.Index(i).Interface()
simpleValue, err := simpleValueCoercion(val)
if err != nil {
return nil, err
}
arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue))
}
return &tomlValue{value: arrayValue.Interface(), position: Position{}}, nil
}
func toTree(object interface{}) (interface{}, error) {
value := reflect.ValueOf(object)
if value.Kind() == reflect.Map {
values := map[string]interface{}{}
keys := value.MapKeys()
for _, key := range keys {
if key.Kind() != reflect.String {
if _, ok := key.Interface().(string); !ok {
return nil, fmt.Errorf("map key needs to be a string, not %T (%v)", key.Interface(), key.Kind())
}
}
v := value.MapIndex(key)
newValue, err := toTree(v.Interface())
if err != nil {
return nil, err
}
values[key.String()] = newValue
}
return &Tree{values: values, position: Position{}}, nil
}
if value.Kind() == reflect.Array || value.Kind() == reflect.Slice {
return sliceToTree(object)
}
simpleValue, err := simpleValueCoercion(object)
if err != nil {
return nil, err
}
return &tomlValue{value: simpleValue, position: Position{}}, nil
}
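A minimal sketch of the coercion path above: TreeFromMap feeds plain Go values through toTree and simpleValueCoercion, widening integers and turning nested maps into sub-tables:

package main

import (
    "fmt"

    toml "github.com/pelletier/go-toml"
)

func main() {
    m := map[string]interface{}{
        "name":     "demo",
        "replicas": 3,                                  // int is widened to int64
        "tags":     []interface{}{"a", "b"},            // homogeneous list of strings
        "limits":   map[string]interface{}{"cpu": 1.5}, // nested map becomes a sub-table
    }
    tree, err := toml.TreeFromMap(m)
    if err != nil {
        panic(err)
    }
    out, _ := tree.ToTomlString()
    fmt.Print(out)
}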

552
vendor/github.com/pelletier/go-toml/tomltree_write.go generated vendored Normal file

@@ -0,0 +1,552 @@
package toml
import (
"bytes"
"fmt"
"io"
"math"
"math/big"
"reflect"
"sort"
"strconv"
"strings"
"time"
)
type valueComplexity int
const (
valueSimple valueComplexity = iota + 1
valueComplex
)
type sortNode struct {
key string
complexity valueComplexity
}
// Encodes a string to a TOML-compliant multi-line string value
// This function is a clone of the existing encodeTomlString function, except that whitespace characters
// are preserved. Quotation marks and backslashes are also not escaped.
func encodeMultilineTomlString(value string, commented string) string {
var b bytes.Buffer
adjacentQuoteCount := 0
b.WriteString(commented)
for i, rr := range value {
if rr != '"' {
adjacentQuoteCount = 0
} else {
adjacentQuoteCount++
}
switch rr {
case '\b':
b.WriteString(`\b`)
case '\t':
b.WriteString("\t")
case '\n':
b.WriteString("\n" + commented)
case '\f':
b.WriteString(`\f`)
case '\r':
b.WriteString("\r")
case '"':
if adjacentQuoteCount >= 3 || i == len(value)-1 {
adjacentQuoteCount = 0
b.WriteString(`\"`)
} else {
b.WriteString(`"`)
}
case '\\':
b.WriteString(`\`)
default:
intRr := uint16(rr)
if intRr < 0x001F {
b.WriteString(fmt.Sprintf("\\u%0.4X", intRr))
} else {
b.WriteRune(rr)
}
}
}
return b.String()
}
// Encodes a string to a TOML-compliant string value
func encodeTomlString(value string) string {
var b bytes.Buffer
for _, rr := range value {
switch rr {
case '\b':
b.WriteString(`\b`)
case '\t':
b.WriteString(`\t`)
case '\n':
b.WriteString(`\n`)
case '\f':
b.WriteString(`\f`)
case '\r':
b.WriteString(`\r`)
case '"':
b.WriteString(`\"`)
case '\\':
b.WriteString(`\\`)
default:
intRr := uint16(rr)
if intRr < 0x001F {
b.WriteString(fmt.Sprintf("\\u%0.4X", intRr))
} else {
b.WriteRune(rr)
}
}
}
return b.String()
}
func tomlTreeStringRepresentation(t *Tree, ord MarshalOrder) (string, error) {
var orderedVals []sortNode
switch ord {
case OrderPreserve:
orderedVals = sortByLines(t)
default:
orderedVals = sortAlphabetical(t)
}
var values []string
for _, node := range orderedVals {
k := node.key
v := t.values[k]
repr, err := tomlValueStringRepresentation(v, "", "", ord, false)
if err != nil {
return "", err
}
values = append(values, quoteKeyIfNeeded(k)+" = "+repr)
}
return "{ " + strings.Join(values, ", ") + " }", nil
}
func tomlValueStringRepresentation(v interface{}, commented string, indent string, ord MarshalOrder, arraysOneElementPerLine bool) (string, error) {
// v may arrive wrapped in a *tomlValue (see writeToOrdered); unwrap it here
// so this function can see the formatting options attached to the value.
tv, ok := v.(*tomlValue)
if ok {
v = tv.value
} else {
tv = &tomlValue{}
}
switch value := v.(type) {
case uint64:
return strconv.FormatUint(value, 10), nil
case int64:
return strconv.FormatInt(value, 10), nil
case float64:
// Default bit length is full 64
bits := 64
// big.NewFloat panics if given NaN
if !math.IsNaN(value) {
// if 32 bit accuracy is enough to exactly show, use 32
_, acc := big.NewFloat(value).Float32()
if acc == big.Exact {
bits = 32
}
}
if math.Trunc(value) == value {
return strings.ToLower(strconv.FormatFloat(value, 'f', 1, bits)), nil
}
return strings.ToLower(strconv.FormatFloat(value, 'f', -1, bits)), nil
case string:
if tv.multiline {
if tv.literal {
b := strings.Builder{}
b.WriteString("'''\n")
b.Write([]byte(value))
b.WriteString("\n'''")
return b.String(), nil
} else {
return "\"\"\"\n" + encodeMultilineTomlString(value, commented) + "\"\"\"", nil
}
}
return "\"" + encodeTomlString(value) + "\"", nil
case []byte:
b, _ := v.([]byte)
return string(b), nil
case bool:
if value {
return "true", nil
}
return "false", nil
case time.Time:
return value.Format(time.RFC3339), nil
case LocalDate:
return value.String(), nil
case LocalDateTime:
return value.String(), nil
case LocalTime:
return value.String(), nil
case *Tree:
return tomlTreeStringRepresentation(value, ord)
case nil:
return "", nil
}
rv := reflect.ValueOf(v)
if rv.Kind() == reflect.Slice {
var values []string
for i := 0; i < rv.Len(); i++ {
item := rv.Index(i).Interface()
itemRepr, err := tomlValueStringRepresentation(item, commented, indent, ord, arraysOneElementPerLine)
if err != nil {
return "", err
}
values = append(values, itemRepr)
}
if arraysOneElementPerLine && len(values) > 1 {
stringBuffer := bytes.Buffer{}
valueIndent := indent + ` ` // TODO: move that to a shared encoder state
stringBuffer.WriteString("[\n")
for _, value := range values {
stringBuffer.WriteString(valueIndent)
stringBuffer.WriteString(commented + value)
stringBuffer.WriteString(`,`)
stringBuffer.WriteString("\n")
}
stringBuffer.WriteString(indent + commented + "]")
return stringBuffer.String(), nil
}
return "[" + strings.Join(values, ", ") + "]", nil
}
return "", fmt.Errorf("unsupported value type %T: %v", v, v)
}
func getTreeArrayLine(trees []*Tree) (line int) {
// Prevent returning 0 for empty trees
line = int(^uint(0) >> 1)
// get lowest line number >= 0
for _, tv := range trees {
if tv.position.Line < line || line == 0 {
line = tv.position.Line
}
}
return
}
func sortByLines(t *Tree) (vals []sortNode) {
var (
line int
lines []int
tv *Tree
tom *tomlValue
node sortNode
)
vals = make([]sortNode, 0)
m := make(map[int]sortNode)
for k := range t.values {
v := t.values[k]
switch v.(type) {
case *Tree:
tv = v.(*Tree)
line = tv.position.Line
node = sortNode{key: k, complexity: valueComplex}
case []*Tree:
line = getTreeArrayLine(v.([]*Tree))
node = sortNode{key: k, complexity: valueComplex}
default:
tom = v.(*tomlValue)
line = tom.position.Line
node = sortNode{key: k, complexity: valueSimple}
}
lines = append(lines, line)
vals = append(vals, node)
m[line] = node
}
sort.Ints(lines)
for i, line := range lines {
vals[i] = m[line]
}
return vals
}
func sortAlphabetical(t *Tree) (vals []sortNode) {
var (
node sortNode
simpVals []string
compVals []string
)
vals = make([]sortNode, 0)
m := make(map[string]sortNode)
for k := range t.values {
v := t.values[k]
switch v.(type) {
case *Tree, []*Tree:
node = sortNode{key: k, complexity: valueComplex}
compVals = append(compVals, node.key)
default:
node = sortNode{key: k, complexity: valueSimple}
simpVals = append(simpVals, node.key)
}
vals = append(vals, node)
m[node.key] = node
}
// Simples first to match previous implementation
sort.Strings(simpVals)
i := 0
for _, key := range simpVals {
vals[i] = m[key]
i++
}
sort.Strings(compVals)
for _, key := range compVals {
vals[i] = m[key]
i++
}
return vals
}
func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) {
return t.writeToOrdered(w, indent, keyspace, bytesCount, arraysOneElementPerLine, OrderAlphabetical, " ", false, false)
}
func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool, ord MarshalOrder, indentString string, compactComments, parentCommented bool) (int64, error) {
var orderedVals []sortNode
switch ord {
case OrderPreserve:
orderedVals = sortByLines(t)
default:
orderedVals = sortAlphabetical(t)
}
for _, node := range orderedVals {
switch node.complexity {
case valueComplex:
k := node.key
v := t.values[k]
combinedKey := quoteKeyIfNeeded(k)
if keyspace != "" {
combinedKey = keyspace + "." + combinedKey
}
switch node := v.(type) {
// node has to be of those two types given how keys are sorted above
case *Tree:
tv, ok := t.values[k].(*Tree)
if !ok {
return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k])
}
if tv.comment != "" {
comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1)
start := "# "
if strings.HasPrefix(comment, "#") {
start = ""
}
writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment)
bytesCount += int64(writtenBytesCountComment)
if errc != nil {
return bytesCount, errc
}
}
var commented string
if parentCommented || t.commented || tv.commented {
commented = "# "
}
writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n")
bytesCount += int64(writtenBytesCount)
if err != nil {
return bytesCount, err
}
bytesCount, err = node.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, compactComments, parentCommented || t.commented || tv.commented)
if err != nil {
return bytesCount, err
}
case []*Tree:
for _, subTree := range node {
var commented string
if parentCommented || t.commented || subTree.commented {
commented = "# "
}
writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n")
bytesCount += int64(writtenBytesCount)
if err != nil {
return bytesCount, err
}
bytesCount, err = subTree.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, compactComments, parentCommented || t.commented || subTree.commented)
if err != nil {
return bytesCount, err
}
}
}
default: // Simple
k := node.key
v, ok := t.values[k].(*tomlValue)
if !ok {
return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k])
}
var commented string
if parentCommented || t.commented || v.commented {
commented = "# "
}
repr, err := tomlValueStringRepresentation(v, commented, indent, ord, arraysOneElementPerLine)
if err != nil {
return bytesCount, err
}
if v.comment != "" {
comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1)
start := "# "
if strings.HasPrefix(comment, "#") {
start = ""
}
if !compactComments {
writtenBytesCountComment, errc := writeStrings(w, "\n")
bytesCount += int64(writtenBytesCountComment)
if errc != nil {
return bytesCount, errc
}
}
writtenBytesCountComment, errc := writeStrings(w, indent, start, comment, "\n")
bytesCount += int64(writtenBytesCountComment)
if errc != nil {
return bytesCount, errc
}
}
quotedKey := quoteKeyIfNeeded(k)
writtenBytesCount, err := writeStrings(w, indent, commented, quotedKey, " = ", repr, "\n")
bytesCount += int64(writtenBytesCount)
if err != nil {
return bytesCount, err
}
}
}
return bytesCount, nil
}
// quote a key if it does not fit the bare key format (A-Za-z0-9_-)
// quoted keys use the same rules as strings
func quoteKeyIfNeeded(k string) string {
// when encoding a map with the 'quoteMapKeys' option enabled, the tree will contain
// keys that have already been quoted.
// not an ideal situation, but a good enough stopgap.
if len(k) >= 2 && k[0] == '"' && k[len(k)-1] == '"' {
return k
}
isBare := true
for _, r := range k {
if !isValidBareChar(r) {
isBare = false
break
}
}
if isBare {
return k
}
return quoteKey(k)
}
func quoteKey(k string) string {
return "\"" + encodeTomlString(k) + "\""
}
func writeStrings(w io.Writer, s ...string) (int, error) {
var n int
for i := range s {
b, err := io.WriteString(w, s[i])
n += b
if err != nil {
return n, err
}
}
return n, nil
}
// WriteTo encodes the Tree as TOML and writes it to the writer w.
// It returns the number of bytes written on success, or an error if one occurred.
func (t *Tree) WriteTo(w io.Writer) (int64, error) {
return t.writeTo(w, "", "", 0, false)
}
// ToTomlString generates a human-readable representation of the current tree.
// Output spans multiple lines, and is suitable for ingest by a TOML parser.
// If the conversion cannot be performed, ToTomlString returns a non-nil error.
func (t *Tree) ToTomlString() (string, error) {
b, err := t.Marshal()
if err != nil {
return "", err
}
return string(b), nil
}
// String generates a human-readable representation of the current tree.
// Alias of ToTomlString. Present to implement the fmt.Stringer interface.
func (t *Tree) String() string {
result, _ := t.ToTomlString()
return result
}
// ToMap recursively generates a representation of the tree using Go built-in structures.
// The following types are used:
//
// * bool
// * float64
// * int64
// * string
// * uint64
// * time.Time
// * map[string]interface{} (where interface{} is any of this list)
// * []interface{} (where interface{} is any of this list)
func (t *Tree) ToMap() map[string]interface{} {
result := map[string]interface{}{}
for k, v := range t.values {
switch node := v.(type) {
case []*Tree:
var array []interface{}
for _, item := range node {
array = append(array, item.ToMap())
}
result[k] = array
case *Tree:
result[k] = node.ToMap()
case *tomlValue:
result[k] = tomlValueToGo(node.value)
}
}
return result
}
func tomlValueToGo(v interface{}) interface{} {
if tree, ok := v.(*Tree); ok {
return tree.ToMap()
}
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Slice {
return v
}
values := make([]interface{}, rv.Len())
for i := 0; i < rv.Len(); i++ {
item := rv.Index(i).Interface()
values[i] = tomlValueToGo(item)
}
return values
}
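A sketch of the writer's default behavior: writeTo sorts keys alphabetically, with simple values ahead of sub-tables (OrderPreserve, used by the marshaller, keeps source order instead):

package main

import (
    "bytes"
    "fmt"

    toml "github.com/pelletier/go-toml"
)

func main() {
    tree, _ := toml.Load("b = 2\na = 1\n[sub]\nc = 3\n")

    var buf bytes.Buffer
    if _, err := tree.WriteTo(&buf); err != nil {
        panic(err)
    }
    fmt.Print(buf.String())
    // Output:
    // a = 1
    // b = 2
    //
    // [sub]
    //   c = 3
}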

6
vendor/github.com/pelletier/go-toml/tomltree_writepub.go generated vendored Normal file

@@ -0,0 +1,6 @@
package toml
// ValueStringRepresentation transforms an interface{} value into its TOML string representation.
func ValueStringRepresentation(v interface{}, commented string, indent string, ord MarshalOrder, arraysOneElementPerLine bool) (string, error) {
return tomlValueStringRepresentation(v, commented, indent, ord, arraysOneElementPerLine)
}
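Finally, a small sketch of this exported helper; the MarshalOrder constants (OrderAlphabetical below) come from the suppressed marshal.go:

package main

import (
    "fmt"

    toml "github.com/pelletier/go-toml"
)

func main() {
    repr, err := toml.ValueStringRepresentation(
        []interface{}{int64(1), int64(2)}, "", "", toml.OrderAlphabetical, false)
    if err != nil {
        panic(err)
    }
    fmt.Println(repr) // [1, 2]
}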

9
vendor/modules.txt vendored

@@ -55,6 +55,7 @@ github.com/containerd/containerd/images
github.com/containerd/containerd/labels
github.com/containerd/containerd/log
github.com/containerd/containerd/pkg/seed
github.com/containerd/containerd/pkg/userns
github.com/containerd/containerd/platforms
github.com/containerd/containerd/reference
github.com/containerd/containerd/remotes
@@ -143,12 +144,15 @@ github.com/docker/docker/api/types/versions
github.com/docker/docker/api/types/volume
github.com/docker/docker/client
github.com/docker/docker/errdefs
github.com/docker/docker/pkg/archive
github.com/docker/docker/pkg/fileutils
github.com/docker/docker/pkg/homedir
github.com/docker/docker/pkg/idtools
github.com/docker/docker/pkg/ioutils
github.com/docker/docker/pkg/jsonmessage
github.com/docker/docker/pkg/longpath
github.com/docker/docker/pkg/namesgenerator
github.com/docker/docker/pkg/pools
github.com/docker/docker/pkg/stdcopy
github.com/docker/docker/pkg/stringid
github.com/docker/docker/pkg/system
@@ -354,6 +358,11 @@ github.com/opencontainers/go-digest
## explicit
github.com/opencontainers/image-spec/specs-go
github.com/opencontainers/image-spec/specs-go/v1
# github.com/opencontainers/runc v1.0.1
github.com/opencontainers/runc/libcontainer/user
# github.com/pelletier/go-toml v1.9.4
## explicit
github.com/pelletier/go-toml
# github.com/pkg/errors v0.9.1
## explicit
github.com/pkg/errors