index (int64, 0 to 0)
repo_id (string, lengths 21 to 232)
file_path (string, lengths 34 to 259)
content (string, lengths 1 to 14.1M)
__index_level_0__ (int64, 0 to 10k)
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go
// +build !windows package ioutils // import "github.com/docker/docker/pkg/ioutils" import "io/ioutil" // TempDir on Unix systems is equivalent to ioutil.TempDir. func TempDir(dir, prefix string) (string, error) { return ioutil.TempDir(dir, prefix) }
9,400
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/ioutils/readers.go
package ioutils // import "github.com/docker/docker/pkg/ioutils" import ( "context" "crypto/sha256" "encoding/hex" "io" ) // ReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser // It calls the given callback function when closed. It should be constructed // with NewReadCloserWrapper type ReadCloserWrapper struct { io.Reader closer func() error } // Close calls back the passed closer function func (r *ReadCloserWrapper) Close() error { return r.closer() } // NewReadCloserWrapper returns a new io.ReadCloser. func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { return &ReadCloserWrapper{ Reader: r, closer: closer, } } type readerErrWrapper struct { reader io.Reader closer func() } func (r *readerErrWrapper) Read(p []byte) (int, error) { n, err := r.reader.Read(p) if err != nil { r.closer() } return n, err } // NewReaderErrWrapper returns a new io.Reader. func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { return &readerErrWrapper{ reader: r, closer: closer, } } // HashData returns the sha256 sum of src. func HashData(src io.Reader) (string, error) { h := sha256.New() if _, err := io.Copy(h, src); err != nil { return "", err } return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil } // OnEOFReader wraps an io.ReadCloser and a function // the function will run at the end of file or close the file. type OnEOFReader struct { Rc io.ReadCloser Fn func() } func (r *OnEOFReader) Read(p []byte) (n int, err error) { n, err = r.Rc.Read(p) if err == io.EOF { r.runFunc() } return } // Close closes the file and run the function. func (r *OnEOFReader) Close() error { err := r.Rc.Close() r.runFunc() return err } func (r *OnEOFReader) runFunc() { if fn := r.Fn; fn != nil { fn() r.Fn = nil } } // cancelReadCloser wraps an io.ReadCloser with a context for cancelling read // operations. type cancelReadCloser struct { cancel func() pR *io.PipeReader // Stream to read from pW *io.PipeWriter } // NewCancelReadCloser creates a wrapper that closes the ReadCloser when the // context is cancelled. The returned io.ReadCloser must be closed when it is // no longer needed. func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { pR, pW := io.Pipe() // Create a context used to signal when the pipe is closed doneCtx, cancel := context.WithCancel(context.Background()) p := &cancelReadCloser{ cancel: cancel, pR: pR, pW: pW, } go func() { _, err := io.Copy(pW, in) select { case <-ctx.Done(): // If the context was closed, p.closeWithError // was already called. Calling it again would // change the error that Read returns. default: p.closeWithError(err) } in.Close() }() go func() { for { select { case <-ctx.Done(): p.closeWithError(ctx.Err()) case <-doneCtx.Done(): return } } }() return p } // Read wraps the Read method of the pipe that provides data from the wrapped // ReadCloser. func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { return p.pR.Read(buf) } // closeWithError closes the wrapper and its underlying reader. It will // cause future calls to Read to return err. func (p *cancelReadCloser) closeWithError(err error) { p.pW.CloseWithError(err) p.cancel() } // Close closes the wrapper its underlying reader. It will cause // future calls to Read to return io.EOF. func (p *cancelReadCloser) Close() error { p.closeWithError(io.EOF) return nil }
9,401
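The ioutils/readers.go row above includes NewCancelReadCloser, which ties a reader's lifetime to a context. Below is a minimal usage sketch; it assumes the vendored import path shown above resolves in your module, and the payload and timeout values are made up:

```go
package main

import (
	"bytes"
	"context"
	"fmt"
	"io/ioutil"
	"time"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	// Wrap a plain reader in a ReadCloser so reads can be cancelled through a context.
	src := ioutil.NopCloser(bytes.NewReader([]byte("some payload")))

	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	rc := ioutils.NewCancelReadCloser(ctx, src)
	defer rc.Close()

	// Reads are served from the wrapped reader until the context is cancelled,
	// after which Read returns the context error.
	data, err := ioutil.ReadAll(rc)
	fmt.Printf("read %q, err=%v\n", data, err)
}
```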
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/ioutils/writers.go
package ioutils // import "github.com/docker/docker/pkg/ioutils" import "io" // NopWriter represents a type which write operation is nop. type NopWriter struct{} func (*NopWriter) Write(buf []byte) (int, error) { return len(buf), nil } type nopWriteCloser struct { io.Writer } func (w *nopWriteCloser) Close() error { return nil } // NopWriteCloser returns a nopWriteCloser. func NopWriteCloser(w io.Writer) io.WriteCloser { return &nopWriteCloser{w} } // NopFlusher represents a type which flush operation is nop. type NopFlusher struct{} // Flush is a nop operation. func (f *NopFlusher) Flush() {} type writeCloserWrapper struct { io.Writer closer func() error } func (r *writeCloserWrapper) Close() error { return r.closer() } // NewWriteCloserWrapper returns a new io.WriteCloser. func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { return &writeCloserWrapper{ Writer: r, closer: closer, } } // WriteCounter wraps a concrete io.Writer and hold a count of the number // of bytes written to the writer during a "session". // This can be convenient when write return is masked // (e.g., json.Encoder.Encode()) type WriteCounter struct { Count int64 Writer io.Writer } // NewWriteCounter returns a new WriteCounter. func NewWriteCounter(w io.Writer) *WriteCounter { return &WriteCounter{ Writer: w, } } func (wc *WriteCounter) Write(p []byte) (count int, err error) { count, err = wc.Writer.Write(p) wc.Count += int64(count) return }
9,402
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go
package ioutils // import "github.com/docker/docker/pkg/ioutils" import ( "io/ioutil" "github.com/docker/docker/pkg/longpath" ) // TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format. func TempDir(dir, prefix string) (string, error) { tempDir, err := ioutil.TempDir(dir, prefix) if err != nil { return "", err } return longpath.AddPrefix(tempDir), nil }
9,403
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/longpath/longpath.go
// longpath introduces some constants and helper functions for handling long paths // in Windows, which are expected to be prepended with `\\?\` and followed by either // a drive letter, a UNC server\share, or a volume identifier. package longpath // import "github.com/docker/docker/pkg/longpath" import ( "strings" ) // Prefix is the longpath prefix for Windows file paths. const Prefix = `\\?\` // AddPrefix will add the Windows long path prefix to the path provided if // it does not already have it. func AddPrefix(path string) string { if !strings.HasPrefix(path, Prefix) { if strings.HasPrefix(path, `\\`) { // This is a UNC path, so we need to add 'UNC' to the path as well. path = Prefix + `UNC` + path[1:] } else { path = Prefix + path } } return path }
9,404
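The longpath.go row above documents the `\\?\` prefix handling. The sketch below shows how AddPrefix treats a drive-letter path, a UNC path, and an already-prefixed path; the paths are hypothetical and the vendored import path is assumed to be resolvable:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/longpath"
)

func main() {
	// Drive-letter paths simply gain the \\?\ prefix.
	fmt.Println(longpath.AddPrefix(`C:\Windows\Temp`)) // \\?\C:\Windows\Temp

	// UNC paths (\\server\share) are rewritten to the \\?\UNC\ form.
	fmt.Println(longpath.AddPrefix(`\\server\share\dir`)) // \\?\UNC\server\share\dir

	// Already-prefixed paths are returned unchanged.
	fmt.Println(longpath.AddPrefix(`\\?\C:\already\long`)) // \\?\C:\already\long
}
```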
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/meminfo.go
package system // import "github.com/docker/docker/pkg/system" // MemInfo contains memory statistics of the host system. type MemInfo struct { // Total usable RAM (i.e. physical RAM minus a few reserved bits and the // kernel binary code). MemTotal int64 // Amount of free memory. MemFree int64 // Total amount of swap space available. SwapTotal int64 // Amount of swap space that is currently unused. SwapFree int64 }
9,405
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/rm.go
package system // import "github.com/docker/docker/pkg/system" import ( "os" "syscall" "time" "github.com/docker/docker/pkg/mount" "github.com/pkg/errors" ) // EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can // often be remedied. // Only use `EnsureRemoveAll` if you really want to make every effort to remove // a directory. // // Because of the way `os.Remove` (and by extension `os.RemoveAll`) works, there // can be a race between reading directory entries and then actually attempting // to remove everything in the directory. // These types of errors do not need to be returned since it's ok for the dir to // be gone we can just retry the remove operation. // // This should not return a `os.ErrNotExist` kind of error under any circumstances func EnsureRemoveAll(dir string) error { notExistErr := make(map[string]bool) // track retries exitOnErr := make(map[string]int) maxRetry := 50 // Attempt to unmount anything beneath this dir first mount.RecursiveUnmount(dir) for { err := os.RemoveAll(dir) if err == nil { return nil } pe, ok := err.(*os.PathError) if !ok { return err } if os.IsNotExist(err) { if notExistErr[pe.Path] { return err } notExistErr[pe.Path] = true // There is a race where some subdir can be removed but after the parent // dir entries have been read. // So the path could be from `os.Remove(subdir)` // If the reported non-existent path is not the passed in `dir` we // should just retry, but otherwise return with no error. if pe.Path == dir { return nil } continue } if pe.Err != syscall.EBUSY { return err } if mounted, _ := mount.Mounted(pe.Path); mounted { if e := mount.Unmount(pe.Path); e != nil { if mounted, _ := mount.Mounted(pe.Path); mounted { return errors.Wrapf(e, "error while removing %s", dir) } } } if exitOnErr[pe.Path] == maxRetry { return err } exitOnErr[pe.Path]++ time.Sleep(100 * time.Millisecond) } }
9,406
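The rm.go row above wraps os.RemoveAll with unmount and retry logic. A brief usage sketch follows; the directory is hypothetical, and it assumes the vendored system package and the mount package it depends on are available on the target platform:

```go
package main

import (
	"log"

	"github.com/docker/docker/pkg/system"
)

func main() {
	// Best-effort removal of a directory that may still contain mounts;
	// EnsureRemoveAll retries on EBUSY and unmounts anything beneath the path first.
	if err := system.EnsureRemoveAll("/var/lib/example/tmp-layer"); err != nil {
		log.Fatalf("could not remove directory: %v", err)
	}
	log.Println("directory removed (or already gone)")
}
```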
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/args_windows.go
package system // import "github.com/docker/docker/pkg/system" import ( "strings" "golang.org/x/sys/windows" ) // EscapeArgs makes a Windows-style escaped command line from a set of arguments func EscapeArgs(args []string) string { escapedArgs := make([]string, len(args)) for i, a := range args { escapedArgs[i] = windows.EscapeArg(a) } return strings.Join(escapedArgs, " ") }
9,407
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go
// +build !linux package system // import "github.com/docker/docker/pkg/system" // Lgetxattr is not supported on platforms other than linux. func Lgetxattr(path string, attr string) ([]byte, error) { return nil, ErrNotSupportedPlatform } // Lsetxattr is not supported on platforms other than linux. func Lsetxattr(path string, attr string, data []byte, flags int) error { return ErrNotSupportedPlatform }
9,408
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/stat_unix.go
// +build !windows package system // import "github.com/docker/docker/pkg/system" import ( "os" "syscall" ) // StatT type contains status of a file. It contains metadata // like permission, owner, group, size, etc about a file. type StatT struct { mode uint32 uid uint32 gid uint32 rdev uint64 size int64 mtim syscall.Timespec } // Mode returns file's permission mode. func (s StatT) Mode() uint32 { return s.mode } // UID returns file's user id of owner. func (s StatT) UID() uint32 { return s.uid } // GID returns file's group id of owner. func (s StatT) GID() uint32 { return s.gid } // Rdev returns file's device ID (if it's special file). func (s StatT) Rdev() uint64 { return s.rdev } // Size returns file's size. func (s StatT) Size() int64 { return s.size } // Mtim returns file's last modification time. func (s StatT) Mtim() syscall.Timespec { return s.mtim } // IsDir reports whether s describes a directory. func (s StatT) IsDir() bool { return s.mode&syscall.S_IFDIR != 0 } // Stat takes a path to a file and returns // a system.StatT type pertaining to that file. // // Throws an error if the file does not exist func Stat(path string) (*StatT, error) { s := &syscall.Stat_t{} if err := syscall.Stat(path, s); err != nil { return nil, &os.PathError{Op: "Stat", Path: path, Err: err} } return fromStatT(s) }
9,409
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/process_windows.go
package system // import "github.com/docker/docker/pkg/system" import "os" // IsProcessAlive returns true if process with a given pid is running. func IsProcessAlive(pid int) bool { _, err := os.FindProcess(pid) return err == nil } // KillProcess force-stops a process. func KillProcess(pid int) { p, err := os.FindProcess(pid) if err == nil { p.Kill() } }
9,410
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go
package system // import "github.com/docker/docker/pkg/system" import "syscall" // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { return &StatT{size: s.Size, mode: uint32(s.Mode), uid: s.Uid, gid: s.Gid, rdev: uint64(s.Rdev), mtim: s.Mtim}, nil }
9,411
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/syscall_windows.go
package system // import "github.com/docker/docker/pkg/system" import ( "fmt" "syscall" "unsafe" "github.com/sirupsen/logrus" "golang.org/x/sys/windows" ) const ( OWNER_SECURITY_INFORMATION = 0x00000001 GROUP_SECURITY_INFORMATION = 0x00000002 DACL_SECURITY_INFORMATION = 0x00000004 SACL_SECURITY_INFORMATION = 0x00000008 LABEL_SECURITY_INFORMATION = 0x00000010 ATTRIBUTE_SECURITY_INFORMATION = 0x00000020 SCOPE_SECURITY_INFORMATION = 0x00000040 PROCESS_TRUST_LABEL_SECURITY_INFORMATION = 0x00000080 ACCESS_FILTER_SECURITY_INFORMATION = 0x00000100 BACKUP_SECURITY_INFORMATION = 0x00010000 PROTECTED_DACL_SECURITY_INFORMATION = 0x80000000 PROTECTED_SACL_SECURITY_INFORMATION = 0x40000000 UNPROTECTED_DACL_SECURITY_INFORMATION = 0x20000000 UNPROTECTED_SACL_SECURITY_INFORMATION = 0x10000000 ) const ( SE_UNKNOWN_OBJECT_TYPE = iota SE_FILE_OBJECT SE_SERVICE SE_PRINTER SE_REGISTRY_KEY SE_LMSHARE SE_KERNEL_OBJECT SE_WINDOW_OBJECT SE_DS_OBJECT SE_DS_OBJECT_ALL SE_PROVIDER_DEFINED_OBJECT SE_WMIGUID_OBJECT SE_REGISTRY_WOW64_32KEY ) const ( SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege" ) const ( ContainerAdministratorSidString = "S-1-5-93-2-1" ContainerUserSidString = "S-1-5-93-2-2" ) var ( ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") procGetVersionExW = modkernel32.NewProc("GetVersionExW") procSetNamedSecurityInfo = modadvapi32.NewProc("SetNamedSecurityInfoW") procGetSecurityDescriptorDacl = modadvapi32.NewProc("GetSecurityDescriptorDacl") ) // OSVersion is a wrapper for Windows version information // https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx type OSVersion struct { Version uint32 MajorVersion uint8 MinorVersion uint8 Build uint16 } // https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx type osVersionInfoEx struct { OSVersionInfoSize uint32 MajorVersion uint32 MinorVersion uint32 BuildNumber uint32 PlatformID uint32 CSDVersion [128]uint16 ServicePackMajor uint16 ServicePackMinor uint16 SuiteMask uint16 ProductType byte Reserve byte } // GetOSVersion gets the operating system version on Windows. Note that // docker.exe must be manifested to get the correct version information. func GetOSVersion() OSVersion { var err error osv := OSVersion{} osv.Version, err = windows.GetVersion() if err != nil { // GetVersion never fails. panic(err) } osv.MajorVersion = uint8(osv.Version & 0xFF) osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) osv.Build = uint16(osv.Version >> 16) return osv } func (osv OSVersion) ToString() string { return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build) } // IsWindowsClient returns true if the SKU is client // @engine maintainers - this function should not be removed or modified as it // is used to enforce licensing restrictions on Windows. func IsWindowsClient() bool { osviex := &osVersionInfoEx{OSVersionInfoSize: 284} r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex))) if r1 == 0 { logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err) return false } const verNTWorkstation = 0x00000001 return osviex.ProductType == verNTWorkstation } // Unmount is a platform-specific helper function to call // the unmount syscall. Not supported on Windows func Unmount(dest string) error { return nil } // CommandLineToArgv wraps the Windows syscall to turn a commandline into an argument array. 
func CommandLineToArgv(commandLine string) ([]string, error) { var argc int32 argsPtr, err := windows.UTF16PtrFromString(commandLine) if err != nil { return nil, err } argv, err := windows.CommandLineToArgv(argsPtr, &argc) if err != nil { return nil, err } defer windows.LocalFree(windows.Handle(uintptr(unsafe.Pointer(argv)))) newArgs := make([]string, argc) for i, v := range (*argv)[:argc] { newArgs[i] = string(windows.UTF16ToString((*v)[:])) } return newArgs, nil } // HasWin32KSupport determines whether containers that depend on win32k can // run on this machine. Win32k is the driver used to implement windowing. func HasWin32KSupport() bool { // For now, check for ntuser API support on the host. In the future, a host // may support win32k in containers even if the host does not support ntuser // APIs. return ntuserApiset.Load() == nil } func SetNamedSecurityInfo(objectName *uint16, objectType uint32, securityInformation uint32, sidOwner *windows.SID, sidGroup *windows.SID, dacl *byte, sacl *byte) (result error) { r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfo.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(sidOwner)), uintptr(unsafe.Pointer(sidGroup)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) if r0 != 0 { result = syscall.Errno(r0) } return } func GetSecurityDescriptorDacl(securityDescriptor *byte, daclPresent *uint32, dacl **byte, daclDefaulted *uint32) (result error) { r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(securityDescriptor)), uintptr(unsafe.Pointer(daclPresent)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclDefaulted)), 0, 0) if r1 == 0 { if e1 != 0 { result = syscall.Errno(e1) } else { result = syscall.EINVAL } } return }
9,412
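GetOSVersion in the syscall_windows.go row above unpacks the value returned by GetVersion into major, minor, and build fields. Below is a standalone worked example of that bit layout, using a sample value rather than a real host query:

```go
package main

import "fmt"

func main() {
	// GetVersion packs the version as: low byte = major, next byte = minor,
	// high 16 bits = build number. 0x4563000A decodes to Windows 10.0 build 17763.
	const version uint32 = 0x4563000A

	major := uint8(version & 0xFF)      // 10
	minor := uint8(version >> 8 & 0xFF) // 0
	build := uint16(version >> 16)      // 17763 (0x4563)

	fmt.Printf("%d.%d.%d\n", major, minor, build) // 10.0.17763
}
```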
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/path_windows.go
package system // import "github.com/docker/docker/pkg/system" import "syscall" // GetLongPathName converts Windows short pathnames to full pathnames. // For example C:\Users\ADMIN~1 --> C:\Users\Administrator. // It is a no-op on non-Windows platforms func GetLongPathName(path string) (string, error) { // See https://groups.google.com/forum/#!topic/golang-dev/1tufzkruoTg p := syscall.StringToUTF16(path) b := p // GetLongPathName says we can reuse buffer n, err := syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) if err != nil { return "", err } if n > uint32(len(b)) { b = make([]uint16, n) _, err = syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) if err != nil { return "", err } } return syscall.UTF16ToString(b), nil }
9,413
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
package system // import "github.com/docker/docker/pkg/system" import "golang.org/x/sys/unix" // Lgetxattr retrieves the value of the extended attribute identified by attr // and associated with the given path in the file system. // It will returns a nil slice and nil error if the xattr is not set. func Lgetxattr(path string, attr string) ([]byte, error) { dest := make([]byte, 128) sz, errno := unix.Lgetxattr(path, attr, dest) if errno == unix.ENODATA { return nil, nil } if errno == unix.ERANGE { dest = make([]byte, sz) sz, errno = unix.Lgetxattr(path, attr, dest) } if errno != nil { return nil, errno } return dest[:sz], nil } // Lsetxattr sets the value of the extended attribute identified by attr // and associated with the given path in the file system. func Lsetxattr(path string, attr string, data []byte, flags int) error { return unix.Lsetxattr(path, attr, data, flags) }
9,414
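The xattrs_linux.go row above exposes Lgetxattr and Lsetxattr. A short Linux-only usage sketch follows; the file path and attribute name are hypothetical, and the vendored system package is assumed to be resolvable:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/docker/docker/pkg/system"
)

func main() {
	path := "/tmp/xattr-example.txt" // hypothetical file
	attr := "user.example-comment"   // hypothetical xattr in the user namespace

	if err := ioutil.WriteFile(path, []byte("demo"), 0644); err != nil {
		log.Fatalf("create file: %v", err)
	}

	if err := system.Lsetxattr(path, attr, []byte("hello"), 0); err != nil {
		log.Fatalf("set xattr: %v", err) // fails on filesystems without user xattr support
	}

	// Lgetxattr returns (nil, nil) when the attribute is not present.
	val, err := system.Lgetxattr(path, attr)
	if err != nil {
		log.Fatalf("get xattr: %v", err)
	}
	fmt.Printf("%s = %q\n", attr, val)
}
```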
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/lcow.go
package system // import "github.com/docker/docker/pkg/system" import ( "runtime" "strings" specs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) // IsOSSupported determines if an operating system is supported by the host func IsOSSupported(os string) bool { if strings.EqualFold(runtime.GOOS, os) { return true } if LCOWSupported() && strings.EqualFold(os, "linux") { return true } return false } // ValidatePlatform determines if a platform structure is valid. // TODO This is a temporary windows-only function, should be replaced by // comparison of worker capabilities func ValidatePlatform(platform specs.Platform) error { if runtime.GOOS == "windows" { if !(platform.OS == runtime.GOOS || (LCOWSupported() && platform.OS == "linux")) { return errors.Errorf("unsupported os %s", platform.OS) } } return nil }
9,415
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/init_windows.go
package system // import "github.com/docker/docker/pkg/system" import ( "os" "github.com/Microsoft/hcsshim/osversion" "github.com/sirupsen/logrus" ) var ( // lcowSupported determines if Linux Containers on Windows are supported. lcowSupported = false // containerdRuntimeSupported determines if ContainerD should be the runtime. // As of March 2019, this is an experimental feature. containerdRuntimeSupported = false ) // InitLCOW sets whether LCOW is supported or not. Requires RS5+ func InitLCOW(experimental bool) { v := GetOSVersion() if experimental && v.Build >= osversion.RS5 { lcowSupported = true } } // InitContainerdRuntime sets whether to use ContainerD for runtime // on Windows. This is an experimental feature still in development, and // also requires an environment variable to be set (so as not to turn the // feature on from simply experimental which would also mean LCOW. func InitContainerdRuntime(experimental bool, cdPath string) { if experimental && len(cdPath) > 0 && len(os.Getenv("DOCKER_WINDOWS_CONTAINERD_RUNTIME")) > 0 { logrus.Warnf("Using ContainerD runtime. This feature is experimental") containerdRuntimeSupported = true } } // ContainerdRuntimeSupported returns true if the use of ContainerD runtime is supported. func ContainerdRuntimeSupported() bool { return containerdRuntimeSupported }
9,416
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/lcow_windows.go
package system // import "github.com/docker/docker/pkg/system" // LCOWSupported returns true if Linux containers on Windows are supported. func LCOWSupported() bool { return lcowSupported }
9,417
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/path_unix.go
// +build !windows package system // import "github.com/docker/docker/pkg/system" // GetLongPathName converts Windows short pathnames to full pathnames. // For example C:\Users\ADMIN~1 --> C:\Users\Administrator. // It is a no-op on non-Windows platforms func GetLongPathName(path string) (string, error) { return path, nil }
9,418
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/mknod_windows.go
package system // import "github.com/docker/docker/pkg/system" // Mknod is not implemented on Windows. func Mknod(path string, mode uint32, dev int) error { return ErrNotSupportedPlatform } // Mkdev is not implemented on Windows. func Mkdev(major int64, minor int64) uint32 { panic("Mkdev not implemented on Windows.") }
9,419
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/stat_linux.go
package system // import "github.com/docker/docker/pkg/system" import "syscall" // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { return &StatT{size: s.Size, mode: s.Mode, uid: s.Uid, gid: s.Gid, // the type is 32bit on mips rdev: uint64(s.Rdev), // nolint: unconvert mtim: s.Mtim}, nil } // FromStatT converts a syscall.Stat_t type to a system.Stat_t type // This is exposed on Linux as pkg/archive/changes uses it. func FromStatT(s *syscall.Stat_t) (*StatT, error) { return fromStatT(s) }
9,420
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/exitcode.go
package system // import "github.com/docker/docker/pkg/system" import ( "fmt" "os/exec" "syscall" ) // GetExitCode returns the ExitStatus of the specified error if its type is // exec.ExitError, returns 0 and an error otherwise. func GetExitCode(err error) (int, error) { exitCode := 0 if exiterr, ok := err.(*exec.ExitError); ok { if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { return procExit.ExitStatus(), nil } } return exitCode, fmt.Errorf("failed to get exit code") }
9,421
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go
// +build !windows package system // import "github.com/docker/docker/pkg/system" import ( "time" ) //setCTime will set the create time on a file. On Unix, the create //time is updated as a side effect of setting the modified time, so //no action is required. func setCTime(path string, ctime time.Time) error { return nil }
9,422
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/chtimes.go
package system // import "github.com/docker/docker/pkg/system" import ( "os" "time" ) // Chtimes changes the access time and modified time of a file at the given path func Chtimes(name string, atime time.Time, mtime time.Time) error { unixMinTime := time.Unix(0, 0) unixMaxTime := maxTime // If the modified time is prior to the Unix Epoch, or after the // end of Unix Time, os.Chtimes has undefined behavior // default to Unix Epoch in this case, just in case if atime.Before(unixMinTime) || atime.After(unixMaxTime) { atime = unixMinTime } if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) { mtime = unixMinTime } if err := os.Chtimes(name, atime, mtime); err != nil { return err } // Take platform specific action for setting create time. return setCTime(name, mtime) }
9,423
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/syscall_unix.go
// +build linux freebsd package system // import "github.com/docker/docker/pkg/system" import "golang.org/x/sys/unix" // Unmount is a platform-specific helper function to call // the unmount syscall. func Unmount(dest string) error { return unix.Unmount(dest, 0) } // CommandLineToArgv should not be used on Unix. // It simply returns commandLine in the only element in the returned array. func CommandLineToArgv(commandLine string) ([]string, error) { return []string{commandLine}, nil }
9,424
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go
// +build !linux,!freebsd package system // import "github.com/docker/docker/pkg/system" import "syscall" // LUtimesNano is only supported on linux and freebsd. func LUtimesNano(path string, ts []syscall.Timespec) error { return ErrNotSupportedPlatform }
9,425
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/umask_windows.go
package system // import "github.com/docker/docker/pkg/system" // Umask is not supported on the windows platform. func Umask(newmask int) (oldmask int, err error) { // should not be called on cli code path return 0, ErrNotSupportedPlatform }
9,426
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/init_unix.go
// +build !windows package system // import "github.com/docker/docker/pkg/system" // InitLCOW does nothing since LCOW is a windows only feature func InitLCOW(experimental bool) { } // ContainerdRuntimeSupported returns true if the use of ContainerD runtime is supported. func ContainerdRuntimeSupported(_ bool, _ string) bool { return true }
9,427
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/filesys_windows.go
package system // import "github.com/docker/docker/pkg/system" import ( "os" "path/filepath" "regexp" "strconv" "strings" "sync" "syscall" "time" "unsafe" winio "github.com/Microsoft/go-winio" "golang.org/x/sys/windows" ) const ( // SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" ) // MkdirAllWithACL is a wrapper for MkdirAll that creates a directory // with an appropriate SDDL defined ACL. func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { return mkdirall(path, true, sddl) } // MkdirAll implementation that is volume path aware for Windows. func MkdirAll(path string, _ os.FileMode, sddl string) error { return mkdirall(path, false, sddl) } // mkdirall is a custom version of os.MkdirAll modified for use on Windows // so that it is both volume path aware, and can create a directory with // a DACL. func mkdirall(path string, applyACL bool, sddl string) error { if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { return nil } // The rest of this method is largely copied from os.MkdirAll and should be kept // as-is to ensure compatibility. // Fast path: if we can tell whether path is a directory or file, stop with success or error. dir, err := os.Stat(path) if err == nil { if dir.IsDir() { return nil } return &os.PathError{ Op: "mkdir", Path: path, Err: syscall.ENOTDIR, } } // Slow path: make sure parent exists and then call Mkdir for path. i := len(path) for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. i-- } j := i for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. j-- } if j > 1 { // Create parent err = mkdirall(path[0:j-1], false, sddl) if err != nil { return err } } // Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result. if applyACL { err = mkdirWithACL(path, sddl) } else { err = os.Mkdir(path, 0) } if err != nil { // Handle arguments like "foo/." by // double-checking that directory doesn't exist. dir, err1 := os.Lstat(path) if err1 == nil && dir.IsDir() { return nil } return err } return nil } // mkdirWithACL creates a new directory. If there is an error, it will be of // type *PathError. . // // This is a modified and combined version of os.Mkdir and windows.Mkdir // in golang to cater for creating a directory am ACL permitting full // access, with inheritance, to any subfolder/file for Built-in Administrators // and Local System. func mkdirWithACL(name string, sddl string) error { sa := windows.SecurityAttributes{Length: 0} sd, err := winio.SddlToSecurityDescriptor(sddl) if err != nil { return &os.PathError{Op: "mkdir", Path: name, Err: err} } sa.Length = uint32(unsafe.Sizeof(sa)) sa.InheritHandle = 1 sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0])) namep, err := windows.UTF16PtrFromString(name) if err != nil { return &os.PathError{Op: "mkdir", Path: name, Err: err} } e := windows.CreateDirectory(namep, &sa) if e != nil { return &os.PathError{Op: "mkdir", Path: name, Err: e} } return nil } // IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows, // golang filepath.IsAbs does not consider a path \windows\system32 as absolute // as it doesn't start with a drive-letter/colon combination. However, in // docker we need to verify things such as WORKDIR /windows/system32 in // a Dockerfile (which gets translated to \windows\system32 when being processed // by the daemon. This SHOULD be treated as absolute from a docker processing // perspective. 
func IsAbs(path string) bool { if !filepath.IsAbs(path) { if !strings.HasPrefix(path, string(os.PathSeparator)) { return false } } return true } // The origin of the functions below here are the golang OS and windows packages, // slightly modified to only cope with files, not directories due to the // specific use case. // // The alteration is to allow a file on Windows to be opened with // FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating // the standby list, particularly when accessing large files such as layer.tar. // CreateSequential creates the named file with mode 0666 (before umask), truncating // it if it already exists. If successful, methods on the returned // File can be used for I/O; the associated file descriptor has mode // O_RDWR. // If there is an error, it will be of type *PathError. func CreateSequential(name string) (*os.File, error) { return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0) } // OpenSequential opens the named file for reading. If successful, methods on // the returned file can be used for reading; the associated file // descriptor has mode O_RDONLY. // If there is an error, it will be of type *PathError. func OpenSequential(name string) (*os.File, error) { return OpenFileSequential(name, os.O_RDONLY, 0) } // OpenFileSequential is the generalized open call; most users will use Open // or Create instead. // If there is an error, it will be of type *PathError. func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) { if name == "" { return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} } r, errf := windowsOpenFileSequential(name, flag, 0) if errf == nil { return r, nil } return nil, &os.PathError{Op: "open", Path: name, Err: errf} } func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0) if e != nil { return nil, e } return os.NewFile(uintptr(r), name), nil } func makeInheritSa() *windows.SecurityAttributes { var sa windows.SecurityAttributes sa.Length = uint32(unsafe.Sizeof(sa)) sa.InheritHandle = 1 return &sa } func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) { if len(path) == 0 { return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND } pathp, err := windows.UTF16PtrFromString(path) if err != nil { return windows.InvalidHandle, err } var access uint32 switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) { case windows.O_RDONLY: access = windows.GENERIC_READ case windows.O_WRONLY: access = windows.GENERIC_WRITE case windows.O_RDWR: access = windows.GENERIC_READ | windows.GENERIC_WRITE } if mode&windows.O_CREAT != 0 { access |= windows.GENERIC_WRITE } if mode&windows.O_APPEND != 0 { access &^= windows.GENERIC_WRITE access |= windows.FILE_APPEND_DATA } sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE) var sa *windows.SecurityAttributes if mode&windows.O_CLOEXEC == 0 { sa = makeInheritSa() } var createmode uint32 switch { case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL): createmode = windows.CREATE_NEW case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC): createmode = windows.CREATE_ALWAYS case mode&windows.O_CREAT == windows.O_CREAT: createmode = windows.OPEN_ALWAYS case mode&windows.O_TRUNC == windows.O_TRUNC: createmode = windows.TRUNCATE_EXISTING default: createmode = windows.OPEN_EXISTING } // Use FILE_FLAG_SEQUENTIAL_SCAN rather 
than FILE_ATTRIBUTE_NORMAL as implemented in golang. //https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) return h, e } // Helpers for TempFileSequential var rand uint32 var randmu sync.Mutex func reseed() uint32 { return uint32(time.Now().UnixNano() + int64(os.Getpid())) } func nextSuffix() string { randmu.Lock() r := rand if r == 0 { r = reseed() } r = r*1664525 + 1013904223 // constants from Numerical Recipes rand = r randmu.Unlock() return strconv.Itoa(int(1e9 + r%1e9))[1:] } // TempFileSequential is a copy of ioutil.TempFile, modified to use sequential // file access. Below is the original comment from golang: // TempFile creates a new temporary file in the directory dir // with a name beginning with prefix, opens the file for reading // and writing, and returns the resulting *os.File. // If dir is the empty string, TempFile uses the default directory // for temporary files (see os.TempDir). // Multiple programs calling TempFile simultaneously // will not choose the same file. The caller can use f.Name() // to find the pathname of the file. It is the caller's responsibility // to remove the file when no longer needed. func TempFileSequential(dir, prefix string) (f *os.File, err error) { if dir == "" { dir = os.TempDir() } nconflict := 0 for i := 0; i < 10000; i++ { name := filepath.Join(dir, prefix+nextSuffix()) f, err = OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) if os.IsExist(err) { if nconflict++; nconflict > 10 { randmu.Lock() rand = reseed() randmu.Unlock() } continue } break } return }
9,428
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/init.go
package system // import "github.com/docker/docker/pkg/system" import ( "syscall" "time" "unsafe" ) // Used by chtimes var maxTime time.Time func init() { // chtimes initialization if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { // This is a 64 bit timespec // os.Chtimes limits time to the following maxTime = time.Unix(0, 1<<63-1) } else { // This is a 32 bit timespec maxTime = time.Unix(1<<31-1, 0) } }
9,429
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/lcow_unix.go
// +build !windows package system // import "github.com/docker/docker/pkg/system" // LCOWSupported returns true if Linux containers on Windows are supported. func LCOWSupported() bool { return false }
9,430
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/errors.go
package system // import "github.com/docker/docker/pkg/system" import ( "errors" ) var ( // ErrNotSupportedPlatform means the platform is not supported. ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") // ErrNotSupportedOperatingSystem means the operating system is not supported. ErrNotSupportedOperatingSystem = errors.New("operating system is not supported") )
9,431
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/mknod.go
// +build !windows package system // import "github.com/docker/docker/pkg/system" import ( "golang.org/x/sys/unix" ) // Mknod creates a filesystem node (file, device special file or named pipe) named path // with attributes specified by mode and dev. func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } // Mkdev is used to build the value of linux devices (in /dev/) which specifies major // and minor number of the newly created device special file. // Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. // They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, // then the top 12 bits of the minor. func Mkdev(major int64, minor int64) uint32 { return uint32(unix.Mkdev(uint32(major), uint32(minor))) }
9,432
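The mknod.go row above describes the Linux device-number layout: the lower 8 bits of the minor, then 12 bits of the major, then the top 12 bits of the minor. Below is a dependency-free worked example of that packing; the values are illustrative and the local helper mirrors unix.Mkdev for the low 32 bits only:

```go
package main

import "fmt"

// mkdev packs major/minor the same way as unix.Mkdev, restricted to the low 32 bits.
func mkdev(major, minor uint32) uint32 {
	return (minor & 0xff) | ((major & 0xfff) << 8) | ((minor &^ 0xff) << 12)
}

func main() {
	// /dev/sda is conventionally major 8, minor 0; /dev/sda1 is major 8, minor 1.
	fmt.Printf("0x%x\n", mkdev(8, 0)) // 0x800
	fmt.Printf("0x%x\n", mkdev(8, 1)) // 0x801

	// A large minor shows the split: the low byte stays in place (0x45),
	// while the remaining minor bits move above the 12 major bits.
	fmt.Printf("0x%x\n", mkdev(8, 0x12345)) // 0x12300845
}
```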
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/filesys.go
// +build !windows package system // import "github.com/docker/docker/pkg/system" import ( "io/ioutil" "os" "path/filepath" ) // MkdirAllWithACL is a wrapper for MkdirAll on unix systems. func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { return MkdirAll(path, perm, sddl) } // MkdirAll creates a directory named path along with any necessary parents, // with permission specified by attribute perm for all dir created. func MkdirAll(path string, perm os.FileMode, sddl string) error { return os.MkdirAll(path, perm) } // IsAbs is a platform-specific wrapper for filepath.IsAbs. func IsAbs(path string) bool { return filepath.IsAbs(path) } // The functions below here are wrappers for the equivalents in the os and ioutils packages. // They are passthrough on Unix platforms, and only relevant on Windows. // CreateSequential creates the named file with mode 0666 (before umask), truncating // it if it already exists. If successful, methods on the returned // File can be used for I/O; the associated file descriptor has mode // O_RDWR. // If there is an error, it will be of type *PathError. func CreateSequential(name string) (*os.File, error) { return os.Create(name) } // OpenSequential opens the named file for reading. If successful, methods on // the returned file can be used for reading; the associated file // descriptor has mode O_RDONLY. // If there is an error, it will be of type *PathError. func OpenSequential(name string) (*os.File, error) { return os.Open(name) } // OpenFileSequential is the generalized open call; most users will use Open // or Create instead. It opens the named file with specified flag // (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, // methods on the returned File can be used for I/O. // If there is an error, it will be of type *PathError. func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) { return os.OpenFile(name, flag, perm) } // TempFileSequential creates a new temporary file in the directory dir // with a name beginning with prefix, opens the file for reading // and writing, and returns the resulting *os.File. // If dir is the empty string, TempFile uses the default directory // for temporary files (see os.TempDir). // Multiple programs calling TempFile simultaneously // will not choose the same file. The caller can use f.Name() // to find the pathname of the file. It is the caller's responsibility // to remove the file when no longer needed. func TempFileSequential(dir, prefix string) (f *os.File, err error) { return ioutil.TempFile(dir, prefix) }
9,433
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/stat_windows.go
package system // import "github.com/docker/docker/pkg/system" import ( "os" "time" ) // StatT type contains status of a file. It contains metadata // like permission, size, etc about a file. type StatT struct { mode os.FileMode size int64 mtim time.Time } // Size returns file's size. func (s StatT) Size() int64 { return s.size } // Mode returns file's permission mode. func (s StatT) Mode() os.FileMode { return os.FileMode(s.mode) } // Mtim returns file's last modification time. func (s StatT) Mtim() time.Time { return time.Time(s.mtim) } // Stat takes a path to a file and returns // a system.StatT type pertaining to that file. // // Throws an error if the file does not exist func Stat(path string) (*StatT, error) { fi, err := os.Stat(path) if err != nil { return nil, err } return fromStatT(&fi) } // fromStatT converts a os.FileInfo type to a system.StatT type func fromStatT(fi *os.FileInfo) (*StatT, error) { return &StatT{ size: (*fi).Size(), mode: (*fi).Mode(), mtim: (*fi).ModTime()}, nil }
9,434
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/process_unix.go
// +build linux freebsd darwin package system // import "github.com/docker/docker/pkg/system" import ( "syscall" "golang.org/x/sys/unix" ) // IsProcessAlive returns true if process with a given pid is running. func IsProcessAlive(pid int) bool { err := unix.Kill(pid, syscall.Signal(0)) if err == nil || err == unix.EPERM { return true } return false } // KillProcess force-stops a process. func KillProcess(pid int) { unix.Kill(pid, unix.SIGKILL) }
9,435
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/stat_darwin.go
package system // import "github.com/docker/docker/pkg/system" import "syscall" // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { return &StatT{size: s.Size, mode: uint32(s.Mode), uid: s.Uid, gid: s.Gid, rdev: uint64(s.Rdev), mtim: s.Mtimespec}, nil }
9,436
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/lstat_windows.go
package system // import "github.com/docker/docker/pkg/system" import "os" // Lstat calls os.Lstat to get a fileinfo interface back. // This is then copied into our own locally defined structure. func Lstat(path string) (*StatT, error) { fi, err := os.Lstat(path) if err != nil { return nil, err } return fromStatT(&fi) }
9,437
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/lstat_unix.go
// +build !windows package system // import "github.com/docker/docker/pkg/system" import ( "os" "syscall" ) // Lstat takes a path to a file and returns // a system.StatT type pertaining to that file. // // Throws an error if the file does not exist func Lstat(path string) (*StatT, error) { s := &syscall.Stat_t{} if err := syscall.Lstat(path, s); err != nil { return nil, &os.PathError{Op: "Lstat", Path: path, Err: err} } return fromStatT(s) }
9,438
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/utimes_unix.go
// +build linux freebsd package system // import "github.com/docker/docker/pkg/system" import ( "syscall" "golang.org/x/sys/unix" ) // LUtimesNano is used to change access and modification time of the specified path. // It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm. func LUtimesNano(path string, ts []syscall.Timespec) error { uts := []unix.Timespec{ unix.NsecToTimespec(syscall.TimespecToNsec(ts[0])), unix.NsecToTimespec(syscall.TimespecToNsec(ts[1])), } err := unix.UtimesNanoAt(unix.AT_FDCWD, path, uts, unix.AT_SYMLINK_NOFOLLOW) if err != nil && err != unix.ENOSYS { return err } return nil }
9,439
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go
package system // import "github.com/docker/docker/pkg/system" import "syscall" // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { return &StatT{size: s.Size, mode: uint32(s.Mode), uid: s.Uid, gid: s.Gid, rdev: uint64(s.Rdev), mtim: s.Mtimespec}, nil }
9,440
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go
package system // import "github.com/docker/docker/pkg/system" import ( "bufio" "io" "os" "strconv" "strings" "github.com/docker/go-units" ) // ReadMemInfo retrieves memory statistics of the host system and returns a // MemInfo type. func ReadMemInfo() (*MemInfo, error) { file, err := os.Open("/proc/meminfo") if err != nil { return nil, err } defer file.Close() return parseMemInfo(file) } // parseMemInfo parses the /proc/meminfo file into // a MemInfo object given an io.Reader to the file. // Throws error if there are problems reading from the file func parseMemInfo(reader io.Reader) (*MemInfo, error) { meminfo := &MemInfo{} scanner := bufio.NewScanner(reader) memAvailable := int64(-1) for scanner.Scan() { // Expected format: ["MemTotal:", "1234", "kB"] parts := strings.Fields(scanner.Text()) // Sanity checks: Skip malformed entries. if len(parts) < 3 || parts[2] != "kB" { continue } // Convert to bytes. size, err := strconv.Atoi(parts[1]) if err != nil { continue } bytes := int64(size) * units.KiB switch parts[0] { case "MemTotal:": meminfo.MemTotal = bytes case "MemFree:": meminfo.MemFree = bytes case "MemAvailable:": memAvailable = bytes case "SwapTotal:": meminfo.SwapTotal = bytes case "SwapFree:": meminfo.SwapFree = bytes } } if memAvailable != -1 { meminfo.MemFree = memAvailable } // Handle errors that may have occurred during the reading of the file. if err := scanner.Err(); err != nil { return nil, err } return meminfo, nil }
9,441
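The meminfo_linux.go row above parses /proc/meminfo fields and converts each value from kB to bytes before assigning it. A tiny standalone sketch of that per-line conversion on a sample entry (sample value only):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Typical /proc/meminfo line: fields are name, value, unit.
	line := "MemTotal:       16384256 kB"

	parts := strings.Fields(line)
	if len(parts) < 3 || parts[2] != "kB" {
		fmt.Println("skipping malformed entry")
		return
	}

	kb, err := strconv.Atoi(parts[1])
	if err != nil {
		fmt.Println("skipping malformed entry")
		return
	}

	// 1 KiB = 1024 bytes, matching the units.KiB constant used in parseMemInfo.
	bytes := int64(kb) * 1024
	fmt.Printf("%s %d bytes\n", strings.TrimSuffix(parts[0], ":"), bytes)
}
```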
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/stat_solaris.go
package system // import "github.com/docker/docker/pkg/system" import "syscall" // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { return &StatT{size: s.Size, mode: uint32(s.Mode), uid: s.Uid, gid: s.Gid, rdev: uint64(s.Rdev), mtim: s.Mtim}, nil }
9,442
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go
package system // import "github.com/docker/docker/pkg/system" import ( "time" "golang.org/x/sys/windows" ) //setCTime will set the create time on a file. On Windows, this requires //calling SetFileTime and explicitly including the create time. func setCTime(path string, ctime time.Time) error { ctimespec := windows.NsecToTimespec(ctime.UnixNano()) pathp, e := windows.UTF16PtrFromString(path) if e != nil { return e } h, e := windows.CreateFile(pathp, windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil, windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0) if e != nil { return e } defer windows.Close(h) c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec)) return windows.SetFileTime(h, &c, nil, nil) }
9,443
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go
// +build !linux,!windows package system // import "github.com/docker/docker/pkg/system" // ReadMemInfo is not supported on platforms other than linux and windows. func ReadMemInfo() (*MemInfo, error) { return nil, ErrNotSupportedPlatform }
9,444
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/umask.go
// +build !windows package system // import "github.com/docker/docker/pkg/system" import ( "golang.org/x/sys/unix" ) // Umask sets current process's file mode creation mask to newmask // and returns oldmask. func Umask(newmask int) (oldmask int, err error) { return unix.Umask(newmask), nil }
9,445
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go
package system // import "github.com/docker/docker/pkg/system" import ( "unsafe" "golang.org/x/sys/windows" ) var ( modkernel32 = windows.NewLazySystemDLL("kernel32.dll") procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") ) // https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx // https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx type memorystatusex struct { dwLength uint32 dwMemoryLoad uint32 ullTotalPhys uint64 ullAvailPhys uint64 ullTotalPageFile uint64 ullAvailPageFile uint64 ullTotalVirtual uint64 ullAvailVirtual uint64 ullAvailExtendedVirtual uint64 } // ReadMemInfo retrieves memory statistics of the host system and returns a // MemInfo type. func ReadMemInfo() (*MemInfo, error) { msi := &memorystatusex{ dwLength: 64, } r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) if r1 == 0 { return &MemInfo{}, nil } return &MemInfo{ MemTotal: int64(msi.ullTotalPhys), MemFree: int64(msi.ullAvailPhys), SwapTotal: int64(msi.ullTotalPageFile), SwapFree: int64(msi.ullAvailPageFile), }, nil }
9,446
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/system/path.go
package system // import "github.com/docker/docker/pkg/system" import ( "fmt" "path/filepath" "runtime" "strings" "github.com/containerd/continuity/pathdriver" ) const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" // DefaultPathEnv is unix style list of directories to search for // executables. Each directory is separated from the next by a colon // ':' character . func DefaultPathEnv(os string) string { if runtime.GOOS == "windows" { if os != runtime.GOOS { return defaultUnixPathEnv } // Deliberately empty on Windows containers on Windows as the default path will be set by // the container. Docker has no context of what the default path should be. return "" } return defaultUnixPathEnv } // CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, // is the system drive. // On Linux: this is a no-op. // On Windows: this does the following> // CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. // This is used, for example, when validating a user provided path in docker cp. // If a drive letter is supplied, it must be the system drive. The drive letter // is always removed. Also, it translates it to OS semantics (IOW / to \). We // need the path in this syntax so that it can ultimately be concatenated with // a Windows long-path which doesn't support drive-letters. Examples: // C: --> Fail // C:\ --> \ // a --> a // /a --> \a // d:\ --> Fail func CheckSystemDriveAndRemoveDriveLetter(path string, driver pathdriver.PathDriver) (string, error) { if runtime.GOOS != "windows" || LCOWSupported() { return path, nil } if len(path) == 2 && string(path[1]) == ":" { return "", fmt.Errorf("No relative path specified in %q", path) } if !driver.IsAbs(path) || len(path) < 2 { return filepath.FromSlash(path), nil } if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { return "", fmt.Errorf("The specified path is not on the system drive (C:)") } return filepath.FromSlash(path[2:]), nil }
9,447
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/tarsum/builder_context.go
package tarsum // import "github.com/docker/docker/pkg/tarsum" // BuilderContext is an interface extending TarSum by adding the Remove method. // In general there was concern about adding this method to TarSum itself // so instead it is being added just to "BuilderContext" which will then // only be used during the .dockerignore file processing // - see builder/evaluator.go type BuilderContext interface { TarSum Remove(string) } func (bc *tarSum) Remove(filename string) { for i, fis := range bc.sums { if fis.Name() == filename { bc.sums = append(bc.sums[:i], bc.sums[i+1:]...) // Note, we don't just return because there could be // more than one with this name } } }
9,448
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/tarsum/tarsum_spec.md
page_title: TarSum checksum specification
page_description: Documentation for algorithms used in the TarSum checksum calculation
page_keywords: docker, checksum, validation, tarsum

# TarSum Checksum Specification

## Abstract

This document describes the algorithms used in performing the TarSum checksum
calculation on filesystem layers, the need for this method over existing
methods, and the versioning of this calculation.

## Warning

This checksum algorithm is for best-effort comparison of file trees with fuzzy logic.

This is _not_ a cryptographic attestation, and should not be considered secure.

## Introduction

The transportation of filesystems, regarding Docker, is done with tar(1)
archives. There are a variety of tar serialization formats [2], and a key
concern here is ensuring a repeatable checksum given a set of inputs from a
generic tar archive. Types of transportation include distribution to and from a
registry endpoint, saving and loading through commands or Docker daemon APIs,
transferring the build context from client to Docker daemon, and committing the
filesystem of a container to become an image.

As tar archives are used for transit, but not preserved in many situations, the
focus of the algorithm is to ensure the integrity of the preserved filesystem,
while maintaining a deterministic accountability. This includes neither
constraining the ordering or manipulation of the files during the creation or
unpacking of the archive, nor including additional metadata state about the
file system attributes.

## Intended Audience

This document is outlining the methods used for consistent checksum calculation
for filesystems transported via tar archives.

Auditing these methodologies is an open and iterative process. This document
should accommodate the review of source code. Ultimately, this document should
be the starting point of further refinements to the algorithm and its future
versions.

## Concept

The checksum mechanism must ensure the integrity and assurance of the
filesystem payload.

## Checksum Algorithm Profile

A checksum mechanism must define the following operations and attributes:

* Associated hashing cipher - used to checksum each file payload and attribute
  information.
* Checksum list - each file of the filesystem archive has its checksum
  calculated from the payload and attributes of the file. The final checksum is
  calculated from this list, with specific ordering.
* Version - as the algorithm adapts to requirements, there are behaviors of the
  algorithm to manage by versioning.
* Archive being calculated - the tar archive having its checksum calculated

## Elements of TarSum checksum

The calculated sum output is a text string. The elements included in the output
of the calculated sum comprise the information needed for validation of the sum
(TarSum version and hashing cipher used) and the expected checksum in
hexadecimal form.

There are two delimiters used:

* '+' separates TarSum version from hashing cipher
* ':' separates calculation mechanics from expected hash

Example:

```
	"tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e"
	|         |       \                                                               |
	|         |        \                                                              |
	|_version_|_cipher__|__                                                           |
	|                      \                                                          |
	|_calculation_mechanics_|______________________expected_sum_______________________|
```

## Versioning

Versioning was introduced [0] to accommodate differences in calculation needed,
and the ability to maintain reverse compatibility.

The general algorithm will be described further in the 'Calculation' section.

### Version0

This is the initial version of TarSum.

Its element in the TarSum checksum string is `tarsum`.

### Version1

Its element in the TarSum checksum is `tarsum.v1`.

The notable changes in this version:

* Exclusion of file `mtime` from the file information headers, in each file
  checksum calculation
* Inclusion of extended attributes (`xattrs`. Also seen as `SCHILY.xattr.`
  prefixed Pax tar file info headers) keys and values in each file checksum
  calculation

### VersionDev

*Do not use unless validating refinements to the checksum algorithm*

Its element in the TarSum checksum is `tarsum.dev`.

This is a floating place holder for a next version and grounds for testing
changes. The methods used for calculation are subject to change without notice,
and this version is for testing and not for production use.

## Ciphers

The official default and standard hashing cipher used in the calculation
mechanic is `sha256`. This refers to the SHA256 hash algorithm as defined in
FIPS 180-4.

Though the TarSum algorithm itself is not exclusively bound to the single
hashing cipher `sha256`, support for alternate hashing ciphers was later added
[1]. Use cases for alternate ciphers could include future-proofing the TarSum
checksum format and using faster cipher hashes for tar filesystem checksums.

## Calculation

### Requirement

As mentioned earlier, the calculation is such that it takes into consideration
the lifecycle of the tar archive, in that the tar archive is not an immutable,
permanent artifact. Otherwise options like relying on a known hashing cipher
checksum of the archive itself would be reliable enough. The tar archive of the
filesystem is used as a transportation medium for Docker images, and the
archive is discarded once its contents are extracted. Therefore, for consistent
validation, items such as the order of files in the tar archive and time stamps
are subject to change once an image is received.

### Process

The method is typically iterative due to reading tar info headers from the
archive stream, though this is not a strict requirement.

#### Files

Each file in the tar archive has its contents (headers and body) checksummed
individually using the designated associated hashing cipher. The ordered
headers of the file are written to the checksum calculation first, and then the
payload of the file body.

The resulting checksum of the file is appended to the list of file sums. The
sum is encoded as a string of the hexadecimal digest. Additionally, the file
name and position in the archive is kept as reference for special ordering.

#### Headers

The following headers are read, in this order (and the corresponding
representation of its value):
* 'name' - string
* 'mode' - string of the base10 integer
* 'uid' - string of the integer
* 'gid' - string of the integer
* 'size' - string of the integer
* 'mtime' (_Version0 only_) - string of integer of the seconds since 1970-01-01 00:00:00 UTC
* 'typeflag' - string of the char
* 'linkname' - string
* 'uname' - string
* 'gname' - string
* 'devmajor' - string of the integer
* 'devminor' - string of the integer

For >= Version1, the extended attribute headers ("SCHILY.xattr." prefixed pax
headers) are included after the above list. These xattr key/value pairs are
first sorted by key.

#### Header Format

The ordered headers are written to the hash in the format of
"{.key}{.value}" with no newline.

#### Body

After the ordered headers of the file have been added to the checksum for the
file, the body of the file is written to the hash.

#### List of file sums

The list of file sums is sorted by the string of the hexadecimal digest.

If there are two files in the tar with matching paths, the order of occurrence
for that path is reflected for the sums of the corresponding file header and
body.

#### Final Checksum

Begin with a fresh or initial state of the associated hash cipher. If there is
additional payload to include in the TarSum calculation for the archive, it is
written first. Then each checksum from the ordered list of file sums is written
to the hash.

The resulting digest is formatted per the Elements of TarSum checksum,
including the TarSum version, the associated hash cipher and the hexadecimal
encoded checksum digest.

## Security Considerations

The initial version of TarSum has undergone one update that could invalidate
handcrafted tar archives. The tar archive format supports appending of files
with the same names as prior files in the archive. The latter file will clobber
the prior file of the same path. Due to this the algorithm now accounts for
files with matching paths, and orders the list of file sums accordingly [3].

## Footnotes

* [0] Versioning https://github.com/docker/docker/commit/747f89cd327db9d50251b17797c4d825162226d0
* [1] Alternate ciphers https://github.com/docker/docker/commit/4e9925d780665149b8bc940d5ba242ada1973c4e
* [2] Tar http://en.wikipedia.org/wiki/Tar_%28computing%29
* [3] Name collision https://github.com/docker/docker/commit/c5e6362c53cbbc09ddbabd5a7323e04438b57d31

## Acknowledgments

Joffrey F (shin-) and Guillaume J. Charmes (creack) for the initial work on the
TarSum calculation.
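As a companion to the calculation described above, a minimal Go sketch of driving a TarSum with the package in this directory follows; the archive name `layer.tar` is only a stand-in for any tar archive.

```go
package main

import (
    "fmt"
    "io"
    "io/ioutil"
    "os"

    "github.com/docker/docker/pkg/tarsum"
)

func main() {
    f, err := os.Open("layer.tar")
    if err != nil {
        panic(err)
    }
    defer f.Close()

    // Version1 excludes mtime and includes xattrs, per the spec above.
    ts, err := tarsum.NewTarSum(f, true, tarsum.Version1)
    if err != nil {
        panic(err)
    }
    // The archive must be streamed through the TarSum reader so that each
    // file's headers and body are hashed as they pass by.
    if _, err := io.Copy(ioutil.Discard, ts); err != nil {
        panic(err)
    }
    fmt.Println(ts.Sum(nil)) // e.g. "tarsum.v1+sha256:<hex digest>"
}
```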
9,449
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/tarsum/versioning.go
package tarsum // import "github.com/docker/docker/pkg/tarsum" import ( "archive/tar" "errors" "io" "sort" "strconv" "strings" ) // Version is used for versioning of the TarSum algorithm // based on the prefix of the hash used // i.e. "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b" type Version int // Prefix of "tarsum" const ( Version0 Version = iota Version1 // VersionDev this constant will be either the latest or an unsettled next-version of the TarSum calculation VersionDev ) // WriteV1Header writes a tar header to a writer in V1 tarsum format. func WriteV1Header(h *tar.Header, w io.Writer) { for _, elem := range v1TarHeaderSelect(h) { w.Write([]byte(elem[0] + elem[1])) } } // VersionLabelForChecksum returns the label for the given tarsum // checksum, i.e., everything before the first `+` character in // the string or an empty string if no label separator is found. func VersionLabelForChecksum(checksum string) string { // Checksums are in the form: {versionLabel}+{hashID}:{hex} sepIndex := strings.Index(checksum, "+") if sepIndex < 0 { return "" } return checksum[:sepIndex] } // GetVersions gets a list of all known tarsum versions. func GetVersions() []Version { v := []Version{} for k := range tarSumVersions { v = append(v, k) } return v } var ( tarSumVersions = map[Version]string{ Version0: "tarsum", Version1: "tarsum.v1", VersionDev: "tarsum.dev", } tarSumVersionsByName = map[string]Version{ "tarsum": Version0, "tarsum.v1": Version1, "tarsum.dev": VersionDev, } ) func (tsv Version) String() string { return tarSumVersions[tsv] } // GetVersionFromTarsum returns the Version from the provided string. func GetVersionFromTarsum(tarsum string) (Version, error) { tsv := tarsum if strings.Contains(tarsum, "+") { tsv = strings.SplitN(tarsum, "+", 2)[0] } for v, s := range tarSumVersions { if s == tsv { return v, nil } } return -1, ErrNotVersion } // Errors that may be returned by functions in this package var ( ErrNotVersion = errors.New("string does not include a TarSum Version") ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented") ) // tarHeaderSelector is the interface which different versions // of tarsum should use for selecting and ordering tar headers // for each item in the archive. type tarHeaderSelector interface { selectHeaders(h *tar.Header) (orderedHeaders [][2]string) } type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string) func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) { return f(h) } func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { return [][2]string{ {"name", h.Name}, {"mode", strconv.FormatInt(h.Mode, 10)}, {"uid", strconv.Itoa(h.Uid)}, {"gid", strconv.Itoa(h.Gid)}, {"size", strconv.FormatInt(h.Size, 10)}, {"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)}, {"typeflag", string([]byte{h.Typeflag})}, {"linkname", h.Linkname}, {"uname", h.Uname}, {"gname", h.Gname}, {"devmajor", strconv.FormatInt(h.Devmajor, 10)}, {"devminor", strconv.FormatInt(h.Devminor, 10)}, } } func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { // Get extended attributes. xAttrKeys := make([]string, len(h.Xattrs)) for k := range h.Xattrs { xAttrKeys = append(xAttrKeys, k) } sort.Strings(xAttrKeys) // Make the slice with enough capacity to hold the 11 basic headers // we want from the v0 selector plus however many xattrs we have. 
orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys)) // Copy all headers from v0 excluding the 'mtime' header (the 5th element). v0headers := v0TarHeaderSelect(h) orderedHeaders = append(orderedHeaders, v0headers[0:5]...) orderedHeaders = append(orderedHeaders, v0headers[6:]...) // Finally, append the sorted xattrs. for _, k := range xAttrKeys { orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]}) } return } var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{ Version0: v0TarHeaderSelect, Version1: v1TarHeaderSelect, VersionDev: v1TarHeaderSelect, } func getTarHeaderSelector(v Version) (tarHeaderSelector, error) { headerSelector, ok := registeredHeaderSelectors[v] if !ok { return nil, ErrVersionNotImplemented } return headerSelector, nil }
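A short sketch of parsing the version component out of an existing checksum string; the checksum value is the example from the spec.

```go
package main

import (
    "fmt"

    "github.com/docker/docker/pkg/tarsum"
)

func main() {
    checksum := "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e"

    // Everything before the '+' is the version label.
    fmt.Println(tarsum.VersionLabelForChecksum(checksum)) // tarsum.v1

    v, err := tarsum.GetVersionFromTarsum(checksum)
    if err != nil {
        panic(err)
    }
    fmt.Println(v == tarsum.Version1) // true
}
```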
9,450
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go
// Package tarsum provides algorithms to perform checksum calculation on // filesystem layers. // // The transportation of filesystems, regarding Docker, is done with tar(1) // archives. There are a variety of tar serialization formats [2], and a key // concern here is ensuring a repeatable checksum given a set of inputs from a // generic tar archive. Types of transportation include distribution to and from a // registry endpoint, saving and loading through commands or Docker daemon APIs, // transferring the build context from client to Docker daemon, and committing the // filesystem of a container to become an image. // // As tar archives are used for transit, but not preserved in many situations, the // focus of the algorithm is to ensure the integrity of the preserved filesystem, // while maintaining a deterministic accountability. This includes neither // constraining the ordering or manipulation of the files during the creation or // unpacking of the archive, nor include additional metadata state about the file // system attributes. package tarsum // import "github.com/docker/docker/pkg/tarsum" import ( "archive/tar" "bytes" "compress/gzip" "crypto" "crypto/sha256" "encoding/hex" "errors" "fmt" "hash" "io" "path" "strings" ) const ( buf8K = 8 * 1024 buf16K = 16 * 1024 buf32K = 32 * 1024 ) // NewTarSum creates a new interface for calculating a fixed time checksum of a // tar archive. // // This is used for calculating checksums of layers of an image, in some cases // including the byte payload of the image's json metadata as well, and for // calculating the checksums for buildcache. func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) { return NewTarSumHash(r, dc, v, DefaultTHash) } // NewTarSumHash creates a new TarSum, providing a THash to use rather than // the DefaultTHash. func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) { headerSelector, err := getTarHeaderSelector(v) if err != nil { return nil, err } ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash} err = ts.initTarSum() return ts, err } // NewTarSumForLabel creates a new TarSum using the provided TarSum version+hash label. func NewTarSumForLabel(r io.Reader, disableCompression bool, label string) (TarSum, error) { parts := strings.SplitN(label, "+", 2) if len(parts) != 2 { return nil, errors.New("tarsum label string should be of the form: {tarsum_version}+{hash_name}") } versionName, hashName := parts[0], parts[1] version, ok := tarSumVersionsByName[versionName] if !ok { return nil, fmt.Errorf("unknown TarSum version name: %q", versionName) } hashConfig, ok := standardHashConfigs[hashName] if !ok { return nil, fmt.Errorf("unknown TarSum hash name: %q", hashName) } tHash := NewTHash(hashConfig.name, hashConfig.hash.New) return NewTarSumHash(r, disableCompression, version, tHash) } // TarSum is the generic interface for calculating fixed time // checksums of a tar archive. type TarSum interface { io.Reader GetSums() FileInfoSums Sum([]byte) string Version() Version Hash() THash } // tarSum struct is the structure for a Version0 checksum calculation. type tarSum struct { io.Reader tarR *tar.Reader tarW *tar.Writer writer writeCloseFlusher bufTar *bytes.Buffer bufWriter *bytes.Buffer bufData []byte h hash.Hash tHash THash sums FileInfoSums fileCounter int64 currentFile string finished bool first bool DisableCompression bool // false by default. When false, the output gzip compressed. 
tarSumVersion Version // this field is not exported so it can not be mutated during use headerSelector tarHeaderSelector // handles selecting and ordering headers for files in the archive } func (ts tarSum) Hash() THash { return ts.tHash } func (ts tarSum) Version() Version { return ts.tarSumVersion } // THash provides a hash.Hash type generator and its name. type THash interface { Hash() hash.Hash Name() string } // NewTHash is a convenience method for creating a THash. func NewTHash(name string, h func() hash.Hash) THash { return simpleTHash{n: name, h: h} } type tHashConfig struct { name string hash crypto.Hash } var ( // NOTE: DO NOT include MD5 or SHA1, which are considered insecure. standardHashConfigs = map[string]tHashConfig{ "sha256": {name: "sha256", hash: crypto.SHA256}, "sha512": {name: "sha512", hash: crypto.SHA512}, } ) // DefaultTHash is default TarSum hashing algorithm - "sha256". var DefaultTHash = NewTHash("sha256", sha256.New) type simpleTHash struct { n string h func() hash.Hash } func (sth simpleTHash) Name() string { return sth.n } func (sth simpleTHash) Hash() hash.Hash { return sth.h() } func (ts *tarSum) encodeHeader(h *tar.Header) error { for _, elem := range ts.headerSelector.selectHeaders(h) { // Ignore these headers to be compatible with versions // before go 1.10 if elem[0] == "gname" || elem[0] == "uname" { elem[1] = "" } if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil { return err } } return nil } func (ts *tarSum) initTarSum() error { ts.bufTar = bytes.NewBuffer([]byte{}) ts.bufWriter = bytes.NewBuffer([]byte{}) ts.tarR = tar.NewReader(ts.Reader) ts.tarW = tar.NewWriter(ts.bufTar) if !ts.DisableCompression { ts.writer = gzip.NewWriter(ts.bufWriter) } else { ts.writer = &nopCloseFlusher{Writer: ts.bufWriter} } if ts.tHash == nil { ts.tHash = DefaultTHash } ts.h = ts.tHash.Hash() ts.h.Reset() ts.first = true ts.sums = FileInfoSums{} return nil } func (ts *tarSum) Read(buf []byte) (int, error) { if ts.finished { return ts.bufWriter.Read(buf) } if len(ts.bufData) < len(buf) { switch { case len(buf) <= buf8K: ts.bufData = make([]byte, buf8K) case len(buf) <= buf16K: ts.bufData = make([]byte, buf16K) case len(buf) <= buf32K: ts.bufData = make([]byte, buf32K) default: ts.bufData = make([]byte, len(buf)) } } buf2 := ts.bufData[:len(buf)] n, err := ts.tarR.Read(buf2) if err != nil { if err == io.EOF { if _, err := ts.h.Write(buf2[:n]); err != nil { return 0, err } if !ts.first { ts.sums = append(ts.sums, fileInfoSum{name: ts.currentFile, sum: hex.EncodeToString(ts.h.Sum(nil)), pos: ts.fileCounter}) ts.fileCounter++ ts.h.Reset() } else { ts.first = false } if _, err := ts.tarW.Write(buf2[:n]); err != nil { return 0, err } currentHeader, err := ts.tarR.Next() if err != nil { if err == io.EOF { if err := ts.tarW.Close(); err != nil { return 0, err } if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { return 0, err } if err := ts.writer.Close(); err != nil { return 0, err } ts.finished = true return ts.bufWriter.Read(buf) } return 0, err } ts.currentFile = path.Join(".", path.Join("/", currentHeader.Name)) if err := ts.encodeHeader(currentHeader); err != nil { return 0, err } if err := ts.tarW.WriteHeader(currentHeader); err != nil { return 0, err } if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { return 0, err } ts.writer.Flush() return ts.bufWriter.Read(buf) } return 0, err } // Filling the hash buffer if _, err = ts.h.Write(buf2[:n]); err != nil { return 0, err } // Filling the tar writer if _, err = ts.tarW.Write(buf2[:n]); err != nil { 
return 0, err } // Filling the output writer if _, err = io.Copy(ts.writer, ts.bufTar); err != nil { return 0, err } ts.writer.Flush() return ts.bufWriter.Read(buf) } func (ts *tarSum) Sum(extra []byte) string { ts.sums.SortBySums() h := ts.tHash.Hash() if extra != nil { h.Write(extra) } for _, fis := range ts.sums { h.Write([]byte(fis.Sum())) } checksum := ts.Version().String() + "+" + ts.tHash.Name() + ":" + hex.EncodeToString(h.Sum(nil)) return checksum } func (ts *tarSum) GetSums() FileInfoSums { return ts.sums }
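A sketch of selecting a non-default cipher, either through an explicit THash or the equivalent version+hash label; sha512 is one of the registered hash configs, and `layer.tar` is again only an example input.

```go
package main

import (
    "crypto/sha512"
    "fmt"
    "io"
    "io/ioutil"
    "os"

    "github.com/docker/docker/pkg/tarsum"
)

func main() {
    f, err := os.Open("layer.tar") // example archive
    if err != nil {
        panic(err)
    }
    defer f.Close()

    // Equivalent to NewTarSumForLabel(f, true, "tarsum.v1+sha512").
    ts, err := tarsum.NewTarSumHash(f, true, tarsum.Version1, tarsum.NewTHash("sha512", sha512.New))
    if err != nil {
        panic(err)
    }
    if _, err := io.Copy(ioutil.Discard, ts); err != nil {
        panic(err)
    }
    fmt.Println(ts.Sum(nil)) // "tarsum.v1+sha512:<hex digest>"
}
```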
9,451
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/tarsum/writercloser.go
package tarsum // import "github.com/docker/docker/pkg/tarsum"

import (
    "io"
)

type writeCloseFlusher interface {
    io.WriteCloser
    Flush() error
}

type nopCloseFlusher struct {
    io.Writer
}

func (n *nopCloseFlusher) Close() error {
    return nil
}

func (n *nopCloseFlusher) Flush() error {
    return nil
}
9,452
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go
package tarsum // import "github.com/docker/docker/pkg/tarsum" import ( "runtime" "sort" "strings" ) // FileInfoSumInterface provides an interface for accessing file checksum // information within a tar file. This info is accessed through interface // so the actual name and sum cannot be melded with. type FileInfoSumInterface interface { // File name Name() string // Checksum of this particular file and its headers Sum() string // Position of file in the tar Pos() int64 } type fileInfoSum struct { name string sum string pos int64 } func (fis fileInfoSum) Name() string { return fis.name } func (fis fileInfoSum) Sum() string { return fis.sum } func (fis fileInfoSum) Pos() int64 { return fis.pos } // FileInfoSums provides a list of FileInfoSumInterfaces. type FileInfoSums []FileInfoSumInterface // GetFile returns the first FileInfoSumInterface with a matching name. func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface { // We do case insensitive matching on Windows as c:\APP and c:\app are // the same. See issue #33107. for i := range fis { if (runtime.GOOS == "windows" && strings.EqualFold(fis[i].Name(), name)) || (runtime.GOOS != "windows" && fis[i].Name() == name) { return fis[i] } } return nil } // GetAllFile returns a FileInfoSums with all matching names. func (fis FileInfoSums) GetAllFile(name string) FileInfoSums { f := FileInfoSums{} for i := range fis { if fis[i].Name() == name { f = append(f, fis[i]) } } return f } // GetDuplicatePaths returns a FileInfoSums with all duplicated paths. func (fis FileInfoSums) GetDuplicatePaths() (dups FileInfoSums) { seen := make(map[string]int, len(fis)) // allocate earl. no need to grow this map. for i := range fis { f := fis[i] if _, ok := seen[f.Name()]; ok { dups = append(dups, f) } else { seen[f.Name()] = 0 } } return dups } // Len returns the size of the FileInfoSums. func (fis FileInfoSums) Len() int { return len(fis) } // Swap swaps two FileInfoSum values if a FileInfoSums list. func (fis FileInfoSums) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] } // SortByPos sorts FileInfoSums content by position. func (fis FileInfoSums) SortByPos() { sort.Sort(byPos{fis}) } // SortByNames sorts FileInfoSums content by name. func (fis FileInfoSums) SortByNames() { sort.Sort(byName{fis}) } // SortBySums sorts FileInfoSums content by sums. func (fis FileInfoSums) SortBySums() { dups := fis.GetDuplicatePaths() if len(dups) > 0 { sort.Sort(bySum{fis, dups}) } else { sort.Sort(bySum{fis, nil}) } } // byName is a sort.Sort helper for sorting by file names. // If names are the same, order them by their appearance in the tar archive type byName struct{ FileInfoSums } func (bn byName) Less(i, j int) bool { if bn.FileInfoSums[i].Name() == bn.FileInfoSums[j].Name() { return bn.FileInfoSums[i].Pos() < bn.FileInfoSums[j].Pos() } return bn.FileInfoSums[i].Name() < bn.FileInfoSums[j].Name() } // bySum is a sort.Sort helper for sorting by the sums of all the fileinfos in the tar archive type bySum struct { FileInfoSums dups FileInfoSums } func (bs bySum) Less(i, j int) bool { if bs.dups != nil && bs.FileInfoSums[i].Name() == bs.FileInfoSums[j].Name() { return bs.FileInfoSums[i].Pos() < bs.FileInfoSums[j].Pos() } return bs.FileInfoSums[i].Sum() < bs.FileInfoSums[j].Sum() } // byPos is a sort.Sort helper for sorting by the sums of all the fileinfos by their original order type byPos struct{ FileInfoSums } func (bp byPos) Less(i, j int) bool { return bp.FileInfoSums[i].Pos() < bp.FileInfoSums[j].Pos() }
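A sketch of inspecting the per-file sums once the archive has been consumed; sorting by position restores archive order, and the archive name is illustrative.

```go
package main

import (
    "fmt"
    "io"
    "io/ioutil"
    "os"

    "github.com/docker/docker/pkg/tarsum"
)

func main() {
    f, err := os.Open("layer.tar") // example archive
    if err != nil {
        panic(err)
    }
    defer f.Close()

    ts, err := tarsum.NewTarSum(f, true, tarsum.Version1)
    if err != nil {
        panic(err)
    }
    if _, err := io.Copy(ioutil.Discard, ts); err != nil {
        panic(err)
    }

    sums := ts.GetSums()
    sums.SortByPos() // back to the order the files appeared in the tar
    for _, fis := range sums {
        fmt.Printf("%4d  %s  %s\n", fis.Pos(), fis.Sum(), fis.Name())
    }
}
```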
9,453
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go
// +build !linux

package idtools // import "github.com/docker/docker/pkg/idtools"

import "fmt"

// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair
// and calls the appropriate helper function to add the group and then
// the user to the group in /etc/group and /etc/passwd respectively.
func AddNamespaceRangesUser(name string) (int, int, error) {
    return -1, -1, fmt.Errorf("No support for adding users or groups on this OS")
}
9,454
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
// +build !windows package idtools // import "github.com/docker/docker/pkg/idtools" import ( "bytes" "fmt" "io" "os" "path/filepath" "strings" "sync" "syscall" "github.com/docker/docker/pkg/system" "github.com/opencontainers/runc/libcontainer/user" ) var ( entOnce sync.Once getentCmd string ) func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { // make an array containing the original path asked for, plus (for mkAll == true) // all path components leading up to the complete path that don't exist before we MkdirAll // so that we can chown all of them properly at the end. If chownExisting is false, we won't // chown the full directory path if it exists var paths []string stat, err := system.Stat(path) if err == nil { if !stat.IsDir() { return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} } if !chownExisting { return nil } // short-circuit--we were called with an existing directory and chown was requested return lazyChown(path, owner.UID, owner.GID, stat) } if os.IsNotExist(err) { paths = []string{path} } if mkAll { // walk back to "/" looking for directories which do not exist // and add them to the paths array for chown after creation dirPath := path for { dirPath = filepath.Dir(dirPath) if dirPath == "/" { break } if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { paths = append(paths, dirPath) } } if err := system.MkdirAll(path, mode, ""); err != nil { return err } } else { if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { return err } } // even if it existed, we will chown the requested path + any subpaths that // didn't exist when we called MkdirAll for _, pathComponent := range paths { if err := lazyChown(pathComponent, owner.UID, owner.GID, nil); err != nil { return err } } return nil } // CanAccess takes a valid (existing) directory and a uid, gid pair and determines // if that uid, gid pair has access (execute bit) to the directory func CanAccess(path string, pair Identity) bool { statInfo, err := system.Stat(path) if err != nil { return false } fileMode := os.FileMode(statInfo.Mode()) permBits := fileMode.Perm() return accessible(statInfo.UID() == uint32(pair.UID), statInfo.GID() == uint32(pair.GID), permBits) } func accessible(isOwner, isGroup bool, perms os.FileMode) bool { if isOwner && (perms&0100 == 0100) { return true } if isGroup && (perms&0010 == 0010) { return true } if perms&0001 == 0001 { return true } return false } // LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, // followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupUser(username string) (user.User, error) { // first try a local system files lookup using existing capabilities usr, err := user.LookupUser(username) if err == nil { return usr, nil } // local files lookup failed; attempt to call `getent` to query configured passwd dbs usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username)) if err != nil { return user.User{}, err } return usr, nil } // LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, // followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupUID(uid int) (user.User, error) { // first try a local system files lookup using existing capabilities usr, err := user.LookupUid(uid) if err == nil { return usr, nil } // local files lookup failed; attempt to call `getent` to query configured passwd dbs return getentUser(fmt.Sprintf("%s %d", 
"passwd", uid)) } func getentUser(args string) (user.User, error) { reader, err := callGetent(args) if err != nil { return user.User{}, err } users, err := user.ParsePasswd(reader) if err != nil { return user.User{}, err } if len(users) == 0 { return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1]) } return users[0], nil } // LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name, // followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupGroup(groupname string) (user.Group, error) { // first try a local system files lookup using existing capabilities group, err := user.LookupGroup(groupname) if err == nil { return group, nil } // local files lookup failed; attempt to call `getent` to query configured group dbs return getentGroup(fmt.Sprintf("%s %s", "group", groupname)) } // LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, // followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupGID(gid int) (user.Group, error) { // first try a local system files lookup using existing capabilities group, err := user.LookupGid(gid) if err == nil { return group, nil } // local files lookup failed; attempt to call `getent` to query configured group dbs return getentGroup(fmt.Sprintf("%s %d", "group", gid)) } func getentGroup(args string) (user.Group, error) { reader, err := callGetent(args) if err != nil { return user.Group{}, err } groups, err := user.ParseGroup(reader) if err != nil { return user.Group{}, err } if len(groups) == 0 { return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1]) } return groups[0], nil } func callGetent(args string) (io.Reader, error) { entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) // if no `getent` command on host, can't do anything else if getentCmd == "" { return nil, fmt.Errorf("") } out, err := execCmd(getentCmd, args) if err != nil { exitCode, errC := system.GetExitCode(err) if errC != nil { return nil, err } switch exitCode { case 1: return nil, fmt.Errorf("getent reported invalid parameters/database unknown") case 2: terms := strings.Split(args, " ") return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0]) case 3: return nil, fmt.Errorf("getent database doesn't support enumeration") default: return nil, err } } return bytes.NewReader(out), nil } // lazyChown performs a chown only if the uid/gid don't match what's requested // Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the // dir is on an NFS share, so don't call chown unless we absolutely must. func lazyChown(p string, uid, gid int, stat *system.StatT) error { if stat == nil { var err error stat, err = system.Stat(p) if err != nil { return err } } if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) { return nil } return os.Chown(p, uid, gid) }
9,455
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go
// +build !windows package idtools // import "github.com/docker/docker/pkg/idtools" import ( "fmt" "os/exec" "path/filepath" "strings" ) func resolveBinary(binname string) (string, error) { binaryPath, err := exec.LookPath(binname) if err != nil { return "", err } resolvedPath, err := filepath.EvalSymlinks(binaryPath) if err != nil { return "", err } //only return no error if the final resolved binary basename //matches what was searched for if filepath.Base(resolvedPath) == binname { return resolvedPath, nil } return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) } func execCmd(cmd, args string) ([]byte, error) { execCmd := exec.Command(cmd, strings.Split(args, " ")...) return execCmd.CombinedOutput() }
9,456
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go
package idtools // import "github.com/docker/docker/pkg/idtools" import ( "fmt" "regexp" "sort" "strconv" "strings" "sync" ) // add a user and/or group to Linux /etc/passwd, /etc/group using standard // Linux distribution commands: // adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group <username> // useradd -r -s /bin/false <username> var ( once sync.Once userCommand string cmdTemplates = map[string]string{ "adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s", "useradd": "-r -s /bin/false %s", "usermod": "-%s %d-%d %s", } idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`) // default length for a UID/GID subordinate range defaultRangeLen = 65536 defaultRangeStart = 100000 userMod = "usermod" ) // AddNamespaceRangesUser takes a username and uses the standard system // utility to create a system user/group pair used to hold the // /etc/sub{uid,gid} ranges which will be used for user namespace // mapping ranges in containers. func AddNamespaceRangesUser(name string) (int, int, error) { if err := addUser(name); err != nil { return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) } // Query the system for the created uid and gid pair out, err := execCmd("id", name) if err != nil { return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err) } matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out))) if len(matches) != 3 { return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out)) } uid, err := strconv.Atoi(matches[1]) if err != nil { return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err) } gid, err := strconv.Atoi(matches[2]) if err != nil { return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err) } // Now we need to create the subuid/subgid ranges for our new user/group (system users // do not get auto-created ranges in subuid/subgid) if err := createSubordinateRanges(name); err != nil { return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err) } return uid, gid, nil } func addUser(userName string) error { once.Do(func() { // set up which commands are used for adding users/groups dependent on distro if _, err := resolveBinary("adduser"); err == nil { userCommand = "adduser" } else if _, err := resolveBinary("useradd"); err == nil { userCommand = "useradd" } }) if userCommand == "" { return fmt.Errorf("Cannot add user; no useradd/adduser binary found") } args := fmt.Sprintf(cmdTemplates[userCommand], userName) out, err := execCmd(userCommand, args) if err != nil { return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out)) } return nil } func createSubordinateRanges(name string) error { // first, we should verify that ranges weren't automatically created // by the distro tooling ranges, err := parseSubuid(name) if err != nil { return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err) } if len(ranges) == 0 { // no UID ranges; let's create one startID, err := findNextUIDRange() if err != nil { return fmt.Errorf("Can't find available subuid range: %v", err) } out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name)) if err != nil { return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err) } } ranges, err = parseSubgid(name) if err != nil { return fmt.Errorf("Error while looking for subgid 
ranges for user %q: %v", name, err) } if len(ranges) == 0 { // no GID ranges; let's create one startID, err := findNextGIDRange() if err != nil { return fmt.Errorf("Can't find available subgid range: %v", err) } out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name)) if err != nil { return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err) } } return nil } func findNextUIDRange() (int, error) { ranges, err := parseSubuid("ALL") if err != nil { return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err) } sort.Sort(ranges) return findNextRangeStart(ranges) } func findNextGIDRange() (int, error) { ranges, err := parseSubgid("ALL") if err != nil { return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err) } sort.Sort(ranges) return findNextRangeStart(ranges) } func findNextRangeStart(rangeList ranges) (int, error) { startID := defaultRangeStart for _, arange := range rangeList { if wouldOverlap(arange, startID) { startID = arange.Start + arange.Length } } return startID, nil } func wouldOverlap(arange subIDRange, ID int) bool { low := ID high := ID + defaultRangeLen if (low >= arange.Start && low <= arange.Start+arange.Length) || (high <= arange.Start+arange.Length && high >= arange.Start) { return true } return false }
9,457
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/idtools/idtools.go
package idtools // import "github.com/docker/docker/pkg/idtools" import ( "bufio" "fmt" "os" "strconv" "strings" ) // IDMap contains a single entry for user namespace range remapping. An array // of IDMap entries represents the structure that will be provided to the Linux // kernel for creating a user namespace. type IDMap struct { ContainerID int `json:"container_id"` HostID int `json:"host_id"` Size int `json:"size"` } type subIDRange struct { Start int Length int } type ranges []subIDRange func (e ranges) Len() int { return len(e) } func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } const ( subuidFileName = "/etc/subuid" subgidFileName = "/etc/subgid" ) // MkdirAllAndChown creates a directory (include any along the path) and then modifies // ownership to the requested uid/gid. If the directory already exists, this // function will still change ownership to the requested uid/gid pair. func MkdirAllAndChown(path string, mode os.FileMode, owner Identity) error { return mkdirAs(path, mode, owner, true, true) } // MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. // If the directory already exists, this function still changes ownership. // Note that unlike os.Mkdir(), this function does not return IsExist error // in case path already exists. func MkdirAndChown(path string, mode os.FileMode, owner Identity) error { return mkdirAs(path, mode, owner, false, true) } // MkdirAllAndChownNew creates a directory (include any along the path) and then modifies // ownership ONLY of newly created directories to the requested uid/gid. If the // directories along the path exist, no change of ownership will be performed func MkdirAllAndChownNew(path string, mode os.FileMode, owner Identity) error { return mkdirAs(path, mode, owner, true, false) } // GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. // If the maps are empty, then the root uid/gid will default to "real" 0/0 func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { uid, err := toHost(0, uidMap) if err != nil { return -1, -1, err } gid, err := toHost(0, gidMap) if err != nil { return -1, -1, err } return uid, gid, nil } // toContainer takes an id mapping, and uses it to translate a // host ID to the remapped ID. If no map is provided, then the translation // assumes a 1-to-1 mapping and returns the passed in id func toContainer(hostID int, idMap []IDMap) (int, error) { if idMap == nil { return hostID, nil } for _, m := range idMap { if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { contID := m.ContainerID + (hostID - m.HostID) return contID, nil } } return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) } // toHost takes an id mapping and a remapped ID, and translates the // ID to the mapped host ID. 
If no map is provided, then the translation // assumes a 1-to-1 mapping and returns the passed in id # func toHost(contID int, idMap []IDMap) (int, error) { if idMap == nil { return contID, nil } for _, m := range idMap { if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) { hostID := m.HostID + (contID - m.ContainerID) return hostID, nil } } return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) } // Identity is either a UID and GID pair or a SID (but not both) type Identity struct { UID int GID int SID string } // IdentityMapping contains a mappings of UIDs and GIDs type IdentityMapping struct { uids []IDMap gids []IDMap } // NewIdentityMapping takes a requested user and group name and // using the data from /etc/sub{uid,gid} ranges, creates the // proper uid and gid remapping ranges for that user/group pair func NewIdentityMapping(username, groupname string) (*IdentityMapping, error) { subuidRanges, err := parseSubuid(username) if err != nil { return nil, err } subgidRanges, err := parseSubgid(groupname) if err != nil { return nil, err } if len(subuidRanges) == 0 { return nil, fmt.Errorf("No subuid ranges found for user %q", username) } if len(subgidRanges) == 0 { return nil, fmt.Errorf("No subgid ranges found for group %q", groupname) } return &IdentityMapping{ uids: createIDMap(subuidRanges), gids: createIDMap(subgidRanges), }, nil } // NewIDMappingsFromMaps creates a new mapping from two slices // Deprecated: this is a temporary shim while transitioning to IDMapping func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IdentityMapping { return &IdentityMapping{uids: uids, gids: gids} } // RootPair returns a uid and gid pair for the root user. The error is ignored // because a root user always exists, and the defaults are correct when the uid // and gid maps are empty. func (i *IdentityMapping) RootPair() Identity { uid, gid, _ := GetRootUIDGID(i.uids, i.gids) return Identity{UID: uid, GID: gid} } // ToHost returns the host UID and GID for the container uid, gid. 
// Remapping is only performed if the ids aren't already the remapped root ids func (i *IdentityMapping) ToHost(pair Identity) (Identity, error) { var err error target := i.RootPair() if pair.UID != target.UID { target.UID, err = toHost(pair.UID, i.uids) if err != nil { return target, err } } if pair.GID != target.GID { target.GID, err = toHost(pair.GID, i.gids) } return target, err } // ToContainer returns the container UID and GID for the host uid and gid func (i *IdentityMapping) ToContainer(pair Identity) (int, int, error) { uid, err := toContainer(pair.UID, i.uids) if err != nil { return -1, -1, err } gid, err := toContainer(pair.GID, i.gids) return uid, gid, err } // Empty returns true if there are no id mappings func (i *IdentityMapping) Empty() bool { return len(i.uids) == 0 && len(i.gids) == 0 } // UIDs return the UID mapping // TODO: remove this once everything has been refactored to use pairs func (i *IdentityMapping) UIDs() []IDMap { return i.uids } // GIDs return the UID mapping // TODO: remove this once everything has been refactored to use pairs func (i *IdentityMapping) GIDs() []IDMap { return i.gids } func createIDMap(subidRanges ranges) []IDMap { idMap := []IDMap{} containerID := 0 for _, idrange := range subidRanges { idMap = append(idMap, IDMap{ ContainerID: containerID, HostID: idrange.Start, Size: idrange.Length, }) containerID = containerID + idrange.Length } return idMap } func parseSubuid(username string) (ranges, error) { return parseSubidFile(subuidFileName, username) } func parseSubgid(username string) (ranges, error) { return parseSubidFile(subgidFileName, username) } // parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid) // and return all found ranges for a specified username. If the special value // "ALL" is supplied for username, then all ranges in the file will be returned func parseSubidFile(path, username string) (ranges, error) { var rangeList ranges subidFile, err := os.Open(path) if err != nil { return rangeList, err } defer subidFile.Close() s := bufio.NewScanner(subidFile) for s.Scan() { if err := s.Err(); err != nil { return rangeList, err } text := strings.TrimSpace(s.Text()) if text == "" || strings.HasPrefix(text, "#") { continue } parts := strings.Split(text, ":") if len(parts) != 3 { return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) } if parts[0] == username || username == "ALL" { startid, err := strconv.Atoi(parts[1]) if err != nil { return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) } length, err := strconv.Atoi(parts[2]) if err != nil { return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) } rangeList = append(rangeList, subIDRange{startid, length}) } } return rangeList, nil }
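A small sketch of translating IDs through an IdentityMapping built from explicit maps; the 100000/65536 range is a typical but purely illustrative /etc/subuid allocation.

```go
package main

import (
    "fmt"

    "github.com/docker/docker/pkg/idtools"
)

func main() {
    // A single remapping range: container IDs 0-65535 map to host IDs 100000-165535.
    uidMap := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
    gidMap := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
    mapping := idtools.NewIDMappingsFromMaps(uidMap, gidMap)

    // Root inside the user namespace is host uid/gid 100000.
    root := mapping.RootPair()
    fmt.Println(root.UID, root.GID) // 100000 100000

    host, err := mapping.ToHost(idtools.Identity{UID: 1000, GID: 1000})
    if err != nil {
        panic(err)
    }
    fmt.Println(host.UID, host.GID) // 101000 101000
}
```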
9,458
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
package idtools // import "github.com/docker/docker/pkg/idtools"

import (
    "os"

    "github.com/docker/docker/pkg/system"
)

// This is currently a wrapper around MkdirAll, however, since currently
// permissions aren't set through this path, the identity isn't utilized.
// Ownership is handled elsewhere, but in the future could be supported here
// too.
func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error {
    if err := system.MkdirAll(path, mode, ""); err != nil {
        return err
    }
    return nil
}

// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
// if that uid, gid pair has access (execute bit) to the directory.
// Windows does not require/support this function, so always return true.
func CanAccess(path string, identity Identity) bool {
    return true
}
9,459
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go
package homedir // import "github.com/docker/docker/pkg/homedir"

import (
    "os"
)

// Key returns the env var name for the user's home dir based on
// the platform being run on
func Key() string {
    return "USERPROFILE"
}

// Get returns the home directory of the current user with the help of
// environment variables depending on the target operating system.
// Returned path should be used with "path/filepath" to form new paths.
func Get() string {
    return os.Getenv(Key())
}

// GetShortcutString returns the string that is shortcut to user's home directory
// in the native shell of the platform running on.
func GetShortcutString() string {
    return "%USERPROFILE%" // be careful while using in format functions
}
9,460
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go
// +build !windows package homedir // import "github.com/docker/docker/pkg/homedir" import ( "os" "github.com/opencontainers/runc/libcontainer/user" ) // Key returns the env var name for the user's home dir based on // the platform being run on func Key() string { return "HOME" } // Get returns the home directory of the current user with the help of // environment variables depending on the target operating system. // Returned path should be used with "path/filepath" to form new paths. func Get() string { home := os.Getenv(Key()) if home == "" { if u, err := user.CurrentUser(); err == nil { return u.Home } } return home } // GetShortcutString returns the string that is shortcut to user's home directory // in the native shell of the platform running on. func GetShortcutString() string { return "~" }
9,461
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go
package homedir // import "github.com/docker/docker/pkg/homedir" import ( "errors" "os" "path/filepath" "strings" "github.com/docker/docker/pkg/idtools" ) // GetStatic returns the home directory for the current user without calling // os/user.Current(). This is useful for static-linked binary on glibc-based // system, because a call to os/user.Current() in a static binary leads to // segfault due to a glibc issue that won't be fixed in a short term. // (#29344, golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341) func GetStatic() (string, error) { uid := os.Getuid() usr, err := idtools.LookupUID(uid) if err != nil { return "", err } return usr.Home, nil } // GetRuntimeDir returns XDG_RUNTIME_DIR. // XDG_RUNTIME_DIR is typically configured via pam_systemd. // GetRuntimeDir returns non-nil error if XDG_RUNTIME_DIR is not set. // // See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html func GetRuntimeDir() (string, error) { if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" { return xdgRuntimeDir, nil } return "", errors.New("could not get XDG_RUNTIME_DIR") } // StickRuntimeDirContents sets the sticky bit on files that are under // XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system. // // StickyRuntimeDir returns slice of sticked files. // StickyRuntimeDir returns nil error if XDG_RUNTIME_DIR is not set. // // See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html func StickRuntimeDirContents(files []string) ([]string, error) { runtimeDir, err := GetRuntimeDir() if err != nil { // ignore error if runtimeDir is empty return nil, nil } runtimeDir, err = filepath.Abs(runtimeDir) if err != nil { return nil, err } var sticked []string for _, f := range files { f, err = filepath.Abs(f) if err != nil { return sticked, err } if strings.HasPrefix(f, runtimeDir+"/") { if err = stick(f); err != nil { return sticked, err } sticked = append(sticked, f) } } return sticked, nil } func stick(f string) error { st, err := os.Stat(f) if err != nil { return err } m := st.Mode() m |= os.ModeSticky return os.Chmod(f, m) } // GetDataHome returns XDG_DATA_HOME. // GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set. // // See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html func GetDataHome() (string, error) { if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" { return xdgDataHome, nil } home := os.Getenv("HOME") if home == "" { return "", errors.New("could not get either XDG_DATA_HOME or HOME") } return filepath.Join(home, ".local", "share"), nil } // GetConfigHome returns XDG_CONFIG_HOME. // GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. // // See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html func GetConfigHome() (string, error) { if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" { return xdgConfigHome, nil } home := os.Getenv("HOME") if home == "" { return "", errors.New("could not get either XDG_CONFIG_HOME or HOME") } return filepath.Join(home, ".config"), nil }
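A small sketch combining the per-platform Get with the Linux-only XDG helpers defined in this file; on other systems the XDG calls return errors instead.

```go
package main

import (
    "fmt"

    "github.com/docker/docker/pkg/homedir"
)

func main() {
    fmt.Println("home:", homedir.Get())

    // XDG_RUNTIME_DIR is typically set by pam_systemd; it is an error if unset.
    if dir, err := homedir.GetRuntimeDir(); err == nil {
        fmt.Println("runtime dir:", dir)
    } else {
        fmt.Println("no runtime dir:", err)
    }

    // Falls back to $HOME/.local/share when XDG_DATA_HOME is unset.
    if data, err := homedir.GetDataHome(); err == nil {
        fmt.Println("data home:", data)
    }
}
```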
9,462
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go
// +build !linux package homedir // import "github.com/docker/docker/pkg/homedir" import ( "errors" ) // GetStatic is not needed for non-linux systems. // (Precisely, it is needed only for glibc-based linux systems.) func GetStatic() (string, error) { return "", errors.New("homedir.GetStatic() is not supported on this system") } // GetRuntimeDir is unsupported on non-linux system. func GetRuntimeDir() (string, error) { return "", errors.New("homedir.GetRuntimeDir() is not supported on this system") } // StickRuntimeDirContents is unsupported on non-linux system. func StickRuntimeDirContents(files []string) ([]string, error) { return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system") } // GetDataHome is unsupported on non-linux system. func GetDataHome() (string, error) { return "", errors.New("homedir.GetDataHome() is not supported on this system") } // GetConfigHome is unsupported on non-linux system. func GetConfigHome() (string, error) { return "", errors.New("homedir.GetConfigHome() is not supported on this system") }
9,463
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go
package mount // import "github.com/docker/docker/pkg/mount" // MakeShared ensures a mounted filesystem has the SHARED mount option enabled. // See the supported options in flags.go for further reference. func MakeShared(mountPoint string) error { return ensureMountedAs(mountPoint, SHARED) } // MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. // See the supported options in flags.go for further reference. func MakeRShared(mountPoint string) error { return ensureMountedAs(mountPoint, RSHARED) } // MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. // See the supported options in flags.go for further reference. func MakePrivate(mountPoint string) error { return ensureMountedAs(mountPoint, PRIVATE) } // MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option // enabled. See the supported options in flags.go for further reference. func MakeRPrivate(mountPoint string) error { return ensureMountedAs(mountPoint, RPRIVATE) } // MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. // See the supported options in flags.go for further reference. func MakeSlave(mountPoint string) error { return ensureMountedAs(mountPoint, SLAVE) } // MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. // See the supported options in flags.go for further reference. func MakeRSlave(mountPoint string) error { return ensureMountedAs(mountPoint, RSLAVE) } // MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option // enabled. See the supported options in flags.go for further reference. func MakeUnbindable(mountPoint string) error { return ensureMountedAs(mountPoint, UNBINDABLE) } // MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount // option enabled. See the supported options in flags.go for further reference. func MakeRUnbindable(mountPoint string) error { return ensureMountedAs(mountPoint, RUNBINDABLE) } // MakeMount ensures that the file or directory given is a mount point, // bind mounting it to itself it case it is not. func MakeMount(mnt string) error { mounted, err := Mounted(mnt) if err != nil { return err } if mounted { return nil } return mount(mnt, mnt, "none", uintptr(BIND), "") } func ensureMountedAs(mnt string, flags int) error { if err := MakeMount(mnt); err != nil { return err } return mount("", mnt, "none", uintptr(flags), "") }
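A hedged sketch of marking a subtree recursively shared; this requires root privileges on Linux, and the directory is only an example.

```go
package main

import "github.com/docker/docker/pkg/mount"

func main() {
    // Bind-mount the directory onto itself if it is not already a mount
    // point, then apply the recursive-shared propagation flag so nested
    // mounts are visible across mount namespaces.
    if err := mount.MakeRShared("/var/lib/docker"); err != nil {
        panic(err)
    }
}
```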
9,464
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go
// +build !linux,!freebsd freebsd,!cgo package mount // import "github.com/docker/docker/pkg/mount" // These flags are unsupported. const ( BIND = 0 DIRSYNC = 0 MANDLOCK = 0 NOATIME = 0 NODEV = 0 NODIRATIME = 0 NOEXEC = 0 NOSUID = 0 UNBINDABLE = 0 RUNBINDABLE = 0 PRIVATE = 0 RPRIVATE = 0 SHARED = 0 RSHARED = 0 SLAVE = 0 RSLAVE = 0 RBIND = 0 RELATIME = 0 RELATIVE = 0 REMOUNT = 0 STRICTATIME = 0 SYNCHRONOUS = 0 RDONLY = 0 mntDetach = 0 )
9,465
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/mount/mountinfo.go
package mount // import "github.com/docker/docker/pkg/mount"

// Info reveals information about a particular mounted filesystem. This
// struct is populated from the content in the /proc/<pid>/mountinfo file.
type Info struct {
	// ID is a unique identifier of the mount (may be reused after umount).
	ID int

	// Parent indicates the ID of the mount parent (or of self for the top of the
	// mount tree).
	Parent int

	// Major indicates one half of the device ID which identifies the device class.
	Major int

	// Minor indicates one half of the device ID which identifies a specific
	// instance of device.
	Minor int

	// Root of the mount within the filesystem.
	Root string

	// Mountpoint indicates the mount point relative to the process's root.
	Mountpoint string

	// Opts represents mount-specific options.
	Opts string

	// Optional represents optional fields.
	Optional string

	// Fstype indicates the type of filesystem, such as EXT3.
	Fstype string

	// Source indicates filesystem specific information or "none".
	Source string

	// VfsOpts represents per super block options.
	VfsOpts string
}
9,466
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/mount/mount.go
package mount // import "github.com/docker/docker/pkg/mount"

import (
	"sort"
	"strconv"
	"strings"

	"github.com/sirupsen/logrus"
)

// mountError records an error from mount or unmount operation
type mountError struct {
	op             string
	source, target string
	flags          uintptr
	data           string
	err            error
}

func (e *mountError) Error() string {
	out := e.op + " "

	if e.source != "" {
		out += e.source + ":" + e.target
	} else {
		out += e.target
	}

	if e.flags != uintptr(0) {
		out += ", flags: 0x" + strconv.FormatUint(uint64(e.flags), 16)
	}
	if e.data != "" {
		out += ", data: " + e.data
	}

	out += ": " + e.err.Error()
	return out
}

// Cause returns the underlying cause of the error
func (e *mountError) Cause() error {
	return e.err
}

// FilterFunc is a type defining a callback function
// to filter out unwanted entries. It takes a pointer
// to an Info struct (not fully populated, currently
// only Mountpoint is filled in), and returns two booleans:
//  - skip: true if the entry should be skipped
//  - stop: true if parsing should be stopped after the entry
type FilterFunc func(*Info) (skip, stop bool)

// PrefixFilter discards all entries whose mount points
// do not start with a prefix specified
func PrefixFilter(prefix string) FilterFunc {
	return func(m *Info) (bool, bool) {
		skip := !strings.HasPrefix(m.Mountpoint, prefix)
		return skip, false
	}
}

// SingleEntryFilter looks for a specific entry
func SingleEntryFilter(mp string) FilterFunc {
	return func(m *Info) (bool, bool) {
		if m.Mountpoint == mp {
			return false, true // don't skip, stop now
		}
		return true, false // skip, keep going
	}
}

// ParentsFilter returns all entries whose mount points
// can be parents of a path specified, discarding others.
// For example, given `/var/lib/docker/something`, entries
// like `/var/lib/docker`, `/var` and `/` are returned.
func ParentsFilter(path string) FilterFunc {
	return func(m *Info) (bool, bool) {
		skip := !strings.HasPrefix(path, m.Mountpoint)
		return skip, false
	}
}

// GetMounts retrieves a list of mounts for the current running process,
// with an optional filter applied (use nil for no filter).
func GetMounts(f FilterFunc) ([]*Info, error) {
	return parseMountTable(f)
}

// Mounted determines if a specified mountpoint has been mounted.
// On Linux it looks at /proc/self/mountinfo.
func Mounted(mountpoint string) (bool, error) {
	entries, err := GetMounts(SingleEntryFilter(mountpoint))
	if err != nil {
		return false, err
	}

	return len(entries) > 0, nil
}

// Mount will mount filesystem according to the specified configuration, on the
// condition that the target path is *not* already mounted. Options must be
// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
// flags.go for supported option flags.
func Mount(device, target, mType, options string) error {
	flag, data := parseOptions(options)
	if flag&REMOUNT != REMOUNT {
		if mounted, err := Mounted(target); err != nil || mounted {
			return err
		}
	}
	return mount(device, target, mType, uintptr(flag), data)
}

// ForceMount will mount a filesystem according to the specified configuration,
// *regardless* if the target path is not already mounted. Options must be
// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
// flags.go for supported option flags.
func ForceMount(device, target, mType, options string) error {
	flag, data := parseOptions(options)
	return mount(device, target, mType, uintptr(flag), data)
}

// Unmount lazily unmounts a filesystem on supported platforms, otherwise
// does a normal unmount.
func Unmount(target string) error {
	return unmount(target, mntDetach)
}

// RecursiveUnmount unmounts the target and all mounts underneath, starting with
// the deepest mount first.
func RecursiveUnmount(target string) error {
	mounts, err := parseMountTable(PrefixFilter(target))
	if err != nil {
		return err
	}

	// Make the deepest mount be first
	sort.Slice(mounts, func(i, j int) bool {
		return len(mounts[i].Mountpoint) > len(mounts[j].Mountpoint)
	})

	for i, m := range mounts {
		logrus.Debugf("Trying to unmount %s", m.Mountpoint)
		err = unmount(m.Mountpoint, mntDetach)
		if err != nil {
			if i == len(mounts)-1 { // last mount
				if mounted, e := Mounted(m.Mountpoint); e != nil || mounted {
					return err
				}
			} else {
				// This is some submount, we can ignore this error for now, the final unmount will fail if this is a real problem
				logrus.WithError(err).Warnf("Failed to unmount submount %s", m.Mountpoint)
			}
		}

		logrus.Debugf("Unmounted %s", m.Mountpoint)
	}
	return nil
}
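A minimal sketch of the exported Mount/Mounted/RecursiveUnmount API defined above; it assumes root privileges on Linux and uses placeholder paths.

package main

import (
	"log"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	src, dst := "/srv/data", "/mnt/data" // hypothetical paths

	// "bind,ro" is translated by parseOptions into the BIND and RDONLY flags.
	if err := mount.Mount(src, dst, "none", "bind,ro"); err != nil {
		log.Fatalf("mount: %v", err)
	}

	if mounted, err := mount.Mounted(dst); err != nil || !mounted {
		log.Fatalf("expected %s to be mounted (err=%v)", dst, err)
	}

	// Lazily unmounts dst and anything stacked beneath it, deepest first.
	if err := mount.RecursiveUnmount(dst); err != nil {
		log.Fatalf("unmount: %v", err)
	}
}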
9,467
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go
// +build !linux,!freebsd freebsd,!cgo

package mount // import "github.com/docker/docker/pkg/mount"

func mount(device, target, mType string, flag uintptr, data string) error {
	panic("Not implemented")
}
9,468
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go
// +build freebsd,cgo

package mount // import "github.com/docker/docker/pkg/mount"

/*
#include <sys/mount.h>
*/
import "C"

const (
	// RDONLY will mount the filesystem as read-only.
	RDONLY = C.MNT_RDONLY

	// NOSUID will not allow set-user-identifier or set-group-identifier bits to
	// take effect.
	NOSUID = C.MNT_NOSUID

	// NOEXEC will not allow execution of any binaries on the mounted file system.
	NOEXEC = C.MNT_NOEXEC

	// SYNCHRONOUS will allow any I/O to the file system to be done synchronously.
	SYNCHRONOUS = C.MNT_SYNCHRONOUS

	// NOATIME will not update the file access time when reading from a file.
	NOATIME = C.MNT_NOATIME
)

// These flags are unsupported.
const (
	BIND        = 0
	DIRSYNC     = 0
	MANDLOCK    = 0
	NODEV       = 0
	NODIRATIME  = 0
	UNBINDABLE  = 0
	RUNBINDABLE = 0
	PRIVATE     = 0
	RPRIVATE    = 0
	SHARED      = 0
	RSHARED     = 0
	SLAVE       = 0
	RSLAVE      = 0
	RBIND       = 0
	RELATIVE    = 0
	RELATIME    = 0
	REMOUNT     = 0
	STRICTATIME = 0
	mntDetach   = 0
)
9,469
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go
package mount // import "github.com/docker/docker/pkg/mount"

/*
#include <sys/param.h>
#include <sys/ucred.h>
#include <sys/mount.h>
*/
import "C"

import (
	"fmt"
	"reflect"
	"unsafe"
)

//parseMountTable returns information about mounted filesystems
func parseMountTable(filter FilterFunc) ([]*Info, error) {
	var rawEntries *C.struct_statfs

	count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))
	if count == 0 {
		return nil, fmt.Errorf("Failed to call getmntinfo")
	}

	var entries []C.struct_statfs
	header := (*reflect.SliceHeader)(unsafe.Pointer(&entries))
	header.Cap = count
	header.Len = count
	header.Data = uintptr(unsafe.Pointer(rawEntries))

	var out []*Info
	for _, entry := range entries {
		var mountinfo Info
		var skip, stop bool
		mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])

		if filter != nil {
			// filter out entries we're not interested in
			skip, stop = filter(&mountinfo)
			if skip {
				continue
			}
		}

		mountinfo.Source = C.GoString(&entry.f_mntfromname[0])
		mountinfo.Fstype = C.GoString(&entry.f_fstypename[0])

		out = append(out, &mountinfo)
		if stop {
			break
		}
	}
	return out, nil
}
9,470
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/mount/unmount_unix.go
// +build !windows

package mount // import "github.com/docker/docker/pkg/mount"

import "golang.org/x/sys/unix"

func unmount(target string, flags int) error {
	err := unix.Unmount(target, flags)
	if err == nil || err == unix.EINVAL {
		// Ignore "not mounted" error here. Note the same error
		// can be returned if flags are invalid, so this code
		// assumes that the flags value is always correct.
		return nil
	}

	return &mountError{
		op:     "umount",
		target: target,
		flags:  uintptr(flags),
		err:    err,
	}
}
9,471
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go
package mount // import "github.com/docker/docker/pkg/mount"

func parseMountTable(f FilterFunc) ([]*Info, error) {
	// Do NOT return an error!
	return nil, nil
}
9,472
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/mount/unmount_unsupported.go
// +build windows

package mount // import "github.com/docker/docker/pkg/mount"

func unmount(target string, flag int) error {
	panic("Not implemented")
}
9,473
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go
package mount // import "github.com/docker/docker/pkg/mount"

/*
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/_iovec.h>
#include <sys/mount.h>
#include <sys/param.h>
*/
import "C"

import (
	"strings"
	"syscall"
	"unsafe"
)

func allocateIOVecs(options []string) []C.struct_iovec {
	out := make([]C.struct_iovec, len(options))
	for i, option := range options {
		out[i].iov_base = unsafe.Pointer(C.CString(option))
		out[i].iov_len = C.size_t(len(option) + 1)
	}
	return out
}

func mount(device, target, mType string, flag uintptr, data string) error {
	isNullFS := false

	xs := strings.Split(data, ",")
	for _, x := range xs {
		if x == "bind" {
			isNullFS = true
		}
	}

	options := []string{"fspath", target}
	if isNullFS {
		options = append(options, "fstype", "nullfs", "target", device)
	} else {
		options = append(options, "fstype", mType, "from", device)
	}
	rawOptions := allocateIOVecs(options)
	for _, rawOption := range rawOptions {
		defer C.free(rawOption.iov_base)
	}

	if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 {
		return &mountError{
			op:     "mount",
			source: device,
			target: target,
			flags:  flag,
			err:    syscall.Errno(errno),
		}
	}
	return nil
}
9,474
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/mount/flags_linux.go
package mount // import "github.com/docker/docker/pkg/mount"

import (
	"golang.org/x/sys/unix"
)

const (
	// RDONLY will mount the file system read-only.
	RDONLY = unix.MS_RDONLY

	// NOSUID will not allow set-user-identifier or set-group-identifier bits to
	// take effect.
	NOSUID = unix.MS_NOSUID

	// NODEV will not interpret character or block special devices on the file
	// system.
	NODEV = unix.MS_NODEV

	// NOEXEC will not allow execution of any binaries on the mounted file system.
	NOEXEC = unix.MS_NOEXEC

	// SYNCHRONOUS will allow I/O to the file system to be done synchronously.
	SYNCHRONOUS = unix.MS_SYNCHRONOUS

	// DIRSYNC will force all directory updates within the file system to be done
	// synchronously. This affects the following system calls: create, link,
	// unlink, symlink, mkdir, rmdir, mknod and rename.
	DIRSYNC = unix.MS_DIRSYNC

	// REMOUNT will attempt to remount an already-mounted file system. This is
	// commonly used to change the mount flags for a file system, especially to
	// make a readonly file system writeable. It does not change device or mount
	// point.
	REMOUNT = unix.MS_REMOUNT

	// MANDLOCK will force mandatory locks on a filesystem.
	MANDLOCK = unix.MS_MANDLOCK

	// NOATIME will not update the file access time when reading from a file.
	NOATIME = unix.MS_NOATIME

	// NODIRATIME will not update the directory access time.
	NODIRATIME = unix.MS_NODIRATIME

	// BIND remounts a subtree somewhere else.
	BIND = unix.MS_BIND

	// RBIND remounts a subtree and all possible submounts somewhere else.
	RBIND = unix.MS_BIND | unix.MS_REC

	// UNBINDABLE creates a mount which cannot be cloned through a bind operation.
	UNBINDABLE = unix.MS_UNBINDABLE

	// RUNBINDABLE marks the entire mount tree as UNBINDABLE.
	RUNBINDABLE = unix.MS_UNBINDABLE | unix.MS_REC

	// PRIVATE creates a mount which carries no propagation abilities.
	PRIVATE = unix.MS_PRIVATE

	// RPRIVATE marks the entire mount tree as PRIVATE.
	RPRIVATE = unix.MS_PRIVATE | unix.MS_REC

	// SLAVE creates a mount which receives propagation from its master, but not
	// vice versa.
	SLAVE = unix.MS_SLAVE

	// RSLAVE marks the entire mount tree as SLAVE.
	RSLAVE = unix.MS_SLAVE | unix.MS_REC

	// SHARED creates a mount which provides the ability to create mirrors of
	// that mount such that mounts and unmounts within any of the mirrors
	// propagate to the other mirrors.
	SHARED = unix.MS_SHARED

	// RSHARED marks the entire mount tree as SHARED.
	RSHARED = unix.MS_SHARED | unix.MS_REC

	// RELATIME updates inode access times relative to modify or change time.
	RELATIME = unix.MS_RELATIME

	// STRICTATIME allows to explicitly request full atime updates. This makes
	// it possible for the kernel to default to relatime or noatime but still
	// allow userspace to override it.
	STRICTATIME = unix.MS_STRICTATIME

	mntDetach = unix.MNT_DETACH
)
9,475
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go
// +build !windows,!linux,!freebsd freebsd,!cgo

package mount // import "github.com/docker/docker/pkg/mount"

import (
	"fmt"
	"runtime"
)

func parseMountTable(f FilterFunc) ([]*Info, error) {
	return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
}
9,476
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/mount/flags.go
package mount // import "github.com/docker/docker/pkg/mount" import ( "fmt" "strings" ) var flags = map[string]struct { clear bool flag int }{ "defaults": {false, 0}, "ro": {false, RDONLY}, "rw": {true, RDONLY}, "suid": {true, NOSUID}, "nosuid": {false, NOSUID}, "dev": {true, NODEV}, "nodev": {false, NODEV}, "exec": {true, NOEXEC}, "noexec": {false, NOEXEC}, "sync": {false, SYNCHRONOUS}, "async": {true, SYNCHRONOUS}, "dirsync": {false, DIRSYNC}, "remount": {false, REMOUNT}, "mand": {false, MANDLOCK}, "nomand": {true, MANDLOCK}, "atime": {true, NOATIME}, "noatime": {false, NOATIME}, "diratime": {true, NODIRATIME}, "nodiratime": {false, NODIRATIME}, "bind": {false, BIND}, "rbind": {false, RBIND}, "unbindable": {false, UNBINDABLE}, "runbindable": {false, RUNBINDABLE}, "private": {false, PRIVATE}, "rprivate": {false, RPRIVATE}, "shared": {false, SHARED}, "rshared": {false, RSHARED}, "slave": {false, SLAVE}, "rslave": {false, RSLAVE}, "relatime": {false, RELATIME}, "norelatime": {true, RELATIME}, "strictatime": {false, STRICTATIME}, "nostrictatime": {true, STRICTATIME}, } var validFlags = map[string]bool{ "": true, "size": true, "mode": true, "uid": true, "gid": true, "nr_inodes": true, "nr_blocks": true, "mpol": true, } var propagationFlags = map[string]bool{ "bind": true, "rbind": true, "unbindable": true, "runbindable": true, "private": true, "rprivate": true, "shared": true, "rshared": true, "slave": true, "rslave": true, } // MergeTmpfsOptions merge mount options to make sure there is no duplicate. func MergeTmpfsOptions(options []string) ([]string, error) { // We use collisions maps to remove duplicates. // For flag, the key is the flag value (the key for propagation flag is -1) // For data=value, the key is the data flagCollisions := map[int]bool{} dataCollisions := map[string]bool{} var newOptions []string // We process in reverse order for i := len(options) - 1; i >= 0; i-- { option := options[i] if option == "defaults" { continue } if f, ok := flags[option]; ok && f.flag != 0 { // There is only one propagation mode key := f.flag if propagationFlags[option] { key = -1 } // Check to see if there is collision for flag if !flagCollisions[key] { // We prepend the option and add to collision map newOptions = append([]string{option}, newOptions...) flagCollisions[key] = true } continue } opt := strings.SplitN(option, "=", 2) if len(opt) != 2 || !validFlags[opt[0]] { return nil, fmt.Errorf("Invalid tmpfs option %q", opt) } if !dataCollisions[opt[0]] { // We prepend the option and add to collision map newOptions = append([]string{option}, newOptions...) dataCollisions[opt[0]] = true } } return newOptions, nil } // Parse fstab type mount options into mount() flags // and device specific data func parseOptions(options string) (int, string) { var ( flag int data []string ) for _, o := range strings.Split(options, ",") { // If the option does not exist in the flags table or the flag // is not supported on the platform, // then it is a data value for a specific fs type if f, exists := flags[o]; exists && f.flag != 0 { if f.clear { flag &= ^f.flag } else { flag |= f.flag } } else { data = append(data, o) } } return flag, strings.Join(data, ",") }
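A small sketch of MergeTmpfsOptions from the file above: options are de-duplicated from right to left, so later flags and data keys win and "defaults" is dropped. The option list is invented for illustration, and the shown result assumes a Linux build where "ro"/"rw" map to a non-zero flag.

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	opts := []string{"defaults", "ro", "size=64m", "rw", "size=128m"}

	merged, err := mount.MergeTmpfsOptions(opts)
	if err != nil {
		log.Fatal(err)
	}

	// Later entries win and duplicates collapse: [rw size=128m]
	fmt.Println(merged)
}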
9,477
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go
package mount // import "github.com/docker/docker/pkg/mount"

import (
	"golang.org/x/sys/unix"
)

const (
	// ptypes is the set propagation types.
	ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE

	// pflags is the full set valid flags for a change propagation call.
	pflags = ptypes | unix.MS_REC | unix.MS_SILENT

	// broflags is the combination of bind and read only
	broflags = unix.MS_BIND | unix.MS_RDONLY
)

// isremount returns true if either device name or flags identify a remount request, false otherwise.
func isremount(device string, flags uintptr) bool {
	switch {
	// We treat device "" and "none" as a remount request to provide compatibility with
	// requests that don't explicitly set MS_REMOUNT such as those manipulating bind mounts.
	case flags&unix.MS_REMOUNT != 0, device == "", device == "none":
		return true
	default:
		return false
	}
}

func mount(device, target, mType string, flags uintptr, data string) error {
	oflags := flags &^ ptypes
	if !isremount(device, flags) || data != "" {
		// Initial call applying all non-propagation flags for mount
		// or remount with changed data
		if err := unix.Mount(device, target, mType, oflags, data); err != nil {
			return &mountError{
				op:     "mount",
				source: device,
				target: target,
				flags:  oflags,
				data:   data,
				err:    err,
			}
		}
	}

	if flags&ptypes != 0 {
		// Change the propagation type.
		if err := unix.Mount("", target, "", flags&pflags, ""); err != nil {
			return &mountError{
				op:     "remount",
				target: target,
				flags:  flags & pflags,
				err:    err,
			}
		}
	}

	if oflags&broflags == broflags {
		// Remount the bind to apply read only.
		if err := unix.Mount("", target, "", oflags|unix.MS_REMOUNT, ""); err != nil {
			return &mountError{
				op:     "remount-ro",
				target: target,
				flags:  oflags | unix.MS_REMOUNT,
				err:    err,
			}
		}
	}

	return nil
}
9,478
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go
package mount // import "github.com/docker/docker/pkg/mount" import ( "bufio" "fmt" "io" "os" "strconv" "strings" "github.com/pkg/errors" ) func parseInfoFile(r io.Reader, filter FilterFunc) ([]*Info, error) { s := bufio.NewScanner(r) out := []*Info{} var err error for s.Scan() { if err = s.Err(); err != nil { return nil, err } /* See http://man7.org/linux/man-pages/man5/proc.5.html 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) (1) mount ID: unique identifier of the mount (may be reused after umount) (2) parent ID: ID of parent (or of self for the top of the mount tree) (3) major:minor: value of st_dev for files on filesystem (4) root: root of the mount within the filesystem (5) mount point: mount point relative to the process's root (6) mount options: per mount options (7) optional fields: zero or more fields of the form "tag[:value]" (8) separator: marks the end of the optional fields (9) filesystem type: name of filesystem of the form "type[.subtype]" (10) mount source: filesystem specific information or "none" (11) super options: per super block options */ text := s.Text() fields := strings.Split(text, " ") numFields := len(fields) if numFields < 10 { // should be at least 10 fields return nil, fmt.Errorf("Parsing '%s' failed: not enough fields (%d)", text, numFields) } p := &Info{} // ignore any numbers parsing errors, as there should not be any p.ID, _ = strconv.Atoi(fields[0]) p.Parent, _ = strconv.Atoi(fields[1]) mm := strings.Split(fields[2], ":") if len(mm) != 2 { return nil, fmt.Errorf("Parsing '%s' failed: unexpected minor:major pair %s", text, mm) } p.Major, _ = strconv.Atoi(mm[0]) p.Minor, _ = strconv.Atoi(mm[1]) p.Root, err = strconv.Unquote(`"` + fields[3] + `"`) if err != nil { return nil, errors.Wrapf(err, "Parsing '%s' failed: unable to unquote root field", fields[3]) } p.Mountpoint, err = strconv.Unquote(`"` + fields[4] + `"`) if err != nil { return nil, errors.Wrapf(err, "Parsing '%s' failed: unable to unquote mount point field", fields[4]) } p.Opts = fields[5] var skip, stop bool if filter != nil { // filter out entries we're not interested in skip, stop = filter(p) if skip { continue } } // one or more optional fields, when a separator (-) i := 6 for ; i < numFields && fields[i] != "-"; i++ { switch i { case 6: p.Optional = fields[6] default: /* NOTE there might be more optional fields before the such as fields[7]...fields[N] (where N < sepIndex), although as of Linux kernel 4.15 the only known ones are mount propagation flags in fields[6]. The correct behavior is to ignore any unknown optional fields. */ break } } if i == numFields { return nil, fmt.Errorf("Parsing '%s' failed: missing separator ('-')", text) } // There should be 3 fields after the separator... if i+4 > numFields { return nil, fmt.Errorf("Parsing '%s' failed: not enough fields after a separator", text) } // ... but in Linux <= 3.9 mounting a cifs with spaces in a share name // (like "//serv/My Documents") _may_ end up having a space in the last field // of mountinfo (like "unc=//serv/My Documents"). Since kernel 3.10-rc1, cifs // option unc= is ignored, so a space should not appear. In here we ignore // those "extra" fields caused by extra spaces. 
p.Fstype = fields[i+1] p.Source = fields[i+2] p.VfsOpts = fields[i+3] out = append(out, p) if stop { break } } return out, nil } // Parse /proc/self/mountinfo because comparing Dev and ino does not work from // bind mounts func parseMountTable(filter FilterFunc) ([]*Info, error) { f, err := os.Open("/proc/self/mountinfo") if err != nil { return nil, err } defer f.Close() return parseInfoFile(f, filter) } // PidMountInfo collects the mounts for a specific process ID. If the process // ID is unknown, it is better to use `GetMounts` which will inspect // "/proc/self/mountinfo" instead. func PidMountInfo(pid int) ([]*Info, error) { f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) if err != nil { return nil, err } defer f.Close() return parseInfoFile(f, nil) }
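A short sketch of the mountinfo parsing above as seen through the public API: GetMounts with PrefixFilter lists every mount point under a given prefix (the prefix here is only an example).

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	infos, err := mount.GetMounts(mount.PrefixFilter("/var/lib/docker"))
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range infos {
		fmt.Printf("%s on %s type %s (%s)\n", m.Source, m.Mountpoint, m.Fstype, m.Opts)
	}
}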
9,479
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/errdefs/helpers.go
package errdefs // import "github.com/docker/docker/errdefs" import "context" type errNotFound struct{ error } func (errNotFound) NotFound() {} func (e errNotFound) Cause() error { return e.error } // NotFound is a helper to create an error of the class with the same name from any error type func NotFound(err error) error { if err == nil || IsNotFound(err) { return err } return errNotFound{err} } type errInvalidParameter struct{ error } func (errInvalidParameter) InvalidParameter() {} func (e errInvalidParameter) Cause() error { return e.error } // InvalidParameter is a helper to create an error of the class with the same name from any error type func InvalidParameter(err error) error { if err == nil || IsInvalidParameter(err) { return err } return errInvalidParameter{err} } type errConflict struct{ error } func (errConflict) Conflict() {} func (e errConflict) Cause() error { return e.error } // Conflict is a helper to create an error of the class with the same name from any error type func Conflict(err error) error { if err == nil || IsConflict(err) { return err } return errConflict{err} } type errUnauthorized struct{ error } func (errUnauthorized) Unauthorized() {} func (e errUnauthorized) Cause() error { return e.error } // Unauthorized is a helper to create an error of the class with the same name from any error type func Unauthorized(err error) error { if err == nil || IsUnauthorized(err) { return err } return errUnauthorized{err} } type errUnavailable struct{ error } func (errUnavailable) Unavailable() {} func (e errUnavailable) Cause() error { return e.error } // Unavailable is a helper to create an error of the class with the same name from any error type func Unavailable(err error) error { if err == nil || IsUnavailable(err) { return err } return errUnavailable{err} } type errForbidden struct{ error } func (errForbidden) Forbidden() {} func (e errForbidden) Cause() error { return e.error } // Forbidden is a helper to create an error of the class with the same name from any error type func Forbidden(err error) error { if err == nil || IsForbidden(err) { return err } return errForbidden{err} } type errSystem struct{ error } func (errSystem) System() {} func (e errSystem) Cause() error { return e.error } // System is a helper to create an error of the class with the same name from any error type func System(err error) error { if err == nil || IsSystem(err) { return err } return errSystem{err} } type errNotModified struct{ error } func (errNotModified) NotModified() {} func (e errNotModified) Cause() error { return e.error } // NotModified is a helper to create an error of the class with the same name from any error type func NotModified(err error) error { if err == nil || IsNotModified(err) { return err } return errNotModified{err} } type errNotImplemented struct{ error } func (errNotImplemented) NotImplemented() {} func (e errNotImplemented) Cause() error { return e.error } // NotImplemented is a helper to create an error of the class with the same name from any error type func NotImplemented(err error) error { if err == nil || IsNotImplemented(err) { return err } return errNotImplemented{err} } type errUnknown struct{ error } func (errUnknown) Unknown() {} func (e errUnknown) Cause() error { return e.error } // Unknown is a helper to create an error of the class with the same name from any error type func Unknown(err error) error { if err == nil || IsUnknown(err) { return err } return errUnknown{err} } type errCancelled struct{ error } func (errCancelled) Cancelled() {} func (e 
errCancelled) Cause() error { return e.error } // Cancelled is a helper to create an error of the class with the same name from any error type func Cancelled(err error) error { if err == nil || IsCancelled(err) { return err } return errCancelled{err} } type errDeadline struct{ error } func (errDeadline) DeadlineExceeded() {} func (e errDeadline) Cause() error { return e.error } // Deadline is a helper to create an error of the class with the same name from any error type func Deadline(err error) error { if err == nil || IsDeadline(err) { return err } return errDeadline{err} } type errDataLoss struct{ error } func (errDataLoss) DataLoss() {} func (e errDataLoss) Cause() error { return e.error } // DataLoss is a helper to create an error of the class with the same name from any error type func DataLoss(err error) error { if err == nil || IsDataLoss(err) { return err } return errDataLoss{err} } // FromContext returns the error class from the passed in context func FromContext(ctx context.Context) error { e := ctx.Err() if e == nil { return nil } if e == context.Canceled { return Cancelled(e) } if e == context.DeadlineExceeded { return Deadline(e) } return Unknown(e) }
9,480
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/errdefs/defs.go
package errdefs // import "github.com/docker/docker/errdefs"

// ErrNotFound signals that the requested object doesn't exist
type ErrNotFound interface {
	NotFound()
}

// ErrInvalidParameter signals that the user input is invalid
type ErrInvalidParameter interface {
	InvalidParameter()
}

// ErrConflict signals that some internal state conflicts with the requested action and can't be performed.
// A change in state should be able to clear this error.
type ErrConflict interface {
	Conflict()
}

// ErrUnauthorized is used to signify that the user is not authorized to perform a specific action
type ErrUnauthorized interface {
	Unauthorized()
}

// ErrUnavailable signals that the requested action/subsystem is not available.
type ErrUnavailable interface {
	Unavailable()
}

// ErrForbidden signals that the requested action cannot be performed under any circumstances.
// When a ErrForbidden is returned, the caller should never retry the action.
type ErrForbidden interface {
	Forbidden()
}

// ErrSystem signals that some internal error occurred.
// An example of this would be a failed mount request.
type ErrSystem interface {
	System()
}

// ErrNotModified signals that an action can't be performed because it's already in the desired state
type ErrNotModified interface {
	NotModified()
}

// ErrNotImplemented signals that the requested action/feature is not implemented on the system as configured.
type ErrNotImplemented interface {
	NotImplemented()
}

// ErrUnknown signals that the kind of error that occurred is not known.
type ErrUnknown interface {
	Unknown()
}

// ErrCancelled signals that the action was cancelled.
type ErrCancelled interface {
	Cancelled()
}

// ErrDeadline signals that the deadline was reached before the action completed.
type ErrDeadline interface {
	DeadlineExceeded()
}

// ErrDataLoss indicates that data was lost or there is data corruption.
type ErrDataLoss interface {
	DataLoss()
}
9,481
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/errdefs/doc.go
// Package errdefs defines a set of error interfaces that packages should use for communicating classes of errors.
// Errors that cross the package boundary should implement one (and only one) of these interfaces.
//
// Packages should not reference these interfaces directly, only implement them.
// To check if a particular error implements one of these interfaces, there are helper
// functions provided (e.g. `Is<SomeError>`) which can be used rather than asserting the interfaces directly.
// If you must assert on these interfaces, be sure to check the causal chain (`err.Cause()`).
package errdefs // import "github.com/docker/docker/errdefs"
9,482
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/errdefs/is.go
package errdefs // import "github.com/docker/docker/errdefs"

type causer interface {
	Cause() error
}

func getImplementer(err error) error {
	switch e := err.(type) {
	case
		ErrNotFound,
		ErrInvalidParameter,
		ErrConflict,
		ErrUnauthorized,
		ErrUnavailable,
		ErrForbidden,
		ErrSystem,
		ErrNotModified,
		ErrNotImplemented,
		ErrCancelled,
		ErrDeadline,
		ErrDataLoss,
		ErrUnknown:
		return err
	case causer:
		return getImplementer(e.Cause())
	default:
		return err
	}
}

// IsNotFound returns if the passed in error is an ErrNotFound
func IsNotFound(err error) bool {
	_, ok := getImplementer(err).(ErrNotFound)
	return ok
}

// IsInvalidParameter returns if the passed in error is an ErrInvalidParameter
func IsInvalidParameter(err error) bool {
	_, ok := getImplementer(err).(ErrInvalidParameter)
	return ok
}

// IsConflict returns if the passed in error is an ErrConflict
func IsConflict(err error) bool {
	_, ok := getImplementer(err).(ErrConflict)
	return ok
}

// IsUnauthorized returns if the passed in error is an ErrUnauthorized
func IsUnauthorized(err error) bool {
	_, ok := getImplementer(err).(ErrUnauthorized)
	return ok
}

// IsUnavailable returns if the passed in error is an ErrUnavailable
func IsUnavailable(err error) bool {
	_, ok := getImplementer(err).(ErrUnavailable)
	return ok
}

// IsForbidden returns if the passed in error is an ErrForbidden
func IsForbidden(err error) bool {
	_, ok := getImplementer(err).(ErrForbidden)
	return ok
}

// IsSystem returns if the passed in error is an ErrSystem
func IsSystem(err error) bool {
	_, ok := getImplementer(err).(ErrSystem)
	return ok
}

// IsNotModified returns if the passed in error is a NotModified error
func IsNotModified(err error) bool {
	_, ok := getImplementer(err).(ErrNotModified)
	return ok
}

// IsNotImplemented returns if the passed in error is an ErrNotImplemented
func IsNotImplemented(err error) bool {
	_, ok := getImplementer(err).(ErrNotImplemented)
	return ok
}

// IsUnknown returns if the passed in error is an ErrUnknown
func IsUnknown(err error) bool {
	_, ok := getImplementer(err).(ErrUnknown)
	return ok
}

// IsCancelled returns if the passed in error is an ErrCancelled
func IsCancelled(err error) bool {
	_, ok := getImplementer(err).(ErrCancelled)
	return ok
}

// IsDeadline returns if the passed in error is an ErrDeadline
func IsDeadline(err error) bool {
	_, ok := getImplementer(err).(ErrDeadline)
	return ok
}

// IsDataLoss returns if the passed in error is an ErrDataLoss
func IsDataLoss(err error) bool {
	_, ok := getImplementer(err).(ErrDataLoss)
	return ok
}
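A minimal sketch of how the constructors in helpers.go and the Is* checks above are meant to be combined; the container lookup is a made-up stand-in for a real call site.

package main

import (
	"fmt"

	"github.com/docker/docker/errdefs"
)

// findContainer is a hypothetical call site that classifies its failure once,
// at the package boundary, by wrapping it in an errdefs class.
func findContainer(id string) error {
	return errdefs.NotFound(fmt.Errorf("no such container: %s", id))
}

func main() {
	err := findContainer("abc123")

	fmt.Println(errdefs.IsNotFound(err)) // true
	fmt.Println(errdefs.IsConflict(err)) // false

	// The constructors pass nil through, so callers can wrap unconditionally.
	fmt.Println(errdefs.NotFound(nil) == nil) // true
}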
9,483
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/errdefs/http_helpers.go
package errdefs // import "github.com/docker/docker/errdefs" import ( "fmt" "net/http" containerderrors "github.com/containerd/containerd/errdefs" "github.com/docker/distribution/registry/api/errcode" "github.com/sirupsen/logrus" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) // GetHTTPErrorStatusCode retrieves status code from error message. func GetHTTPErrorStatusCode(err error) int { if err == nil { logrus.WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling") return http.StatusInternalServerError } var statusCode int // Stop right there // Are you sure you should be adding a new error class here? Do one of the existing ones work? // Note that the below functions are already checking the error causal chain for matches. switch { case IsNotFound(err): statusCode = http.StatusNotFound case IsInvalidParameter(err): statusCode = http.StatusBadRequest case IsConflict(err): statusCode = http.StatusConflict case IsUnauthorized(err): statusCode = http.StatusUnauthorized case IsUnavailable(err): statusCode = http.StatusServiceUnavailable case IsForbidden(err): statusCode = http.StatusForbidden case IsNotModified(err): statusCode = http.StatusNotModified case IsNotImplemented(err): statusCode = http.StatusNotImplemented case IsSystem(err) || IsUnknown(err) || IsDataLoss(err) || IsDeadline(err) || IsCancelled(err): statusCode = http.StatusInternalServerError default: statusCode = statusCodeFromGRPCError(err) if statusCode != http.StatusInternalServerError { return statusCode } statusCode = statusCodeFromContainerdError(err) if statusCode != http.StatusInternalServerError { return statusCode } statusCode = statusCodeFromDistributionError(err) if statusCode != http.StatusInternalServerError { return statusCode } if e, ok := err.(causer); ok { return GetHTTPErrorStatusCode(e.Cause()) } logrus.WithFields(logrus.Fields{ "module": "api", "error_type": fmt.Sprintf("%T", err), }).Debugf("FIXME: Got an API for which error does not match any expected type!!!: %+v", err) } if statusCode == 0 { statusCode = http.StatusInternalServerError } return statusCode } // FromStatusCode creates an errdef error, based on the provided HTTP status-code func FromStatusCode(err error, statusCode int) error { if err == nil { return err } switch statusCode { case http.StatusNotFound: err = NotFound(err) case http.StatusBadRequest: err = InvalidParameter(err) case http.StatusConflict: err = Conflict(err) case http.StatusUnauthorized: err = Unauthorized(err) case http.StatusServiceUnavailable: err = Unavailable(err) case http.StatusForbidden: err = Forbidden(err) case http.StatusNotModified: err = NotModified(err) case http.StatusNotImplemented: err = NotImplemented(err) case http.StatusInternalServerError: if !IsSystem(err) && !IsUnknown(err) && !IsDataLoss(err) && !IsDeadline(err) && !IsCancelled(err) { err = System(err) } default: logrus.WithFields(logrus.Fields{ "module": "api", "status_code": fmt.Sprintf("%d", statusCode), }).Debugf("FIXME: Got an status-code for which error does not match any expected type!!!: %d", statusCode) switch { case statusCode >= 200 && statusCode < 400: // it's a client error case statusCode >= 400 && statusCode < 500: err = InvalidParameter(err) case statusCode >= 500 && statusCode < 600: err = System(err) default: err = Unknown(err) } } return err } // statusCodeFromGRPCError returns status code according to gRPC error func statusCodeFromGRPCError(err error) int { switch status.Code(err) { case codes.InvalidArgument: // code 3 return 
http.StatusBadRequest case codes.NotFound: // code 5 return http.StatusNotFound case codes.AlreadyExists: // code 6 return http.StatusConflict case codes.PermissionDenied: // code 7 return http.StatusForbidden case codes.FailedPrecondition: // code 9 return http.StatusBadRequest case codes.Unauthenticated: // code 16 return http.StatusUnauthorized case codes.OutOfRange: // code 11 return http.StatusBadRequest case codes.Unimplemented: // code 12 return http.StatusNotImplemented case codes.Unavailable: // code 14 return http.StatusServiceUnavailable default: // codes.Canceled(1) // codes.Unknown(2) // codes.DeadlineExceeded(4) // codes.ResourceExhausted(8) // codes.Aborted(10) // codes.Internal(13) // codes.DataLoss(15) return http.StatusInternalServerError } } // statusCodeFromDistributionError returns status code according to registry errcode // code is loosely based on errcode.ServeJSON() in docker/distribution func statusCodeFromDistributionError(err error) int { switch errs := err.(type) { case errcode.Errors: if len(errs) < 1 { return http.StatusInternalServerError } if _, ok := errs[0].(errcode.ErrorCoder); ok { return statusCodeFromDistributionError(errs[0]) } case errcode.ErrorCoder: return errs.ErrorCode().Descriptor().HTTPStatusCode } return http.StatusInternalServerError } // statusCodeFromContainerdError returns status code for containerd errors when // consumed directly (not through gRPC) func statusCodeFromContainerdError(err error) int { switch { case containerderrors.IsInvalidArgument(err): return http.StatusBadRequest case containerderrors.IsNotFound(err): return http.StatusNotFound case containerderrors.IsAlreadyExists(err): return http.StatusConflict case containerderrors.IsFailedPrecondition(err): return http.StatusPreconditionFailed case containerderrors.IsUnavailable(err): return http.StatusServiceUnavailable case containerderrors.IsNotImplemented(err): return http.StatusNotImplemented default: return http.StatusInternalServerError } }
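A small round-trip sketch of the HTTP helpers above: a status code is turned into an errdefs class, and that class maps back to the same status code. The error text is invented for illustration.

package main

import (
	"errors"
	"fmt"
	"net/http"

	"github.com/docker/docker/errdefs"
)

func main() {
	raw := errors.New("no such image") // hypothetical daemon-side failure

	// A 404 response becomes an ErrNotFound...
	err := errdefs.FromStatusCode(raw, http.StatusNotFound)
	fmt.Println(errdefs.IsNotFound(err)) // true

	// ...and classifying it again yields 404.
	fmt.Println(errdefs.GetHTTPErrorStatusCode(err)) // 404
}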
9,484
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/registry/registry.go
// Package registry contains client primitives to interact with a remote Docker registry. package registry // import "github.com/docker/docker/registry" import ( "crypto/tls" "errors" "fmt" "io/ioutil" "net" "net/http" "os" "path/filepath" "strings" "time" "github.com/docker/distribution/registry/client/transport" "github.com/docker/go-connections/sockets" "github.com/docker/go-connections/tlsconfig" "github.com/sirupsen/logrus" ) var ( // ErrAlreadyExists is an error returned if an image being pushed // already exists on the remote side ErrAlreadyExists = errors.New("Image already exists") ) func newTLSConfig(hostname string, isSecure bool) (*tls.Config, error) { // PreferredServerCipherSuites should have no effect tlsConfig := tlsconfig.ServerDefault() tlsConfig.InsecureSkipVerify = !isSecure if isSecure && CertsDir != "" { hostDir := filepath.Join(CertsDir, cleanPath(hostname)) logrus.Debugf("hostDir: %s", hostDir) if err := ReadCertsDirectory(tlsConfig, hostDir); err != nil { return nil, err } } return tlsConfig, nil } func hasFile(files []os.FileInfo, name string) bool { for _, f := range files { if f.Name() == name { return true } } return false } // ReadCertsDirectory reads the directory for TLS certificates // including roots and certificate pairs and updates the // provided TLS configuration. func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { fs, err := ioutil.ReadDir(directory) if err != nil && !os.IsNotExist(err) { return err } for _, f := range fs { if strings.HasSuffix(f.Name(), ".crt") { if tlsConfig.RootCAs == nil { systemPool, err := tlsconfig.SystemCertPool() if err != nil { return fmt.Errorf("unable to get system cert pool: %v", err) } tlsConfig.RootCAs = systemPool } logrus.Debugf("crt: %s", filepath.Join(directory, f.Name())) data, err := ioutil.ReadFile(filepath.Join(directory, f.Name())) if err != nil { return err } tlsConfig.RootCAs.AppendCertsFromPEM(data) } if strings.HasSuffix(f.Name(), ".cert") { certName := f.Name() keyName := certName[:len(certName)-5] + ".key" logrus.Debugf("cert: %s", filepath.Join(directory, f.Name())) if !hasFile(fs, keyName) { return fmt.Errorf("missing key %s for client certificate %s. 
Note that CA certificates should use the extension .crt", keyName, certName) } cert, err := tls.LoadX509KeyPair(filepath.Join(directory, certName), filepath.Join(directory, keyName)) if err != nil { return err } tlsConfig.Certificates = append(tlsConfig.Certificates, cert) } if strings.HasSuffix(f.Name(), ".key") { keyName := f.Name() certName := keyName[:len(keyName)-4] + ".cert" logrus.Debugf("key: %s", filepath.Join(directory, f.Name())) if !hasFile(fs, certName) { return fmt.Errorf("Missing client certificate %s for key %s", certName, keyName) } } } return nil } // Headers returns request modifiers with a User-Agent and metaHeaders func Headers(userAgent string, metaHeaders http.Header) []transport.RequestModifier { modifiers := []transport.RequestModifier{} if userAgent != "" { modifiers = append(modifiers, transport.NewHeaderRequestModifier(http.Header{ "User-Agent": []string{userAgent}, })) } if metaHeaders != nil { modifiers = append(modifiers, transport.NewHeaderRequestModifier(metaHeaders)) } return modifiers } // HTTPClient returns an HTTP client structure which uses the given transport // and contains the necessary headers for redirected requests func HTTPClient(transport http.RoundTripper) *http.Client { return &http.Client{ Transport: transport, CheckRedirect: addRequiredHeadersToRedirectedRequests, } } func trustedLocation(req *http.Request) bool { var ( trusteds = []string{"docker.com", "docker.io"} hostname = strings.SplitN(req.Host, ":", 2)[0] ) if req.URL.Scheme != "https" { return false } for _, trusted := range trusteds { if hostname == trusted || strings.HasSuffix(hostname, "."+trusted) { return true } } return false } // addRequiredHeadersToRedirectedRequests adds the necessary redirection headers // for redirected requests func addRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error { if len(via) != 0 && via[0] != nil { if trustedLocation(req) && trustedLocation(via[0]) { req.Header = via[0].Header return nil } for k, v := range via[0].Header { if k != "Authorization" { for _, vv := range v { req.Header.Add(k, vv) } } } } return nil } // NewTransport returns a new HTTP transport. If tlsConfig is nil, it uses the // default TLS configuration. func NewTransport(tlsConfig *tls.Config) *http.Transport { if tlsConfig == nil { tlsConfig = tlsconfig.ServerDefault() } direct := &net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, DualStack: true, } base := &http.Transport{ Proxy: http.ProxyFromEnvironment, Dial: direct.Dial, TLSHandshakeTimeout: 10 * time.Second, TLSClientConfig: tlsConfig, // TODO(dmcgowan): Call close idle connections when complete and use keep alive DisableKeepAlives: true, } proxyDialer, err := sockets.DialerFromEnvironment(direct) if err == nil { base.Dial = proxyDialer.Dial } return base }
9,485
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/registry/config.go
package registry // import "github.com/docker/docker/registry" import ( "fmt" "net" "net/url" "regexp" "strconv" "strings" "github.com/docker/distribution/reference" registrytypes "github.com/docker/docker/api/types/registry" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) // ServiceOptions holds command line options. type ServiceOptions struct { AllowNondistributableArtifacts []string `json:"allow-nondistributable-artifacts,omitempty"` Mirrors []string `json:"registry-mirrors,omitempty"` InsecureRegistries []string `json:"insecure-registries,omitempty"` } // serviceConfig holds daemon configuration for the registry service. type serviceConfig struct { registrytypes.ServiceConfig } var ( // DefaultNamespace is the default namespace DefaultNamespace = "docker.io" // DefaultRegistryVersionHeader is the name of the default HTTP header // that carries Registry version info DefaultRegistryVersionHeader = "Docker-Distribution-Api-Version" // IndexHostname is the index hostname IndexHostname = "index.docker.io" // IndexServer is used for user auth and image search IndexServer = "https://" + IndexHostname + "/v1/" // IndexName is the name of the index IndexName = "docker.io" // DefaultV2Registry is the URI of the default v2 registry DefaultV2Registry = &url.URL{ Scheme: "https", Host: "registry-1.docker.io", } ) var ( // ErrInvalidRepositoryName is an error returned if the repository name did // not have the correct form ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")") emptyServiceConfig, _ = newServiceConfig(ServiceOptions{}) ) var ( validHostPortRegex = regexp.MustCompile(`^` + reference.DomainRegexp.String() + `$`) ) // for mocking in unit tests var lookupIP = net.LookupIP // newServiceConfig returns a new instance of ServiceConfig func newServiceConfig(options ServiceOptions) (*serviceConfig, error) { config := &serviceConfig{ ServiceConfig: registrytypes.ServiceConfig{ InsecureRegistryCIDRs: make([]*registrytypes.NetIPNet, 0), IndexConfigs: make(map[string]*registrytypes.IndexInfo), // Hack: Bypass setting the mirrors to IndexConfigs since they are going away // and Mirrors are only for the official registry anyways. }, } if err := config.LoadAllowNondistributableArtifacts(options.AllowNondistributableArtifacts); err != nil { return nil, err } if err := config.LoadMirrors(options.Mirrors); err != nil { return nil, err } if err := config.LoadInsecureRegistries(options.InsecureRegistries); err != nil { return nil, err } return config, nil } // LoadAllowNondistributableArtifacts loads allow-nondistributable-artifacts registries into config. func (config *serviceConfig) LoadAllowNondistributableArtifacts(registries []string) error { cidrs := map[string]*registrytypes.NetIPNet{} hostnames := map[string]bool{} for _, r := range registries { if _, err := ValidateIndexName(r); err != nil { return err } if validateNoScheme(r) != nil { return fmt.Errorf("allow-nondistributable-artifacts registry %s should not contain '://'", r) } if _, ipnet, err := net.ParseCIDR(r); err == nil { // Valid CIDR. cidrs[ipnet.String()] = (*registrytypes.NetIPNet)(ipnet) } else if err := validateHostPort(r); err == nil { // Must be `host:port` if not CIDR. 
hostnames[r] = true } else { return fmt.Errorf("allow-nondistributable-artifacts registry %s is not valid: %v", r, err) } } config.AllowNondistributableArtifactsCIDRs = make([]*(registrytypes.NetIPNet), 0) for _, c := range cidrs { config.AllowNondistributableArtifactsCIDRs = append(config.AllowNondistributableArtifactsCIDRs, c) } config.AllowNondistributableArtifactsHostnames = make([]string, 0) for h := range hostnames { config.AllowNondistributableArtifactsHostnames = append(config.AllowNondistributableArtifactsHostnames, h) } return nil } // LoadMirrors loads mirrors to config, after removing duplicates. // Returns an error if mirrors contains an invalid mirror. func (config *serviceConfig) LoadMirrors(mirrors []string) error { mMap := map[string]struct{}{} unique := []string{} for _, mirror := range mirrors { m, err := ValidateMirror(mirror) if err != nil { return err } if _, exist := mMap[m]; !exist { mMap[m] = struct{}{} unique = append(unique, m) } } config.Mirrors = unique // Configure public registry since mirrors may have changed. config.IndexConfigs[IndexName] = &registrytypes.IndexInfo{ Name: IndexName, Mirrors: config.Mirrors, Secure: true, Official: true, } return nil } // LoadInsecureRegistries loads insecure registries to config func (config *serviceConfig) LoadInsecureRegistries(registries []string) error { // Localhost is by default considered as an insecure registry // This is a stop-gap for people who are running a private registry on localhost (especially on Boot2docker). // // TODO: should we deprecate this once it is easier for people to set up a TLS registry or change // daemon flags on boot2docker? registries = append(registries, "127.0.0.0/8") // Store original InsecureRegistryCIDRs and IndexConfigs // Clean InsecureRegistryCIDRs and IndexConfigs in config, as passed registries has all insecure registry info. originalCIDRs := config.ServiceConfig.InsecureRegistryCIDRs originalIndexInfos := config.ServiceConfig.IndexConfigs config.ServiceConfig.InsecureRegistryCIDRs = make([]*registrytypes.NetIPNet, 0) config.ServiceConfig.IndexConfigs = make(map[string]*registrytypes.IndexInfo) skip: for _, r := range registries { // validate insecure registry if _, err := ValidateIndexName(r); err != nil { // before returning err, roll back to original data config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs config.ServiceConfig.IndexConfigs = originalIndexInfos return err } if strings.HasPrefix(strings.ToLower(r), "http://") { logrus.Warnf("insecure registry %s should not contain 'http://' and 'http://' has been removed from the insecure registry config", r) r = r[7:] } else if strings.HasPrefix(strings.ToLower(r), "https://") { logrus.Warnf("insecure registry %s should not contain 'https://' and 'https://' has been removed from the insecure registry config", r) r = r[8:] } else if validateNoScheme(r) != nil { // Insecure registry should not contain '://' // before returning err, roll back to original data config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs config.ServiceConfig.IndexConfigs = originalIndexInfos return fmt.Errorf("insecure registry %s should not contain '://'", r) } // Check if CIDR was passed to --insecure-registry _, ipnet, err := net.ParseCIDR(r) if err == nil { // Valid CIDR. If ipnet is already in config.InsecureRegistryCIDRs, skip. 
data := (*registrytypes.NetIPNet)(ipnet) for _, value := range config.InsecureRegistryCIDRs { if value.IP.String() == data.IP.String() && value.Mask.String() == data.Mask.String() { continue skip } } // ipnet is not found, add it in config.InsecureRegistryCIDRs config.InsecureRegistryCIDRs = append(config.InsecureRegistryCIDRs, data) } else { if err := validateHostPort(r); err != nil { config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs config.ServiceConfig.IndexConfigs = originalIndexInfos return fmt.Errorf("insecure registry %s is not valid: %v", r, err) } // Assume `host:port` if not CIDR. config.IndexConfigs[r] = &registrytypes.IndexInfo{ Name: r, Mirrors: make([]string, 0), Secure: false, Official: false, } } } // Configure public registry. config.IndexConfigs[IndexName] = &registrytypes.IndexInfo{ Name: IndexName, Mirrors: config.Mirrors, Secure: true, Official: true, } return nil } // allowNondistributableArtifacts returns true if the provided hostname is part of the list of registries // that allow push of nondistributable artifacts. // // The list can contain elements with CIDR notation to specify a whole subnet. If the subnet contains an IP // of the registry specified by hostname, true is returned. // // hostname should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name // or an IP address. If it is a domain name, then it will be resolved to IP addresses for matching. If // resolution fails, CIDR matching is not performed. func allowNondistributableArtifacts(config *serviceConfig, hostname string) bool { for _, h := range config.AllowNondistributableArtifactsHostnames { if h == hostname { return true } } return isCIDRMatch(config.AllowNondistributableArtifactsCIDRs, hostname) } // isSecureIndex returns false if the provided indexName is part of the list of insecure registries // Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs. // // The list of insecure registries can contain an element with CIDR notation to specify a whole subnet. // If the subnet contains one of the IPs of the registry specified by indexName, the latter is considered // insecure. // // indexName should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name // or an IP address. If it is a domain name, then it will be resolved in order to check if the IP is contained // in a subnet. If the resolving is not successful, isSecureIndex will only try to match hostname to any element // of insecureRegistries. func isSecureIndex(config *serviceConfig, indexName string) bool { // Check for configured index, first. This is needed in case isSecureIndex // is called from anything besides newIndexInfo, in order to honor per-index configurations. if index, ok := config.IndexConfigs[indexName]; ok { return index.Secure } return !isCIDRMatch(config.InsecureRegistryCIDRs, indexName) } // isCIDRMatch returns true if URLHost matches an element of cidrs. URLHost is a URL.Host (`host:port` or `host`) // where the `host` part can be either a domain name or an IP address. If it is a domain name, then it will be // resolved to IP addresses for matching. If resolution fails, false is returned. func isCIDRMatch(cidrs []*registrytypes.NetIPNet, URLHost string) bool { host, _, err := net.SplitHostPort(URLHost) if err != nil { // Assume URLHost is of the form `host` without the port and go on. 
host = URLHost } addrs, err := lookupIP(host) if err != nil { ip := net.ParseIP(host) if ip != nil { addrs = []net.IP{ip} } // if ip == nil, then `host` is neither an IP nor it could be looked up, // either because the index is unreachable, or because the index is behind an HTTP proxy. // So, len(addrs) == 0 and we're not aborting. } // Try CIDR notation only if addrs has any elements, i.e. if `host`'s IP could be determined. for _, addr := range addrs { for _, ipnet := range cidrs { // check if the addr falls in the subnet if (*net.IPNet)(ipnet).Contains(addr) { return true } } } return false } // ValidateMirror validates an HTTP(S) registry mirror func ValidateMirror(val string) (string, error) { uri, err := url.Parse(val) if err != nil { return "", fmt.Errorf("invalid mirror: %q is not a valid URI", val) } if uri.Scheme != "http" && uri.Scheme != "https" { return "", fmt.Errorf("invalid mirror: unsupported scheme %q in %q", uri.Scheme, uri) } if (uri.Path != "" && uri.Path != "/") || uri.RawQuery != "" || uri.Fragment != "" { return "", fmt.Errorf("invalid mirror: path, query, or fragment at end of the URI %q", uri) } if uri.User != nil { // strip password from output uri.User = url.UserPassword(uri.User.Username(), "xxxxx") return "", fmt.Errorf("invalid mirror: username/password not allowed in URI %q", uri) } return strings.TrimSuffix(val, "/") + "/", nil } // ValidateIndexName validates an index name. func ValidateIndexName(val string) (string, error) { // TODO: upstream this to check to reference package if val == "index.docker.io" { val = "docker.io" } if strings.HasPrefix(val, "-") || strings.HasSuffix(val, "-") { return "", fmt.Errorf("invalid index name (%s). Cannot begin or end with a hyphen", val) } return val, nil } func validateNoScheme(reposName string) error { if strings.Contains(reposName, "://") { // It cannot contain a scheme! return ErrInvalidRepositoryName } return nil } func validateHostPort(s string) error { // Split host and port, and in case s can not be splitted, assume host only host, port, err := net.SplitHostPort(s) if err != nil { host = s port = "" } // If match against the `host:port` pattern fails, // it might be `IPv6:port`, which will be captured by net.ParseIP(host) if !validHostPortRegex.MatchString(s) && net.ParseIP(host) == nil { return fmt.Errorf("invalid host %q", host) } if port != "" { v, err := strconv.Atoi(port) if err != nil { return err } if v < 0 || v > 65535 { return fmt.Errorf("invalid port %q", port) } } return nil } // newIndexInfo returns IndexInfo configuration from indexName func newIndexInfo(config *serviceConfig, indexName string) (*registrytypes.IndexInfo, error) { var err error indexName, err = ValidateIndexName(indexName) if err != nil { return nil, err } // Return any configured index info, first. if index, ok := config.IndexConfigs[indexName]; ok { return index, nil } // Construct a non-configured index info. index := &registrytypes.IndexInfo{ Name: indexName, Mirrors: make([]string, 0), Official: false, } index.Secure = isSecureIndex(config, indexName) return index, nil } // GetAuthConfigKey special-cases using the full index address of the official // index as the AuthConfig key, and uses the (host)name[:port] for private indexes. 
func GetAuthConfigKey(index *registrytypes.IndexInfo) string { if index.Official { return IndexServer } return index.Name } // newRepositoryInfo validates and breaks down a repository name into a RepositoryInfo func newRepositoryInfo(config *serviceConfig, name reference.Named) (*RepositoryInfo, error) { index, err := newIndexInfo(config, reference.Domain(name)) if err != nil { return nil, err } official := !strings.ContainsRune(reference.FamiliarName(name), '/') return &RepositoryInfo{ Name: reference.TrimNamed(name), Index: index, Official: official, }, nil } // ParseRepositoryInfo performs the breakdown of a repository name into a RepositoryInfo, but // lacks registry configuration. func ParseRepositoryInfo(reposName reference.Named) (*RepositoryInfo, error) { return newRepositoryInfo(emptyServiceConfig, reposName) } // ParseSearchIndexInfo will use repository name to get back an indexInfo. func ParseSearchIndexInfo(reposName string) (*registrytypes.IndexInfo, error) { indexName, _ := splitReposSearchTerm(reposName) indexInfo, err := newIndexInfo(emptyServiceConfig, indexName) if err != nil { return nil, err } return indexInfo, nil }
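Editor's note: a minimal usage sketch, not part of the vendored file above. It exercises the exported ValidateMirror and ValidateIndexName helpers using the import path shown in this dump; the mirror host is a hypothetical example value.

package main

import (
    "fmt"

    "github.com/docker/docker/registry"
)

func main() {
    // ValidateMirror normalizes a mirror URL and appends a trailing slash on success.
    if m, err := registry.ValidateMirror("https://mirror.example.com"); err == nil {
        fmt.Println(m) // expected: https://mirror.example.com/
    }

    // ValidateIndexName maps the legacy "index.docker.io" name to "docker.io".
    if name, err := registry.ValidateIndexName("index.docker.io"); err == nil {
        fmt.Println(name) // expected: docker.io
    }
}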
9,486
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/registry/config_windows.go
package registry // import "github.com/docker/docker/registry"

import (
	"os"
	"path/filepath"
	"strings"
)

// CertsDir is the directory where certificates are stored
var CertsDir = os.Getenv("programdata") + `\docker\certs.d`

// cleanPath is used to ensure that a directory name is valid on the target
// platform. It will be passed in something *similar* to a URL such as
// https:\index.docker.io\v1. Not all platforms support directory names
// which contain those characters (such as : on Windows)
func cleanPath(s string) string {
	return filepath.FromSlash(strings.Replace(s, ":", "", -1))
}
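Editor's note: cleanPath is unexported, so the sketch below (not vendored code) re-implements the same transformation with the standard library only, under the hypothetical name cleanCertPath, to show what it does to a registry address.

package main

import (
    "fmt"
    "path/filepath"
    "strings"
)

// cleanCertPath mirrors the unexported cleanPath above: strip ':' (illegal in
// Windows directory names) and convert slashes for the host OS.
func cleanCertPath(s string) string {
    return filepath.FromSlash(strings.Replace(s, ":", "", -1))
}

func main() {
    // On Windows this yields a path usable under CertsDir; on Unix FromSlash is a no-op.
    fmt.Println(cleanCertPath("https:/index.docker.io/v1"))
}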
9,487
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/registry/service_v2.go
package registry // import "github.com/docker/docker/registry" import ( "net/url" "strings" "github.com/docker/go-connections/tlsconfig" ) func (s *DefaultService) lookupV2Endpoints(hostname string) (endpoints []APIEndpoint, err error) { tlsConfig := tlsconfig.ServerDefault() if hostname == DefaultNamespace || hostname == IndexHostname { // v2 mirrors for _, mirror := range s.config.Mirrors { if !strings.HasPrefix(mirror, "http://") && !strings.HasPrefix(mirror, "https://") { mirror = "https://" + mirror } mirrorURL, err := url.Parse(mirror) if err != nil { return nil, err } mirrorTLSConfig, err := s.tlsConfigForMirror(mirrorURL) if err != nil { return nil, err } endpoints = append(endpoints, APIEndpoint{ URL: mirrorURL, // guess mirrors are v2 Version: APIVersion2, Mirror: true, TrimHostname: true, TLSConfig: mirrorTLSConfig, }) } // v2 registry endpoints = append(endpoints, APIEndpoint{ URL: DefaultV2Registry, Version: APIVersion2, Official: true, TrimHostname: true, TLSConfig: tlsConfig, }) return endpoints, nil } ana := allowNondistributableArtifacts(s.config, hostname) tlsConfig, err = s.tlsConfig(hostname) if err != nil { return nil, err } endpoints = []APIEndpoint{ { URL: &url.URL{ Scheme: "https", Host: hostname, }, Version: APIVersion2, AllowNondistributableArtifacts: ana, TrimHostname: true, TLSConfig: tlsConfig, }, } if tlsConfig.InsecureSkipVerify { endpoints = append(endpoints, APIEndpoint{ URL: &url.URL{ Scheme: "http", Host: hostname, }, Version: APIVersion2, AllowNondistributableArtifacts: ana, TrimHostname: true, // used to check if supposed to be secure via InsecureSkipVerify TLSConfig: tlsConfig, }) } return endpoints, nil }
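Editor's note: a sketch, not part of the vendored file, of how the mirror-first ordering in lookupV2Endpoints surfaces through the public Service API. It assumes registry.ServiceOptions exposes a Mirrors field, as the configuration code elsewhere in this package suggests; the mirror host is hypothetical.

package main

import (
    "fmt"

    "github.com/docker/docker/registry"
)

func main() {
    svc, err := registry.NewService(registry.ServiceOptions{
        Mirrors: []string{"https://mirror.example.com"}, // hypothetical mirror
    })
    if err != nil {
        panic(err)
    }
    endpoints, err := svc.LookupPullEndpoints("docker.io")
    if err != nil {
        panic(err)
    }
    for _, ep := range endpoints {
        // Mirrors are appended before the official endpoint, so they are tried first.
        fmt.Println(ep.URL, "mirror:", ep.Mirror)
    }
}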
9,488
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/registry/auth.go
package registry // import "github.com/docker/docker/registry" import ( "io/ioutil" "net/http" "net/url" "strings" "time" "github.com/docker/distribution/registry/client/auth" "github.com/docker/distribution/registry/client/auth/challenge" "github.com/docker/distribution/registry/client/transport" "github.com/docker/docker/api/types" registrytypes "github.com/docker/docker/api/types/registry" "github.com/docker/docker/errdefs" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) const ( // AuthClientID is used the ClientID used for the token server AuthClientID = "docker" ) // loginV1 tries to register/login to the v1 registry server. func loginV1(authConfig *types.AuthConfig, apiEndpoint APIEndpoint, userAgent string) (string, string, error) { registryEndpoint := apiEndpoint.ToV1Endpoint(userAgent, nil) serverAddress := registryEndpoint.String() logrus.Debugf("attempting v1 login to registry endpoint %s", serverAddress) if serverAddress == "" { return "", "", errdefs.System(errors.New("server Error: Server Address not set")) } req, err := http.NewRequest("GET", serverAddress+"users/", nil) if err != nil { return "", "", err } req.SetBasicAuth(authConfig.Username, authConfig.Password) resp, err := registryEndpoint.client.Do(req) if err != nil { // fallback when request could not be completed return "", "", fallbackError{ err: err, } } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return "", "", errdefs.System(err) } switch resp.StatusCode { case http.StatusOK: return "Login Succeeded", "", nil case http.StatusUnauthorized: return "", "", errdefs.Unauthorized(errors.New("Wrong login/password, please try again")) case http.StatusForbidden: // *TODO: Use registry configuration to determine what this says, if anything? return "", "", errdefs.Forbidden(errors.Errorf("Login: Account is not active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress)) case http.StatusInternalServerError: logrus.Errorf("%s returned status code %d. Response Body :\n%s", req.URL.String(), resp.StatusCode, body) return "", "", errdefs.System(errors.New("Internal Server Error")) } return "", "", errdefs.System(errors.Errorf("Login: %s (Code: %d; Headers: %s)", body, resp.StatusCode, resp.Header)) } type loginCredentialStore struct { authConfig *types.AuthConfig } func (lcs loginCredentialStore) Basic(*url.URL) (string, string) { return lcs.authConfig.Username, lcs.authConfig.Password } func (lcs loginCredentialStore) RefreshToken(*url.URL, string) string { return lcs.authConfig.IdentityToken } func (lcs loginCredentialStore) SetRefreshToken(u *url.URL, service, token string) { lcs.authConfig.IdentityToken = token } type staticCredentialStore struct { auth *types.AuthConfig } // NewStaticCredentialStore returns a credential store // which always returns the same credential values. func NewStaticCredentialStore(auth *types.AuthConfig) auth.CredentialStore { return staticCredentialStore{ auth: auth, } } func (scs staticCredentialStore) Basic(*url.URL) (string, string) { if scs.auth == nil { return "", "" } return scs.auth.Username, scs.auth.Password } func (scs staticCredentialStore) RefreshToken(*url.URL, string) string { if scs.auth == nil { return "" } return scs.auth.IdentityToken } func (scs staticCredentialStore) SetRefreshToken(*url.URL, string, string) { } type fallbackError struct { err error } func (err fallbackError) Error() string { return err.err.Error() } // loginV2 tries to login to the v2 registry server. 
The given registry // endpoint will be pinged to get authorization challenges. These challenges // will be used to authenticate against the registry to validate credentials. func loginV2(authConfig *types.AuthConfig, endpoint APIEndpoint, userAgent string) (string, string, error) { logrus.Debugf("attempting v2 login to registry endpoint %s", strings.TrimRight(endpoint.URL.String(), "/")+"/v2/") modifiers := Headers(userAgent, nil) authTransport := transport.NewTransport(NewTransport(endpoint.TLSConfig), modifiers...) credentialAuthConfig := *authConfig creds := loginCredentialStore{ authConfig: &credentialAuthConfig, } loginClient, foundV2, err := v2AuthHTTPClient(endpoint.URL, authTransport, modifiers, creds, nil) if err != nil { return "", "", err } endpointStr := strings.TrimRight(endpoint.URL.String(), "/") + "/v2/" req, err := http.NewRequest("GET", endpointStr, nil) if err != nil { if !foundV2 { err = fallbackError{err: err} } return "", "", err } resp, err := loginClient.Do(req) if err != nil { err = translateV2AuthError(err) if !foundV2 { err = fallbackError{err: err} } return "", "", err } defer resp.Body.Close() if resp.StatusCode == http.StatusOK { return "Login Succeeded", credentialAuthConfig.IdentityToken, nil } // TODO(dmcgowan): Attempt to further interpret result, status code and error code string err = errors.Errorf("login attempt to %s failed with status: %d %s", endpointStr, resp.StatusCode, http.StatusText(resp.StatusCode)) if !foundV2 { err = fallbackError{err: err} } return "", "", err } func v2AuthHTTPClient(endpoint *url.URL, authTransport http.RoundTripper, modifiers []transport.RequestModifier, creds auth.CredentialStore, scopes []auth.Scope) (*http.Client, bool, error) { challengeManager, foundV2, err := PingV2Registry(endpoint, authTransport) if err != nil { if !foundV2 { err = fallbackError{err: err} } return nil, foundV2, err } tokenHandlerOptions := auth.TokenHandlerOptions{ Transport: authTransport, Credentials: creds, OfflineAccess: true, ClientID: AuthClientID, Scopes: scopes, } tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions) basicHandler := auth.NewBasicHandler(creds) modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) tr := transport.NewTransport(authTransport, modifiers...) return &http.Client{ Transport: tr, Timeout: 15 * time.Second, }, foundV2, nil } // ConvertToHostname converts a registry url which has http|https prepended // to just an hostname. func ConvertToHostname(url string) string { stripped := url if strings.HasPrefix(url, "http://") { stripped = strings.TrimPrefix(url, "http://") } else if strings.HasPrefix(url, "https://") { stripped = strings.TrimPrefix(url, "https://") } nameParts := strings.SplitN(stripped, "/", 2) return nameParts[0] } // ResolveAuthConfig matches an auth configuration to a server address or a URL func ResolveAuthConfig(authConfigs map[string]types.AuthConfig, index *registrytypes.IndexInfo) types.AuthConfig { configKey := GetAuthConfigKey(index) // First try the happy case if c, found := authConfigs[configKey]; found || index.Official { return c } // Maybe they have a legacy config file, we will iterate the keys converting // them to the new format and testing for registry, ac := range authConfigs { if configKey == ConvertToHostname(registry) { return ac } } // When all else fails, return an empty auth config return types.AuthConfig{} } // PingResponseError is used when the response from a ping // was received but invalid. 
type PingResponseError struct { Err error } func (err PingResponseError) Error() string { return err.Err.Error() } // PingV2Registry attempts to ping a v2 registry and on success return a // challenge manager for the supported authentication types and // whether v2 was confirmed by the response. If a response is received but // cannot be interpreted a PingResponseError will be returned. func PingV2Registry(endpoint *url.URL, transport http.RoundTripper) (challenge.Manager, bool, error) { var ( foundV2 = false v2Version = auth.APIVersion{ Type: "registry", Version: "2.0", } ) pingClient := &http.Client{ Transport: transport, Timeout: 15 * time.Second, } endpointStr := strings.TrimRight(endpoint.String(), "/") + "/v2/" req, err := http.NewRequest("GET", endpointStr, nil) if err != nil { return nil, false, err } resp, err := pingClient.Do(req) if err != nil { return nil, false, err } defer resp.Body.Close() versions := auth.APIVersions(resp, DefaultRegistryVersionHeader) for _, pingVersion := range versions { if pingVersion == v2Version { // The version header indicates we're definitely // talking to a v2 registry. So don't allow future // fallbacks to the v1 protocol. foundV2 = true break } } challengeManager := challenge.NewSimpleManager() if err := challengeManager.AddResponse(resp); err != nil { return nil, foundV2, PingResponseError{ Err: err, } } return challengeManager, foundV2, nil }
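Editor's note: a small sketch, not vendored code, exercising the exported ConvertToHostname and ResolveAuthConfig helpers above; the registry host and stored credential are hypothetical.

package main

import (
    "fmt"

    "github.com/docker/docker/api/types"
    registrytypes "github.com/docker/docker/api/types/registry"
    "github.com/docker/docker/registry"
)

func main() {
    fmt.Println(registry.ConvertToHostname("https://myregistry.example.com/v2/")) // myregistry.example.com

    // ResolveAuthConfig falls back to hostname matching for legacy config keys.
    authConfigs := map[string]types.AuthConfig{
        "https://myregistry.example.com/v1/": {Username: "alice"}, // hypothetical stored credential
    }
    index := &registrytypes.IndexInfo{Name: "myregistry.example.com"}
    cfg := registry.ResolveAuthConfig(authConfigs, index)
    fmt.Println(cfg.Username) // alice
}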
9,489
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/registry/types.go
package registry // import "github.com/docker/docker/registry"

import (
	"github.com/docker/distribution/reference"
	registrytypes "github.com/docker/docker/api/types/registry"
)

// RepositoryData tracks the image list, list of endpoints for a repository
type RepositoryData struct {
	// ImgList is a list of images in the repository
	ImgList map[string]*ImgData
	// Endpoints is a list of endpoints returned in X-Docker-Endpoints
	Endpoints []string
}

// ImgData is used to transfer image checksums to and from the registry
type ImgData struct {
	// ID is an opaque string that identifies the image
	ID              string `json:"id"`
	Checksum        string `json:"checksum,omitempty"`
	ChecksumPayload string `json:"-"`
	Tag             string `json:",omitempty"`
}

// PingResult contains the information returned when pinging a registry. It
// indicates the registry's version and whether the registry claims to be a
// standalone registry.
type PingResult struct {
	// Version is the registry version supplied by the registry in an HTTP
	// header
	Version string `json:"version"`
	// Standalone is set to true if the registry indicates it is a
	// standalone registry in the X-Docker-Registry-Standalone
	// header
	Standalone bool `json:"standalone"`
}

// APIVersion is an integral representation of an API version (presently
// either 1 or 2)
type APIVersion int

func (av APIVersion) String() string {
	return apiVersions[av]
}

// API Version identifiers.
const (
	_                      = iota
	APIVersion1 APIVersion = iota
	APIVersion2
)

var apiVersions = map[APIVersion]string{
	APIVersion1: "v1",
	APIVersion2: "v2",
}

// RepositoryInfo describes a repository
type RepositoryInfo struct {
	Name reference.Named
	// Index points to registry information
	Index *registrytypes.IndexInfo
	// Official indicates whether the repository is considered official.
	// If the registry is official, and the normalized name does not
	// contain a '/' (e.g. "foo"), then it is considered an official repo.
	Official bool
	// Class represents the class of the repository, such as "plugin"
	// or "image".
	Class string
}
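Editor's note: a short sketch, not vendored code, showing how the types above behave: APIVersion prints via its String method, and PingResult round-trips through the JSON tags declared here.

package main

import (
    "encoding/json"
    "fmt"

    "github.com/docker/docker/registry"
)

func main() {
    fmt.Println(registry.APIVersion2) // "v2", via the String method above

    var ping registry.PingResult
    _ = json.Unmarshal([]byte(`{"version":"0.6.3","standalone":true}`), &ping)
    fmt.Println(ping.Version, ping.Standalone) // 0.6.3 true
}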
9,490
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/registry/service.go
package registry // import "github.com/docker/docker/registry" import ( "context" "crypto/tls" "net/http" "net/url" "strings" "sync" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/client/auth" "github.com/docker/docker/api/types" registrytypes "github.com/docker/docker/api/types/registry" "github.com/docker/docker/errdefs" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) const ( // DefaultSearchLimit is the default value for maximum number of returned search results. DefaultSearchLimit = 25 ) // Service is the interface defining what a registry service should implement. type Service interface { Auth(ctx context.Context, authConfig *types.AuthConfig, userAgent string) (status, token string, err error) LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) ResolveRepository(name reference.Named) (*RepositoryInfo, error) Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) ServiceConfig() *registrytypes.ServiceConfig TLSConfig(hostname string) (*tls.Config, error) LoadAllowNondistributableArtifacts([]string) error LoadMirrors([]string) error LoadInsecureRegistries([]string) error } // DefaultService is a registry service. It tracks configuration data such as a list // of mirrors. type DefaultService struct { config *serviceConfig mu sync.Mutex } // NewService returns a new instance of DefaultService ready to be // installed into an engine. func NewService(options ServiceOptions) (*DefaultService, error) { config, err := newServiceConfig(options) return &DefaultService{config: config}, err } // ServiceConfig returns the public registry service configuration. func (s *DefaultService) ServiceConfig() *registrytypes.ServiceConfig { s.mu.Lock() defer s.mu.Unlock() servConfig := registrytypes.ServiceConfig{ AllowNondistributableArtifactsCIDRs: make([]*(registrytypes.NetIPNet), 0), AllowNondistributableArtifactsHostnames: make([]string, 0), InsecureRegistryCIDRs: make([]*(registrytypes.NetIPNet), 0), IndexConfigs: make(map[string]*(registrytypes.IndexInfo)), Mirrors: make([]string, 0), } // construct a new ServiceConfig which will not retrieve s.Config directly, // and look up items in s.config with mu locked servConfig.AllowNondistributableArtifactsCIDRs = append(servConfig.AllowNondistributableArtifactsCIDRs, s.config.ServiceConfig.AllowNondistributableArtifactsCIDRs...) servConfig.AllowNondistributableArtifactsHostnames = append(servConfig.AllowNondistributableArtifactsHostnames, s.config.ServiceConfig.AllowNondistributableArtifactsHostnames...) servConfig.InsecureRegistryCIDRs = append(servConfig.InsecureRegistryCIDRs, s.config.ServiceConfig.InsecureRegistryCIDRs...) for key, value := range s.config.ServiceConfig.IndexConfigs { servConfig.IndexConfigs[key] = value } servConfig.Mirrors = append(servConfig.Mirrors, s.config.ServiceConfig.Mirrors...) return &servConfig } // LoadAllowNondistributableArtifacts loads allow-nondistributable-artifacts registries for Service. 
func (s *DefaultService) LoadAllowNondistributableArtifacts(registries []string) error { s.mu.Lock() defer s.mu.Unlock() return s.config.LoadAllowNondistributableArtifacts(registries) } // LoadMirrors loads registry mirrors for Service func (s *DefaultService) LoadMirrors(mirrors []string) error { s.mu.Lock() defer s.mu.Unlock() return s.config.LoadMirrors(mirrors) } // LoadInsecureRegistries loads insecure registries for Service func (s *DefaultService) LoadInsecureRegistries(registries []string) error { s.mu.Lock() defer s.mu.Unlock() return s.config.LoadInsecureRegistries(registries) } // Auth contacts the public registry with the provided credentials, // and returns OK if authentication was successful. // It can be used to verify the validity of a client's credentials. func (s *DefaultService) Auth(ctx context.Context, authConfig *types.AuthConfig, userAgent string) (status, token string, err error) { // TODO Use ctx when searching for repositories serverAddress := authConfig.ServerAddress if serverAddress == "" { serverAddress = IndexServer } if !strings.HasPrefix(serverAddress, "https://") && !strings.HasPrefix(serverAddress, "http://") { serverAddress = "https://" + serverAddress } u, err := url.Parse(serverAddress) if err != nil { return "", "", errdefs.InvalidParameter(errors.Errorf("unable to parse server address: %v", err)) } endpoints, err := s.LookupPushEndpoints(u.Host) if err != nil { return "", "", errdefs.InvalidParameter(err) } for _, endpoint := range endpoints { login := loginV2 if endpoint.Version == APIVersion1 { login = loginV1 } status, token, err = login(authConfig, endpoint, userAgent) if err == nil { return } if fErr, ok := err.(fallbackError); ok { err = fErr.err logrus.Infof("Error logging in to %s endpoint, trying next endpoint: %v", endpoint.Version, err) continue } return "", "", err } return "", "", err } // splitReposSearchTerm breaks a search term into an index name and remote name func splitReposSearchTerm(reposName string) (string, string) { nameParts := strings.SplitN(reposName, "/", 2) var indexName, remoteName string if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") && !strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") { // This is a Docker Index repos (ex: samalba/hipache or ubuntu) // 'docker.io' indexName = IndexName remoteName = reposName } else { indexName = nameParts[0] remoteName = nameParts[1] } return indexName, remoteName } // Search queries the public registry for images matching the specified // search terms, and returns the results. func (s *DefaultService) Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) { // TODO Use ctx when searching for repositories if err := validateNoScheme(term); err != nil { return nil, err } indexName, remoteName := splitReposSearchTerm(term) // Search is a long-running operation, just lock s.config to avoid block others. s.mu.Lock() index, err := newIndexInfo(s.config, indexName) s.mu.Unlock() if err != nil { return nil, err } // *TODO: Search multiple indexes. 
endpoint, err := NewV1Endpoint(index, userAgent, http.Header(headers)) if err != nil { return nil, err } var client *http.Client if authConfig != nil && authConfig.IdentityToken != "" && authConfig.Username != "" { creds := NewStaticCredentialStore(authConfig) scopes := []auth.Scope{ auth.RegistryScope{ Name: "catalog", Actions: []string{"search"}, }, } modifiers := Headers(userAgent, nil) v2Client, foundV2, err := v2AuthHTTPClient(endpoint.URL, endpoint.client.Transport, modifiers, creds, scopes) if err != nil { if fErr, ok := err.(fallbackError); ok { logrus.Errorf("Cannot use identity token for search, v2 auth not supported: %v", fErr.err) } else { return nil, err } } else if foundV2 { // Copy non transport http client features v2Client.Timeout = endpoint.client.Timeout v2Client.CheckRedirect = endpoint.client.CheckRedirect v2Client.Jar = endpoint.client.Jar logrus.Debugf("using v2 client for search to %s", endpoint.URL) client = v2Client } } if client == nil { client = endpoint.client if err := authorizeClient(client, authConfig, endpoint); err != nil { return nil, err } } r := newSession(client, authConfig, endpoint) if index.Official { localName := remoteName if strings.HasPrefix(localName, "library/") { // If pull "library/foo", it's stored locally under "foo" localName = strings.SplitN(localName, "/", 2)[1] } return r.SearchRepositories(localName, limit) } return r.SearchRepositories(remoteName, limit) } // ResolveRepository splits a repository name into its components // and configuration of the associated registry. func (s *DefaultService) ResolveRepository(name reference.Named) (*RepositoryInfo, error) { s.mu.Lock() defer s.mu.Unlock() return newRepositoryInfo(s.config, name) } // APIEndpoint represents a remote API endpoint type APIEndpoint struct { Mirror bool URL *url.URL Version APIVersion AllowNondistributableArtifacts bool Official bool TrimHostname bool TLSConfig *tls.Config } // ToV1Endpoint returns a V1 API endpoint based on the APIEndpoint func (e APIEndpoint) ToV1Endpoint(userAgent string, metaHeaders http.Header) *V1Endpoint { return newV1Endpoint(*e.URL, e.TLSConfig, userAgent, metaHeaders) } // TLSConfig constructs a client TLS configuration based on server defaults func (s *DefaultService) TLSConfig(hostname string) (*tls.Config, error) { s.mu.Lock() defer s.mu.Unlock() return newTLSConfig(hostname, isSecureIndex(s.config, hostname)) } // tlsConfig constructs a client TLS configuration based on server defaults func (s *DefaultService) tlsConfig(hostname string) (*tls.Config, error) { return newTLSConfig(hostname, isSecureIndex(s.config, hostname)) } func (s *DefaultService) tlsConfigForMirror(mirrorURL *url.URL) (*tls.Config, error) { return s.tlsConfig(mirrorURL.Host) } // LookupPullEndpoints creates a list of endpoints to try to pull from, in order of preference. // It gives preference to v2 endpoints over v1, mirrors over the actual // registry, and HTTPS over plain HTTP. func (s *DefaultService) LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) { s.mu.Lock() defer s.mu.Unlock() return s.lookupEndpoints(hostname) } // LookupPushEndpoints creates a list of endpoints to try to push to, in order of preference. // It gives preference to v2 endpoints over v1, and HTTPS over plain HTTP. // Mirrors are not included. 
func (s *DefaultService) LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) { s.mu.Lock() defer s.mu.Unlock() allEndpoints, err := s.lookupEndpoints(hostname) if err == nil { for _, endpoint := range allEndpoints { if !endpoint.Mirror { endpoints = append(endpoints, endpoint) } } } return endpoints, err } func (s *DefaultService) lookupEndpoints(hostname string) (endpoints []APIEndpoint, err error) { return s.lookupV2Endpoints(hostname) }
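Editor's note: a usage sketch, not vendored code, for the Service methods above. It assumes reference.ParseNormalizedNamed from the vendored distribution/reference package is available to build a reference.Named.

package main

import (
    "fmt"

    "github.com/docker/distribution/reference"
    "github.com/docker/docker/registry"
)

func main() {
    svc, err := registry.NewService(registry.ServiceOptions{})
    if err != nil {
        panic(err)
    }
    named, err := reference.ParseNormalizedNamed("ubuntu") // expands to docker.io/library/ubuntu
    if err != nil {
        panic(err)
    }
    info, err := svc.ResolveRepository(named)
    if err != nil {
        panic(err)
    }
    // Short names resolve against the official index and are flagged Official.
    fmt.Println(info.Index.Name, info.Official) // docker.io true
}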
9,491
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/registry/session.go
package registry // import "github.com/docker/docker/registry" import ( "bytes" "crypto/sha256" // this is required for some certificates _ "crypto/sha512" "encoding/hex" "encoding/json" "fmt" "io" "io/ioutil" "net/http" "net/http/cookiejar" "net/url" "strconv" "strings" "sync" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/docker/api/types" registrytypes "github.com/docker/docker/api/types/registry" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/tarsum" "github.com/docker/docker/registry/resumable" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) var ( // ErrRepoNotFound is returned if the repository didn't exist on the // remote side ErrRepoNotFound notFoundError = "Repository not found" ) // A Session is used to communicate with a V1 registry type Session struct { indexEndpoint *V1Endpoint client *http.Client // TODO(tiborvass): remove authConfig authConfig *types.AuthConfig id string } type authTransport struct { http.RoundTripper *types.AuthConfig alwaysSetBasicAuth bool token []string mu sync.Mutex // guards modReq modReq map[*http.Request]*http.Request // original -> modified } // AuthTransport handles the auth layer when communicating with a v1 registry (private or official) // // For private v1 registries, set alwaysSetBasicAuth to true. // // For the official v1 registry, if there isn't already an Authorization header in the request, // but there is an X-Docker-Token header set to true, then Basic Auth will be used to set the Authorization header. // After sending the request with the provided base http.RoundTripper, if an X-Docker-Token header, representing // a token, is present in the response, then it gets cached and sent in the Authorization header of all subsequent // requests. // // If the server sends a token without the client having requested it, it is ignored. // // This RoundTripper also has a CancelRequest method important for correct timeout handling. func AuthTransport(base http.RoundTripper, authConfig *types.AuthConfig, alwaysSetBasicAuth bool) http.RoundTripper { if base == nil { base = http.DefaultTransport } return &authTransport{ RoundTripper: base, AuthConfig: authConfig, alwaysSetBasicAuth: alwaysSetBasicAuth, modReq: make(map[*http.Request]*http.Request), } } // cloneRequest returns a clone of the provided *http.Request. // The clone is a shallow copy of the struct and its Header map. func cloneRequest(r *http.Request) *http.Request { // shallow copy of the struct r2 := new(http.Request) *r2 = *r // deep copy of the Header r2.Header = make(http.Header, len(r.Header)) for k, s := range r.Header { r2.Header[k] = append([]string(nil), s...) } return r2 } // RoundTrip changes an HTTP request's headers to add the necessary // authentication-related headers func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) { // Authorization should not be set on 302 redirect for untrusted locations. // This logic mirrors the behavior in addRequiredHeadersToRedirectedRequests. // As the authorization logic is currently implemented in RoundTrip, // a 302 redirect is detected by looking at the Referrer header as go http package adds said header. // This is safe as Docker doesn't set Referrer in other scenarios. 
if orig.Header.Get("Referer") != "" && !trustedLocation(orig) { return tr.RoundTripper.RoundTrip(orig) } req := cloneRequest(orig) tr.mu.Lock() tr.modReq[orig] = req tr.mu.Unlock() if tr.alwaysSetBasicAuth { if tr.AuthConfig == nil { return nil, errors.New("unexpected error: empty auth config") } req.SetBasicAuth(tr.Username, tr.Password) return tr.RoundTripper.RoundTrip(req) } // Don't override if req.Header.Get("Authorization") == "" { if req.Header.Get("X-Docker-Token") == "true" && tr.AuthConfig != nil && len(tr.Username) > 0 { req.SetBasicAuth(tr.Username, tr.Password) } else if len(tr.token) > 0 { req.Header.Set("Authorization", "Token "+strings.Join(tr.token, ",")) } } resp, err := tr.RoundTripper.RoundTrip(req) if err != nil { delete(tr.modReq, orig) return nil, err } if len(resp.Header["X-Docker-Token"]) > 0 { tr.token = resp.Header["X-Docker-Token"] } resp.Body = &ioutils.OnEOFReader{ Rc: resp.Body, Fn: func() { tr.mu.Lock() delete(tr.modReq, orig) tr.mu.Unlock() }, } return resp, nil } // CancelRequest cancels an in-flight request by closing its connection. func (tr *authTransport) CancelRequest(req *http.Request) { type canceler interface { CancelRequest(*http.Request) } if cr, ok := tr.RoundTripper.(canceler); ok { tr.mu.Lock() modReq := tr.modReq[req] delete(tr.modReq, req) tr.mu.Unlock() cr.CancelRequest(modReq) } } func authorizeClient(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) error { var alwaysSetBasicAuth bool // If we're working with a standalone private registry over HTTPS, send Basic Auth headers // alongside all our requests. if endpoint.String() != IndexServer && endpoint.URL.Scheme == "https" { info, err := endpoint.Ping() if err != nil { return err } if info.Standalone && authConfig != nil { logrus.Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", endpoint.String()) alwaysSetBasicAuth = true } } // Annotate the transport unconditionally so that v2 can // properly fallback on v1 when an image is not found. client.Transport = AuthTransport(client.Transport, authConfig, alwaysSetBasicAuth) jar, err := cookiejar.New(nil) if err != nil { return errors.New("cookiejar.New is not supposed to return an error") } client.Jar = jar return nil } func newSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) *Session { return &Session{ authConfig: authConfig, client: client, indexEndpoint: endpoint, id: stringid.GenerateRandomID(), } } // NewSession creates a new session // TODO(tiborvass): remove authConfig param once registry client v2 is vendored func NewSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) (*Session, error) { if err := authorizeClient(client, authConfig, endpoint); err != nil { return nil, err } return newSession(client, authConfig, endpoint), nil } // ID returns this registry session's ID. func (r *Session) ID() string { return r.id } // GetRemoteHistory retrieves the history of a given image from the registry. // It returns a list of the parent's JSON files (including the requested image). 
func (r *Session) GetRemoteHistory(imgID, registry string) ([]string, error) { res, err := r.client.Get(registry + "images/" + imgID + "/ancestry") if err != nil { return nil, err } defer res.Body.Close() if res.StatusCode != 200 { if res.StatusCode == 401 { return nil, errcode.ErrorCodeUnauthorized.WithArgs() } return nil, newJSONError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res) } var history []string if err := json.NewDecoder(res.Body).Decode(&history); err != nil { return nil, fmt.Errorf("Error while reading the http response: %v", err) } logrus.Debugf("Ancestry: %v", history) return history, nil } // LookupRemoteImage checks if an image exists in the registry func (r *Session) LookupRemoteImage(imgID, registry string) error { res, err := r.client.Get(registry + "images/" + imgID + "/json") if err != nil { return err } res.Body.Close() if res.StatusCode != 200 { return newJSONError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) } return nil } // GetRemoteImageJSON retrieves an image's JSON metadata from the registry. func (r *Session) GetRemoteImageJSON(imgID, registry string) ([]byte, int64, error) { res, err := r.client.Get(registry + "images/" + imgID + "/json") if err != nil { return nil, -1, fmt.Errorf("Failed to download json: %s", err) } defer res.Body.Close() if res.StatusCode != 200 { return nil, -1, newJSONError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) } // if the size header is not present, then set it to '-1' imageSize := int64(-1) if hdr := res.Header.Get("X-Docker-Size"); hdr != "" { imageSize, err = strconv.ParseInt(hdr, 10, 64) if err != nil { return nil, -1, err } } jsonString, err := ioutil.ReadAll(res.Body) if err != nil { return nil, -1, fmt.Errorf("Failed to parse downloaded json: %v (%s)", err, jsonString) } return jsonString, imageSize, nil } // GetRemoteImageLayer retrieves an image layer from the registry func (r *Session) GetRemoteImageLayer(imgID, registry string, imgSize int64) (io.ReadCloser, error) { var ( statusCode = 0 res *http.Response err error imageURL = fmt.Sprintf("%simages/%s/layer", registry, imgID) ) req, err := http.NewRequest("GET", imageURL, nil) if err != nil { return nil, fmt.Errorf("Error while getting from the server: %v", err) } res, err = r.client.Do(req) if err != nil { logrus.Debugf("Error contacting registry %s: %v", registry, err) // the only case err != nil && res != nil is https://golang.org/src/net/http/client.go#L515 if res != nil { if res.Body != nil { res.Body.Close() } statusCode = res.StatusCode } return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", statusCode, imgID) } if res.StatusCode != 200 { res.Body.Close() return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", res.StatusCode, imgID) } if res.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 { logrus.Debug("server supports resume") return resumable.NewRequestReaderWithInitialResponse(r.client, req, 5, imgSize, res), nil } logrus.Debug("server doesn't support resume") return res.Body, nil } // GetRemoteTag retrieves the tag named in the askedTag argument from the given // repository. It queries each of the registries supplied in the registries // argument, and returns data from the first one that answers the query // successfully. 
func (r *Session) GetRemoteTag(registries []string, repositoryRef reference.Named, askedTag string) (string, error) { repository := reference.Path(repositoryRef) if strings.Count(repository, "/") == 0 { // This will be removed once the registry supports auto-resolution on // the "library" namespace repository = "library/" + repository } for _, host := range registries { endpoint := fmt.Sprintf("%srepositories/%s/tags/%s", host, repository, askedTag) res, err := r.client.Get(endpoint) if err != nil { return "", err } logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint) defer res.Body.Close() if res.StatusCode == 404 { return "", ErrRepoNotFound } if res.StatusCode != 200 { continue } var tagID string if err := json.NewDecoder(res.Body).Decode(&tagID); err != nil { return "", err } return tagID, nil } return "", fmt.Errorf("Could not reach any registry endpoint") } // GetRemoteTags retrieves all tags from the given repository. It queries each // of the registries supplied in the registries argument, and returns data from // the first one that answers the query successfully. It returns a map with // tag names as the keys and image IDs as the values. func (r *Session) GetRemoteTags(registries []string, repositoryRef reference.Named) (map[string]string, error) { repository := reference.Path(repositoryRef) if strings.Count(repository, "/") == 0 { // This will be removed once the registry supports auto-resolution on // the "library" namespace repository = "library/" + repository } for _, host := range registries { endpoint := fmt.Sprintf("%srepositories/%s/tags", host, repository) res, err := r.client.Get(endpoint) if err != nil { return nil, err } logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint) defer res.Body.Close() if res.StatusCode == 404 { return nil, ErrRepoNotFound } if res.StatusCode != 200 { continue } result := make(map[string]string) if err := json.NewDecoder(res.Body).Decode(&result); err != nil { return nil, err } return result, nil } return nil, fmt.Errorf("Could not reach any registry endpoint") } func buildEndpointsList(headers []string, indexEp string) ([]string, error) { var endpoints []string parsedURL, err := url.Parse(indexEp) if err != nil { return nil, err } var urlScheme = parsedURL.Scheme // The registry's URL scheme has to match the Index' for _, ep := range headers { epList := strings.Split(ep, ",") for _, epListElement := range epList { endpoints = append( endpoints, fmt.Sprintf("%s://%s/v1/", urlScheme, strings.TrimSpace(epListElement))) } } return endpoints, nil } // GetRepositoryData returns lists of images and endpoints for the repository func (r *Session) GetRepositoryData(name reference.Named) (*RepositoryData, error) { repositoryTarget := fmt.Sprintf("%srepositories/%s/images", r.indexEndpoint.String(), reference.Path(name)) logrus.Debugf("[registry] Calling GET %s", repositoryTarget) req, err := http.NewRequest("GET", repositoryTarget, nil) if err != nil { return nil, err } // this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests req.Header.Set("X-Docker-Token", "true") res, err := r.client.Do(req) if err != nil { // check if the error is because of i/o timeout // and return a non-obtuse error message for users // "Get https://index.docker.io/v1/repositories/library/busybox/images: i/o timeout" // was a top search on the docker user forum if isTimeout(err) { return nil, fmt.Errorf("network timed out while trying to connect to %s. 
You may want to check your internet connection or if you are behind a proxy", repositoryTarget) } return nil, fmt.Errorf("Error while pulling image: %v", err) } defer res.Body.Close() if res.StatusCode == 401 { return nil, errcode.ErrorCodeUnauthorized.WithArgs() } // TODO: Right now we're ignoring checksums in the response body. // In the future, we need to use them to check image validity. if res.StatusCode == 404 { return nil, newJSONError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res) } else if res.StatusCode != 200 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { logrus.Debugf("Error reading response body: %s", err) } return nil, newJSONError(fmt.Sprintf("Error: Status %d trying to pull repository %s: %q", res.StatusCode, reference.Path(name), errBody), res) } var endpoints []string if res.Header.Get("X-Docker-Endpoints") != "" { endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.String()) if err != nil { return nil, err } } else { // Assume the endpoint is on the same host endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", r.indexEndpoint.URL.Scheme, req.URL.Host)) } remoteChecksums := []*ImgData{} if err := json.NewDecoder(res.Body).Decode(&remoteChecksums); err != nil { return nil, err } // Forge a better object from the retrieved data imgsData := make(map[string]*ImgData, len(remoteChecksums)) for _, elem := range remoteChecksums { imgsData[elem.ID] = elem } return &RepositoryData{ ImgList: imgsData, Endpoints: endpoints, }, nil } // PushImageChecksumRegistry uploads checksums for an image func (r *Session) PushImageChecksumRegistry(imgData *ImgData, registry string) error { u := registry + "images/" + imgData.ID + "/checksum" logrus.Debugf("[registry] Calling PUT %s", u) req, err := http.NewRequest("PUT", u, nil) if err != nil { return err } req.Header.Set("X-Docker-Checksum", imgData.Checksum) req.Header.Set("X-Docker-Checksum-Payload", imgData.ChecksumPayload) res, err := r.client.Do(req) if err != nil { return fmt.Errorf("Failed to upload metadata: %v", err) } defer res.Body.Close() if len(res.Cookies()) > 0 { r.client.Jar.SetCookies(req.URL, res.Cookies()) } if res.StatusCode != 200 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { return fmt.Errorf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err) } var jsonBody map[string]string if err := json.Unmarshal(errBody, &jsonBody); err != nil { errBody = []byte(err.Error()) } else if jsonBody["error"] == "Image already exists" { return ErrAlreadyExists } return fmt.Errorf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody) } return nil } // PushImageJSONRegistry pushes JSON metadata for a local image to the registry func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, registry string) error { u := registry + "images/" + imgData.ID + "/json" logrus.Debugf("[registry] Calling PUT %s", u) req, err := http.NewRequest("PUT", u, bytes.NewReader(jsonRaw)) if err != nil { return err } req.Header.Add("Content-type", "application/json") res, err := r.client.Do(req) if err != nil { return fmt.Errorf("Failed to upload metadata: %s", err) } defer res.Body.Close() if res.StatusCode == 401 && strings.HasPrefix(registry, "http://") { return newJSONError("HTTP code 401, Docker will not send auth headers over HTTP.", res) } if res.StatusCode != 200 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { return newJSONError(fmt.Sprintf("HTTP code %d while uploading metadata and error 
when trying to parse response body: %s", res.StatusCode, err), res) } var jsonBody map[string]string if err := json.Unmarshal(errBody, &jsonBody); err != nil { errBody = []byte(err.Error()) } else if jsonBody["error"] == "Image already exists" { return ErrAlreadyExists } return newJSONError(fmt.Sprintf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody), res) } return nil } // PushImageLayerRegistry sends the checksum of an image layer to the registry func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, jsonRaw []byte) (checksum string, checksumPayload string, err error) { u := registry + "images/" + imgID + "/layer" logrus.Debugf("[registry] Calling PUT %s", u) tarsumLayer, err := tarsum.NewTarSum(layer, false, tarsum.Version0) if err != nil { return "", "", err } h := sha256.New() h.Write(jsonRaw) h.Write([]byte{'\n'}) checksumLayer := io.TeeReader(tarsumLayer, h) req, err := http.NewRequest("PUT", u, checksumLayer) if err != nil { return "", "", err } req.Header.Add("Content-Type", "application/octet-stream") req.ContentLength = -1 req.TransferEncoding = []string{"chunked"} res, err := r.client.Do(req) if err != nil { return "", "", fmt.Errorf("Failed to upload layer: %v", err) } if rc, ok := layer.(io.Closer); ok { if err := rc.Close(); err != nil { return "", "", err } } defer res.Body.Close() if res.StatusCode != 200 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { return "", "", newJSONError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) } return "", "", newJSONError(fmt.Sprintf("Received HTTP code %d while uploading layer: %q", res.StatusCode, errBody), res) } checksumPayload = "sha256:" + hex.EncodeToString(h.Sum(nil)) return tarsumLayer.Sum(jsonRaw), checksumPayload, nil } // PushRegistryTag pushes a tag on the registry. 
// Remote has the format '<user>/<repo> func (r *Session) PushRegistryTag(remote reference.Named, revision, tag, registry string) error { // "jsonify" the string revision = "\"" + revision + "\"" path := fmt.Sprintf("repositories/%s/tags/%s", reference.Path(remote), tag) req, err := http.NewRequest("PUT", registry+path, strings.NewReader(revision)) if err != nil { return err } req.Header.Add("Content-type", "application/json") req.ContentLength = int64(len(revision)) res, err := r.client.Do(req) if err != nil { return err } res.Body.Close() if res.StatusCode != 200 && res.StatusCode != 201 { return newJSONError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, reference.Path(remote)), res) } return nil } // PushImageJSONIndex uploads an image list to the repository func (r *Session) PushImageJSONIndex(remote reference.Named, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) { cleanImgList := []*ImgData{} if validate { for _, elem := range imgList { if elem.Checksum != "" { cleanImgList = append(cleanImgList, elem) } } } else { cleanImgList = imgList } imgListJSON, err := json.Marshal(cleanImgList) if err != nil { return nil, err } var suffix string if validate { suffix = "images" } u := fmt.Sprintf("%srepositories/%s/%s", r.indexEndpoint.String(), reference.Path(remote), suffix) logrus.Debugf("[registry] PUT %s", u) logrus.Debugf("Image list pushed to index:\n%s", imgListJSON) headers := map[string][]string{ "Content-type": {"application/json"}, // this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests "X-Docker-Token": {"true"}, } if validate { headers["X-Docker-Endpoints"] = regs } // Redirect if necessary var res *http.Response for { if res, err = r.putImageRequest(u, headers, imgListJSON); err != nil { return nil, err } if !shouldRedirect(res) { break } res.Body.Close() u = res.Header.Get("Location") logrus.Debugf("Redirected to %s", u) } defer res.Body.Close() if res.StatusCode == 401 { return nil, errcode.ErrorCodeUnauthorized.WithArgs() } var tokens, endpoints []string if !validate { if res.StatusCode != 200 && res.StatusCode != 201 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { logrus.Debugf("Error reading response body: %s", err) } return nil, newJSONError(fmt.Sprintf("Error: Status %d trying to push repository %s: %q", res.StatusCode, reference.Path(remote), errBody), res) } tokens = res.Header["X-Docker-Token"] logrus.Debugf("Auth token: %v", tokens) if res.Header.Get("X-Docker-Endpoints") == "" { return nil, fmt.Errorf("Index response didn't contain any endpoints") } endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.String()) if err != nil { return nil, err } } else { if res.StatusCode != 204 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { logrus.Debugf("Error reading response body: %s", err) } return nil, newJSONError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %q", res.StatusCode, reference.Path(remote), errBody), res) } } return &RepositoryData{ Endpoints: endpoints, }, nil } func (r *Session) putImageRequest(u string, headers map[string][]string, body []byte) (*http.Response, error) { req, err := http.NewRequest("PUT", u, bytes.NewReader(body)) if err != nil { return nil, err } req.ContentLength = int64(len(body)) for k, v := range headers { req.Header[k] = v } response, err := r.client.Do(req) if err != nil { return nil, err } return response, nil } func shouldRedirect(response 
*http.Response) bool { return response.StatusCode >= 300 && response.StatusCode < 400 } // SearchRepositories performs a search against the remote repository func (r *Session) SearchRepositories(term string, limit int) (*registrytypes.SearchResults, error) { if limit < 1 || limit > 100 { return nil, errdefs.InvalidParameter(errors.Errorf("Limit %d is outside the range of [1, 100]", limit)) } logrus.Debugf("Index server: %s", r.indexEndpoint) u := r.indexEndpoint.String() + "search?q=" + url.QueryEscape(term) + "&n=" + url.QueryEscape(fmt.Sprintf("%d", limit)) req, err := http.NewRequest("GET", u, nil) if err != nil { return nil, errors.Wrap(errdefs.InvalidParameter(err), "Error building request") } // Have the AuthTransport send authentication, when logged in. req.Header.Set("X-Docker-Token", "true") res, err := r.client.Do(req) if err != nil { return nil, errdefs.System(err) } defer res.Body.Close() if res.StatusCode != 200 { return nil, newJSONError(fmt.Sprintf("Unexpected status code %d", res.StatusCode), res) } result := new(registrytypes.SearchResults) return result, errors.Wrap(json.NewDecoder(res.Body).Decode(result), "error decoding registry search results") } func isTimeout(err error) bool { type timeout interface { Timeout() bool } e := err switch urlErr := err.(type) { case *url.Error: e = urlErr.Err } t, ok := e.(timeout) return ok && t.Timeout() } func newJSONError(msg string, res *http.Response) error { return &jsonmessage.JSONError{ Message: msg, Code: res.StatusCode, } }
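Editor's note: a minimal sketch, not vendored code, of wiring the exported AuthTransport above into an http.Client. The host and credentials are hypothetical; the request only illustrates the X-Docker-Token flow described in the comments.

package main

import (
    "net/http"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/registry"
)

func main() {
    auth := &types.AuthConfig{Username: "alice", Password: "secret"} // hypothetical credentials
    client := &http.Client{
        Transport: registry.AuthTransport(http.DefaultTransport, auth, false),
    }

    req, _ := http.NewRequest("GET", "https://registry.example.com/v1/_ping", nil) // hypothetical endpoint
    // Asking for a token makes the transport send Basic auth and cache any
    // X-Docker-Token the server returns for subsequent requests.
    req.Header.Set("X-Docker-Token", "true")
    _, _ = client.Do(req)
}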
9,492
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/registry/errors.go
package registry // import "github.com/docker/docker/registry"

import (
	"net/url"

	"github.com/docker/distribution/registry/api/errcode"
	"github.com/docker/docker/errdefs"
)

type notFoundError string

func (e notFoundError) Error() string {
	return string(e)
}

func (notFoundError) NotFound() {}

func translateV2AuthError(err error) error {
	switch e := err.(type) {
	case *url.Error:
		switch e2 := e.Err.(type) {
		case errcode.Error:
			switch e2.Code {
			case errcode.ErrorCodeUnauthorized:
				return errdefs.Unauthorized(err)
			}
		}
	}

	return err
}
9,493
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/registry/config_unix.go
// +build !windows

package registry // import "github.com/docker/docker/registry"

var (
	// CertsDir is the directory where certificates are stored
	CertsDir = "/etc/docker/certs.d"
)

// cleanPath is used to ensure that a directory name is valid on the target
// platform. It will be passed in something *similar* to a URL such as
// https:/index.docker.io/v1. Not all platforms support directory names
// which contain those characters (such as : on Windows)
func cleanPath(s string) string {
	return s
}
9,494
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/registry/endpoint_v1.go
package registry // import "github.com/docker/docker/registry" import ( "crypto/tls" "encoding/json" "fmt" "io/ioutil" "net/http" "net/url" "strings" "github.com/docker/distribution/registry/client/transport" registrytypes "github.com/docker/docker/api/types/registry" "github.com/sirupsen/logrus" ) // V1Endpoint stores basic information about a V1 registry endpoint. type V1Endpoint struct { client *http.Client URL *url.URL IsSecure bool } // NewV1Endpoint parses the given address to return a registry endpoint. func NewV1Endpoint(index *registrytypes.IndexInfo, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { tlsConfig, err := newTLSConfig(index.Name, index.Secure) if err != nil { return nil, err } endpoint, err := newV1EndpointFromStr(GetAuthConfigKey(index), tlsConfig, userAgent, metaHeaders) if err != nil { return nil, err } if err := validateEndpoint(endpoint); err != nil { return nil, err } return endpoint, nil } func validateEndpoint(endpoint *V1Endpoint) error { logrus.Debugf("pinging registry endpoint %s", endpoint) // Try HTTPS ping to registry endpoint.URL.Scheme = "https" if _, err := endpoint.Ping(); err != nil { if endpoint.IsSecure { // If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry` // in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fallback to HTTP. return fmt.Errorf("invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host) } // If registry is insecure and HTTPS failed, fallback to HTTP. logrus.Debugf("Error from registry %q marked as insecure: %v. Insecurely falling back to HTTP", endpoint, err) endpoint.URL.Scheme = "http" var err2 error if _, err2 = endpoint.Ping(); err2 == nil { return nil } return fmt.Errorf("invalid registry endpoint %q. HTTPS attempt: %v. HTTP attempt: %v", endpoint, err, err2) } return nil } func newV1Endpoint(address url.URL, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) *V1Endpoint { endpoint := &V1Endpoint{ IsSecure: tlsConfig == nil || !tlsConfig.InsecureSkipVerify, URL: new(url.URL), } *endpoint.URL = address // TODO(tiborvass): make sure a ConnectTimeout transport is used tr := NewTransport(tlsConfig) endpoint.client = HTTPClient(transport.NewTransport(tr, Headers(userAgent, metaHeaders)...)) return endpoint } // trimV1Address trims the version off the address and returns the // trimmed address or an error if there is a non-V1 version. 
func trimV1Address(address string) (string, error) { var ( chunks []string apiVersionStr string ) if strings.HasSuffix(address, "/") { address = address[:len(address)-1] } chunks = strings.Split(address, "/") apiVersionStr = chunks[len(chunks)-1] if apiVersionStr == "v1" { return strings.Join(chunks[:len(chunks)-1], "/"), nil } for k, v := range apiVersions { if k != APIVersion1 && apiVersionStr == v { return "", fmt.Errorf("unsupported V1 version path %s", apiVersionStr) } } return address, nil } func newV1EndpointFromStr(address string, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { if !strings.HasPrefix(address, "http://") && !strings.HasPrefix(address, "https://") { address = "https://" + address } address, err := trimV1Address(address) if err != nil { return nil, err } uri, err := url.Parse(address) if err != nil { return nil, err } endpoint := newV1Endpoint(*uri, tlsConfig, userAgent, metaHeaders) if err != nil { return nil, err } return endpoint, nil } // Get the formatted URL for the root of this registry Endpoint func (e *V1Endpoint) String() string { return e.URL.String() + "/v1/" } // Path returns a formatted string for the URL // of this endpoint with the given path appended. func (e *V1Endpoint) Path(path string) string { return e.URL.String() + "/v1/" + path } // Ping returns a PingResult which indicates whether the registry is standalone or not. func (e *V1Endpoint) Ping() (PingResult, error) { logrus.Debugf("attempting v1 ping for registry endpoint %s", e) if e.String() == IndexServer { // Skip the check, we know this one is valid // (and we never want to fallback to http in case of error) return PingResult{Standalone: false}, nil } req, err := http.NewRequest("GET", e.Path("_ping"), nil) if err != nil { return PingResult{Standalone: false}, err } resp, err := e.client.Do(req) if err != nil { return PingResult{Standalone: false}, err } defer resp.Body.Close() jsonString, err := ioutil.ReadAll(resp.Body) if err != nil { return PingResult{Standalone: false}, fmt.Errorf("error while reading the http response: %s", err) } // If the header is absent, we assume true for compatibility with earlier // versions of the registry. default to true info := PingResult{ Standalone: true, } if err := json.Unmarshal(jsonString, &info); err != nil { logrus.Debugf("Error unmarshaling the _ping PingResult: %s", err) // don't stop here. Just assume sane defaults } if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" { logrus.Debugf("Registry version header: '%s'", hdr) info.Version = hdr } logrus.Debugf("PingResult.Version: %q", info.Version) standalone := resp.Header.Get("X-Docker-Registry-Standalone") logrus.Debugf("Registry standalone header: '%s'", standalone) // Accepted values are "true" (case-insensitive) and "1". if strings.EqualFold(standalone, "true") || standalone == "1" { info.Standalone = true } else if len(standalone) > 0 { // there is a header set, and it is not "true" or "1", so assume fails info.Standalone = false } logrus.Debugf("PingResult.Standalone: %t", info.Standalone) return info, nil }
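Editor's note: a sketch, not vendored code, of constructing a V1 endpoint with NewV1Endpoint and pinging it. The registry address is hypothetical, and the call only succeeds against a reachable registry; errors are simply printed.

package main

import (
    "fmt"

    registrytypes "github.com/docker/docker/api/types/registry"
    "github.com/docker/docker/registry"
)

func main() {
    // Hypothetical private index; Secure=false lets validateEndpoint fall back to HTTP.
    index := &registrytypes.IndexInfo{Name: "registry.example.com:5000", Secure: false}
    endpoint, err := registry.NewV1Endpoint(index, "docker-example/1.0", nil)
    if err != nil {
        fmt.Println("endpoint validation failed:", err)
        return
    }
    ping, err := endpoint.Ping()
    fmt.Println(endpoint.String(), ping.Standalone, err)
}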
9,495
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/registry
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/registry/resumable/resumablerequestreader.go
package resumable // import "github.com/docker/docker/registry/resumable" import ( "fmt" "io" "net/http" "time" "github.com/sirupsen/logrus" ) type requestReader struct { client *http.Client request *http.Request lastRange int64 totalSize int64 currentResponse *http.Response failures uint32 maxFailures uint32 waitDuration time.Duration } // NewRequestReader makes it possible to resume reading a request's body transparently // maxfail is the number of times we retry to make requests again (not resumes) // totalsize is the total length of the body; auto detect if not provided func NewRequestReader(c *http.Client, r *http.Request, maxfail uint32, totalsize int64) io.ReadCloser { return &requestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, waitDuration: 5 * time.Second} } // NewRequestReaderWithInitialResponse makes it possible to resume // reading the body of an already initiated request. func NewRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser { return &requestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse, waitDuration: 5 * time.Second} } func (r *requestReader) Read(p []byte) (n int, err error) { if r.client == nil || r.request == nil { return 0, fmt.Errorf("client and request can't be nil") } isFreshRequest := false if r.lastRange != 0 && r.currentResponse == nil { readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize) r.request.Header.Set("Range", readRange) time.Sleep(r.waitDuration) } if r.currentResponse == nil { r.currentResponse, err = r.client.Do(r.request) isFreshRequest = true } if err != nil && r.failures+1 != r.maxFailures { r.cleanUpResponse() r.failures++ time.Sleep(time.Duration(r.failures) * r.waitDuration) return 0, nil } else if err != nil { r.cleanUpResponse() return 0, err } if r.currentResponse.StatusCode == 416 && r.lastRange == r.totalSize && r.currentResponse.ContentLength == 0 { r.cleanUpResponse() return 0, io.EOF } else if r.currentResponse.StatusCode != 206 && r.lastRange != 0 && isFreshRequest { r.cleanUpResponse() return 0, fmt.Errorf("the server doesn't support byte ranges") } if r.totalSize == 0 { r.totalSize = r.currentResponse.ContentLength } else if r.totalSize <= 0 { r.cleanUpResponse() return 0, fmt.Errorf("failed to auto detect content length") } n, err = r.currentResponse.Body.Read(p) r.lastRange += int64(n) if err != nil { r.cleanUpResponse() } if err != nil && err != io.EOF { logrus.Infof("encountered error during pull and clearing it before resume: %s", err) err = nil } return n, err } func (r *requestReader) Close() error { r.cleanUpResponse() r.client = nil r.request = nil return nil } func (r *requestReader) cleanUpResponse() { if r.currentResponse != nil { r.currentResponse.Body.Close() r.currentResponse = nil } }
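Editor's note: a usage sketch, not vendored code, for the resumable reader above. The URL is hypothetical, and passing 0 as totalsize lets the reader auto-detect the length from Content-Length as documented.

package main

import (
    "io"
    "net/http"
    "os"

    "github.com/docker/docker/registry/resumable"
)

func main() {
    client := &http.Client{}
    req, err := http.NewRequest("GET", "https://example.com/layers/abc/layer", nil) // hypothetical URL
    if err != nil {
        panic(err)
    }
    // Retry up to 5 times; 0 means auto-detect the total size.
    body := resumable.NewRequestReader(client, req, 5, 0)
    defer body.Close()
    _, _ = io.Copy(os.Stdout, body)
}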
9,496
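Aside, not part of the vendored file above: a minimal usage sketch for the resumable reader. The download URL, the retry count of 5, and the use of http.DefaultClient are illustrative assumptions; passing a total size of 0 relies on the reader auto-detecting the length from the first response's Content-Length.

package main

import (
	"io"
	"net/http"
	"os"

	"github.com/docker/docker/registry/resumable"
)

func main() {
	// Hypothetical URL; the server must support HTTP Range requests for resumes to work.
	req, err := http.NewRequest("GET", "https://example.com/layer.tar", nil)
	if err != nil {
		panic(err)
	}
	// Retry up to 5 times; totalsize 0 lets the reader detect the length itself.
	body := resumable.NewRequestReader(http.DefaultClient, req, 5, 0)
	defer body.Close()
	if _, err := io.Copy(os.Stdout, body); err != nil {
		panic(err)
	}
}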
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/seccomp.go
package types // import "github.com/docker/docker/api/types"

// Seccomp represents the config for a seccomp profile for syscall restriction.
type Seccomp struct {
	DefaultAction Action `json:"defaultAction"`
	// Architectures is kept to maintain backward compatibility with the old
	// seccomp profile.
	Architectures []Arch         `json:"architectures,omitempty"`
	ArchMap       []Architecture `json:"archMap,omitempty"`
	Syscalls      []*Syscall     `json:"syscalls"`
}

// Architecture is used to represent a specific architecture
// and its sub-architectures
type Architecture struct {
	Arch      Arch   `json:"architecture"`
	SubArches []Arch `json:"subArchitectures"`
}

// Arch used for architectures
type Arch string

// Additional architectures permitted to be used for system calls
// By default only the native architecture of the kernel is permitted
const (
	ArchX86         Arch = "SCMP_ARCH_X86"
	ArchX86_64      Arch = "SCMP_ARCH_X86_64"
	ArchX32         Arch = "SCMP_ARCH_X32"
	ArchARM         Arch = "SCMP_ARCH_ARM"
	ArchAARCH64     Arch = "SCMP_ARCH_AARCH64"
	ArchMIPS        Arch = "SCMP_ARCH_MIPS"
	ArchMIPS64      Arch = "SCMP_ARCH_MIPS64"
	ArchMIPS64N32   Arch = "SCMP_ARCH_MIPS64N32"
	ArchMIPSEL      Arch = "SCMP_ARCH_MIPSEL"
	ArchMIPSEL64    Arch = "SCMP_ARCH_MIPSEL64"
	ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32"
	ArchPPC         Arch = "SCMP_ARCH_PPC"
	ArchPPC64       Arch = "SCMP_ARCH_PPC64"
	ArchPPC64LE     Arch = "SCMP_ARCH_PPC64LE"
	ArchS390        Arch = "SCMP_ARCH_S390"
	ArchS390X       Arch = "SCMP_ARCH_S390X"
)

// Action taken upon Seccomp rule match
type Action string

// Define actions for Seccomp rules
const (
	ActKill  Action = "SCMP_ACT_KILL"
	ActTrap  Action = "SCMP_ACT_TRAP"
	ActErrno Action = "SCMP_ACT_ERRNO"
	ActTrace Action = "SCMP_ACT_TRACE"
	ActAllow Action = "SCMP_ACT_ALLOW"
)

// Operator used to match syscall arguments in Seccomp
type Operator string

// Define operators for syscall arguments in Seccomp
const (
	OpNotEqual     Operator = "SCMP_CMP_NE"
	OpLessThan     Operator = "SCMP_CMP_LT"
	OpLessEqual    Operator = "SCMP_CMP_LE"
	OpEqualTo      Operator = "SCMP_CMP_EQ"
	OpGreaterEqual Operator = "SCMP_CMP_GE"
	OpGreaterThan  Operator = "SCMP_CMP_GT"
	OpMaskedEqual  Operator = "SCMP_CMP_MASKED_EQ"
)

// Arg used for matching specific syscall arguments in Seccomp
type Arg struct {
	Index    uint     `json:"index"`
	Value    uint64   `json:"value"`
	ValueTwo uint64   `json:"valueTwo"`
	Op       Operator `json:"op"`
}

// Filter is used to conditionally apply Seccomp rules
type Filter struct {
	Caps      []string `json:"caps,omitempty"`
	Arches    []string `json:"arches,omitempty"`
	MinKernel string   `json:"minKernel,omitempty"`
}

// Syscall is used to match a group of syscalls in Seccomp
type Syscall struct {
	Name     string   `json:"name,omitempty"`
	Names    []string `json:"names,omitempty"`
	Action   Action   `json:"action"`
	Args     []*Arg   `json:"args"`
	Comment  string   `json:"comment"`
	Includes Filter   `json:"includes"`
	Excludes Filter   `json:"excludes"`
}
9,497
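Aside, not part of the vendored file above: a sketch of assembling a small profile from these seccomp types and printing it as JSON. The syscall names and architecture list are illustrative only, not a recommended policy.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	profile := types.Seccomp{
		DefaultAction: types.ActErrno, // deny everything not explicitly allowed
		Architectures: []types.Arch{types.ArchX86_64, types.ArchX86},
		Syscalls: []*types.Syscall{
			{
				// Allow a handful of syscalls as an example.
				Names:  []string{"read", "write", "exit_group"},
				Action: types.ActAllow,
			},
		},
	}
	out, err := json.MarshalIndent(profile, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}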
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/plugin_device.go
package types

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command

// PluginDevice plugin device
// swagger:model PluginDevice
type PluginDevice struct {

	// description
	// Required: true
	Description string `json:"Description"`

	// name
	// Required: true
	Name string `json:"Name"`

	// path
	// Required: true
	Path *string `json:"Path"`

	// settable
	// Required: true
	Settable []string `json:"Settable"`
}
9,498
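Aside, not part of the vendored file above: a short sketch of filling in the generated PluginDevice model; note that Path is a *string, so the value has to be passed by address. The device name and path are made up for illustration.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	path := "/dev/fuse" // illustrative device path
	dev := types.PluginDevice{
		Name:        "fuse",
		Description: "FUSE device needed by the plugin",
		Path:        &path,
		Settable:    []string{"path"},
	}
	out, err := json.Marshal(dev)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}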
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/client.go
package types // import "github.com/docker/docker/api/types"

import (
	"bufio"
	"io"
	"net"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/filters"
	units "github.com/docker/go-units"
)

// CheckpointCreateOptions holds parameters to create a checkpoint from a container
type CheckpointCreateOptions struct {
	CheckpointID  string
	CheckpointDir string
	Exit          bool
}

// CheckpointListOptions holds parameters to list checkpoints for a container
type CheckpointListOptions struct {
	CheckpointDir string
}

// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container
type CheckpointDeleteOptions struct {
	CheckpointID  string
	CheckpointDir string
}

// ContainerAttachOptions holds parameters to attach to a container.
type ContainerAttachOptions struct {
	Stream     bool
	Stdin      bool
	Stdout     bool
	Stderr     bool
	DetachKeys string
	Logs       bool
}

// ContainerCommitOptions holds parameters to commit changes into a container.
type ContainerCommitOptions struct {
	Reference string
	Comment   string
	Author    string
	Changes   []string
	Pause     bool
	Config    *container.Config
}

// ContainerExecInspect holds information returned by exec inspect.
type ContainerExecInspect struct {
	ExecID      string
	ContainerID string
	Running     bool
	ExitCode    int
	Pid         int
}

// ContainerListOptions holds parameters to list containers with.
type ContainerListOptions struct {
	Quiet   bool
	Size    bool
	All     bool
	Latest  bool
	Since   string
	Before  string
	Limit   int
	Filters filters.Args
}

// ContainerLogsOptions holds parameters to filter logs with.
type ContainerLogsOptions struct {
	ShowStdout bool
	ShowStderr bool
	Since      string
	Until      string
	Timestamps bool
	Follow     bool
	Tail       string
	Details    bool
}

// ContainerRemoveOptions holds parameters to remove containers.
type ContainerRemoveOptions struct {
	RemoveVolumes bool
	RemoveLinks   bool
	Force         bool
}

// ContainerStartOptions holds parameters to start containers.
type ContainerStartOptions struct {
	CheckpointID  string
	CheckpointDir string
}

// CopyToContainerOptions holds information
// about files to copy into a container
type CopyToContainerOptions struct {
	AllowOverwriteDirWithFile bool
	CopyUIDGID                bool
}

// EventsOptions holds parameters to filter events with.
type EventsOptions struct {
	Since   string
	Until   string
	Filters filters.Args
}

// NetworkListOptions holds parameters to filter the list of networks with.
type NetworkListOptions struct {
	Filters filters.Args
}

// HijackedResponse holds connection information for a hijacked request.
type HijackedResponse struct {
	Conn   net.Conn
	Reader *bufio.Reader
}

// Close closes the hijacked connection and reader.
func (h *HijackedResponse) Close() {
	h.Conn.Close()
}

// CloseWriter is an interface that implements structs
// that close input streams to prevent from writing.
type CloseWriter interface {
	CloseWrite() error
}

// CloseWrite closes a readWriter for writing.
func (h *HijackedResponse) CloseWrite() error {
	if conn, ok := h.Conn.(CloseWriter); ok {
		return conn.CloseWrite()
	}
	return nil
}

// ImageBuildOptions holds the information
// necessary to build images.
type ImageBuildOptions struct {
	Tags           []string
	SuppressOutput bool
	RemoteContext  string
	NoCache        bool
	Remove         bool
	ForceRemove    bool
	PullParent     bool
	Isolation      container.Isolation
	CPUSetCPUs     string
	CPUSetMems     string
	CPUShares      int64
	CPUQuota       int64
	CPUPeriod      int64
	Memory         int64
	MemorySwap     int64
	CgroupParent   string
	NetworkMode    string
	ShmSize        int64
	Dockerfile     string
	Ulimits        []*units.Ulimit
	// BuildArgs needs to be a *string instead of just a string so that
	// we can tell the difference between "" (empty string) and no value
	// at all (nil). See the parsing of buildArgs in
	// api/server/router/build/build_routes.go for even more info.
	BuildArgs   map[string]*string
	AuthConfigs map[string]AuthConfig
	Context     io.Reader
	Labels      map[string]string
	// squash the resulting image's layers to the parent
	// preserves the original image and creates a new one from the parent with all
	// the changes applied to a single layer
	Squash bool
	// CacheFrom specifies images that are used for matching cache. Images
	// specified here do not need to have a valid parent chain to match cache.
	CacheFrom   []string
	SecurityOpt []string
	ExtraHosts  []string // List of extra hosts
	Target      string
	SessionID   string
	Platform    string
	// Version specifies the version of the underlying builder to use
	Version BuilderVersion
	// BuildID is an optional identifier that can be passed together with the
	// build request. The same identifier can be used to gracefully cancel the
	// build with the cancel request.
	BuildID string
	// Outputs defines configurations for exporting build results. Only supported
	// in BuildKit mode
	Outputs []ImageBuildOutput
}

// ImageBuildOutput defines configuration for exporting a build result
type ImageBuildOutput struct {
	Type  string
	Attrs map[string]string
}

// BuilderVersion sets the version of underlying builder to use
type BuilderVersion string

const (
	// BuilderV1 is the first generation builder in docker daemon
	BuilderV1 BuilderVersion = "1"
	// BuilderBuildKit is builder based on moby/buildkit project
	BuilderBuildKit = "2"
)

// ImageBuildResponse holds information
// returned by a server after building
// an image.
type ImageBuildResponse struct {
	Body   io.ReadCloser
	OSType string
}

// ImageCreateOptions holds information to create images.
type ImageCreateOptions struct {
	RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry.
	Platform     string // Platform is the target platform of the image if it needs to be pulled from the registry.
}

// ImageImportSource holds source information for ImageImport
type ImageImportSource struct {
	Source     io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this.
	SourceName string    // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute.
}

// ImageImportOptions holds information to import images from the client host.
type ImageImportOptions struct {
	Tag      string   // Tag is the name to tag this image with. This attribute is deprecated.
	Message  string   // Message is the message to tag the image with
	Changes  []string // Changes are the raw changes to apply to this image
	Platform string   // Platform is the target platform of the image
}

// ImageListOptions holds parameters to filter the list of images with.
type ImageListOptions struct {
	All     bool
	Filters filters.Args
}

// ImageLoadResponse returns information to the client about a load process.
type ImageLoadResponse struct {
	// Body must be closed to avoid a resource leak
	Body io.ReadCloser
	JSON bool
}

// ImagePullOptions holds information to pull images.
type ImagePullOptions struct {
	All           bool
	RegistryAuth  string // RegistryAuth is the base64 encoded credentials for the registry
	PrivilegeFunc RequestPrivilegeFunc
	Platform      string
}

// RequestPrivilegeFunc is a function interface that
// clients can supply to retry operations after
// getting an authorization error.
// This function returns the registry authentication
// header value in base 64 format, or an error
// if the privilege request fails.
type RequestPrivilegeFunc func() (string, error)

// ImagePushOptions holds information to push images.
type ImagePushOptions ImagePullOptions

// ImageRemoveOptions holds parameters to remove images.
type ImageRemoveOptions struct {
	Force         bool
	PruneChildren bool
}

// ImageSearchOptions holds parameters to search images with.
type ImageSearchOptions struct {
	RegistryAuth  string
	PrivilegeFunc RequestPrivilegeFunc
	Filters       filters.Args
	Limit         int
}

// ResizeOptions holds parameters to resize a tty.
// It can be used to resize container ttys and
// exec process ttys too.
type ResizeOptions struct {
	Height uint
	Width  uint
}

// NodeListOptions holds parameters to list nodes with.
type NodeListOptions struct {
	Filters filters.Args
}

// NodeRemoveOptions holds parameters to remove nodes with.
type NodeRemoveOptions struct {
	Force bool
}

// ServiceCreateOptions contains the options to use when creating a service.
type ServiceCreateOptions struct {
	// EncodedRegistryAuth is the encoded registry authorization credentials to
	// use when updating the service.
	//
	// This field follows the format of the X-Registry-Auth header.
	EncodedRegistryAuth string

	// QueryRegistry indicates whether the service update requires
	// contacting a registry. A registry may be contacted to retrieve
	// the image digest and manifest, which in turn can be used to update
	// platform or other information about the service.
	QueryRegistry bool
}

// ServiceCreateResponse contains the information returned to a client
// on the creation of a new service.
type ServiceCreateResponse struct {
	// ID is the ID of the created service.
	ID string
	// Warnings is a set of non-fatal warning messages to pass on to the user.
	Warnings []string `json:",omitempty"`
}

// Values for RegistryAuthFrom in ServiceUpdateOptions
const (
	RegistryAuthFromSpec         = "spec"
	RegistryAuthFromPreviousSpec = "previous-spec"
)

// ServiceUpdateOptions contains the options to be used for updating services.
type ServiceUpdateOptions struct {
	// EncodedRegistryAuth is the encoded registry authorization credentials to
	// use when updating the service.
	//
	// This field follows the format of the X-Registry-Auth header.
	EncodedRegistryAuth string

	// TODO(stevvooe): Consider moving the version parameter of ServiceUpdate
	// into this field. While it does open API users up to racy writes, most
	// users may not need that level of consistency in practice.

	// RegistryAuthFrom specifies where to find the registry authorization
	// credentials if they are not given in EncodedRegistryAuth. Valid
	// values are "spec" and "previous-spec".
	RegistryAuthFrom string

	// Rollback indicates whether a server-side rollback should be
	// performed. When this is set, the provided spec will be ignored.
	// The valid values are "previous" and "none". An empty value is the
	// same as "none".
	Rollback string

	// QueryRegistry indicates whether the service update requires
	// contacting a registry. A registry may be contacted to retrieve
	// the image digest and manifest, which in turn can be used to update
	// platform or other information about the service.
	QueryRegistry bool
}

// ServiceListOptions holds parameters to list services with.
type ServiceListOptions struct {
	Filters filters.Args
}

// ServiceInspectOptions holds parameters related to the "service inspect"
// operation.
type ServiceInspectOptions struct {
	InsertDefaults bool
}

// TaskListOptions holds parameters to list tasks with.
type TaskListOptions struct {
	Filters filters.Args
}

// PluginRemoveOptions holds parameters to remove plugins.
type PluginRemoveOptions struct {
	Force bool
}

// PluginEnableOptions holds parameters to enable plugins.
type PluginEnableOptions struct {
	Timeout int
}

// PluginDisableOptions holds parameters to disable plugins.
type PluginDisableOptions struct {
	Force bool
}

// PluginInstallOptions holds parameters to install a plugin.
type PluginInstallOptions struct {
	Disabled              bool
	AcceptAllPermissions  bool
	RegistryAuth          string // RegistryAuth is the base64 encoded credentials for the registry
	RemoteRef             string // RemoteRef is the plugin name on the registry
	PrivilegeFunc         RequestPrivilegeFunc
	AcceptPermissionsFunc func(PluginPrivileges) (bool, error)
	Args                  []string
}

// SwarmUnlockKeyResponse contains the response for Engine API:
// GET /swarm/unlockkey
type SwarmUnlockKeyResponse struct {
	// UnlockKey is the unlock key in ASCII-armored format.
	UnlockKey string
}

// PluginCreateOptions hold all options to plugin create.
type PluginCreateOptions struct {
	RepoName string
}
9,499
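Aside, not part of the vendored file above: a sketch of the BuildArgs convention documented on ImageBuildOptions, where a nil *string means the argument was declared without a value and a pointer to "" means it was explicitly set to empty. The image tag and argument names are illustrative assumptions.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	empty := ""
	opts := types.ImageBuildOptions{
		Tags:       []string{"example/app:latest"}, // illustrative tag
		Dockerfile: "Dockerfile",
		BuildArgs: map[string]*string{
			"HTTP_PROXY": nil,    // declared but left unset
			"APP_ENV":    &empty, // explicitly set to the empty string
		},
	}
	for k, v := range opts.BuildArgs {
		if v == nil {
			fmt.Printf("%s: <unset>\n", k)
			continue
		}
		fmt.Printf("%s: %q\n", k, *v)
	}
}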