Dataset schema (one record per row below):

  index              int64   (0 to 0)
  repo_id            string  (21 to 232 chars)
  file_path          string  (34 to 259 chars)
  content            string  (1 to 14.1M chars)
  __index_level_0__  int64   (0 to 10k)
0
kubeflow_public_repos/fate-operator/vendor/github.com/Masterminds
kubeflow_public_repos/fate-operator/vendor/github.com/Masterminds/squirrel/delete_ctx.go
// +build go1.8

package squirrel

import (
    "context"
    "database/sql"

    "github.com/lann/builder"
)

func (d *deleteData) ExecContext(ctx context.Context) (sql.Result, error) {
    if d.RunWith == nil {
        return nil, RunnerNotSet
    }
    ctxRunner, ok := d.RunWith.(ExecerContext)
    if !ok {
        return nil, NoContextSupport
    }
    return ExecContextWith(ctx, ctxRunner, d)
}

func (d *deleteData) QueryContext(ctx context.Context) (*sql.Rows, error) {
    if d.RunWith == nil {
        return nil, RunnerNotSet
    }
    ctxRunner, ok := d.RunWith.(QueryerContext)
    if !ok {
        return nil, NoContextSupport
    }
    return QueryContextWith(ctx, ctxRunner, d)
}

func (d *deleteData) QueryRowContext(ctx context.Context) RowScanner {
    if d.RunWith == nil {
        return &Row{err: RunnerNotSet}
    }
    queryRower, ok := d.RunWith.(QueryRowerContext)
    if !ok {
        if _, ok := d.RunWith.(QueryerContext); !ok {
            return &Row{err: RunnerNotQueryRunner}
        }
        return &Row{err: NoContextSupport}
    }
    return QueryRowContextWith(ctx, queryRower, d)
}

// ExecContext builds and ExecContexts the query with the Runner set by RunWith.
func (b DeleteBuilder) ExecContext(ctx context.Context) (sql.Result, error) {
    data := builder.GetStruct(b).(deleteData)
    return data.ExecContext(ctx)
}

// QueryContext builds and QueryContexts the query with the Runner set by RunWith.
func (b DeleteBuilder) QueryContext(ctx context.Context) (*sql.Rows, error) {
    data := builder.GetStruct(b).(deleteData)
    return data.QueryContext(ctx)
}

// QueryRowContext builds and QueryRowContexts the query with the Runner set by RunWith.
func (b DeleteBuilder) QueryRowContext(ctx context.Context) RowScanner {
    data := builder.GetStruct(b).(deleteData)
    return data.QueryRowContext(ctx)
}

// ScanContext is a shortcut for QueryRowContext().Scan.
func (b DeleteBuilder) ScanContext(ctx context.Context, dest ...interface{}) error {
    return b.QueryRowContext(ctx).Scan(dest...)
}
9,300
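A minimal usage sketch for the context-aware builder methods in the file above. The table and column names are illustrative assumptions; `Delete`, `Where`, `Eq`, and `RunWith` are squirrel's public builder API, and `*sql.DB` satisfies the ExecerContext interface checked in `deleteData.ExecContext`:

```go
package example

import (
    "context"
    "database/sql"

    sq "github.com/Masterminds/squirrel"
)

func deleteUser(ctx context.Context, db *sql.DB) error {
    // RunWith attaches the runner; ExecContext then flows through the
    // deleteData.ExecContext path above, returning NoContextSupport if
    // the runner does not implement ExecContext.
    _, err := sq.Delete("users").
        Where(sq.Eq{"id": 42}).
        RunWith(db).
        ExecContext(ctx)
    return err
}
```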
0
kubeflow_public_repos/fate-operator/vendor/github.com/Masterminds
kubeflow_public_repos/fate-operator/vendor/github.com/Masterminds/squirrel/.travis.yml
language: go

go:
  - 1.11.x
  - 1.12.x
  - 1.13.x

services:
  - mysql
  - postgresql

# Setting sudo access to false will let Travis CI use containers rather than
# VMs to run the tests. For more details see:
# - http://docs.travis-ci.com/user/workers/container-based-infrastructure/
# - http://docs.travis-ci.com/user/workers/standard-infrastructure/
sudo: false

before_script:
  - mysql -e 'CREATE DATABASE squirrel;'
  - psql -c 'CREATE DATABASE squirrel;' -U postgres

script:
  - go test
  - cd integration
  - go test -args -driver sqlite3
  - go test -args -driver mysql -dataSource travis@/squirrel
  - go test -args -driver postgres -dataSource 'postgres://postgres@localhost/squirrel?sslmode=disable'

notifications:
  irc: "irc.freenode.net#masterminds"
9,301
0
kubeflow_public_repos/fate-operator/vendor/github.com/moby
kubeflow_public_repos/fate-operator/vendor/github.com/moby/term/termios_bsd.go
// +build darwin freebsd openbsd netbsd

package term // import "github.com/moby/term"

import (
    "unsafe"

    "golang.org/x/sys/unix"
)

const (
    getTermios = unix.TIOCGETA
    setTermios = unix.TIOCSETA
)

// Termios is the Unix API for terminal I/O.
type Termios unix.Termios

// MakeRaw puts the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd uintptr) (*State, error) {
    var oldState State
    if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
        return nil, err
    }

    newState := oldState.termios
    newState.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON)
    newState.Oflag &^= unix.OPOST
    newState.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
    newState.Cflag &^= (unix.CSIZE | unix.PARENB)
    newState.Cflag |= unix.CS8
    newState.Cc[unix.VMIN] = 1
    newState.Cc[unix.VTIME] = 0

    if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 {
        return nil, err
    }
    return &oldState, nil
}
9,302
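A short sketch of how MakeRaw is typically paired with RestoreTerminal (defined in term.go further down in this dump); the echo and canonical-mode flags cleared above are what make single keypresses readable immediately:

```go
package main

import (
    "fmt"
    "os"

    "github.com/moby/term"
)

func main() {
    fd := os.Stdin.Fd()
    state, err := term.MakeRaw(fd) // this file on BSDs; termios_linux.go on Linux
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        return
    }
    defer term.RestoreTerminal(fd, state) // always undo raw mode

    // With VMIN=1 and VTIME=0, Read returns as soon as one byte arrives.
    buf := make([]byte, 1)
    if _, err := os.Stdin.Read(buf); err == nil {
        fmt.Printf("\r\nread byte %q\r\n", buf[0])
    }
}
```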
0
kubeflow_public_repos/fate-operator/vendor/github.com/moby
kubeflow_public_repos/fate-operator/vendor/github.com/moby/term/go.mod
module github.com/moby/term

go 1.13

require (
    github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78
    github.com/google/go-cmp v0.3.1
    github.com/pkg/errors v0.9.1 // indirect
    github.com/sirupsen/logrus v1.4.2
    golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527
    gotest.tools v2.2.0+incompatible
    gotest.tools/v3 v3.0.2 // indirect
)
9,303
0
kubeflow_public_repos/fate-operator/vendor/github.com/moby
kubeflow_public_repos/fate-operator/vendor/github.com/moby/term/LICENSE
Apache License Version 2.0, January 2004 https://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS Copyright 2013-2018 Docker, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
9,304
0
kubeflow_public_repos/fate-operator/vendor/github.com/moby
kubeflow_public_repos/fate-operator/vendor/github.com/moby/term/winsize.go
// +build !windows

package term // import "github.com/moby/term"

import (
    "golang.org/x/sys/unix"
)

// GetWinsize returns the window size based on the specified file descriptor.
func GetWinsize(fd uintptr) (*Winsize, error) {
    uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ)
    ws := &Winsize{Height: uws.Row, Width: uws.Col, x: uws.Xpixel, y: uws.Ypixel}
    return ws, err
}

// SetWinsize tries to set the specified window size for the specified file descriptor.
func SetWinsize(fd uintptr, ws *Winsize) error {
    uws := &unix.Winsize{Row: ws.Height, Col: ws.Width, Xpixel: ws.x, Ypixel: ws.y}
    return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, uws)
}
9,305
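Usage is straightforward; a sketch that prints the current terminal dimensions (GetWinsize returns an error when the descriptor is not a TTY, e.g. redirected output):

```go
package main

import (
    "fmt"
    "os"

    "github.com/moby/term"
)

func main() {
    ws, err := term.GetWinsize(os.Stdout.Fd())
    if err != nil {
        fmt.Fprintln(os.Stderr, "stdout is not a terminal:", err)
        return
    }
    fmt.Printf("%d columns x %d rows\n", ws.Width, ws.Height)
}
```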
0
kubeflow_public_repos/fate-operator/vendor/github.com/moby
kubeflow_public_repos/fate-operator/vendor/github.com/moby/term/term_windows.go
package term // import "github.com/moby/term"

import (
    "io"
    "os"
    "os/signal"
    "syscall" // used for STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and STD_ERROR_HANDLE

    "github.com/Azure/go-ansiterm/winterm"
    windowsconsole "github.com/moby/term/windows"
)

// State holds the console mode for the terminal.
type State struct {
    mode uint32
}

// Winsize is used for window size.
type Winsize struct {
    Height uint16
    Width  uint16
}

// vtInputSupported is true if winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported by the console
var vtInputSupported bool

// StdStreams returns the standard streams (stdin, stdout, stderr).
func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
    // Turn on VT handling on all std handles, if possible. This might
    // fail, in which case we will fall back to terminal emulation.
    var emulateStdin, emulateStdout, emulateStderr bool
    fd := os.Stdin.Fd()
    if mode, err := winterm.GetConsoleMode(fd); err == nil {
        // Validate that winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it.
        if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_INPUT); err != nil {
            emulateStdin = true
        } else {
            vtInputSupported = true
        }
        // Unconditionally set the console mode back even on failure because SetConsoleMode
        // remembers invalid bits on input handles.
        winterm.SetConsoleMode(fd, mode)
    }

    fd = os.Stdout.Fd()
    if mode, err := winterm.GetConsoleMode(fd); err == nil {
        // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it.
        if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil {
            emulateStdout = true
        } else {
            winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING)
        }
    }

    fd = os.Stderr.Fd()
    if mode, err := winterm.GetConsoleMode(fd); err == nil {
        // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it.
        if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil {
            emulateStderr = true
        } else {
            winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING)
        }
    }

    // Temporarily use STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and
    // STD_ERROR_HANDLE from syscall rather than x/sys/windows as long as
    // go-ansiterm hasn't switched to x/sys/windows.
    // TODO: switch back to x/sys/windows once go-ansiterm has switched
    if emulateStdin {
        stdIn = windowsconsole.NewAnsiReader(syscall.STD_INPUT_HANDLE)
    } else {
        stdIn = os.Stdin
    }

    if emulateStdout {
        stdOut = windowsconsole.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE)
    } else {
        stdOut = os.Stdout
    }

    if emulateStderr {
        stdErr = windowsconsole.NewAnsiWriter(syscall.STD_ERROR_HANDLE)
    } else {
        stdErr = os.Stderr
    }

    return
}

// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal.
func GetFdInfo(in interface{}) (uintptr, bool) {
    return windowsconsole.GetHandleInfo(in)
}

// GetWinsize returns the window size based on the specified file descriptor.
func GetWinsize(fd uintptr) (*Winsize, error) {
    info, err := winterm.GetConsoleScreenBufferInfo(fd)
    if err != nil {
        return nil, err
    }

    winsize := &Winsize{
        Width:  uint16(info.Window.Right - info.Window.Left + 1),
        Height: uint16(info.Window.Bottom - info.Window.Top + 1),
    }

    return winsize, nil
}

// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal(fd uintptr) bool {
    return windowsconsole.IsConsole(fd)
}

// RestoreTerminal restores the terminal connected to the given file descriptor
// to a previous state.
func RestoreTerminal(fd uintptr, state *State) error {
    return winterm.SetConsoleMode(fd, state.mode)
}

// SaveState saves the state of the terminal connected to the given file descriptor.
func SaveState(fd uintptr) (*State, error) {
    mode, e := winterm.GetConsoleMode(fd)
    if e != nil {
        return nil, e
    }

    return &State{mode: mode}, nil
}

// DisableEcho disables echo for the terminal connected to the given file descriptor.
// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
func DisableEcho(fd uintptr, state *State) error {
    mode := state.mode
    mode &^= winterm.ENABLE_ECHO_INPUT
    mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT
    err := winterm.SetConsoleMode(fd, mode)
    if err != nil {
        return err
    }

    // Register an interrupt handler to catch and restore prior state
    restoreAtInterrupt(fd, state)
    return nil
}

// SetRawTerminal puts the terminal connected to the given file descriptor into
// raw mode and returns the previous state. On UNIX, this puts both the input
// and output into raw mode. On Windows, it only puts the input into raw mode.
func SetRawTerminal(fd uintptr) (*State, error) {
    state, err := MakeRaw(fd)
    if err != nil {
        return nil, err
    }

    // Register an interrupt handler to catch and restore prior state
    restoreAtInterrupt(fd, state)
    return state, err
}

// SetRawTerminalOutput puts the output of the terminal connected to the given file
// descriptor into raw mode. On UNIX, this does nothing and returns nil for the
// state. On Windows, it disables LF -> CRLF translation.
func SetRawTerminalOutput(fd uintptr) (*State, error) {
    state, err := SaveState(fd)
    if err != nil {
        return nil, err
    }

    // Ignore failures, since winterm.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this
    // version of Windows.
    winterm.SetConsoleMode(fd, state.mode|winterm.DISABLE_NEWLINE_AUTO_RETURN)
    return state, err
}

// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be restored.
func MakeRaw(fd uintptr) (*State, error) {
    state, err := SaveState(fd)
    if err != nil {
        return nil, err
    }

    mode := state.mode

    // See
    // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
    // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx

    // Disable these modes
    mode &^= winterm.ENABLE_ECHO_INPUT
    mode &^= winterm.ENABLE_LINE_INPUT
    mode &^= winterm.ENABLE_MOUSE_INPUT
    mode &^= winterm.ENABLE_WINDOW_INPUT
    mode &^= winterm.ENABLE_PROCESSED_INPUT

    // Enable these modes
    mode |= winterm.ENABLE_EXTENDED_FLAGS
    mode |= winterm.ENABLE_INSERT_MODE
    mode |= winterm.ENABLE_QUICK_EDIT_MODE
    if vtInputSupported {
        mode |= winterm.ENABLE_VIRTUAL_TERMINAL_INPUT
    }

    err = winterm.SetConsoleMode(fd, mode)
    if err != nil {
        return nil, err
    }
    return state, nil
}

func restoreAtInterrupt(fd uintptr, state *State) {
    sigchan := make(chan os.Signal, 1)
    signal.Notify(sigchan, os.Interrupt)

    go func() {
        _ = <-sigchan
        RestoreTerminal(fd, state)
        os.Exit(0)
    }()
}
9,306
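Callers normally do not branch on the platform themselves; they take whatever StdStreams hands back. A cross-platform sketch (on Windows the writers may be the ANSI emulators constructed above; on Unix they are simply os.Stdout and os.Stderr):

```go
package main

import (
    "fmt"

    "github.com/moby/term"
)

func main() {
    _, stdout, _ := term.StdStreams()
    // Escape sequences are translated to Console API calls when the
    // emulated writer is in play, and passed through unchanged otherwise.
    fmt.Fprint(stdout, "\x1b[1;32mhello\x1b[0m\n")
}
```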
0
kubeflow_public_repos/fate-operator/vendor/github.com/moby
kubeflow_public_repos/fate-operator/vendor/github.com/moby/term/proxy.go
package term // import "github.com/moby/term"

import (
    "io"
)

// EscapeError is a special error which is returned by a TTY proxy reader's
// Read() method when its detach escape sequence is read.
type EscapeError struct{}

func (EscapeError) Error() string {
    return "read escape sequence"
}

// escapeProxy is used only for attaches with a TTY. It is used to proxy
// stdin keypresses from the underlying reader and look for the passed in
// escape key sequence to signal a detach.
type escapeProxy struct {
    escapeKeys   []byte
    escapeKeyPos int
    r            io.Reader
}

// NewEscapeProxy returns a new TTY proxy reader which wraps the given reader
// and detects when the specified escape keys are read, in which case the Read
// method will return an error of type EscapeError.
func NewEscapeProxy(r io.Reader, escapeKeys []byte) io.Reader {
    return &escapeProxy{
        escapeKeys: escapeKeys,
        r:          r,
    }
}

func (r *escapeProxy) Read(buf []byte) (int, error) {
    nr, err := r.r.Read(buf)

    if len(r.escapeKeys) == 0 {
        return nr, err
    }

    preserve := func() {
        // this preserves the original key presses in the passed in buffer
        nr += r.escapeKeyPos
        preserve := make([]byte, 0, r.escapeKeyPos+len(buf))
        preserve = append(preserve, r.escapeKeys[:r.escapeKeyPos]...)
        preserve = append(preserve, buf...)
        r.escapeKeyPos = 0
        copy(buf[0:nr], preserve)
    }

    if nr != 1 || err != nil {
        if r.escapeKeyPos > 0 {
            preserve()
        }
        return nr, err
    }

    if buf[0] != r.escapeKeys[r.escapeKeyPos] {
        if r.escapeKeyPos > 0 {
            preserve()
        }
        return nr, nil
    }

    if r.escapeKeyPos == len(r.escapeKeys)-1 {
        return 0, EscapeError{}
    }

    // Looks like we've got an escape key, but we need to match again on the next
    // read.
    // Store the current escape key we found so we can look for the next one on
    // the next read.
    // Since this is an escape key, make sure we don't let the caller read it.
    // If later on we find that this is not the escape sequence, we'll add the
    // keys back.
    r.escapeKeyPos++
    return nr - r.escapeKeyPos, nil
}
9,307
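A sketch of the intended read loop, using ToBytes from ascii.go below to build the detach sequence (Ctrl-P,Ctrl-Q is the sequence Docker uses by default):

```go
package main

import (
    "fmt"
    "os"

    "github.com/moby/term"
)

func main() {
    keys, err := term.ToBytes("ctrl-p,ctrl-q")
    if err != nil {
        panic(err)
    }
    r := term.NewEscapeProxy(os.Stdin, keys)

    buf := make([]byte, 1024)
    for {
        _, err := r.Read(buf)
        if _, ok := err.(term.EscapeError); ok {
            fmt.Println("detach requested")
            return // stop proxying; the escape bytes were never surfaced
        }
        if err != nil {
            return
        }
    }
}
```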
0
kubeflow_public_repos/fate-operator/vendor/github.com/moby
kubeflow_public_repos/fate-operator/vendor/github.com/moby/term/tc.go
// +build !windows

package term // import "github.com/moby/term"

import (
    "syscall"
    "unsafe"

    "golang.org/x/sys/unix"
)

func tcget(fd uintptr, p *Termios) syscall.Errno {
    _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p)))
    return err
}

func tcset(fd uintptr, p *Termios) syscall.Errno {
    _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p)))
    return err
}
9,308
0
kubeflow_public_repos/fate-operator/vendor/github.com/moby
kubeflow_public_repos/fate-operator/vendor/github.com/moby/term/ascii.go
package term // import "github.com/moby/term"

import (
    "fmt"
    "strings"
)

// ASCII lists the supported ASCII key sequences.
var ASCII = []string{
    "ctrl-@",
    "ctrl-a",
    "ctrl-b",
    "ctrl-c",
    "ctrl-d",
    "ctrl-e",
    "ctrl-f",
    "ctrl-g",
    "ctrl-h",
    "ctrl-i",
    "ctrl-j",
    "ctrl-k",
    "ctrl-l",
    "ctrl-m",
    "ctrl-n",
    "ctrl-o",
    "ctrl-p",
    "ctrl-q",
    "ctrl-r",
    "ctrl-s",
    "ctrl-t",
    "ctrl-u",
    "ctrl-v",
    "ctrl-w",
    "ctrl-x",
    "ctrl-y",
    "ctrl-z",
    "ctrl-[",
    "ctrl-\\",
    "ctrl-]",
    "ctrl-^",
    "ctrl-_",
}

// ToBytes converts a string representing a suite of key sequences to the
// corresponding ASCII codes.
func ToBytes(keys string) ([]byte, error) {
    codes := []byte{}
next:
    for _, key := range strings.Split(keys, ",") {
        if len(key) != 1 {
            for code, ctrl := range ASCII {
                if ctrl == key {
                    codes = append(codes, byte(code))
                    continue next
                }
            }
            if key == "DEL" {
                codes = append(codes, 127)
            } else {
                return nil, fmt.Errorf("Unknown character: '%s'", key)
            }
        } else {
            codes = append(codes, key[0])
        }
    }
    return codes, nil
}
9,309
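The mapping is positional: each "ctrl-" name resolves to its index in the ASCII slice (so "ctrl-@" is 0 and "ctrl-a" is 1), a single character maps to its own byte, and "DEL" is special-cased to 127. A small check:

```go
package main

import (
    "fmt"

    "github.com/moby/term"
)

func main() {
    codes, err := term.ToBytes("ctrl-a,x,DEL")
    if err != nil {
        panic(err)
    }
    fmt.Println(codes) // [1 120 127]
}
```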
0
kubeflow_public_repos/fate-operator/vendor/github.com/moby
kubeflow_public_repos/fate-operator/vendor/github.com/moby/term/term.go
// +build !windows

// Package term provides structures and helper functions to work with
// terminal (state, sizes).
package term // import "github.com/moby/term"

import (
    "errors"
    "fmt"
    "io"
    "os"
    "os/signal"

    "golang.org/x/sys/unix"
)

var (
    // ErrInvalidState is returned if the state of the terminal is invalid.
    ErrInvalidState = errors.New("Invalid terminal state")
)

// State represents the state of the terminal.
type State struct {
    termios Termios
}

// Winsize represents the size of the terminal window.
type Winsize struct {
    Height uint16
    Width  uint16
    x      uint16
    y      uint16
}

// StdStreams returns the standard streams (stdin, stdout, stderr).
func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
    return os.Stdin, os.Stdout, os.Stderr
}

// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal.
func GetFdInfo(in interface{}) (uintptr, bool) {
    var inFd uintptr
    var isTerminalIn bool
    if file, ok := in.(*os.File); ok {
        inFd = file.Fd()
        isTerminalIn = IsTerminal(inFd)
    }
    return inFd, isTerminalIn
}

// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal(fd uintptr) bool {
    var termios Termios
    return tcget(fd, &termios) == 0
}

// RestoreTerminal restores the terminal connected to the given file descriptor
// to a previous state.
func RestoreTerminal(fd uintptr, state *State) error {
    if state == nil {
        return ErrInvalidState
    }
    if err := tcset(fd, &state.termios); err != 0 {
        return err
    }
    return nil
}

// SaveState saves the state of the terminal connected to the given file descriptor.
func SaveState(fd uintptr) (*State, error) {
    var oldState State
    if err := tcget(fd, &oldState.termios); err != 0 {
        return nil, err
    }
    return &oldState, nil
}

// DisableEcho applies the specified state to the terminal connected to the file
// descriptor, with echo disabled.
func DisableEcho(fd uintptr, state *State) error {
    newState := state.termios
    newState.Lflag &^= unix.ECHO

    if err := tcset(fd, &newState); err != 0 {
        return err
    }
    handleInterrupt(fd, state)
    return nil
}

// SetRawTerminal puts the terminal connected to the given file descriptor into
// raw mode and returns the previous state. On UNIX, this puts both the input
// and output into raw mode. On Windows, it only puts the input into raw mode.
func SetRawTerminal(fd uintptr) (*State, error) {
    oldState, err := MakeRaw(fd)
    if err != nil {
        return nil, err
    }
    handleInterrupt(fd, oldState)
    return oldState, err
}

// SetRawTerminalOutput puts the output of the terminal connected to the given file
// descriptor into raw mode. On UNIX, this does nothing and returns nil for the
// state. On Windows, it disables LF -> CRLF translation.
func SetRawTerminalOutput(fd uintptr) (*State, error) {
    return nil, nil
}

func handleInterrupt(fd uintptr, state *State) {
    sigchan := make(chan os.Signal, 1)
    signal.Notify(sigchan, os.Interrupt)
    go func() {
        for range sigchan {
            // quit cleanly so the next prompt starts on a new line
            fmt.Println()
            signal.Stop(sigchan)
            close(sigchan)
            RestoreTerminal(fd, state)
            os.Exit(1)
        }
    }()
}
9,310
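GetFdInfo is the usual entry point when a program holds an io.Reader or io.Writer that may or may not be an *os.File; a sketch combining it with the raw-mode helpers above (SetRawTerminal also installs the SIGINT restore handler via handleInterrupt):

```go
package main

import (
    "fmt"
    "os"

    "github.com/moby/term"
)

func main() {
    fd, isTerm := term.GetFdInfo(os.Stdin)
    fmt.Printf("stdin fd=%d terminal=%v\n", fd, isTerm)
    if !isTerm {
        return
    }

    state, err := term.SetRawTerminal(fd)
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        return
    }
    defer term.RestoreTerminal(fd, state)
    // ... interact with the raw terminal here ...
}
```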
0
kubeflow_public_repos/fate-operator/vendor/github.com/moby
kubeflow_public_repos/fate-operator/vendor/github.com/moby/term/go.sum
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
9,311
0
kubeflow_public_repos/fate-operator/vendor/github.com/moby
kubeflow_public_repos/fate-operator/vendor/github.com/moby/term/termios_linux.go
package term // import "github.com/moby/term"

import (
    "golang.org/x/sys/unix"
)

const (
    getTermios = unix.TCGETS
    setTermios = unix.TCSETS
)

// Termios is the Unix API for terminal I/O.
type Termios unix.Termios

// MakeRaw puts the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd uintptr) (*State, error) {
    termios, err := unix.IoctlGetTermios(int(fd), getTermios)
    if err != nil {
        return nil, err
    }

    var oldState State
    oldState.termios = Termios(*termios)

    termios.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON)
    termios.Oflag &^= unix.OPOST
    termios.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
    termios.Cflag &^= (unix.CSIZE | unix.PARENB)
    termios.Cflag |= unix.CS8
    termios.Cc[unix.VMIN] = 1
    termios.Cc[unix.VTIME] = 0

    if err := unix.IoctlSetTermios(int(fd), setTermios, termios); err != nil {
        return nil, err
    }
    return &oldState, nil
}
9,312
0
kubeflow_public_repos/fate-operator/vendor/github.com/moby/term
kubeflow_public_repos/fate-operator/vendor/github.com/moby/term/windows/ansi_reader.go
// +build windows

package windowsconsole // import "github.com/moby/term/windows"

import (
    "bytes"
    "errors"
    "fmt"
    "io"
    "os"
    "strings"
    "unsafe"

    ansiterm "github.com/Azure/go-ansiterm"
    "github.com/Azure/go-ansiterm/winterm"
)

const (
    escapeSequence = ansiterm.KEY_ESC_CSI
)

// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation.
type ansiReader struct {
    file     *os.File
    fd       uintptr
    buffer   []byte
    cbBuffer int
    command  []byte
}

// NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a
// Windows console input handle.
func NewAnsiReader(nFile int) io.ReadCloser {
    initLogger()
    file, fd := winterm.GetStdFile(nFile)
    return &ansiReader{
        file:    file,
        fd:      fd,
        command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH),
        buffer:  make([]byte, 0),
    }
}

// Close closes the wrapped file.
func (ar *ansiReader) Close() (err error) {
    return ar.file.Close()
}

// Fd returns the file descriptor of the wrapped file.
func (ar *ansiReader) Fd() uintptr {
    return ar.fd
}

// Read reads up to len(p) bytes of translated input events into p.
func (ar *ansiReader) Read(p []byte) (int, error) {
    if len(p) == 0 {
        return 0, nil
    }

    // Previously read bytes exist, read as much as we can and return
    if len(ar.buffer) > 0 {
        logger.Debugf("Reading previously cached bytes")

        originalLength := len(ar.buffer)
        copiedLength := copy(p, ar.buffer)

        if copiedLength == originalLength {
            ar.buffer = make([]byte, 0, len(p))
        } else {
            ar.buffer = ar.buffer[copiedLength:]
        }

        logger.Debugf("Read from cache p[%d]: % x", copiedLength, p)
        return copiedLength, nil
    }

    // Read and translate key events
    events, err := readInputEvents(ar.fd, len(p))
    if err != nil {
        return 0, err
    } else if len(events) == 0 {
        logger.Debug("No input events detected")
        return 0, nil
    }

    keyBytes := translateKeyEvents(events, []byte(escapeSequence))

    // Save excess bytes and right-size keyBytes
    if len(keyBytes) > len(p) {
        logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p))
        ar.buffer = keyBytes[len(p):]
        keyBytes = keyBytes[:len(p)]
    } else if len(keyBytes) == 0 {
        logger.Debug("No key bytes returned from the translator")
        return 0, nil
    }

    copiedLength := copy(p, keyBytes)
    if copiedLength != len(keyBytes) {
        return 0, errors.New("unexpected copy length encountered")
    }

    logger.Debugf("Read p[%d]: % x", copiedLength, p)
    logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes)
    return copiedLength, nil
}

// readInputEvents polls until at least one event is available.
func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) {
    // Determine the maximum number of records to retrieve
    // -- Cast around the type system to obtain the size of a single INPUT_RECORD.
    //    unsafe.Sizeof requires an expression vs. a type-reference; the casting
    //    tricks the type system into believing it has such an expression.
    recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes)))))
    countRecords := maxBytes / recordSize
    if countRecords > ansiterm.MAX_INPUT_EVENTS {
        countRecords = ansiterm.MAX_INPUT_EVENTS
    } else if countRecords == 0 {
        countRecords = 1
    }
    logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize)

    // Wait for and read input events
    events := make([]winterm.INPUT_RECORD, countRecords)
    nEvents := uint32(0)
    eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE)
    if err != nil {
        return nil, err
    }

    if eventsExist {
        err = winterm.ReadConsoleInput(fd, events, &nEvents)
        if err != nil {
            return nil, err
        }
    }

    // Return a slice restricted to the number of returned records
    logger.Debugf("[windows] readInputEvents: Read %v events", nEvents)
    return events[:nEvents], nil
}

// KeyEvent Translation Helpers

var arrowKeyMapPrefix = map[uint16]string{
    winterm.VK_UP:    "%s%sA",
    winterm.VK_DOWN:  "%s%sB",
    winterm.VK_RIGHT: "%s%sC",
    winterm.VK_LEFT:  "%s%sD",
}

var keyMapPrefix = map[uint16]string{
    winterm.VK_UP:     "\x1B[%sA",
    winterm.VK_DOWN:   "\x1B[%sB",
    winterm.VK_RIGHT:  "\x1B[%sC",
    winterm.VK_LEFT:   "\x1B[%sD",
    winterm.VK_HOME:   "\x1B[1%s~", // showkey shows ^[[1
    winterm.VK_END:    "\x1B[4%s~", // showkey shows ^[[4
    winterm.VK_INSERT: "\x1B[2%s~",
    winterm.VK_DELETE: "\x1B[3%s~",
    winterm.VK_PRIOR:  "\x1B[5%s~",
    winterm.VK_NEXT:   "\x1B[6%s~",
    winterm.VK_F1:     "",
    winterm.VK_F2:     "",
    winterm.VK_F3:     "\x1B[13%s~",
    winterm.VK_F4:     "\x1B[14%s~",
    winterm.VK_F5:     "\x1B[15%s~",
    winterm.VK_F6:     "\x1B[17%s~",
    winterm.VK_F7:     "\x1B[18%s~",
    winterm.VK_F8:     "\x1B[19%s~",
    winterm.VK_F9:     "\x1B[20%s~",
    winterm.VK_F10:    "\x1B[21%s~",
    winterm.VK_F11:    "\x1B[23%s~",
    winterm.VK_F12:    "\x1B[24%s~",
}

// translateKeyEvents converts the input events into the appropriate ANSI string.
func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte {
    var buffer bytes.Buffer
    for _, event := range events {
        if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 {
            buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence))
        }
    }
    return buffer.Bytes()
}

// keyToString maps the given input event record to the corresponding string.
func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string {
    if keyEvent.UnicodeChar == 0 {
        return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence)
    }

    _, alt, control := getControlKeys(keyEvent.ControlKeyState)
    if control {
        // TODO(azlinux): Implement following control sequences
        // <Ctrl>-D  Signals the end of input from the keyboard; also exits current shell.
        // <Ctrl>-H  Deletes the first character to the left of the cursor. Also called the ERASE key.
        // <Ctrl>-Q  Restarts printing after it has been stopped with <Ctrl>-s.
        // <Ctrl>-S  Suspends printing on the screen (does not stop the program).
        // <Ctrl>-U  Deletes all characters on the current line. Also called the KILL key.
        // <Ctrl>-E  Quits current command and creates a core
    }

    // <Alt>+Key generates ESC N Key
    if !control && alt {
        return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar))
    }

    return string(keyEvent.UnicodeChar)
}

// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string.
func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string {
    shift, alt, control := getControlKeys(controlState)
    modifier := getControlKeysModifier(shift, alt, control)

    if format, ok := arrowKeyMapPrefix[key]; ok {
        return fmt.Sprintf(format, escapeSequence, modifier)
    }

    if format, ok := keyMapPrefix[key]; ok {
        return fmt.Sprintf(format, modifier)
    }

    return ""
}

// getControlKeys extracts the shift, alt, and ctrl key states.
func getControlKeys(controlState uint32) (shift, alt, control bool) {
    shift = 0 != (controlState & winterm.SHIFT_PRESSED)
    alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED))
    control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED))
    return shift, alt, control
}

// getControlKeysModifier returns the ANSI modifier for the given combination of control keys.
func getControlKeysModifier(shift, alt, control bool) string {
    if shift && alt && control {
        return ansiterm.KEY_CONTROL_PARAM_8
    }
    if alt && control {
        return ansiterm.KEY_CONTROL_PARAM_7
    }
    if shift && control {
        return ansiterm.KEY_CONTROL_PARAM_6
    }
    if control {
        return ansiterm.KEY_CONTROL_PARAM_5
    }
    if shift && alt {
        return ansiterm.KEY_CONTROL_PARAM_4
    }
    if alt {
        return ansiterm.KEY_CONTROL_PARAM_3
    }
    if shift {
        return ansiterm.KEY_CONTROL_PARAM_2
    }
    return ""
}
9,313
0
kubeflow_public_repos/fate-operator/vendor/github.com/moby/term
kubeflow_public_repos/fate-operator/vendor/github.com/moby/term/windows/console.go
// +build windows

package windowsconsole // import "github.com/moby/term/windows"

import (
    "os"

    "github.com/Azure/go-ansiterm/winterm"
)

// GetHandleInfo returns file descriptor and bool indicating whether the file is a console.
func GetHandleInfo(in interface{}) (uintptr, bool) {
    switch t := in.(type) {
    case *ansiReader:
        return t.Fd(), true
    case *ansiWriter:
        return t.Fd(), true
    }

    var inFd uintptr
    var isTerminal bool

    if file, ok := in.(*os.File); ok {
        inFd = file.Fd()
        isTerminal = IsConsole(inFd)
    }
    return inFd, isTerminal
}

// IsConsole returns true if the given file descriptor is a Windows Console.
// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console.
func IsConsole(fd uintptr) bool {
    _, e := winterm.GetConsoleMode(fd)
    return e == nil
}
9,314
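A Windows-only sketch of the two helpers (the package is usually consumed indirectly through term.GetFdInfo, which delegates here on Windows):

```go
// +build windows

package main

import (
    "fmt"
    "os"

    windowsconsole "github.com/moby/term/windows"
)

func main() {
    fd, isConsole := windowsconsole.GetHandleInfo(os.Stdout)
    // IsConsole reports false for redirected handles (pipes, files),
    // because GetConsoleMode fails on anything that is not a console.
    fmt.Printf("fd=%d console=%v\n", fd, isConsole)
}
```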
0
kubeflow_public_repos/fate-operator/vendor/github.com/moby/term
kubeflow_public_repos/fate-operator/vendor/github.com/moby/term/windows/windows.go
// +build windows

// These files implement ANSI-aware input and output streams for use by the Docker Windows client.
// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create
// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls.

package windowsconsole // import "github.com/moby/term/windows"

import (
    "io/ioutil"
    "os"
    "sync"

    ansiterm "github.com/Azure/go-ansiterm"
    "github.com/sirupsen/logrus"
)

var logger *logrus.Logger
var initOnce sync.Once

func initLogger() {
    initOnce.Do(func() {
        logFile := ioutil.Discard

        if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" {
            logFile, _ = os.Create("ansiReaderWriter.log")
        }

        logger = &logrus.Logger{
            Out:       logFile,
            Formatter: new(logrus.TextFormatter),
            Level:     logrus.DebugLevel,
        }
    })
}
9,315
0
kubeflow_public_repos/fate-operator/vendor/github.com/moby/term
kubeflow_public_repos/fate-operator/vendor/github.com/moby/term/windows/ansi_writer.go
// +build windows

package windowsconsole // import "github.com/moby/term/windows"

import (
    "io"
    "os"

    ansiterm "github.com/Azure/go-ansiterm"
    "github.com/Azure/go-ansiterm/winterm"
)

// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation.
type ansiWriter struct {
    file           *os.File
    fd             uintptr
    infoReset      *winterm.CONSOLE_SCREEN_BUFFER_INFO
    command        []byte
    escapeSequence []byte
    inAnsiSequence bool
    parser         *ansiterm.AnsiParser
}

// NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a
// Windows console output handle.
func NewAnsiWriter(nFile int) io.Writer {
    initLogger()
    file, fd := winterm.GetStdFile(nFile)
    info, err := winterm.GetConsoleScreenBufferInfo(fd)
    if err != nil {
        return nil
    }

    parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file))
    logger.Infof("newAnsiWriter: parser %p", parser)

    aw := &ansiWriter{
        file:           file,
        fd:             fd,
        infoReset:      info,
        command:        make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH),
        escapeSequence: []byte(ansiterm.KEY_ESC_CSI),
        parser:         parser,
    }

    logger.Infof("newAnsiWriter: aw.parser %p", aw.parser)
    logger.Infof("newAnsiWriter: %v", aw)
    return aw
}

func (aw *ansiWriter) Fd() uintptr {
    return aw.fd
}

// Write writes len(p) bytes from p to the underlying data stream.
func (aw *ansiWriter) Write(p []byte) (total int, err error) {
    if len(p) == 0 {
        return 0, nil
    }

    logger.Infof("Write: % x", p)
    logger.Infof("Write: %s", string(p))
    return aw.parser.Parse(p)
}
9,316
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-metrics/NOTICE
Docker
Copyright 2012-2015 Docker, Inc.

This product includes software developed at Docker, Inc. (https://www.docker.com).

The following is courtesy of our legal counsel:

Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.

For more information, please see https://www.bis.doc.gov

See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
9,317
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-metrics/go.mod
module github.com/docker/go-metrics

go 1.11

require github.com/prometheus/client_golang v1.1.0
9,318
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-metrics/README.md
# go-metrics

[![GoDoc](https://godoc.org/github.com/docker/go-metrics?status.svg)](https://godoc.org/github.com/docker/go-metrics)
![Badge Badge](http://doyouevenbadge.com/github.com/docker/go-metrics)

This package is a small wrapper around the prometheus go client to help enforce convention and best practices for metrics collection in Docker projects.

## Best Practices

This package is meant to be used for collecting metrics in Docker projects. It is not meant to be used as a replacement for the prometheus client but to help enforce consistent naming across metrics collected. If you have not already read the prometheus best practices around naming and labels you can read the page [here](https://prometheus.io/docs/practices/naming/).

The following are a few Docker specific rules that will help you name and work with metrics in your project.

1. Namespace and Subsystem

   This package provides you with a namespace type that allows you to specify the same namespace and subsystem for your metrics.

   ```go
   ns := metrics.NewNamespace("engine", "daemon", metrics.Labels{
           "version": dockerversion.Version,
           "commit":  dockerversion.GitCommit,
   })
   ```

   In the example above we are creating metrics for the Docker engine's daemon package. `engine` would be the namespace in this example where `daemon` is the subsystem or package where we are collecting the metrics. A namespace also allows you to attach constant labels to the metrics such as the git commit and version that it is collecting.

2. Declaring your Metrics

   Try to keep all your metric declarations in one file. This makes it easy for others to see what constant labels are defined on the namespace and what labels are defined on the metrics when they are created.

3. Use labels instead of multiple metrics

   Labels allow you to define one metric such as the time it takes to perform a certain action on an object. If we wanted to collect timings on various container actions such as create, start, and delete then we can define one metric called `container_actions` and use labels to specify the type of action.

   ```go
   containerActions = ns.NewLabeledTimer("container_actions", "The number of milliseconds it takes to process each container action", "action")
   ```

   The last parameter is the label name or key. When adding a data point to the metric you will use the `WithValues` function to specify the `action` that you are collecting for.

   ```go
   containerActions.WithValues("create").UpdateSince(start)
   ```

4. Always use a unit

   The metric name should describe what you are measuring but you also need to provide the unit that it is being measured with. For a timer, the standard unit is seconds and a counter's standard unit is a total. For gauges you must provide the unit. This package provides a standard set of units for use within the Docker projects.

   ```go
   Nanoseconds Unit = "nanoseconds"
   Seconds     Unit = "seconds"
   Bytes       Unit = "bytes"
   Total       Unit = "total"
   ```

   If you need to use a unit but it is not defined in the package please open a PR to add it but first try to see if one of the already created units will work for your metric, i.e. seconds or nanoseconds vs adding milliseconds.

## Docs

Package documentation can be found [here](https://godoc.org/github.com/docker/go-metrics).

## HTTP Metrics

To instrument an http handler, you can wrap the code like this:

```go
namespace := metrics.NewNamespace("docker_distribution", "http", metrics.Labels{"handler": "your_http_handler_name"})
httpMetrics := namespace.NewDefaultHttpMetrics()
metrics.Register(namespace)
instrumentedHandler = metrics.InstrumentHandler(httpMetrics, unInstrumentedHandler)
```

Note: The `handler` label must be provided when a new namespace is created.

## Additional Metrics

Additional metrics are also defined here that are not available in the prometheus client. If you need a custom metric and it is generic enough to be used by multiple projects, define it here.

## Copyright and license

Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/.
9,319
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-metrics/timer.go
package metrics

import (
    "time"

    "github.com/prometheus/client_golang/prometheus"
)

// StartTimer begins a timer observation at the callsite. When the target
// operation is completed, the caller should call the returned done func().
func StartTimer(timer Timer) (done func()) {
    start := time.Now()
    return func() {
        timer.Update(time.Since(start))
    }
}

// Timer is a metric that allows collecting the duration of an action in seconds
type Timer interface {
    // Update records an observation, duration, and converts to the target
    // units.
    Update(duration time.Duration)

    // UpdateSince will add the duration from the provided starting time to the
    // timer's summary with the precision that was used in creation of the timer
    UpdateSince(time.Time)
}

// LabeledTimer is a timer that must have label values populated before use.
type LabeledTimer interface {
    WithValues(labels ...string) *labeledTimerObserver
}

type labeledTimer struct {
    m *prometheus.HistogramVec
}

type labeledTimerObserver struct {
    m prometheus.Observer
}

func (lbo *labeledTimerObserver) Update(duration time.Duration) {
    lbo.m.Observe(duration.Seconds())
}

func (lbo *labeledTimerObserver) UpdateSince(since time.Time) {
    lbo.m.Observe(time.Since(since).Seconds())
}

func (lt *labeledTimer) WithValues(labels ...string) *labeledTimerObserver {
    return &labeledTimerObserver{m: lt.m.WithLabelValues(labels...)}
}

func (lt *labeledTimer) Describe(c chan<- *prometheus.Desc) {
    lt.m.Describe(c)
}

func (lt *labeledTimer) Collect(c chan<- prometheus.Metric) {
    lt.m.Collect(c)
}

type timer struct {
    m prometheus.Observer
}

func (t *timer) Update(duration time.Duration) {
    t.m.Observe(duration.Seconds())
}

func (t *timer) UpdateSince(since time.Time) {
    t.m.Observe(time.Since(since).Seconds())
}

func (t *timer) Describe(c chan<- *prometheus.Desc) {
    c <- t.m.(prometheus.Metric).Desc()
}

func (t *timer) Collect(c chan<- prometheus.Metric) {
    // Are there any observers that don't implement Collector? It is really
    // unclear what the point of the upstream change was, but we'll let this
    // panic if we get an observer that doesn't implement collector. In this
    // case, we should almost always see metricVec objects, so this should
    // never panic.
    t.m.(prometheus.Collector).Collect(c)
}
9,320
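A sketch of the intended call pattern, borrowing NewNamespace and NewLabeledTimer from the README earlier in this dump (the namespace, label names, and version value are illustrative):

```go
package example

import (
    "time"

    metrics "github.com/docker/go-metrics"
)

var (
    ns      = metrics.NewNamespace("engine", "daemon", metrics.Labels{"version": "example"})
    actions = ns.NewLabeledTimer("container_actions",
        "The number of seconds it takes to process each container action", "action")
)

func createContainer() {
    // StartTimer captures time.Now() immediately; done() observes the
    // elapsed duration in seconds on the underlying histogram.
    done := metrics.StartTimer(actions.WithValues("create"))
    defer done()

    time.Sleep(10 * time.Millisecond) // stand-in for the real work
}
```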
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-metrics/register.go
package metrics

import "github.com/prometheus/client_golang/prometheus"

// Register adds all the metrics in the provided namespace to the global
// metrics registry
func Register(n *Namespace) {
    prometheus.MustRegister(n)
}

// Deregister removes all the metrics in the provided namespace from the
// global metrics registry
func Deregister(n *Namespace) {
    prometheus.Unregister(n)
}
9,321
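Registration is global, matching the README's HTTP example; a sketch that registers a namespace and exposes it via the standard prometheus handler (promhttp ships with the prometheus client this module already depends on; the namespace and label values are illustrative):

```go
package main

import (
    "net/http"

    metrics "github.com/docker/go-metrics"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    ns := metrics.NewNamespace("docker_distribution", "http",
        metrics.Labels{"handler": "example"})
    metrics.Register(ns) // panics (MustRegister) if the collector collides

    http.Handle("/metrics", promhttp.Handler())
    http.ListenAndServe(":9090", nil)
}
```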
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-metrics/LICENSE.docs
Attribution-ShareAlike 4.0 International

=======================================================================

Creative Commons Corporation ("Creative Commons") is not a law firm and does not provide legal services or legal advice. Distribution of Creative Commons public licenses does not create a lawyer-client or other relationship. Creative Commons makes its licenses and related information available on an "as-is" basis. Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible.

Using Creative Commons Public Licenses

Creative Commons public licenses provide a standard set of terms and conditions that creators and other rights holders may use to share original works of authorship and other material subject to copyright and certain other rights specified in the public license below. The following considerations are for informational purposes only, are not exhaustive, and do not form part of our licenses.

     Considerations for licensors: Our public licenses are intended for use by those authorized to give the public permission to use material in ways otherwise restricted by copyright and certain other rights. Our licenses are irrevocable. Licensors should read and understand the terms and conditions of the license they choose before applying it. Licensors should also secure all rights necessary before applying our licenses so that the public can reuse the material as expected. Licensors should clearly mark any material not subject to the license. This includes other CC-licensed material, or material used under an exception or limitation to copyright. More considerations for licensors: wiki.creativecommons.org/Considerations_for_licensors

     Considerations for the public: By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. If the licensor's permission is not necessary for any reason--for example, because of any applicable exception or limitation to copyright--then that use is not regulated by the license. Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. A licensor may make special requests, such as asking that all changes be marked or described. Although not required by our licenses, you are encouraged to respect those requests where reasonable. More considerations for the public: wiki.creativecommons.org/Considerations_for_licensees

=======================================================================

Creative Commons Attribution-ShareAlike 4.0 International Public License

By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution-ShareAlike 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions.

Section 1 -- Definitions.

  a. Adapted Material means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image.

  b. Adapter's License means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License.

  c. BY-SA Compatible License means a license listed at creativecommons.org/compatiblelicenses, approved by Creative Commons as essentially the equivalent of this Public License.

  d. Copyright and Similar Rights means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights.

  e. Effective Technological Measures means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements.

  f. Exceptions and Limitations means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material.

  g. License Elements means the license attributes listed in the name of a Creative Commons Public License. The License Elements of this Public License are Attribution and ShareAlike.

  h. Licensed Material means the artistic or literary work, database, or other material to which the Licensor applied this Public License.

  i. Licensed Rights means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license.

  j. Licensor means the individual(s) or entity(ies) granting rights under this Public License.

  k. Share means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them.

  l. Sui Generis Database Rights means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world.

  m. You means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning.

Section 2 -- Scope.

  a. License grant.

       1. Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to:

            a. reproduce and Share the Licensed Material, in whole or in part; and

            b. produce, reproduce, and Share Adapted Material.

       2. Exceptions and Limitations. For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions.

       3. Term. The term of this Public License is specified in Section 6(a).

       4. Media and formats; technical modifications allowed. The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a)(4) never produces Adapted Material.

       5. Downstream recipients.

            a. Offer from the Licensor -- Licensed Material. Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License.

            b. Additional offer from the Licensor -- Adapted Material. Every recipient of Adapted Material from You automatically receives an offer from the Licensor to exercise the Licensed Rights in the Adapted Material under the conditions of the Adapter's License You apply.

            c. No downstream restrictions. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material.

       6. No endorsement. Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i).

  b. Other rights.

       1. Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise.

       2. Patent and trademark rights are not licensed under this Public License.

       3. To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties.

Section 3 -- License Conditions.

Your exercise of the Licensed Rights is expressly made subject to the following conditions.

  a. Attribution.

       1. If You Share the Licensed Material (including in modified form), You must:

            a. retain the following if it is supplied by the Licensor with the Licensed Material:

                 i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated);

                 ii. a copyright notice;

                 iii. a notice that refers to this Public License;

                 iv. a notice that refers to the disclaimer of warranties;

                 v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable;

            b. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and

            c. indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License.

       2. You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information.

       3. If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable.

  b. ShareAlike.

     In addition to the conditions in Section 3(a), if You Share Adapted Material You produce, the following conditions also apply.

       1. The Adapter's License You apply must be a Creative Commons license with the same License Elements, this version or later, or a BY-SA Compatible License.

       2. You must include the text of, or the URI or hyperlink to, the Adapter's License You apply. You may satisfy this condition in any reasonable manner based on the medium, means, and context in which You Share Adapted Material.

       3. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, Adapted Material that restrict exercise of the rights granted under the Adapter's License You apply.

Section 4 -- Sui Generis Database Rights.

Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material:

  a. for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database;

  b. if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material, including for purposes of Section 3(b); and

  c. You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database.

For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights.

Section 5 -- Disclaimer of Warranties and Limitation of Liability.

  a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.

  b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.

  c. The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability.

Section 6 -- Term and Termination.

  a. This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically.

  b. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates:

       1. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or

       2. upon express reinstatement by the Licensor.

     For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License.

  c. For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License.

  d. Sections 1, 5, 6, 7, and 8 survive termination of this Public License.

Section 7 -- Other Terms and Conditions.

  a. The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed.

  b. Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License.

Section 8 -- Interpretation.

  a. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License.

  b. To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions.

  c. No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor.

  d. Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority.

=======================================================================

Creative Commons is not a party to its public licenses. Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the "Licensor." Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at creativecommons.org/policies, Creative Commons does not authorize the use of the trademark "Creative Commons" or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses.

Creative Commons may be contacted at creativecommons.org.
9,322
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-metrics/LICENSE
                                 Apache License
                           Version 2.0, January 2004
                        https://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.

      "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:

      (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.

      You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   Copyright 2013-2016 Docker, Inc.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       https://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
9,323
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-metrics/helpers.go
package metrics

func sumFloat64(vs ...float64) float64 {
	var sum float64
	for _, v := range vs {
		sum += v
	}

	return sum
}
9,324
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-metrics/CONTRIBUTING.md
# Contributing

## Sign your work

The sign-off is a simple line at the end of the explanation for the patch. Your
signature certifies that you wrote the patch or otherwise have the right to pass
it on as an open-source patch. The rules are pretty simple: if you can certify
the below (from [developercertificate.org](http://developercertificate.org/)):

```
Developer Certificate of Origin
Version 1.1

Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.

Developer's Certificate of Origin 1.1

By making a contribution to this project, I certify that:

(a) The contribution was created in whole or in part by me and I
    have the right to submit it under the open source license
    indicated in the file; or

(b) The contribution is based upon previous work that, to the best
    of my knowledge, is covered under an appropriate open source
    license and I have the right under that license to submit that
    work with modifications, whether created in whole or in part
    by me, under the same open source license (unless I am
    permitted to submit under a different license), as indicated
    in the file; or

(c) The contribution was provided directly to me by some other
    person who certified (a), (b) or (c) and I have not modified
    it.

(d) I understand and agree that this project and the contribution
    are public and that a record of the contribution (including all
    personal information I submit with it, including my sign-off) is
    maintained indefinitely and may be redistributed consistent with
    this project or the open source license(s) involved.
```

Then you just add a line to every git commit message:

    Signed-off-by: Joe Smith <joe.smith@email.com>

Use your real name (sorry, no pseudonyms or anonymous contributions.)

If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.
9,325
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-metrics/gauge.go
package metrics

import "github.com/prometheus/client_golang/prometheus"

// Gauge is a metric that allows incrementing and decrementing a value
type Gauge interface {
	Inc(...float64)
	Dec(...float64)

	// Add adds the provided value to the gauge's current value
	Add(float64)

	// Set replaces the gauge's current value with the provided value
	Set(float64)
}

// LabeledGauge describes a gauge that must have values populated before use.
type LabeledGauge interface {
	WithValues(labels ...string) Gauge
}

type labeledGauge struct {
	pg *prometheus.GaugeVec
}

func (lg *labeledGauge) WithValues(labels ...string) Gauge {
	return &gauge{pg: lg.pg.WithLabelValues(labels...)}
}

func (lg *labeledGauge) Describe(c chan<- *prometheus.Desc) {
	lg.pg.Describe(c)
}

func (lg *labeledGauge) Collect(c chan<- prometheus.Metric) {
	lg.pg.Collect(c)
}

type gauge struct {
	pg prometheus.Gauge
}

func (g *gauge) Inc(vs ...float64) {
	if len(vs) == 0 {
		g.pg.Inc()
	}

	g.Add(sumFloat64(vs...))
}

func (g *gauge) Dec(vs ...float64) {
	if len(vs) == 0 {
		g.pg.Dec()
	}

	g.Add(-sumFloat64(vs...))
}

func (g *gauge) Add(v float64) {
	g.pg.Add(v)
}

func (g *gauge) Set(v float64) {
	g.pg.Set(v)
}

func (g *gauge) Describe(c chan<- *prometheus.Desc) {
	g.pg.Describe(c)
}

func (g *gauge) Collect(c chan<- prometheus.Metric) {
	g.pg.Collect(c)
}
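For quick orientation, here is a minimal usage sketch for the `Gauge` type above. The namespace, subsystem, and metric names are hypothetical, and registration goes through the upstream prometheus client directly, which works because a `Namespace` (defined in namespace.go below) implements `prometheus.Collector`:

```go
package main

import (
	metrics "github.com/docker/go-metrics"
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Hypothetical namespace/subsystem names, for illustration only.
	ns := metrics.NewNamespace("demo", "worker", nil)

	// The fully qualified name becomes demo_worker_jobs_total,
	// since the unit is appended as a suffix (see unit.go below).
	jobs := ns.NewGauge("jobs", "number of in-flight jobs", metrics.Total)

	// A Namespace implements prometheus.Collector, so it can be
	// registered with the default prometheus registry directly.
	prometheus.MustRegister(ns)

	jobs.Inc()   // increment by 1
	jobs.Inc(5)  // increment by 5
	jobs.Dec(2)  // decrement by 2
	jobs.Set(10) // replace the current value
}
```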
9,326
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-metrics/namespace.go
package metrics

import (
	"fmt"
	"sync"

	"github.com/prometheus/client_golang/prometheus"
)

type Labels map[string]string

// NewNamespace returns a namespace that is responsible for managing a collection of
// metrics for a particular namespace and subsystem
//
// labels allows const labels to be added to all metrics created in this namespace
// and are commonly used for data like application version and git commit
func NewNamespace(name, subsystem string, labels Labels) *Namespace {
	if labels == nil {
		labels = make(map[string]string)
	}
	return &Namespace{
		name:      name,
		subsystem: subsystem,
		labels:    labels,
	}
}

// Namespace describes a set of metrics that share a namespace and subsystem.
type Namespace struct {
	name      string
	subsystem string
	labels    Labels
	mu        sync.Mutex
	metrics   []prometheus.Collector
}

// WithConstLabels returns a namespace with the provided set of labels merged
// with the existing constant labels on the namespace.
//
// Only metrics created with the returned namespace will get the new constant
// labels. The returned namespace must be registered separately.
func (n *Namespace) WithConstLabels(labels Labels) *Namespace {
	n.mu.Lock()
	ns := &Namespace{
		name:      n.name,
		subsystem: n.subsystem,
		labels:    mergeLabels(n.labels, labels),
	}
	n.mu.Unlock()
	return ns
}

func (n *Namespace) NewCounter(name, help string) Counter {
	c := &counter{pc: prometheus.NewCounter(n.newCounterOpts(name, help))}
	n.Add(c)
	return c
}

func (n *Namespace) NewLabeledCounter(name, help string, labels ...string) LabeledCounter {
	c := &labeledCounter{pc: prometheus.NewCounterVec(n.newCounterOpts(name, help), labels)}
	n.Add(c)
	return c
}

func (n *Namespace) newCounterOpts(name, help string) prometheus.CounterOpts {
	return prometheus.CounterOpts{
		Namespace:   n.name,
		Subsystem:   n.subsystem,
		Name:        makeName(name, Total),
		Help:        help,
		ConstLabels: prometheus.Labels(n.labels),
	}
}

func (n *Namespace) NewTimer(name, help string) Timer {
	t := &timer{
		m: prometheus.NewHistogram(n.newTimerOpts(name, help)),
	}
	n.Add(t)
	return t
}

func (n *Namespace) NewLabeledTimer(name, help string, labels ...string) LabeledTimer {
	t := &labeledTimer{
		m: prometheus.NewHistogramVec(n.newTimerOpts(name, help), labels),
	}
	n.Add(t)
	return t
}

func (n *Namespace) newTimerOpts(name, help string) prometheus.HistogramOpts {
	return prometheus.HistogramOpts{
		Namespace:   n.name,
		Subsystem:   n.subsystem,
		Name:        makeName(name, Seconds),
		Help:        help,
		ConstLabels: prometheus.Labels(n.labels),
	}
}

func (n *Namespace) NewGauge(name, help string, unit Unit) Gauge {
	g := &gauge{
		pg: prometheus.NewGauge(n.newGaugeOpts(name, help, unit)),
	}
	n.Add(g)
	return g
}

func (n *Namespace) NewLabeledGauge(name, help string, unit Unit, labels ...string) LabeledGauge {
	g := &labeledGauge{
		pg: prometheus.NewGaugeVec(n.newGaugeOpts(name, help, unit), labels),
	}
	n.Add(g)
	return g
}

func (n *Namespace) newGaugeOpts(name, help string, unit Unit) prometheus.GaugeOpts {
	return prometheus.GaugeOpts{
		Namespace:   n.name,
		Subsystem:   n.subsystem,
		Name:        makeName(name, unit),
		Help:        help,
		ConstLabels: prometheus.Labels(n.labels),
	}
}

func (n *Namespace) Describe(ch chan<- *prometheus.Desc) {
	n.mu.Lock()
	defer n.mu.Unlock()

	for _, metric := range n.metrics {
		metric.Describe(ch)
	}
}

func (n *Namespace) Collect(ch chan<- prometheus.Metric) {
	n.mu.Lock()
	defer n.mu.Unlock()

	for _, metric := range n.metrics {
		metric.Collect(ch)
	}
}

func (n *Namespace) Add(collector prometheus.Collector) {
	n.mu.Lock()
	n.metrics = append(n.metrics, collector)
	n.mu.Unlock()
}

func (n *Namespace) NewDesc(name, help string, unit Unit, labels ...string) *prometheus.Desc {
	name = makeName(name, unit)
	namespace := n.name
	if n.subsystem != "" {
		namespace = fmt.Sprintf("%s_%s", namespace, n.subsystem)
	}
	name = fmt.Sprintf("%s_%s", namespace, name)
	return prometheus.NewDesc(name, help, labels, prometheus.Labels(n.labels))
}

// mergeLabels merges two or more labels objects into a single map, favoring
// the later labels.
func mergeLabels(lbs ...Labels) Labels {
	merged := make(Labels)

	for _, target := range lbs {
		for k, v := range target {
			merged[k] = v
		}
	}

	return merged
}

func makeName(name string, unit Unit) string {
	if unit == "" {
		return name
	}

	return fmt.Sprintf("%s_%s", name, unit)
}

func (n *Namespace) NewDefaultHttpMetrics(handlerName string) []*HTTPMetric {
	return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{
		DurationBuckets:     defaultDurationBuckets,
		RequestSizeBuckets:  defaultResponseSizeBuckets,
		ResponseSizeBuckets: defaultResponseSizeBuckets,
	})
}

func (n *Namespace) NewHttpMetrics(handlerName string, durationBuckets, requestSizeBuckets, responseSizeBuckets []float64) []*HTTPMetric {
	return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{
		DurationBuckets:     durationBuckets,
		RequestSizeBuckets:  requestSizeBuckets,
		ResponseSizeBuckets: responseSizeBuckets,
	})
}

func (n *Namespace) NewHttpMetricsWithOpts(handlerName string, opts HTTPHandlerOpts) []*HTTPMetric {
	var httpMetrics []*HTTPMetric
	inFlightMetric := n.NewInFlightGaugeMetric(handlerName)
	requestTotalMetric := n.NewRequestTotalMetric(handlerName)
	requestDurationMetric := n.NewRequestDurationMetric(handlerName, opts.DurationBuckets)
	requestSizeMetric := n.NewRequestSizeMetric(handlerName, opts.RequestSizeBuckets)
	responseSizeMetric := n.NewResponseSizeMetric(handlerName, opts.ResponseSizeBuckets)
	httpMetrics = append(httpMetrics, inFlightMetric, requestDurationMetric, requestTotalMetric, requestSizeMetric, responseSizeMetric)
	return httpMetrics
}

func (n *Namespace) NewInFlightGaugeMetric(handlerName string) *HTTPMetric {
	labels := prometheus.Labels(n.labels)
	labels["handler"] = handlerName
	metric := prometheus.NewGauge(prometheus.GaugeOpts{
		Namespace:   n.name,
		Subsystem:   n.subsystem,
		Name:        "in_flight_requests",
		Help:        "The in-flight HTTP requests",
		ConstLabels: prometheus.Labels(labels),
	})
	httpMetric := &HTTPMetric{
		Collector:   metric,
		handlerType: InstrumentHandlerInFlight,
	}
	n.Add(httpMetric)
	return httpMetric
}

func (n *Namespace) NewRequestTotalMetric(handlerName string) *HTTPMetric {
	labels := prometheus.Labels(n.labels)
	labels["handler"] = handlerName
	metric := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace:   n.name,
			Subsystem:   n.subsystem,
			Name:        "requests_total",
			Help:        "Total number of HTTP requests made.",
			ConstLabels: prometheus.Labels(labels),
		},
		[]string{"code", "method"},
	)
	httpMetric := &HTTPMetric{
		Collector:   metric,
		handlerType: InstrumentHandlerCounter,
	}
	n.Add(httpMetric)
	return httpMetric
}

func (n *Namespace) NewRequestDurationMetric(handlerName string, buckets []float64) *HTTPMetric {
	if len(buckets) == 0 {
		panic("DurationBuckets must be provided")
	}
	labels := prometheus.Labels(n.labels)
	labels["handler"] = handlerName
	opts := prometheus.HistogramOpts{
		Namespace:   n.name,
		Subsystem:   n.subsystem,
		Name:        "request_duration_seconds",
		Help:        "The HTTP request latencies in seconds.",
		Buckets:     buckets,
		ConstLabels: prometheus.Labels(labels),
	}
	metric := prometheus.NewHistogramVec(opts, []string{"method"})
	httpMetric := &HTTPMetric{
		Collector:   metric,
		handlerType: InstrumentHandlerDuration,
	}
	n.Add(httpMetric)
	return httpMetric
}

func (n *Namespace) NewRequestSizeMetric(handlerName string, buckets []float64) *HTTPMetric {
	if len(buckets) == 0 {
		panic("RequestSizeBuckets must be provided")
	}
	labels := prometheus.Labels(n.labels)
	labels["handler"] = handlerName
	opts := prometheus.HistogramOpts{
		Namespace:   n.name,
		Subsystem:   n.subsystem,
		Name:        "request_size_bytes",
		Help:        "The HTTP request sizes in bytes.",
		Buckets:     buckets,
		ConstLabels: prometheus.Labels(labels),
	}
	metric := prometheus.NewHistogramVec(opts, []string{})
	httpMetric := &HTTPMetric{
		Collector:   metric,
		handlerType: InstrumentHandlerRequestSize,
	}
	n.Add(httpMetric)
	return httpMetric
}

func (n *Namespace) NewResponseSizeMetric(handlerName string, buckets []float64) *HTTPMetric {
	if len(buckets) == 0 {
		panic("ResponseSizeBuckets must be provided")
	}
	labels := prometheus.Labels(n.labels)
	labels["handler"] = handlerName
	opts := prometheus.HistogramOpts{
		Namespace:   n.name,
		Subsystem:   n.subsystem,
		Name:        "response_size_bytes",
		Help:        "The HTTP response sizes in bytes.",
		Buckets:     buckets,
		ConstLabels: prometheus.Labels(labels),
	}
	metrics := prometheus.NewHistogramVec(opts, []string{})
	httpMetric := &HTTPMetric{
		Collector:   metrics,
		handlerType: InstrumentHandlerResponseSize,
	}
	n.Add(httpMetric)
	return httpMetric
}
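A short sketch of the constant-label behavior defined above: `WithConstLabels` derives a second namespace whose metrics carry the merged label set and, per its doc comment, that namespace must be registered separately. All names here are made up for illustration:

```go
package main

import (
	metrics "github.com/docker/go-metrics"
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Base namespace with a const label shared by every metric it
	// creates. All names here are hypothetical.
	base := metrics.NewNamespace("registry", "storage", metrics.Labels{
		"version": "v2.7.1",
	})

	// Derived namespace: its metrics carry version AND region. As the
	// doc comment on WithConstLabels states, it must be registered
	// separately from the base namespace.
	regional := base.WithConstLabels(metrics.Labels{"region": "us-east-1"})

	base.NewCounter("uploads", "number of completed uploads")
	regional.NewCounter("replications", "number of cross-region replications")

	// Both namespaces are prometheus.Collectors.
	prometheus.MustRegister(base, regional)
}
```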
9,327
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-metrics/unit.go
package metrics

// Unit represents the type or precision of a metric that is appended to
// the metrics fully qualified name
type Unit string

const (
	Nanoseconds Unit = "nanoseconds"
	Seconds     Unit = "seconds"
	Bytes       Unit = "bytes"
	Total       Unit = "total"
)
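The suffixing itself happens in the unexported `makeName` helper shown in namespace.go above. A tiny standalone sketch that mirrors that logic, with made-up metric names, to show the resulting name fragments:

```go
package main

import "fmt"

// makeName mirrors the helper in namespace.go: it appends the unit
// as a suffix unless the unit is empty.
func makeName(name, unit string) string {
	if unit == "" {
		return name
	}
	return fmt.Sprintf("%s_%s", name, unit)
}

func main() {
	fmt.Println(makeName("image_size", "bytes")) // image_size_bytes
	fmt.Println(makeName("pulls", "total"))      // pulls_total
	fmt.Println(makeName("custom", ""))          // custom
}
```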
9,328
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-metrics/handler.go
package metrics

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// HTTPHandlerOpts describes a set of configurable options of http metrics
type HTTPHandlerOpts struct {
	DurationBuckets     []float64
	RequestSizeBuckets  []float64
	ResponseSizeBuckets []float64
}

const (
	InstrumentHandlerResponseSize = iota
	InstrumentHandlerRequestSize
	InstrumentHandlerDuration
	InstrumentHandlerCounter
	InstrumentHandlerInFlight
)

type HTTPMetric struct {
	prometheus.Collector
	handlerType int
}

var (
	defaultDurationBuckets     = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 25, 60}
	defaultRequestSizeBuckets  = prometheus.ExponentialBuckets(1024, 2, 22) //1K to 4G
	defaultResponseSizeBuckets = defaultRequestSizeBuckets
)

// Handler returns the global http.Handler that provides the prometheus
// metrics format on GET requests. This handler is no longer instrumented.
func Handler() http.Handler {
	return promhttp.Handler()
}

func InstrumentHandler(metrics []*HTTPMetric, handler http.Handler) http.HandlerFunc {
	return InstrumentHandlerFunc(metrics, handler.ServeHTTP)
}

func InstrumentHandlerFunc(metrics []*HTTPMetric, handlerFunc http.HandlerFunc) http.HandlerFunc {
	var handler http.Handler
	handler = http.HandlerFunc(handlerFunc)
	for _, metric := range metrics {
		switch metric.handlerType {
		case InstrumentHandlerResponseSize:
			if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
				handler = promhttp.InstrumentHandlerResponseSize(collector, handler)
			}
		case InstrumentHandlerRequestSize:
			if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
				handler = promhttp.InstrumentHandlerRequestSize(collector, handler)
			}
		case InstrumentHandlerDuration:
			if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
				handler = promhttp.InstrumentHandlerDuration(collector, handler)
			}
		case InstrumentHandlerCounter:
			if collector, ok := metric.Collector.(*prometheus.CounterVec); ok {
				handler = promhttp.InstrumentHandlerCounter(collector, handler)
			}
		case InstrumentHandlerInFlight:
			if collector, ok := metric.Collector.(prometheus.Gauge); ok {
				handler = promhttp.InstrumentHandlerInFlight(collector, handler)
			}
		}
	}
	return handler.ServeHTTP
}
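To see how these pieces compose, here is a minimal sketch (the handler, namespace, and route names are hypothetical) that builds the default HTTP metrics from a `Namespace`, wraps a handler with `InstrumentHandler`, and exposes the scrape endpoint with `Handler`:

```go
package main

import (
	"log"
	"net/http"

	metrics "github.com/docker/go-metrics"
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	ns := metrics.NewNamespace("demo", "api", nil) // hypothetical names

	// In-flight gauge, request counter, and duration/size histograms
	// for a handler labeled "hello".
	httpMetrics := ns.NewDefaultHttpMetrics("hello")
	prometheus.MustRegister(ns)

	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})

	// Each HTTPMetric wraps the handler in the matching promhttp middleware.
	http.Handle("/hello", metrics.InstrumentHandler(httpMetrics, hello))

	// Expose the prometheus text format for scraping.
	http.Handle("/metrics", metrics.Handler())

	log.Fatal(http.ListenAndServe(":8080", nil))
}
```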
9,329
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-metrics/docs.go
// This package is a small wrapper around the prometheus go client to help
// enforce convention and best practices for metrics collection in Docker
// projects.
package metrics
9,330
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-metrics/counter.go
package metrics

import "github.com/prometheus/client_golang/prometheus"

// Counter is a metric that can only increment its current count
type Counter interface {
	// Inc adds Sum(vs) to the counter. Sum(vs) must be positive.
	//
	// If len(vs) == 0, increments the counter by 1.
	Inc(vs ...float64)
}

// LabeledCounter is a counter that must have labels populated before use.
type LabeledCounter interface {
	WithValues(vs ...string) Counter
}

type labeledCounter struct {
	pc *prometheus.CounterVec
}

func (lc *labeledCounter) WithValues(vs ...string) Counter {
	return &counter{pc: lc.pc.WithLabelValues(vs...)}
}

func (lc *labeledCounter) Describe(ch chan<- *prometheus.Desc) {
	lc.pc.Describe(ch)
}

func (lc *labeledCounter) Collect(ch chan<- prometheus.Metric) {
	lc.pc.Collect(ch)
}

type counter struct {
	pc prometheus.Counter
}

func (c *counter) Inc(vs ...float64) {
	if len(vs) == 0 {
		c.pc.Inc()
	}

	c.pc.Add(sumFloat64(vs...))
}

func (c *counter) Describe(ch chan<- *prometheus.Desc) {
	c.pc.Describe(ch)
}

func (c *counter) Collect(ch chan<- prometheus.Metric) {
	c.pc.Collect(ch)
}
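A brief sketch of the labeled-counter flow above, with hypothetical names: label keys are declared when the metric is created, and `WithValues` binds concrete values before incrementing:

```go
package main

import (
	metrics "github.com/docker/go-metrics"
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	ns := metrics.NewNamespace("demo", "http", nil) // hypothetical names

	// Declares a counter vector with a "code" label; the fully
	// qualified name gains a _total suffix (see newCounterOpts above).
	responses := ns.NewLabeledCounter("responses", "responses by status code", "code")
	prometheus.MustRegister(ns)

	// Bind label values, then increment: by 1, and by an explicit amount.
	responses.WithValues("200").Inc()
	responses.WithValues("500").Inc(3)
}
```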
9,331
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-metrics/go.sum
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 h1:4y9KwBHBgBNwDbtu44R5o1fdOCQUEXhbk/P4A9WmJq0=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
9,332
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry.go
package distribution

import (
	"context"

	"github.com/docker/distribution/reference"
)

// Scope defines the set of items that match a namespace.
type Scope interface {
	// Contains returns true if the name belongs to the namespace.
	Contains(name string) bool
}

type fullScope struct{}

func (f fullScope) Contains(string) bool {
	return true
}

// GlobalScope represents the full namespace scope which contains
// all other scopes.
var GlobalScope = Scope(fullScope{})

// Namespace represents a collection of repositories, addressable by name.
// Generally, a namespace is backed by a set of one or more services,
// providing facilities such as registry access, trust, and indexing.
type Namespace interface {
	// Scope describes the names that can be used with this Namespace. The
	// global namespace will have a scope that matches all names. The scope
	// effectively provides an identity for the namespace.
	Scope() Scope

	// Repository should return a reference to the named repository. The
	// registry may or may not have the repository but should always return a
	// reference.
	Repository(ctx context.Context, name reference.Named) (Repository, error)

	// Repositories fills 'repos' with a lexicographically sorted catalog of repositories
	// up to the size of 'repos' and returns the value 'n' for the number of entries
	// which were filled. 'last' contains an offset in the catalog, and 'err' will be
	// set to io.EOF if there are no more entries to obtain.
	Repositories(ctx context.Context, repos []string, last string) (n int, err error)

	// Blobs returns a blob enumerator to access all blobs
	Blobs() BlobEnumerator

	// BlobStatter returns a BlobStatter to control
	BlobStatter() BlobStatter
}

// RepositoryEnumerator describes an operation to enumerate repositories
type RepositoryEnumerator interface {
	Enumerate(ctx context.Context, ingester func(string) error) error
}

// RepositoryRemover removes given repository
type RepositoryRemover interface {
	Remove(ctx context.Context, name reference.Named) error
}

// ManifestServiceOption is a function argument for Manifest Service methods
type ManifestServiceOption interface {
	Apply(ManifestService) error
}

// WithTag allows a tag to be passed into Put
func WithTag(tag string) ManifestServiceOption {
	return WithTagOption{tag}
}

// WithTagOption holds a tag
type WithTagOption struct{ Tag string }

// Apply conforms to the ManifestServiceOption interface
func (o WithTagOption) Apply(m ManifestService) error {
	// no implementation
	return nil
}

// WithManifestMediaTypes lists the media types the client wishes
// the server to provide.
func WithManifestMediaTypes(mediaTypes []string) ManifestServiceOption {
	return WithManifestMediaTypesOption{mediaTypes}
}

// WithManifestMediaTypesOption holds a list of accepted media types
type WithManifestMediaTypesOption struct{ MediaTypes []string }

// Apply conforms to the ManifestServiceOption interface
func (o WithManifestMediaTypesOption) Apply(m ManifestService) error {
	// no implementation
	return nil
}

// Repository is a named collection of manifests and layers.
type Repository interface {
	// Named returns the name of the repository.
	Named() reference.Named

	// Manifests returns a reference to this repository's manifest service
	// with the supplied options applied.
	Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error)

	// Blobs returns a reference to this repository's blob service.
	Blobs(ctx context.Context) BlobStore

	// TODO(stevvooe): The above BlobStore return can probably be relaxed to
	// be a BlobService for use with clients. This will allow such
	// implementations to avoid implementing ServeBlob.

	// Tags returns a reference to this repository's tag service
	Tags(ctx context.Context) TagService
}

// TODO(stevvooe): Must add close methods to all these. May want to change the
// way instances are created to better reflect internal dependency
// relationships.
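Because `ManifestServiceOption` is just the single-method interface shown above, callers can define options of their own. A hypothetical sketch in a client package, modeled on `WithTagOption` (the names `withLogPrefix` and `WithLogPrefix` are invented for illustration):

```go
package client

import "github.com/docker/distribution"

// withLogPrefix is a hypothetical, caller-defined option. Like
// WithTagOption in the distribution package, it only carries a value;
// Apply performs no mutation, and service implementations that
// understand the option inspect its value when it is passed in, e.g.
// via Manifests(ctx, opts...).
type withLogPrefix struct{ prefix string }

// Apply conforms to the distribution.ManifestServiceOption interface.
func (o withLogPrefix) Apply(m distribution.ManifestService) error {
	// no implementation
	return nil
}

// WithLogPrefix wraps a prefix string in a ManifestServiceOption.
func WithLogPrefix(prefix string) distribution.ManifestServiceOption {
	return withLogPrefix{prefix}
}
```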
9,333
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/MAINTAINERS
# Distribution maintainers file
#
# This file describes who runs the docker/distribution project and how.
# This is a living document - if you see something out of date or missing, speak up!
#
# It is structured to be consumable by both humans and programs.
# To extract its contents programmatically, use any TOML-compliant parser.
#

[Rules]

	[Rules.maintainers]

	title = "What is a maintainer?"

	text = """
There are different types of maintainers, with different responsibilities, but all maintainers have 3 things in common:

1) They share responsibility in the project's success.
2) They have made a long-term, recurring time investment to improve the project.
3) They spend that time doing whatever needs to be done, not necessarily what is the most interesting or fun.

Maintainers are often under-appreciated, because their work is harder to appreciate. It's easy to appreciate a really cool and technically advanced feature. It's harder to appreciate the absence of bugs, the slow but steady improvement in stability, or the reliability of a release process. But those things distinguish a good project from a great one.
"""

	[Rules.reviewer]

	title = "What is a reviewer?"

	text = """
A reviewer is a core role within the project. They share in reviewing issues and pull requests and their LGTM count towards the required LGTM count to merge a code change into the project.

Reviewers are part of the organization but do not have write access. Becoming a reviewer is a core aspect in the journey to becoming a maintainer.
"""

	[Rules.adding-maintainers]

	title = "How are maintainers added?"

	text = """
Maintainers are first and foremost contributors that have shown they are committed to the long term success of a project. Contributors wanting to become maintainers are expected to be deeply involved in contributing code, pull request review, and triage of issues in the project for more than three months.

Just contributing does not make you a maintainer, it is about building trust with the current maintainers of the project and being a person that they can depend on and trust to make decisions in the best interest of the project.

Periodically, the existing maintainers curate a list of contributors that have shown regular activity on the project over the prior months. From this list, maintainer candidates are selected and proposed on the maintainers mailing list.

After a candidate has been announced on the maintainers mailing list, the existing maintainers are given five business days to discuss the candidate, raise objections and cast their vote. Candidates must be approved by at least 66% of the current maintainers by adding their vote on the mailing list. Only maintainers of the repository that the candidate is proposed for are allowed to vote.

If a candidate is approved, a maintainer will contact the candidate to invite the candidate to open a pull request that adds the contributor to the MAINTAINERS file. The candidate becomes a maintainer once the pull request is merged.
"""

	[Rules.stepping-down-policy]

	title = "Stepping down policy"

	text = """
Life priorities, interests, and passions can change. If you're a maintainer but feel you must remove yourself from the list, inform other maintainers that you intend to step down, and if possible, help find someone to pick up your work. At the very least, ensure your work can be continued where you left off.

After you've informed other maintainers, create a pull request to remove yourself from the MAINTAINERS file.
"""

	[Rules.inactive-maintainers]

	title = "Removal of inactive maintainers"

	text = """
Similar to the procedure for adding new maintainers, existing maintainers can be removed from the list if they do not show significant activity on the project. Periodically, the maintainers review the list of maintainers and their activity over the last three months.

If a maintainer has shown insufficient activity over this period, a neutral person will contact the maintainer to ask if they want to continue being a maintainer. If the maintainer decides to step down as a maintainer, they open a pull request to be removed from the MAINTAINERS file.

If the maintainer wants to remain a maintainer, but is unable to perform the required duties they can be removed with a vote of at least 66% of the current maintainers. An e-mail is sent to the mailing list, inviting maintainers of the project to vote. The voting period is five business days. Issues related to a maintainer's performance should be discussed with them among the other maintainers so that they are not surprised by a pull request removing them.
"""

	[Rules.decisions]

	title = "How are decisions made?"

	text = """
Short answer: EVERYTHING IS A PULL REQUEST.

distribution is an open-source project with an open design philosophy. This means that the repository is the source of truth for EVERY aspect of the project, including its philosophy, design, road map, and APIs. *If it's part of the project, it's in the repo. If it's in the repo, it's part of the project.*

As a result, all decisions can be expressed as changes to the repository. An implementation change is a change to the source code. An API change is a change to the API specification. A philosophy change is a change to the philosophy manifesto, and so on.

All decisions affecting distribution, big and small, follow the same 3 steps:

* Step 1: Open a pull request. Anyone can do this.

* Step 2: Discuss the pull request. Anyone can do this.

* Step 3: Merge or refuse the pull request. Who does this depends on the nature of the pull request and which areas of the project it affects.
"""

	[Rules.DCO]

	title = "Helping contributors with the DCO"

	text = """
The [DCO or `Sign your work`](https://github.com/moby/moby/blob/master/CONTRIBUTING.md#sign-your-work) requirement is not intended as a roadblock or speed bump.

Some distribution contributors are not as familiar with `git`, or have used a web based editor, and thus asking them to `git commit --amend -s` is not the best way forward. In this case, maintainers can update the commits based on clause (c) of the DCO. The most trivial way for a contributor to allow the maintainer to do this, is to add a DCO signature in a pull requests's comment, or a maintainer can simply note that the change is sufficiently trivial that it does not substantially change the existing contribution - i.e., a spelling change.

When you add someone's DCO, please also add your own to keep a log.
"""

	[Rules."no direct push"]

	title = "I'm a maintainer. Should I make pull requests too?"

	text = """
Yes. Nobody should ever push to master directly. All changes should be made through a pull request.
"""

	[Rules.tsc]

	title = "Conflict Resolution and technical disputes"

	text = """
distribution defers to the [Technical Steering Committee](https://github.com/moby/tsc) for escalations and resolution on disputes for technical matters.
"""

	[Rules.meta]

	title = "How is this process changed?"

	text = "Just like everything else: by making a pull request :)"

# Current project organization
[Org]

	[Org.Maintainers]
	people = [
		"dmcgowan",
		"dmp42",
		"stevvooe",
	]

	[Org.Reviewers]
	people = [
		"manishtomar",
		"caervs",
		"davidswu",
		"RobbKistler"
	]

[people]

# A reference list of all people associated with the project.
# All other sections should refer to people by their canonical key
# in the people section.

	# ADD YOURSELF HERE IN ALPHABETICAL ORDER

	[people.caervs]
	Name = "Ryan Abrams"
	Email = "rdabrams@gmail.com"
	GitHub = "caervs"

	[people.davidswu]
	Name = "David Wu"
	Email = "dwu7401@gmail.com"
	GitHub = "davidswu"

	[people.dmcgowan]
	Name = "Derek McGowan"
	Email = "derek@mcgstyle.net"
	GitHub = "dmcgowan"

	[people.dmp42]
	Name = "Olivier Gambier"
	Email = "olivier@docker.com"
	GitHub = "dmp42"

	[people.manishtomar]
	Name = "Manish Tomar"
	Email = "manish.tomar@docker.com"
	GitHub = "manishtomar"

	[people.RobbKistler]
	Name = "Robb Kistler"
	Email = "robb.kistler@docker.com"
	GitHub = "RobbKistler"

	[people.stevvooe]
	Name = "Stephen Day"
	Email = "stephen.day@docker.com"
	GitHub = "stevvooe"
9,334
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/BUILDING.md
# Building the registry source

## Use-case

This is useful if you intend to actively work on the registry.

### Alternatives

Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/).

People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`.

OS X users who want to run natively can do so following [the instructions here](https://github.com/docker/docker.github.io/blob/master/registry/recipes/osx-setup-guide.md).

### Gotchas

You are expected to know your way around go & git. If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you.

## Build the development environment

The first prerequisite of properly building distribution targets is to have a Go development environment set up. Please follow [How to Write Go Code](https://golang.org/doc/code.html) for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the environment.

If a Go development environment is set up, one can use `go get` to install the `registry` command from the latest sources:

    go get github.com/docker/distribution/cmd/registry

The above will install the source repository into the `GOPATH`.

Now create the directory for the registry data (this might require you to set permissions properly):

    mkdir -p /var/lib/registry

... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data in another location.

The `registry` binary can then be run with the following:

    $ $GOPATH/bin/registry --version
    $GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown

> __NOTE:__ While you do not need to use `go get` to check out the distribution
> project, for these build instructions to work, the project must be checked
> out in the correct location in the `GOPATH`. This should almost always be
> `$GOPATH/src/github.com/docker/distribution`.

The registry can be run with the default config using the following incantation:

    $ $GOPATH/bin/registry serve $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml
    INFO[0000] endpoint local-5003 disabled, skipping  app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
    INFO[0000] endpoint local-8083 disabled, skipping  app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
    INFO[0000] listening on :5000  app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
    INFO[0000] debug server listening localhost:5001

If it is working, one should see the above log messages.

### Repeatable Builds

For the full development experience, one should `cd` into `$GOPATH/src/github.com/docker/distribution`. From there, the regular `go` commands, such as `go test`, should work per package (please see [Developing](#developing) if they don't work).

A `Makefile` has been provided as a convenience to support repeatable builds. Please install the following into `GOPATH` for it to work:

    go get github.com/golang/lint/golint

Once this command is available in the `GOPATH`, run `make` to get a full build:

    $ make
    + clean
    + fmt
    + vet
    + lint
    + build
    github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar
    github.com/sirupsen/logrus
    github.com/docker/libtrust
    ...
    github.com/yvasiyarov/gorelic
    github.com/docker/distribution/registry/handlers
    github.com/docker/distribution/cmd/registry
    + test
    ...
    ok github.com/docker/distribution/digest 7.875s
    ok github.com/docker/distribution/manifest 0.028s
    ok github.com/docker/distribution/notifications 17.322s
    ? github.com/docker/distribution/registry [no test files]
    ok github.com/docker/distribution/registry/api/v2 0.101s
    ? github.com/docker/distribution/registry/auth [no test files]
    ok github.com/docker/distribution/registry/auth/silly 0.011s
    ...
    + /Users/sday/go/src/github.com/docker/distribution/bin/registry
    + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template
    + binaries

The above provides a repeatable build using the contents of the vendor directory. This includes formatting, vetting, linting, building, testing and generating tagged binaries. We can verify this worked by running the registry binary generated in the "./bin" directory:

    $ ./bin/registry --version
    ./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m

### Optional build tags

Optional [build tags](http://golang.org/pkg/go/build/) can be provided using the environment variable `DOCKER_BUILDTAGS`.
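Note that the `Makefile` vendored in this tree consumes the tags through its `BUILDTAGS` variable (the project `Dockerfile` sets the same variable), so an illustrative invocation enabling the OSS and GCS storage drivers (tag names taken from that Dockerfile) is:

    $ make BUILDTAGS="include_oss include_gcs" binaries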
9,335
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/.gometalinter.json
{ "Vendor": true, "Deadline": "2m", "Sort": ["linter", "severity", "path", "line"], "EnableGC": true, "Enable": [ "structcheck", "staticcheck", "unconvert", "gofmt", "goimports", "golint", "vet" ] }
9,336
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/README.md
# Distribution

The Docker toolset to pack, ship, store, and deliver content.

This repository's main product is the Docker Registry 2.0 implementation
for storing and distributing Docker images. It supersedes the
[docker/docker-registry](https://github.com/docker/docker-registry)
project with a new API design, focused on security and performance.

<img src="https://www.docker.com/sites/default/files/oyster-registry-3.png" width=200px/>

[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master)
[![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution)

This repository contains the following components:

|**Component**       |Description                                                                                                                                                                                          |
|--------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| **registry**       | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+.                                                                                                  |
| **libraries**      | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. |
| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec)                                                                                                                       |
| **documentation**  | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry.                             |

### How does this integrate with Docker engine?

This project should provide an implementation of a V2 API for use in the [Docker core project](https://github.com/docker/docker). The API should be embeddable and simplify the process of securely pulling and pushing content from `docker` daemons.

### What are the long term goals of the Distribution project?

The _Distribution_ project has the further long term goal of providing a secure tool chain for distributing content. The specifications, APIs and tools should be as useful with Docker as they are without.

Our goal is to design a professional grade and extensible content distribution system that allows users to:

* Enjoy an efficient, secure and reliable way to store, manage, package and exchange content
* Hack/roll their own on top of healthy open-source components
* Implement their own home-made solutions through good specs and solid extension mechanisms.

## More about Registry 2.0

The new registry implementation provides the following benefits:

- faster push and pull
- new, more efficient implementation
- simplified deployment
- pluggable storage backend
- webhook notifications

For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md).

### Who needs to deploy a registry?

By default, Docker users pull images from Docker's public registry instance. [Installing Docker](https://docs.docker.com/engine/installation/) gives users this ability. Users can also push images to a repository on Docker's public registry, if they have a [Docker Hub](https://hub.docker.com/) account.

For some users and even companies, this default behavior is sufficient. For others, it is not.

For example, users with their own software products may want to maintain a registry for private, company images. Also, you may wish to deploy your own image repository for images used in testing or continuous integration.
For these use cases and others, [deploying your own registry instance](https://github.com/docker/docker.github.io/blob/master/registry/deploying.md) may be the better choice. ### Migration to Registry 2.0 For those who have previously deployed their own registry based on the Registry 1.0 implementation and wish to deploy a Registry 2.0 while retaining images, data migration is required. A tool to assist with migration efforts has been created. For more information see [docker/migrator](https://github.com/docker/migrator). ## Contribute Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute issues, fixes, and patches to this project. If you are contributing code, see the instructions for [building a development environment](BUILDING.md). ## Support If any issues are encountered while using the _Distribution_ project, several avenues are available for support: <table> <tr> <th align="left"> IRC </th> <td> #docker-distribution on FreeNode </td> </tr> <tr> <th align="left"> Issue Tracker </th> <td> github.com/docker/distribution/issues </td> </tr> <tr> <th align="left"> Google Groups </th> <td> https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution </td> </tr> <tr> <th align="left"> Mailing List </th> <td> docker@dockerproject.org </td> </tr> </table> ## License This project is distributed under [Apache License, Version 2.0](LICENSE).
9,337
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/Makefile
# Root directory of the project (absolute path). ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST)))) # Used to populate version variable in main package. VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always) REVISION=$(shell git rev-parse HEAD)$(shell if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi) PKG=github.com/docker/distribution # Project packages. PACKAGES=$(shell go list -tags "${BUILDTAGS}" ./... | grep -v /vendor/) INTEGRATION_PACKAGE=${PKG} COVERAGE_PACKAGES=$(filter-out ${PKG}/registry/storage/driver/%,${PACKAGES}) # Project binaries. COMMANDS=registry digest registry-api-descriptor-template # Allow turning off function inlining and variable registerization ifeq (${DISABLE_OPTIMIZATION},true) GO_GCFLAGS=-gcflags "-N -l" VERSION:="$(VERSION)-noopt" endif WHALE = "+" # Go files # TESTFLAGS_RACE= GOFILES=$(shell find . -type f -name '*.go') GO_TAGS=$(if $(BUILDTAGS),-tags "$(BUILDTAGS)",) GO_LDFLAGS=-ldflags '-s -w -X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PKG) $(EXTRA_LDFLAGS)' BINARIES=$(addprefix bin/,$(COMMANDS)) # Flags passed to `go test` TESTFLAGS ?= -v $(TESTFLAGS_RACE) TESTFLAGS_PARALLEL ?= 8 .PHONY: all build binaries check clean test test-race test-full integration coverage .DEFAULT: all all: binaries # This only needs to be generated by hand when cutting full releases. version/version.go: @echo "$(WHALE) $@" ./version/version.sh > $@ check: ## run all linters (TODO: enable "unused", "varcheck", "ineffassign", "unconvert", "staticheck", "goimports", "structcheck") @echo "$(WHALE) $@" gometalinter --config .gometalinter.json ./... test: ## run tests, except integration test with test.short @echo "$(WHALE) $@" @go test ${GO_TAGS} -test.short ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) test-race: ## run tests, except integration test with test.short and race @echo "$(WHALE) $@" @go test ${GO_TAGS} -race -test.short ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) test-full: ## run tests, except integration tests @echo "$(WHALE) $@" @go test ${GO_TAGS} ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) integration: ## run integration tests @echo "$(WHALE) $@" @go test ${TESTFLAGS} -parallel ${TESTFLAGS_PARALLEL} ${INTEGRATION_PACKAGE} coverage: ## generate coverprofiles from the unit tests @echo "$(WHALE) $@" @rm -f coverage.txt @go test ${GO_TAGS} -i ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${COVERAGE_PACKAGES}) 2> /dev/null @( for pkg in $(filter-out ${INTEGRATION_PACKAGE},${COVERAGE_PACKAGES}); do \ go test ${GO_TAGS} ${TESTFLAGS} \ -cover \ -coverprofile=profile.out \ -covermode=atomic $$pkg || exit; \ if [ -f profile.out ]; then \ cat profile.out >> coverage.txt; \ rm profile.out; \ fi; \ done ) FORCE: # Build a binary from a cmd. bin/%: cmd/% FORCE @echo "$(WHALE) $@${BINARY_SUFFIX}" @go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} -o $@${BINARY_SUFFIX} ${GO_LDFLAGS} ${GO_TAGS} ./$< binaries: $(BINARIES) ## build binaries @echo "$(WHALE) $@" build: @echo "$(WHALE) $@" @go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} ${GO_LDFLAGS} ${GO_TAGS} $(PACKAGES) clean: ## clean up binaries @echo "$(WHALE) $@" @rm -f $(BINARIES)
9,338
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/LICENSE
Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
9,339
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/ROADMAP.md
# Roadmap

The Distribution Project consists of several components, some of which are still being defined. This document defines the high-level goals of the project, identifies the current components, and defines the release relationship to the Docker Platform.

* [Distribution Goals](#distribution-goals)
* [Distribution Components](#distribution-components)
* [Project Planning](#project-planning): release relationship to the Docker Platform.

This road map is a living document, providing an overview of the goals and considerations made regarding the future of the project.

## Distribution Goals

- Replace the existing [docker registry](https://github.com/docker/docker-registry) implementation as the primary implementation.
- Replace the existing push and pull code in the docker engine with the distribution package.
- Define a strong data model for distributing docker images.
- Provide a flexible distribution tool kit for use in the docker platform.
- Unlock new distribution models.

## Distribution Components

Components of the Distribution Project are managed via github [milestones](https://github.com/docker/distribution/milestones). Upcoming features and bugfixes for a component will be added to the relevant milestone. If a feature or bugfix is not part of a milestone, it is currently unscheduled for implementation.

* [Registry](#registry)
* [Distribution Package](#distribution-package)

***

### Registry

The new Docker registry is the main portion of the distribution repository. Registry 2.0 is the first release of the next-generation registry. This release was primarily focused on implementing the [new registry API](https://github.com/docker/distribution/blob/master/docs/spec/api.md), with an emphasis on security and performance.

Following from the Distribution project goals above, we have a set of goals for registry v2 that we would like to follow in the design. New features should be compared against these goals.

#### Data Storage and Distribution First

The registry's first goal is to provide a reliable, consistent storage location for Docker images. The registry should only provide the minimal amount of indexing required to fetch image data and no more.

This means we should be selective in new features and API additions, including those that may require expensive, ever-growing indexes. Requests should be servable in "constant time".

#### Content Addressability

All data objects used in the registry API should be content addressable. Content identifiers should be secure and verifiable. This provides a secure, reliable base from which to build more advanced content distribution systems.

#### Content Agnostic

In the past, changes to the image format would require large changes in Docker and the Registry. By decoupling the distribution and image format, we can allow the formats to progress without having to coordinate between the two. This means that we should be focused on decoupling Docker from the registry just as much as decoupling the registry from Docker. Such an approach will allow us to unlock new distribution models that haven't been possible before.

We can take this further by saying that the new registry should be content agnostic. The registry provides a model of names, tags, manifests and content addresses and that model can be used to work with content.

#### Simplicity

The new registry should be closer to a microservice component than its predecessor. This means it should have a narrower API and a low number of service dependencies. It should be easy to deploy.
This means that other solutions should be explored before changing the API or adding extra dependencies. If functionality is required, can it be added as an extension or companion service?

#### Extensibility

The registry should provide extension points to add functionality while keeping the core scope narrow. Features like search, indexing, synchronization and registry explorers fall into this category. No such feature should be added unless we've found it impossible to do through an extension.

#### Active Feature Discussions

The following are feature discussions that are currently active.

If you don't see your favorite, unimplemented feature, feel free to contact us via IRC or the mailing list and we can talk about adding it. The goal here is to make sure that new features go through a rigid design process before landing in the registry.

##### Proxying to other Registries

A _pull-through caching_ mode exists for the registry, but is restricted from within the docker client to only mirror the official Docker Hub. This functionality can be expanded when image provenance has been specified and implemented in the distribution project.

##### Metadata storage

Metadata for the registry is currently stored with the manifest and layer data on the storage backend. While this is a big win for simplicity and reliably maintaining state, it comes with the cost of consistency and high latency. The mutable registry metadata operations should be abstracted behind an API which will allow ACID compliant storage systems to handle metadata.

##### Peer to Peer transfer

Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit

##### Indexing, Search and Discovery

The original registry provided some implementation of search for use with private registries. Support has been elided from V2 since we'd like to decouple search functionality from the registry. This makes the registry simpler to deploy, especially in use cases where search is not needed, and lets us decouple the image format from the registry.

There are explorations into using the catalog API and notification system to build external indexes. The current line of thought is that we will define a common search API to index and query docker images. Such a system could be run as a companion to a registry or set of registries to power discovery.

The main issue with search and discovery is that there are so many ways to accomplish it. There are two aspects to this project. The first is deciding on how it will be done, including an API definition that can work with changing data formats. The second is the process of integrating with `docker search`. We expect that someone will attempt to address the problem with the existing tools, and either propose it as a standard search API or use it to inform a standardization process. Once this has been explored, we will integrate with the docker client.

Please see the following for more detail:

- https://github.com/docker/distribution/issues/206

##### Deletes

> __NOTE:__ Deletes are a much asked for feature. Before requesting this
> feature or participating in discussion, we ask that you read this section
> in full and understand the problems behind deletes.

While, at first glance, implementing deletes seems simple, there are a number of mitigating factors that make many solutions not ideal or even pathological in the context of a registry.
The following paragraphs discuss the background and approaches that could be applied to arrive at a solution.

The goal of deletes in any system is to remove unused or unneeded data. Only data requested for deletion should be removed and no other data. Removing unintended data is worse than _not_ removing data that was requested for removal, but ideally both are supported. Generally, according to this rule, we err on the side of holding data longer than needed, ensuring that it is only removed when we can be certain that it can be removed. With the current behavior, we opt to hold onto the data forever, ensuring that data cannot be incorrectly removed.

To understand the problems with implementing deletes, one must understand the data model. All registry data is stored in a filesystem layout, implemented on a "storage driver", effectively a _virtual file system_ (VFS). The storage system must assume that this VFS layer will be eventually consistent and has poor read-after-write consistency, since this is the lowest common denominator among the storage drivers. This is mitigated by writing values in reverse-dependent order, but makes wider transactional operations unsafe.

Layered on the VFS model is a content-addressable _directed, acyclic graph_ (DAG) made up of blobs. Manifests reference layers. Tags reference manifests. Since the same data can be referenced by multiple manifests, we only store data once, even if it is in different repositories. Thus, we have a set of blobs, referenced by tags and manifests. If we want to delete a blob we need to be certain that it is no longer referenced by another manifest or tag. When we delete a manifest, we can also try to delete the referenced blobs. Deciding whether or not a blob has an active reference is the crux of the problem.

Conceptually, deleting a manifest and its resources is quite simple. Just find all the manifests, enumerate the referenced blobs and delete the blobs not in that set. An astute observer will recognize this as a garbage collection problem. As with garbage collection in programming languages, this is very simple when one always has a consistent view. When one adds parallelism and an inconsistent view of data, it becomes very challenging.

A simple example can demonstrate this. Let's say we are deleting a manifest _A_ in one process. We scan the manifest and decide that all the blobs are ready for deletion. Concurrently, we have another process accepting a new manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_ is accepted and all the blobs are considered present, so the operation proceeds. The original process then deletes the referenced blobs, assuming they were unreferenced. The manifest _B_, which we thought had all of its data present, can no longer be served by the registry, since the dependent data has been deleted.

Deleting data from the registry safely requires some way to coordinate this operation. The following approaches are being considered:

- _Reference Counting_ - Maintain a count of references to each blob. This is
  challenging for a number of reasons: 1. maintaining a consistent consensus
  of reference counts across a set of registries and 2. building the initial
  list of reference counts for an existing registry. These challenges can be
  met with a consensus protocol like Paxos or Raft in the first case and a
  necessary but simple scan in the second.
- _Lock the World GC_ - Halt all writes to the data store. Walk the data store
  and find all blob references. Delete all unreferenced blobs.
  This approach is very simple but requires disabling writes for a period of
  time while the service reads all data. This is slow and expensive but very
  accurate and effective.
- _Generational GC_ - Do something similar to the above, but instead of
  blocking writes, writes are sent to another storage backend while reads are
  broadcast to the new and old backends. GC is then performed on the
  read-only portion. Because writes land in the new backend, the data in the
  read-only section can be safely deleted. The main drawbacks of this
  approach are complexity and coordination.
- _Centralized Oracle_ - Using a centralized, transactional database, we can
  know exactly which data is referenced at any given time. This avoids the
  coordination problem by managing this data in a single location. We trade
  off metadata scalability for simplicity and performance. This is a very
  good option for most registry deployments. This would create a bottleneck
  for registry metadata. However, metadata is generally not the main
  bottleneck when serving images.

Please let us know if other solutions exist that we have yet to enumerate. Note that for any approach, implementation is a massive consideration. For example, a mark-sweep based solution may seem simple but the amount of work in coordination may offset the extra work it would take to build a _Centralized Oracle_. We'll accept proposals for any solution but please coordinate with us before dropping code.

At this time, we have traded off simplicity and ease of deployment for disk space. Simplicity and ease of deployment tend to reduce developer involvement, which is currently the most expensive resource in software engineering. Taking on any solution for deletes will greatly affect these factors, trading off very cheap disk space for a complex deployment and operational story.

Please see the following issues for more detail:

- https://github.com/docker/distribution/issues/422
- https://github.com/docker/distribution/issues/461
- https://github.com/docker/distribution/issues/462

### Distribution Package

At its core, the Distribution Project is a set of Go packages that make up Distribution Components. At this time, most of these packages make up the Registry implementation.

The package itself is considered unstable. If you're using it, please take care to vendor the dependent version.

For feature additions, please see the Registry section. In the future, we may break out a separate Roadmap for distribution-specific features that apply to more than just the registry.

***

### Project Planning

An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap. [Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress.
9,340
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/CONTRIBUTING.md
# Contributing to the registry ## Before reporting an issue... ### If your problem is with... - automated builds - your account on the [Docker Hub](https://hub.docker.com/) - any other [Docker Hub](https://hub.docker.com/) issue Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com) ### If you... - need help setting up your registry - can't figure out something - are not sure what's going on or what your problem is Then please do not open an issue here yet - you should first try one of the following support forums: - irc: #docker-distribution on freenode - mailing-list: <distribution@dockerproject.org> or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution ### Reporting security issues The Docker maintainers take security seriously. If you discover a security issue, please bring it to their attention right away! Please **DO NOT** file a public issue, instead send your report privately to [security@docker.com](mailto:security@docker.com). ## Reporting an issue properly By following these simple rules you will get better and faster feedback on your issue. - search the bugtracker for an already reported issue ### If you found an issue that describes your problem: - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments - please refrain from adding "same thing here" or "+1" comments - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button - comment if you have some new, technical and relevant information to add to the case - __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue. ### If you have not found an existing issue that describes your problem: 1. create a new issue, with a succinct title that describes your issue: - bad title: "It doesn't work with my docker" - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST" 2. copy the output of: - `docker version` - `docker info` - `docker exec <registry-container> registry --version` 3. copy the command line you used to launch your Registry 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments) 5. reproduce your problem and get your docker daemon logs showing the error 6. if relevant, copy your registry logs that show the error 7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used) 8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry ## Contributing a patch for a known bug, or a small correction You should follow the basic GitHub workflow: 1. fork 2. commit a change 3. make sure the tests pass 4. PR Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). 
It's very simple:

- configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com`
- sign your commits using `-s`: `git commit -s -m "My commit"`

Some simple rules to ensure quick merge:

- clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`)
- prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once
- if you need to amend your PR following comments, please squash instead of adding more commits

## Contributing new features

You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve.

If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning. If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work.

Then you should submit your implementation, clearly linking to the issue (and possible proposal).

Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged.

It's mandatory to:

- interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines)
- address maintainers' comments and modify your submission accordingly
- write tests for any new code

Complying with these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry.

Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493)

## Coding Style

Unless explicitly stated, we follow all coding guidelines from the Go community. While some of these standards may seem arbitrary, they somehow seem to result in a solid, consistent codebase.

It is possible that the code base does not currently comply with these guidelines. We are not looking for a massive PR that fixes this, since that goes against the spirit of the guidelines. All new contributions should make a best effort to clean up and make the code base better than they left it.

Obviously, apply your best judgement. Remember, the goal here is to make the code base easier for humans to navigate and understand. Always keep that in mind when nudging others to comply.

The rules:

1. All code should be formatted with `gofmt -s`.
2. All code should pass the default levels of [`golint`](https://github.com/golang/lint).
3. All code should follow the guidelines covered in [Effective Go](http://golang.org/doc/effective_go.html) and [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments).
4. Comment the code. Tell us the why, the history and the context.
5. Document _all_ declarations and methods, even private ones. Declare expectations, caveats and anything else that may be important. If a type gets exported, having the comments already there will ensure it's ready.
6. Variable name length should be proportional to its context and no longer. `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`. In practice, short methods will have short variable names and globals will have longer names.
7. No underscores in package names.
   If you need a compound name, step back, and re-examine why you need a compound name. If you still think you need a compound name, lose the underscore.
8. No utils or helpers packages. If a function is not general enough to warrant its own package, it has not been written generally enough to be a part of a util package. Just leave it unexported and well-documented.
9. All tests should run with `go test` and outside tooling should not be required. No, we don't need another unit testing framework. Assertion packages are acceptable if they provide _real_ incremental value.
10. Even though we call these "rules" above, they are actually just guidelines. Since you've read all the rules, you now know that.

If you are having trouble getting into the mood of idiomatic Go, we recommend reading through [Effective Go](http://golang.org/doc/effective_go.html). The [Go Blog](http://blog.golang.org/) is also a great resource. Drinking the kool-aid is a lot easier than going thirsty.
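To make rules 4-6 concrete, here is a small, hypothetical snippet (not taken from this codebase) written in the style described above:

    package example // hypothetical package, for illustration only

    import "time"

    // maxRetries bounds how many times a failed request is retried. Even
    // private declarations get a doc comment (rule 5).
    const maxRetries = 3

    // backoff returns the delay before retry attempt n; short names suit a
    // short function (rule 6).
    func backoff(n int) time.Duration {
    	// Linear backoff keeps the example simple; comments explain the
    	// why, not just the what (rule 4).
    	return time.Duration(n) * 100 * time.Millisecond
    }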
9,341
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/tags.go
package distribution

import (
	"context"
)

// TagService provides access to information about tagged objects.
type TagService interface {
	// Get retrieves the descriptor identified by the tag. Some
	// implementations may differentiate between "trusted" tags and
	// "untrusted" tags. If a tag is "untrusted", the mapping will be returned
	// as an ErrTagUntrusted error, with the target descriptor.
	Get(ctx context.Context, tag string) (Descriptor, error)

	// Tag associates the tag with the provided descriptor, updating the
	// current association, if needed.
	Tag(ctx context.Context, tag string, desc Descriptor) error

	// Untag removes the given tag association
	Untag(ctx context.Context, tag string) error

	// All returns the set of tags managed by this tag service
	All(ctx context.Context) ([]string, error)

	// Lookup returns the set of tags referencing the given digest.
	Lookup(ctx context.Context, digest Descriptor) ([]string, error)
}
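As a usage illustration, the sketch below (hypothetical code, not part of the package) resolves a tag through a `TagService` and treats the package's typed `ErrTagUnknown` error as "tag absent" rather than a hard failure:

    package example // hypothetical package, for illustration only

    import (
    	"context"

    	"github.com/docker/distribution"
    )

    // resolveTag resolves a tag to its descriptor. It returns ok=false when
    // the tag simply does not exist, and a non-nil error for real failures.
    func resolveTag(ctx context.Context, tags distribution.TagService, tag string) (distribution.Descriptor, bool, error) {
    	desc, err := tags.Get(ctx, tag)
    	if err != nil {
    		// ErrTagUnknown is the typed error this package defines for
    		// missing tags (see errors.go).
    		if _, ok := err.(distribution.ErrTagUnknown); ok {
    			return distribution.Descriptor{}, false, nil
    		}
    		return distribution.Descriptor{}, false, err
    	}
    	return desc, true, nil
    }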
9,342
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/errors.go
package distribution

import (
	"errors"
	"fmt"
	"strings"

	"github.com/opencontainers/go-digest"
)

// ErrAccessDenied is returned when an access to a requested resource is
// denied.
var ErrAccessDenied = errors.New("access denied")

// ErrManifestNotModified is returned when a conditional manifest GetByTag
// returns nil due to the client indicating it has the latest version.
var ErrManifestNotModified = errors.New("manifest not modified")

// ErrUnsupported is returned when an unimplemented or unsupported action is
// performed.
var ErrUnsupported = errors.New("operation unsupported")

// ErrSchemaV1Unsupported is returned when a client tries to upload a schema v1
// manifest but the registry is configured to reject it.
var ErrSchemaV1Unsupported = errors.New("manifest schema v1 unsupported")

// ErrTagUnknown is returned if the given tag is not known by the tag service.
type ErrTagUnknown struct {
	Tag string
}

func (err ErrTagUnknown) Error() string {
	return fmt.Sprintf("unknown tag=%s", err.Tag)
}

// ErrRepositoryUnknown is returned if the named repository is not known by
// the registry.
type ErrRepositoryUnknown struct {
	Name string
}

func (err ErrRepositoryUnknown) Error() string {
	return fmt.Sprintf("unknown repository name=%s", err.Name)
}

// ErrRepositoryNameInvalid should be used to denote an invalid repository
// name. Reason may be set, indicating the cause of invalidity.
type ErrRepositoryNameInvalid struct {
	Name   string
	Reason error
}

func (err ErrRepositoryNameInvalid) Error() string {
	return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason)
}

// ErrManifestUnknown is returned if the manifest is not known by the
// registry.
type ErrManifestUnknown struct {
	Name string
	Tag  string
}

func (err ErrManifestUnknown) Error() string {
	return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag)
}

// ErrManifestUnknownRevision is returned when a manifest cannot be found by
// revision within a repository.
type ErrManifestUnknownRevision struct {
	Name     string
	Revision digest.Digest
}

func (err ErrManifestUnknownRevision) Error() string {
	return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision)
}

// ErrManifestUnverified is returned when the registry is unable to verify
// the manifest.
type ErrManifestUnverified struct{}

func (ErrManifestUnverified) Error() string {
	return "unverified manifest"
}

// ErrManifestVerification provides a type to collect errors encountered
// during manifest verification. Currently, it accepts errors of all types,
// but it may be narrowed to those involving manifest verification.
type ErrManifestVerification []error

func (errs ErrManifestVerification) Error() string {
	var parts []string
	for _, err := range errs {
		parts = append(parts, err.Error())
	}
	return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ","))
}

// ErrManifestBlobUnknown is returned when a referenced blob cannot be found.
type ErrManifestBlobUnknown struct {
	Digest digest.Digest
}

func (err ErrManifestBlobUnknown) Error() string {
	return fmt.Sprintf("unknown blob %v on manifest", err.Digest)
}

// ErrManifestNameInvalid should be used to denote an invalid manifest
// name. Reason may be set, indicating the cause of invalidity.
type ErrManifestNameInvalid struct {
	Name   string
	Reason error
}

func (err ErrManifestNameInvalid) Error() string {
	return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason)
}
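A hypothetical sketch (not part of the package) of how callers commonly discriminate these typed errors, for example to map them to a 404 at an API boundary:

    package example // hypothetical package, for illustration only

    import "github.com/docker/distribution"

    // isNotFound reports whether err is one of the "unknown ..." typed
    // errors defined above, which callers typically surface as a 404.
    func isNotFound(err error) bool {
    	switch err.(type) {
    	case distribution.ErrTagUnknown,
    		distribution.ErrRepositoryUnknown,
    		distribution.ErrManifestUnknown,
    		distribution.ErrManifestUnknownRevision,
    		distribution.ErrManifestBlobUnknown:
    		return true
    	}
    	return false
    }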
9,343
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/doc.go
// Package distribution will define the interfaces for the components of // docker distribution. The goal is to allow users to reliably package, ship // and store content related to docker images. // // This is currently a work in progress. More details are available in the // README.md. package distribution
9,344
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/vendor.conf
github.com/Azure/azure-sdk-for-go 4650843026a7fdec254a8d9cf893693a254edd0b github.com/Azure/go-autorest eaa7994b2278094c904d31993d26f56324db3052 github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4 github.com/aws/aws-sdk-go f831d5a0822a1ad72420ab18c6269bca1ddaf490 github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274 github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702 github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782 github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2 github.com/dgrijalva/jwt-go a601269ab70c205d26370c16f7c81e9017c14e04 github.com/docker/go-metrics 399ea8c73916000c64c2c76e8da00ca82f8387ab github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21 github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257 github.com/go-ini/ini 2ba15ac2dc9cdf88c110ec2dc0ced7fa45f5678c github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3 github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604 github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d github.com/marstr/guid 8bd9a64bf37eb297b492a4101fb28e80ac0b290f github.com/satori/go.uuid f58768cc1a7a7e77a3bd49e98cdd21419399b6a3 github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39 github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef github.com/ncw/swift a0320860b16212c2b59b4912bb6508cda1d7cee6 github.com/prometheus/client_golang c332b6f63c0658a65eca15c0e5247ded801cf564 github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563 github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd github.com/Shopify/logrus-bugsnag 577dee27f20dd8f1a529f82210094af593be12bd github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064 github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842 github.com/xenolf/lego a9d8cec0e6563575e5868a005359ac97911b5985 github.com/yvasiyarov/go-metrics 57bccd1ccd43f94bb17fdd8bf3007059b802f85e github.com/yvasiyarov/gorelic a9bba5b9ab508a086f9a12b8c51fab68478e2128 github.com/yvasiyarov/newrelic_platform_go b21fdbd4370f3717f3bbd2bf41c223bc273068e6 golang.org/x/crypto c10c31b5e94b6f7a0283272dc2bb27163dcea24b golang.org/x/net 4876518f9e71663000c348837735820161a42df7 golang.org/x/oauth2 045497edb6234273d67dbc25da3f2ddbc4c4cacf golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb google.golang.org/api 9bf6e6e569ff057f75d9604a46c52928f17d2b54 google.golang.org/appengine 12d5545dc1cfa6047a286d5e853841b6471f4c19 google.golang.org/cloud 975617b05ea8a58727e6c1a06b6161ff4185a9f2 google.golang.org/grpc d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994 gopkg.in/check.v1 64131543e7896d5bcc6bd5a76287eb75ea96c673 gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b gopkg.in/yaml.v2 v2.2.1 rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb github.com/opencontainers/image-spec ab7389ef9f50030c9b245bc16b981c7ddf192882
9,345
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/.mailmap
Stephen J Day <stephen.day@docker.com> Stephen Day <stevvooe@users.noreply.github.com> Stephen J Day <stephen.day@docker.com> Stephen Day <stevvooe@gmail.com> Olivier Gambier <olivier@docker.com> Olivier Gambier <dmp42@users.noreply.github.com> Brian Bland <brian.bland@docker.com> Brian Bland <r4nd0m1n4t0r@gmail.com> Brian Bland <brian.bland@docker.com> Brian Bland <brian.t.bland@gmail.com> Josh Hawn <josh.hawn@docker.com> Josh Hawn <jlhawn@berkeley.edu> Richard Scothern <richard.scothern@docker.com> Richard <richard.scothern@gmail.com> Richard Scothern <richard.scothern@docker.com> Richard Scothern <richard.scothern@gmail.com> Andrew Meredith <andymeredith@gmail.com> Andrew Meredith <kendru@users.noreply.github.com> harche <p.harshal@gmail.com> harche <harche@users.noreply.github.com> Jessie Frazelle <jessie@docker.com> <jfrazelle@users.noreply.github.com> Sharif Nassar <sharif@mrwacky.com> Sharif Nassar <mrwacky42@users.noreply.github.com> Sven Dowideit <SvenDowideit@home.org.au> Sven Dowideit <SvenDowideit@users.noreply.github.com> Vincent Giersch <vincent.giersch@ovh.net> Vincent Giersch <vincent@giersch.fr> davidli <wenquan.li@hp.com> davidli <wenquan.li@hpe.com> Omer Cohen <git@omer.io> Omer Cohen <git@omerc.net> Eric Yang <windfarer@gmail.com> Eric Yang <Windfarer@users.noreply.github.com> Nikita Tarasov <nikita@mygento.ru> Nikita <luckyraul@users.noreply.github.com> Yu Wang <yuwa@microsoft.com> yuwaMSFT2 <yuwa@microsoft.com> Yu Wang <yuwa@microsoft.com> Yu Wang (UC) <yuwa@microsoft.com> Olivier Gambier <olivier@docker.com> dmp <dmp@loaner.local> Olivier Gambier <olivier@docker.com> Olivier <o+github@gambier.email> Olivier Gambier <olivier@docker.com> Olivier <dmp42@users.noreply.github.com> Elsan Li 李楠 <elsanli@tencent.com> elsanli(李楠) <elsanli@tencent.com> Rui Cao <ruicao@alauda.io> ruicao <ruicao@alauda.io> Gwendolynne Barr <gwendolynne.barr@docker.com> gbarr01 <gwendolynne.barr@docker.com> Haibing Zhou 周海兵 <zhouhaibing089@gmail.com> zhouhaibing089 <zhouhaibing089@gmail.com> Feng Honglin <tifayuki@gmail.com> tifayuki <tifayuki@gmail.com> Helen Xie <xieyulin821@harmonycloud.cn> Helen-xie <xieyulin821@harmonycloud.cn> Mike Brown <brownwm@us.ibm.com> Mike Brown <mikebrow@users.noreply.github.com> Manish Tomar <manish.tomar@docker.com> Manish Tomar <manishtomar@users.noreply.github.com> Sakeven Jiang <jc5930@sina.cn> sakeven <jc5930@sina.cn>
9,346
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/Dockerfile
FROM golang:1.11-alpine AS build

ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution
ENV BUILDTAGS include_oss include_gcs

ARG GOOS=linux
ARG GOARCH=amd64
ARG GOARM=6

RUN set -ex \
    && apk add --no-cache make git file

WORKDIR $DISTRIBUTION_DIR
COPY . $DISTRIBUTION_DIR
RUN CGO_ENABLED=0 make PREFIX=/go clean binaries && file ./bin/registry | grep "statically linked"

FROM alpine

COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml
COPY --from=build /go/src/github.com/docker/distribution/bin/registry /bin/registry

VOLUME ["/var/lib/registry"]

EXPOSE 5000

ENTRYPOINT ["registry"]

CMD ["serve", "/etc/docker/registry/config.yml"]
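For reference, a typical (illustrative) way to build and run the image described by this Dockerfile; the tag `registry:dev` is an arbitrary example, and port 5000 matches the `EXPOSE` line above:

    $ docker build -t registry:dev .
    $ docker run -d -p 5000:5000 registry:dev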
9,347
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/manifests.go
package distribution

import (
	"context"
	"fmt"
	"mime"

	"github.com/opencontainers/go-digest"
)

// Manifest represents a registry object specifying a set of
// references and an optional target
type Manifest interface {
	// References returns a list of objects which make up this manifest.
	// A reference is anything which can be represented by a
	// distribution.Descriptor. These can consist of layers, resources or other
	// manifests.
	//
	// While no particular order is required, implementations should return
	// them from highest to lowest priority. For example, one might want to
	// return the base layer before the top layer.
	References() []Descriptor

	// Payload provides the serialized format of the manifest, in addition to
	// the media type.
	Payload() (mediaType string, payload []byte, err error)
}

// ManifestBuilder creates a manifest allowing one to include dependencies.
// Instances can be obtained from a version-specific manifest package. Manifest
// specific data is passed into the function which creates the builder.
type ManifestBuilder interface {
	// Build creates the manifest from its builder.
	Build(ctx context.Context) (Manifest, error)

	// References returns a list of objects which have been added to this
	// builder. The dependencies are returned in the order they were added,
	// which should be from base to head.
	References() []Descriptor

	// AppendReference includes the given object in the manifest after any
	// existing dependencies. If the add fails, such as when adding an
	// unsupported dependency, an error may be returned.
	//
	// The destination of the reference is dependent on the manifest type and
	// the dependency type.
	AppendReference(dependency Describable) error
}

// ManifestService describes operations on image manifests.
type ManifestService interface {
	// Exists returns true if the manifest exists.
	Exists(ctx context.Context, dgst digest.Digest) (bool, error)

	// Get retrieves the manifest specified by the given digest
	Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error)

	// Put creates or updates the given manifest returning the manifest digest
	Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error)

	// Delete removes the manifest specified by the given digest. Deleting
	// a manifest that doesn't exist will return ErrManifestNotFound
	Delete(ctx context.Context, dgst digest.Digest) error
}

// ManifestEnumerator enables iterating over manifests
type ManifestEnumerator interface {
	// Enumerate calls ingester for each manifest.
	Enumerate(ctx context.Context, ingester func(digest.Digest) error) error
}

// Describable is an interface for descriptors
type Describable interface {
	Descriptor() Descriptor
}

// ManifestMediaTypes returns the supported media types for manifests.
func ManifestMediaTypes() (mediaTypes []string) {
	for t := range mappings {
		if t != "" {
			mediaTypes = append(mediaTypes, t)
		}
	}
	return
}

// UnmarshalFunc implements manifest unmarshalling a given MediaType
type UnmarshalFunc func([]byte) (Manifest, Descriptor, error)

var mappings = make(map[string]UnmarshalFunc)

// UnmarshalManifest looks up manifest unmarshal functions based on
// MediaType
func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) {
	// Need to look up by the actual media type, not the raw contents of
	// the header. Strip semicolons and anything following them.
	var mediaType string
	if ctHeader != "" {
		var err error
		mediaType, _, err = mime.ParseMediaType(ctHeader)
		if err != nil {
			return nil, Descriptor{}, err
		}
	}

	unmarshalFunc, ok := mappings[mediaType]
	if !ok {
		unmarshalFunc, ok = mappings[""]
		if !ok {
			return nil, Descriptor{}, fmt.Errorf("unsupported manifest media type and no default available: %s", mediaType)
		}
	}

	return unmarshalFunc(p)
}

// RegisterManifestSchema registers an UnmarshalFunc for a given schema type. This
// should be called from specific manifest packages.
func RegisterManifestSchema(mediaType string, u UnmarshalFunc) error {
	if _, ok := mappings[mediaType]; ok {
		return fmt.Errorf("manifest media type registration would overwrite existing: %s", mediaType)
	}
	mappings[mediaType] = u
	return nil
}
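// Illustrative usage sketch, not part of the vendored file above: how a schema
// package might register an UnmarshalFunc and how dispatch by Content-Type
// works. The media type and fakeManifest type are invented for this example.
package main

import (
	"fmt"

	"github.com/docker/distribution"
)

// fakeManifest is a hypothetical Manifest used only for this sketch.
type fakeManifest struct{ payload []byte }

func (m *fakeManifest) References() []distribution.Descriptor { return nil }
func (m *fakeManifest) Payload() (string, []byte, error) {
	return "application/vnd.example.manifest.v1+json", m.payload, nil
}

func main() {
	// Register an UnmarshalFunc for the made-up media type; real schema
	// packages typically do this from an init() function.
	mediaType := "application/vnd.example.manifest.v1+json"
	_ = distribution.RegisterManifestSchema(mediaType, func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
		return &fakeManifest{payload: b}, distribution.Descriptor{MediaType: mediaType}, nil
	})

	// UnmarshalManifest strips Content-Type parameters before the lookup,
	// so a charset suffix still resolves to the registered handler.
	m, desc, err := distribution.UnmarshalManifest(mediaType+"; charset=utf-8", []byte(`{}`))
	fmt.Println(m != nil, desc.MediaType, err)
}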
9,348
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/.travis.yml
dist: trusty
sudo: required
# setup travis so that we can run containers for integration tests
services:
  - docker

language: go

go:
  - "1.11.x"

go_import_path: github.com/docker/distribution

addons:
  apt:
    packages:
      - python-minimal

env:
  - TRAVIS_GOOS=linux DOCKER_BUILDTAGS="include_oss include_gcs" TRAVIS_CGO_ENABLED=1

before_install:
  - uname -r
  - sudo apt-get -q update

install:
  - go get -u github.com/vbatts/git-validation
  # TODO: Add enforcement of license
  # - go get -u github.com/kunalkushwaha/ltag
  - cd $TRAVIS_BUILD_DIR

script:
  - export GOOS=$TRAVIS_GOOS
  - export CGO_ENABLED=$TRAVIS_CGO_ENABLED
  - DCO_VERBOSITY=-q script/validate/dco
  - GOOS=linux script/setup/install-dev-tools
  - script/validate/vendor
  - go build -i .
  - make check
  - make build
  - make binaries
  # Currently takes too long
  #- if [ "$GOOS" = "linux" ]; then make test-race ; fi
  - if [ "$GOOS" = "linux" ]; then make coverage ; fi

after_success:
  - bash <(curl -s https://codecov.io/bash) -F linux

before_deploy:
  # Run tests with storage driver configurations
9,349
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/blobs.go
package distribution

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"time"

	"github.com/docker/distribution/reference"
	"github.com/opencontainers/go-digest"
	"github.com/opencontainers/image-spec/specs-go/v1"
)

var (
	// ErrBlobExists returned when blob already exists
	ErrBlobExists = errors.New("blob exists")

	// ErrBlobDigestUnsupported when blob digest is an unsupported version.
	ErrBlobDigestUnsupported = errors.New("unsupported blob digest")

	// ErrBlobUnknown when blob is not found.
	ErrBlobUnknown = errors.New("unknown blob")

	// ErrBlobUploadUnknown returned when upload is not found.
	ErrBlobUploadUnknown = errors.New("blob upload unknown")

	// ErrBlobInvalidLength returned when the blob's length on commit does
	// not match the descriptor or is an invalid value.
	ErrBlobInvalidLength = errors.New("blob invalid length")
)

// ErrBlobInvalidDigest returned when digest check fails.
type ErrBlobInvalidDigest struct {
	Digest digest.Digest
	Reason error
}

func (err ErrBlobInvalidDigest) Error() string {
	return fmt.Sprintf("invalid digest for referenced layer: %v, %v", err.Digest, err.Reason)
}

// ErrBlobMounted returned when a blob is mounted from another repository
// instead of initiating an upload session.
type ErrBlobMounted struct {
	From       reference.Canonical
	Descriptor Descriptor
}

func (err ErrBlobMounted) Error() string {
	return fmt.Sprintf("blob mounted from: %v to: %v", err.From, err.Descriptor)
}

// Descriptor describes targeted content. Used in conjunction with a blob
// store, a descriptor can be used to fetch, store and target any kind of
// blob. The struct also describes the wire protocol format. Fields should
// only be added but never changed.
type Descriptor struct {
	// MediaType describe the type of the content. All text based formats are
	// encoded as utf-8.
	MediaType string `json:"mediaType,omitempty"`

	// Size in bytes of content.
	Size int64 `json:"size,omitempty"`

	// Digest uniquely identifies the content. A byte stream can be verified
	// against this digest.
	Digest digest.Digest `json:"digest,omitempty"`

	// URLs contains the source URLs of this content.
	URLs []string `json:"urls,omitempty"`

	// Annotations contains arbitrary metadata relating to the targeted content.
	Annotations map[string]string `json:"annotations,omitempty"`

	// Platform describes the platform which the image in the manifest runs on.
	// This should only be used when referring to a manifest.
	Platform *v1.Platform `json:"platform,omitempty"`

	// NOTE: Before adding a field here, please ensure that all
	// other options have been exhausted. Much of the type relationships
	// depend on the simplicity of this type.
}

// Descriptor returns the descriptor, to make it satisfy the Describable
// interface. Note that implementations of Describable are generally objects
// which can be described, not simply descriptors; this exception is in place
// to make it more convenient to pass actual descriptors to functions that
// expect Describable objects.
func (d Descriptor) Descriptor() Descriptor {
	return d
}

// BlobStatter makes blob descriptors available by digest. The service may
// provide a descriptor of a different digest if the provided digest is not
// canonical.
type BlobStatter interface {
	// Stat provides metadata about a blob identified by the digest. If the
	// blob is unknown to the describer, ErrBlobUnknown will be returned.
	Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error)
}

// BlobDeleter enables deleting blobs from storage.
type BlobDeleter interface {
	Delete(ctx context.Context, dgst digest.Digest) error
}

// BlobEnumerator enables iterating over blobs from storage
type BlobEnumerator interface {
	Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error
}

// BlobDescriptorService manages metadata about a blob by digest. Most
// implementations will not expose such an interface explicitly. Such mappings
// should be maintained by interacting with the BlobIngester. Hence, this is
// left off of BlobService and BlobStore.
type BlobDescriptorService interface {
	BlobStatter

	// SetDescriptor assigns the descriptor to the digest. The provided digest and
	// the digest in the descriptor must map to identical content but they may
	// differ on their algorithm. The descriptor must have the canonical
	// digest of the content and the digest algorithm must match the
	// annotator's canonical algorithm.
	//
	// Such a facility can be used to map blobs between digest domains, with
	// the restriction that the algorithm of the descriptor must match the
	// canonical algorithm (ie sha256) of the annotator.
	SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error

	// Clear enables descriptors to be unlinked
	Clear(ctx context.Context, dgst digest.Digest) error
}

// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService.
type BlobDescriptorServiceFactory interface {
	BlobAccessController(svc BlobDescriptorService) BlobDescriptorService
}

// ReadSeekCloser is the primary reader type for blob data, combining
// io.ReadSeeker with io.Closer.
type ReadSeekCloser interface {
	io.ReadSeeker
	io.Closer
}

// BlobProvider describes operations for getting blob data.
type BlobProvider interface {
	// Get returns the entire blob identified by digest along with the descriptor.
	Get(ctx context.Context, dgst digest.Digest) ([]byte, error)

	// Open provides a ReadSeekCloser to the blob identified by the provided
	// descriptor. If the blob is not known to the service, an error will be
	// returned.
	Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error)
}

// BlobServer can serve blobs via http.
type BlobServer interface {
	// ServeBlob attempts to serve the blob, identified by dgst, via http. The
	// service may decide to redirect the client elsewhere or serve the data
	// directly.
	//
	// This handler only issues successful responses, such as 2xx or 3xx,
	// meaning it serves data or issues a redirect. If the blob is not
	// available, an error will be returned and the caller may still issue a
	// response.
	//
	// The implementation may serve the same blob from a different digest
	// domain. The appropriate headers will be set for the blob, unless they
	// have already been set by the caller.
	ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error
}

// BlobIngester ingests blob data.
type BlobIngester interface {
	// Put inserts the content p into the blob service, returning a descriptor
	// or an error.
	Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error)

	// Create allocates a new blob writer to add a blob to this service. The
	// returned handle can be written to and later resumed using an opaque
	// identifier. With this approach, one can Close and Resume a BlobWriter
	// multiple times until the BlobWriter is committed or cancelled.
	Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error)

	// Resume attempts to resume a write to a blob, identified by an id.
	Resume(ctx context.Context, id string) (BlobWriter, error)
}

// BlobCreateOption is a general extensible function argument for blob creation
// methods. A BlobIngester may choose to honor any or none of the given
// BlobCreateOptions, which can be specific to the implementation of the
// BlobIngester receiving them.
// TODO (brianbland): unify this with ManifestServiceOption in the future
type BlobCreateOption interface {
	Apply(interface{}) error
}

// CreateOptions is a collection of blob creation modifiers relevant to general
// blob storage intended to be configured by the BlobCreateOption.Apply method.
type CreateOptions struct {
	Mount struct {
		ShouldMount bool
		From        reference.Canonical
		// Stat allows to pass precalculated descriptor to link and return.
		// Blob access check will be skipped if set.
		Stat *Descriptor
	}
}

// BlobWriter provides a handle for inserting data into a blob store.
// Instances should be obtained from BlobWriteService.Writer and
// BlobWriteService.Resume. If supported by the store, a writer can be
// recovered with the id.
type BlobWriter interface {
	io.WriteCloser
	io.ReaderFrom

	// Size returns the number of bytes written to this blob.
	Size() int64

	// ID returns the identifier for this writer. The ID can be used with the
	// Blob service to later resume the write.
	ID() string

	// StartedAt returns the time this blob write was started.
	StartedAt() time.Time

	// Commit completes the blob writer process. The content is verified
	// against the provided provisional descriptor, which may result in an
	// error. Depending on the implementation, written data may be validated
	// against the provisional descriptor fields. If MediaType is not present,
	// the implementation may reject the commit or assign "application/octet-
	// stream" to the blob. The returned descriptor may have a different
	// digest depending on the blob store, referred to as the canonical
	// descriptor.
	Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error)

	// Cancel ends the blob write without storing any data and frees any
	// associated resources. Any data written thus far will be lost. Cancel
	// implementations should allow multiple calls even after a commit that
	// result in a no-op. This allows use of Cancel in a defer statement,
	// increasing the assurance that it is correctly called.
	Cancel(ctx context.Context) error
}

// BlobService combines the operations to access, read and write blobs. This
// can be used to describe remote blob services.
type BlobService interface {
	BlobStatter
	BlobProvider
	BlobIngester
}

// BlobStore represent the entire suite of blob related operations. Such an
// implementation can access, read, write, delete and serve blobs.
type BlobStore interface {
	BlobService
	BlobServer
	BlobDeleter
}
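// Illustrative usage sketch, not part of the vendored file above: a round trip
// through the BlobIngester / BlobProvider halves of BlobStore. The function
// name is invented, and bs is assumed to be any working implementation.
package main

import (
	"context"
	"io/ioutil"

	"github.com/docker/distribution"
)

// blobRoundTrip stores p in bs via Put, which returns the canonical
// descriptor, then reads the content back through Open by digest.
func blobRoundTrip(ctx context.Context, bs distribution.BlobStore, p []byte) ([]byte, error) {
	desc, err := bs.Put(ctx, "application/octet-stream", p)
	if err != nil {
		return nil, err
	}
	rc, err := bs.Open(ctx, desc.Digest)
	if err != nil {
		return nil, err
	}
	defer rc.Close()
	return ioutil.ReadAll(rc)
}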
9,350
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/digestset/set.go
package digestset

import (
	"errors"
	"sort"
	"strings"
	"sync"

	digest "github.com/opencontainers/go-digest"
)

var (
	// ErrDigestNotFound is used when a matching digest
	// could not be found in a set.
	ErrDigestNotFound = errors.New("digest not found")

	// ErrDigestAmbiguous is used when multiple digests
	// are found in a set. None of the matching digests
	// should be considered valid matches.
	ErrDigestAmbiguous = errors.New("ambiguous digest string")
)

// Set is used to hold a unique set of digests which
// may be easily referenced by a string
// representation of the digest as well as a short representation.
// The uniqueness of the short representation is based on other
// digests in the set. If digests are omitted from this set,
// collisions in a larger set may not be detected, therefore it
// is important to always do short representation lookups on
// the complete set of digests. To mitigate collisions, an
// appropriately long short code should be used.
type Set struct {
	mutex   sync.RWMutex
	entries digestEntries
}

// NewSet creates an empty set of digests
// which may have digests added.
func NewSet() *Set {
	return &Set{
		entries: digestEntries{},
	}
}

// checkShortMatch checks whether two digests match as either whole
// values or short values. This function does not test equality,
// rather whether the second value could match against the first
// value.
func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool {
	if len(hex) == len(shortHex) {
		if hex != shortHex {
			return false
		}
		if len(shortAlg) > 0 && string(alg) != shortAlg {
			return false
		}
	} else if !strings.HasPrefix(hex, shortHex) {
		return false
	} else if len(shortAlg) > 0 && string(alg) != shortAlg {
		return false
	}
	return true
}

// Lookup looks for a digest matching the given string representation.
// If no digests could be found ErrDigestNotFound will be returned
// with an empty digest value. If multiple matches are found
// ErrDigestAmbiguous will be returned with an empty digest value.
func (dst *Set) Lookup(d string) (digest.Digest, error) {
	dst.mutex.RLock()
	defer dst.mutex.RUnlock()
	if len(dst.entries) == 0 {
		return "", ErrDigestNotFound
	}
	var (
		searchFunc func(int) bool
		alg        digest.Algorithm
		hex        string
	)
	dgst, err := digest.Parse(d)
	if err == digest.ErrDigestInvalidFormat {
		hex = d
		searchFunc = func(i int) bool {
			return dst.entries[i].val >= d
		}
	} else {
		hex = dgst.Hex()
		alg = dgst.Algorithm()
		searchFunc = func(i int) bool {
			if dst.entries[i].val == hex {
				return dst.entries[i].alg >= alg
			}
			return dst.entries[i].val >= hex
		}
	}
	idx := sort.Search(len(dst.entries), searchFunc)
	if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
		return "", ErrDigestNotFound
	}
	if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
		return dst.entries[idx].digest, nil
	}
	if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
		return "", ErrDigestAmbiguous
	}

	return dst.entries[idx].digest, nil
}

// Add adds the given digest to the set. An error will be returned
// if the given digest is invalid. If the digest already exists in the
// set, this operation will be a no-op.
func (dst *Set) Add(d digest.Digest) error {
	if err := d.Validate(); err != nil {
		return err
	}
	dst.mutex.Lock()
	defer dst.mutex.Unlock()
	entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
	searchFunc := func(i int) bool {
		if dst.entries[i].val == entry.val {
			return dst.entries[i].alg >= entry.alg
		}
		return dst.entries[i].val >= entry.val
	}
	idx := sort.Search(len(dst.entries), searchFunc)
	if idx == len(dst.entries) {
		dst.entries = append(dst.entries, entry)
		return nil
	} else if dst.entries[idx].digest == d {
		return nil
	}

	entries := append(dst.entries, nil)
	copy(entries[idx+1:], entries[idx:len(entries)-1])
	entries[idx] = entry
	dst.entries = entries
	return nil
}

// Remove removes the given digest from the set. An err will be
// returned if the given digest is invalid. If the digest does
// not exist in the set, this operation will be a no-op.
func (dst *Set) Remove(d digest.Digest) error {
	if err := d.Validate(); err != nil {
		return err
	}
	dst.mutex.Lock()
	defer dst.mutex.Unlock()
	entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
	searchFunc := func(i int) bool {
		if dst.entries[i].val == entry.val {
			return dst.entries[i].alg >= entry.alg
		}
		return dst.entries[i].val >= entry.val
	}
	idx := sort.Search(len(dst.entries), searchFunc)
	// Not found if idx is after or value at idx is not digest
	if idx == len(dst.entries) || dst.entries[idx].digest != d {
		return nil
	}

	entries := dst.entries
	copy(entries[idx:], entries[idx+1:])
	entries = entries[:len(entries)-1]
	dst.entries = entries

	return nil
}

// All returns all the digests in the set
func (dst *Set) All() []digest.Digest {
	dst.mutex.RLock()
	defer dst.mutex.RUnlock()
	retValues := make([]digest.Digest, len(dst.entries))
	for i := range dst.entries {
		retValues[i] = dst.entries[i].digest
	}

	return retValues
}

// ShortCodeTable returns a map of Digest to unique short codes. The
// length represents the minimum value, the maximum length may be the
// entire value of digest if uniqueness cannot be achieved without the
// full value. This function will attempt to make short codes as short
// as possible to be unique.
func ShortCodeTable(dst *Set, length int) map[digest.Digest]string {
	dst.mutex.RLock()
	defer dst.mutex.RUnlock()
	m := make(map[digest.Digest]string, len(dst.entries))
	l := length
	resetIdx := 0
	for i := 0; i < len(dst.entries); i++ {
		var short string
		extended := true
		for extended {
			extended = false
			if len(dst.entries[i].val) <= l {
				short = dst.entries[i].digest.String()
			} else {
				short = dst.entries[i].val[:l]
				for j := i + 1; j < len(dst.entries); j++ {
					if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) {
						if j > resetIdx {
							resetIdx = j
						}
						extended = true
					} else {
						break
					}
				}
				if extended {
					l++
				}
			}
		}
		m[dst.entries[i].digest] = short
		if i >= resetIdx {
			l = length
		}
	}
	return m
}

type digestEntry struct {
	alg    digest.Algorithm
	val    string
	digest digest.Digest
}

type digestEntries []*digestEntry

func (d digestEntries) Len() int {
	return len(d)
}

func (d digestEntries) Less(i, j int) bool {
	if d[i].val != d[j].val {
		return d[i].val < d[j].val
	}
	return d[i].alg < d[j].alg
}

func (d digestEntries) Swap(i, j int) {
	d[i], d[j] = d[j], d[i]
}
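// Illustrative usage sketch, not part of the vendored file above: adding
// digests to a Set, looking one up by a short hex prefix, and building a
// short-code table. The digests are computed here only to have valid,
// distinct sha256 values; an 8-character prefix is assumed to be unambiguous.
package main

import (
	"fmt"

	"github.com/docker/distribution/digestset"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	s := digestset.NewSet()

	d1 := digest.FromString("first blob")
	d2 := digest.FromString("second blob")
	if err := s.Add(d1); err != nil {
		panic(err)
	}
	if err := s.Add(d2); err != nil {
		panic(err)
	}

	// Lookup accepts a short hex prefix as well as the full
	// algorithm-qualified digest string.
	got, err := s.Lookup(d1.Hex()[:8])
	fmt.Println(got == d1, err)

	// ShortCodeTable picks the shortest unique prefix of at least 8 chars.
	for dgst, short := range digestset.ShortCodeTable(s, 8) {
		fmt.Println(short, "->", dgst)
	}
}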
9,351
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/client/repository.go
package client

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"

	"github.com/docker/distribution"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/api/v2"
	"github.com/docker/distribution/registry/client/transport"
	"github.com/docker/distribution/registry/storage/cache"
	"github.com/docker/distribution/registry/storage/cache/memory"
	"github.com/opencontainers/go-digest"
)

// Registry provides an interface for calling Repositories, which returns a catalog of repositories.
type Registry interface {
	Repositories(ctx context.Context, repos []string, last string) (n int, err error)
}

// checkHTTPRedirect is a callback that can manipulate redirected HTTP
// requests. It is used to preserve Accept and Range headers.
func checkHTTPRedirect(req *http.Request, via []*http.Request) error {
	if len(via) >= 10 {
		return errors.New("stopped after 10 redirects")
	}

	if len(via) > 0 {
		for headerName, headerVals := range via[0].Header {
			if headerName != "Accept" && headerName != "Range" {
				continue
			}
			for _, val := range headerVals {
				// Don't add to redirected request if redirected
				// request already has a header with the same
				// name and value.
				hasValue := false
				for _, existingVal := range req.Header[headerName] {
					if existingVal == val {
						hasValue = true
						break
					}
				}
				if !hasValue {
					req.Header.Add(headerName, val)
				}
			}
		}
	}

	return nil
}

// NewRegistry creates a registry namespace which can be used to get a listing of repositories
func NewRegistry(baseURL string, transport http.RoundTripper) (Registry, error) {
	ub, err := v2.NewURLBuilderFromString(baseURL, false)
	if err != nil {
		return nil, err
	}

	client := &http.Client{
		Transport:     transport,
		Timeout:       1 * time.Minute,
		CheckRedirect: checkHTTPRedirect,
	}

	return &registry{
		client: client,
		ub:     ub,
	}, nil
}

type registry struct {
	client *http.Client
	ub     *v2.URLBuilder
}

// Repositories returns a lexicographically sorted catalog given a base URL. The 'entries' slice will be filled up to the size
// of the slice, starting at the value provided in 'last'. The number of entries will be returned along with io.EOF if there
// are no more entries
func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) {
	var numFilled int
	var returnErr error

	values := buildCatalogValues(len(entries), last)
	u, err := r.ub.BuildCatalogURL(values)
	if err != nil {
		return 0, err
	}

	resp, err := r.client.Get(u)
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()

	if SuccessStatus(resp.StatusCode) {
		var ctlg struct {
			Repositories []string `json:"repositories"`
		}
		decoder := json.NewDecoder(resp.Body)

		if err := decoder.Decode(&ctlg); err != nil {
			return 0, err
		}

		for cnt := range ctlg.Repositories {
			entries[cnt] = ctlg.Repositories[cnt]
		}
		numFilled = len(ctlg.Repositories)

		link := resp.Header.Get("Link")
		if link == "" {
			returnErr = io.EOF
		}
	} else {
		return 0, HandleErrorResponse(resp)
	}

	return numFilled, returnErr
}

// NewRepository creates a new Repository for the given repository name and base URL.
func NewRepository(name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) {
	ub, err := v2.NewURLBuilderFromString(baseURL, false)
	if err != nil {
		return nil, err
	}

	client := &http.Client{
		Transport:     transport,
		CheckRedirect: checkHTTPRedirect,
		// TODO(dmcgowan): create cookie jar
	}

	return &repository{
		client: client,
		ub:     ub,
		name:   name,
	}, nil
}

type repository struct {
	client *http.Client
	ub     *v2.URLBuilder
	name   reference.Named
}

func (r *repository) Named() reference.Named {
	return r.name
}

func (r *repository) Blobs(ctx context.Context) distribution.BlobStore {
	statter := &blobStatter{
		name:   r.name,
		ub:     r.ub,
		client: r.client,
	}
	return &blobs{
		name:    r.name,
		ub:      r.ub,
		client:  r.client,
		statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter),
	}
}

func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
	// todo(richardscothern): options should be sent over the wire
	return &manifests{
		name:   r.name,
		ub:     r.ub,
		client: r.client,
		etags:  make(map[string]string),
	}, nil
}

func (r *repository) Tags(ctx context.Context) distribution.TagService {
	return &tags{
		client: r.client,
		ub:     r.ub,
		name:   r.Named(),
	}
}

// tags implements remote tagging operations.
type tags struct {
	client *http.Client
	ub     *v2.URLBuilder
	name   reference.Named
}

// All returns all tags
func (t *tags) All(ctx context.Context) ([]string, error) {
	var tags []string

	listURLStr, err := t.ub.BuildTagsURL(t.name)
	if err != nil {
		return tags, err
	}

	listURL, err := url.Parse(listURLStr)
	if err != nil {
		return tags, err
	}

	for {
		resp, err := t.client.Get(listURL.String())
		if err != nil {
			return tags, err
		}
		defer resp.Body.Close()

		if SuccessStatus(resp.StatusCode) {
			b, err := ioutil.ReadAll(resp.Body)
			if err != nil {
				return tags, err
			}

			tagsResponse := struct {
				Tags []string `json:"tags"`
			}{}
			if err := json.Unmarshal(b, &tagsResponse); err != nil {
				return tags, err
			}
			tags = append(tags, tagsResponse.Tags...)
			if link := resp.Header.Get("Link"); link != "" {
				linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>")
				linkURL, err := url.Parse(linkURLStr)
				if err != nil {
					return tags, err
				}

				listURL = listURL.ResolveReference(linkURL)
			} else {
				return tags, nil
			}
		} else {
			return tags, HandleErrorResponse(resp)
		}
	}
}

func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) {
	desc := distribution.Descriptor{}
	headers := response.Header

	ctHeader := headers.Get("Content-Type")
	if ctHeader == "" {
		return distribution.Descriptor{}, errors.New("missing or empty Content-Type header")
	}
	desc.MediaType = ctHeader

	digestHeader := headers.Get("Docker-Content-Digest")
	if digestHeader == "" {
		bytes, err := ioutil.ReadAll(response.Body)
		if err != nil {
			return distribution.Descriptor{}, err
		}
		_, desc, err := distribution.UnmarshalManifest(ctHeader, bytes)
		if err != nil {
			return distribution.Descriptor{}, err
		}
		return desc, nil
	}

	dgst, err := digest.Parse(digestHeader)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	desc.Digest = dgst

	lengthHeader := headers.Get("Content-Length")
	if lengthHeader == "" {
		return distribution.Descriptor{}, errors.New("missing or empty Content-Length header")
	}
	length, err := strconv.ParseInt(lengthHeader, 10, 64)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	desc.Size = length

	return desc, nil
}

// Get issues a HEAD request for a Manifest against its named endpoint in order
// to construct a descriptor for the tag. If the registry doesn't support
// HEADing a manifest, it falls back to GET.
func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) {
	ref, err := reference.WithTag(t.name, tag)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	u, err := t.ub.BuildManifestURL(ref)
	if err != nil {
		return distribution.Descriptor{}, err
	}

	newRequest := func(method string) (*http.Response, error) {
		req, err := http.NewRequest(method, u, nil)
		if err != nil {
			return nil, err
		}

		for _, t := range distribution.ManifestMediaTypes() {
			req.Header.Add("Accept", t)
		}
		resp, err := t.client.Do(req)
		return resp, err
	}

	resp, err := newRequest("HEAD")
	if err != nil {
		return distribution.Descriptor{}, err
	}
	defer resp.Body.Close()

	switch {
	case resp.StatusCode >= 200 && resp.StatusCode < 400 && len(resp.Header.Get("Docker-Content-Digest")) > 0:
		// if the response is a success AND a Docker-Content-Digest can be retrieved from the headers
		return descriptorFromResponse(resp)
	default:
		// if the response is an error - there will be no body to decode.
		// Issue a GET request:
		//   - for data from a server that does not handle HEAD
		//   - to get error details in case of a failure
		resp, err = newRequest("GET")
		if err != nil {
			return distribution.Descriptor{}, err
		}
		defer resp.Body.Close()

		if resp.StatusCode >= 200 && resp.StatusCode < 400 {
			return descriptorFromResponse(resp)
		}
		return distribution.Descriptor{}, HandleErrorResponse(resp)
	}
}

func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) {
	panic("not implemented")
}

func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
	panic("not implemented")
}

func (t *tags) Untag(ctx context.Context, tag string) error {
	panic("not implemented")
}

type manifests struct {
	name   reference.Named
	ub     *v2.URLBuilder
	client *http.Client
	etags  map[string]string
}

func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
	ref, err := reference.WithDigest(ms.name, dgst)
	if err != nil {
		return false, err
	}
	u, err := ms.ub.BuildManifestURL(ref)
	if err != nil {
		return false, err
	}

	resp, err := ms.client.Head(u)
	if err != nil {
		return false, err
	}

	if SuccessStatus(resp.StatusCode) {
		return true, nil
	} else if resp.StatusCode == http.StatusNotFound {
		return false, nil
	}
	return false, HandleErrorResponse(resp)
}

// AddEtagToTag allows a client to supply an eTag to Get which will be
// used for a conditional HTTP request. If the eTag matches, a nil manifest
// and ErrManifestNotModified error will be returned. etag is automatically
// quoted when added to this map.
func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption {
	return etagOption{tag, etag}
}

type etagOption struct{ tag, etag string }

func (o etagOption) Apply(ms distribution.ManifestService) error {
	if ms, ok := ms.(*manifests); ok {
		ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag)
		return nil
	}
	return fmt.Errorf("etag option is a client-only option")
}

// ReturnContentDigest allows a client to receive the content digest on
// a successful request from the 'Docker-Content-Digest' header. This
// returned digest represents the digest which the registry uses
// to refer to the content and can be used to delete the content.
func ReturnContentDigest(dgst *digest.Digest) distribution.ManifestServiceOption {
	return contentDigestOption{dgst}
}

type contentDigestOption struct{ digest *digest.Digest }

func (o contentDigestOption) Apply(ms distribution.ManifestService) error {
	return nil
}

func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
	var (
		digestOrTag string
		ref         reference.Named
		err         error
		contentDgst *digest.Digest
		mediaTypes  []string
	)

	for _, option := range options {
		switch opt := option.(type) {
		case distribution.WithTagOption:
			digestOrTag = opt.Tag
			ref, err = reference.WithTag(ms.name, opt.Tag)
			if err != nil {
				return nil, err
			}
		case contentDigestOption:
			contentDgst = opt.digest
		case distribution.WithManifestMediaTypesOption:
			mediaTypes = opt.MediaTypes
		default:
			err := option.Apply(ms)
			if err != nil {
				return nil, err
			}
		}
	}

	if digestOrTag == "" {
		digestOrTag = dgst.String()
		ref, err = reference.WithDigest(ms.name, dgst)
		if err != nil {
			return nil, err
		}
	}

	if len(mediaTypes) == 0 {
		mediaTypes = distribution.ManifestMediaTypes()
	}

	u, err := ms.ub.BuildManifestURL(ref)
	if err != nil {
		return nil, err
	}

	req, err := http.NewRequest("GET", u, nil)
	if err != nil {
		return nil, err
	}

	for _, t := range mediaTypes {
		req.Header.Add("Accept", t)
	}

	if _, ok := ms.etags[digestOrTag]; ok {
		req.Header.Set("If-None-Match", ms.etags[digestOrTag])
	}

	resp, err := ms.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusNotModified {
		return nil, distribution.ErrManifestNotModified
	} else if SuccessStatus(resp.StatusCode) {
		if contentDgst != nil {
			dgst, err := digest.Parse(resp.Header.Get("Docker-Content-Digest"))
			if err == nil {
				*contentDgst = dgst
			}
		}
		mt := resp.Header.Get("Content-Type")
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return nil, err
		}
		m, _, err := distribution.UnmarshalManifest(mt, body)
		if err != nil {
			return nil, err
		}
		return m, nil
	}
	return nil, HandleErrorResponse(resp)
}

// Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the
// tag name in order to build the correct upload URL.
func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) {
	ref := ms.name
	var tagged bool

	for _, option := range options {
		if opt, ok := option.(distribution.WithTagOption); ok {
			var err error
			ref, err = reference.WithTag(ref, opt.Tag)
			if err != nil {
				return "", err
			}
			tagged = true
		} else {
			err := option.Apply(ms)
			if err != nil {
				return "", err
			}
		}
	}
	mediaType, p, err := m.Payload()
	if err != nil {
		return "", err
	}

	if !tagged {
		// generate a canonical digest and Put by digest
		_, d, err := distribution.UnmarshalManifest(mediaType, p)
		if err != nil {
			return "", err
		}
		ref, err = reference.WithDigest(ref, d.Digest)
		if err != nil {
			return "", err
		}
	}

	manifestURL, err := ms.ub.BuildManifestURL(ref)
	if err != nil {
		return "", err
	}

	putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p))
	if err != nil {
		return "", err
	}

	putRequest.Header.Set("Content-Type", mediaType)

	resp, err := ms.client.Do(putRequest)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	if SuccessStatus(resp.StatusCode) {
		dgstHeader := resp.Header.Get("Docker-Content-Digest")
		dgst, err := digest.Parse(dgstHeader)
		if err != nil {
			return "", err
		}

		return dgst, nil
	}

	return "", HandleErrorResponse(resp)
}

func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error {
	ref, err := reference.WithDigest(ms.name, dgst)
	if err != nil {
		return err
	}
	u, err := ms.ub.BuildManifestURL(ref)
	if err != nil {
		return err
	}
	req, err := http.NewRequest("DELETE", u, nil)
	if err != nil {
		return err
	}

	resp, err := ms.client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if SuccessStatus(resp.StatusCode) {
		return nil
	}
	return HandleErrorResponse(resp)
}

// todo(richardscothern): Restore interface and implementation with merge of #1050
/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) {
	panic("not supported")
}*/

type blobs struct {
	name   reference.Named
	ub     *v2.URLBuilder
	client *http.Client

	statter distribution.BlobDescriptorService
	distribution.BlobDeleter
}

func sanitizeLocation(location, base string) (string, error) {
	baseURL, err := url.Parse(base)
	if err != nil {
		return "", err
	}

	locationURL, err := url.Parse(location)
	if err != nil {
		return "", err
	}

	return baseURL.ResolveReference(locationURL).String(), nil
}

func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
	return bs.statter.Stat(ctx, dgst)
}

func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
	reader, err := bs.Open(ctx, dgst)
	if err != nil {
		return nil, err
	}
	defer reader.Close()

	return ioutil.ReadAll(reader)
}

func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
	ref, err := reference.WithDigest(bs.name, dgst)
	if err != nil {
		return nil, err
	}
	blobURL, err := bs.ub.BuildBlobURL(ref)
	if err != nil {
		return nil, err
	}

	return transport.NewHTTPReadSeeker(bs.client, blobURL,
		func(resp *http.Response) error {
			if resp.StatusCode == http.StatusNotFound {
				return distribution.ErrBlobUnknown
			}
			return HandleErrorResponse(resp)
		}), nil
}

func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
	panic("not implemented")
}

func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
	writer, err := bs.Create(ctx)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	dgstr := digest.Canonical.Digester()
	n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash()))
	if err != nil {
		return distribution.Descriptor{}, err
	}
	if n < int64(len(p)) {
		return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p))
	}

	desc := distribution.Descriptor{
		MediaType: mediaType,
		Size:      int64(len(p)),
		Digest:    dgstr.Digest(),
	}

	return writer.Commit(ctx, desc)
}

type optionFunc func(interface{}) error

func (f optionFunc) Apply(v interface{}) error {
	return f(v)
}

// WithMountFrom returns a BlobCreateOption which designates that the blob should be
// mounted from the given canonical reference.
func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption {
	return optionFunc(func(v interface{}) error {
		opts, ok := v.(*distribution.CreateOptions)
		if !ok {
			return fmt.Errorf("unexpected options type: %T", v)
		}

		opts.Mount.ShouldMount = true
		opts.Mount.From = ref

		return nil
	})
}

func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
	var opts distribution.CreateOptions

	for _, option := range options {
		err := option.Apply(&opts)
		if err != nil {
			return nil, err
		}
	}

	var values []url.Values

	if opts.Mount.ShouldMount {
		values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}})
	}

	u, err := bs.ub.BuildBlobUploadURL(bs.name, values...)
	if err != nil {
		return nil, err
	}

	resp, err := bs.client.Post(u, "", nil)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusCreated:
		desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest())
		if err != nil {
			return nil, err
		}
		return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc}
	case http.StatusAccepted:
		// TODO(dmcgowan): Check for invalid UUID
		uuid := resp.Header.Get("Docker-Upload-UUID")
		location, err := sanitizeLocation(resp.Header.Get("Location"), u)
		if err != nil {
			return nil, err
		}

		return &httpBlobUpload{
			statter:   bs.statter,
			client:    bs.client,
			uuid:      uuid,
			startedAt: time.Now(),
			location:  location,
		}, nil
	default:
		return nil, HandleErrorResponse(resp)
	}
}

func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
	panic("not implemented")
}

func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error {
	return bs.statter.Clear(ctx, dgst)
}

type blobStatter struct {
	name   reference.Named
	ub     *v2.URLBuilder
	client *http.Client
}

func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
	ref, err := reference.WithDigest(bs.name, dgst)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	u, err := bs.ub.BuildBlobURL(ref)
	if err != nil {
		return distribution.Descriptor{}, err
	}

	resp, err := bs.client.Head(u)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	defer resp.Body.Close()

	if SuccessStatus(resp.StatusCode) {
		lengthHeader := resp.Header.Get("Content-Length")
		if lengthHeader == "" {
			return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u)
		}

		length, err := strconv.ParseInt(lengthHeader, 10, 64)
		if err != nil {
			return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err)
		}

		return distribution.Descriptor{
			MediaType: resp.Header.Get("Content-Type"),
			Size:      length,
			Digest:    dgst,
		}, nil
	} else if resp.StatusCode == http.StatusNotFound {
		return distribution.Descriptor{}, distribution.ErrBlobUnknown
	}
	return distribution.Descriptor{}, HandleErrorResponse(resp)
}

func buildCatalogValues(maxEntries int, last string) url.Values {
	values := url.Values{}

	if maxEntries > 0 {
		values.Add("n", strconv.Itoa(maxEntries))
	}

	if last != "" {
		values.Add("last", last)
	}

	return values
}

func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
	ref, err := reference.WithDigest(bs.name, dgst)
	if err != nil {
		return err
	}
	blobURL, err := bs.ub.BuildBlobURL(ref)
	if err != nil {
		return err
	}

	req, err := http.NewRequest("DELETE", blobURL, nil)
	if err != nil {
		return err
	}

	resp, err := bs.client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if SuccessStatus(resp.StatusCode) {
		return nil
	}
	return HandleErrorResponse(resp)
}

func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
	return nil
}
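// Illustrative usage sketch, not part of the vendored file above: building a
// repository client and listing its tags. The registry URL and repository
// name are placeholders.
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/client"
)

func main() {
	ctx := context.Background()

	named, err := reference.WithName("library/alpine")
	if err != nil {
		panic(err)
	}
	repo, err := client.NewRepository(named, "https://registry.example.com", http.DefaultTransport)
	if err != nil {
		panic(err)
	}

	// All follows Link headers across pages until the catalog is exhausted.
	tags, err := repo.Tags(ctx).All(ctx)
	fmt.Println(tags, err)
}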
9,352
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/client/errors.go
package client

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"

	"github.com/docker/distribution/registry/api/errcode"
	"github.com/docker/distribution/registry/client/auth/challenge"
)

// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty
// errcode.Errors slice.
var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body")

// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is
// returned when making a registry api call.
type UnexpectedHTTPStatusError struct {
	Status string
}

func (e *UnexpectedHTTPStatusError) Error() string {
	return fmt.Sprintf("received unexpected HTTP status: %s", e.Status)
}

// UnexpectedHTTPResponseError is returned when an expected HTTP status code
// is returned, but the content was unexpected and failed to be parsed.
type UnexpectedHTTPResponseError struct {
	ParseErr   error
	StatusCode int
	Response   []byte
}

func (e *UnexpectedHTTPResponseError) Error() string {
	return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response))
}

func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
	var errors errcode.Errors
	body, err := ioutil.ReadAll(r)
	if err != nil {
		return err
	}

	// For backward compatibility, handle irregularly formatted
	// messages that contain a "details" field.
	var detailsErr struct {
		Details string `json:"details"`
	}
	err = json.Unmarshal(body, &detailsErr)
	if err == nil && detailsErr.Details != "" {
		switch statusCode {
		case http.StatusUnauthorized:
			return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details)
		case http.StatusTooManyRequests:
			return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details)
		default:
			return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details)
		}
	}

	if err := json.Unmarshal(body, &errors); err != nil {
		return &UnexpectedHTTPResponseError{
			ParseErr:   err,
			StatusCode: statusCode,
			Response:   body,
		}
	}

	if len(errors) == 0 {
		// If there was no error specified in the body, return
		// UnexpectedHTTPResponseError.
		return &UnexpectedHTTPResponseError{
			ParseErr:   ErrNoErrorsInBody,
			StatusCode: statusCode,
			Response:   body,
		}
	}

	return errors
}

func makeErrorList(err error) []error {
	if errL, ok := err.(errcode.Errors); ok {
		return []error(errL)
	}
	return []error{err}
}

func mergeErrors(err1, err2 error) error {
	return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...))
}

// HandleErrorResponse returns error parsed from HTTP response for an
// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An
// UnexpectedHTTPStatusError is returned for response codes outside of the
// expected range.
func HandleErrorResponse(resp *http.Response) error {
	if resp.StatusCode >= 400 && resp.StatusCode < 500 {
		// Check for OAuth errors within the `WWW-Authenticate` header first
		// See https://tools.ietf.org/html/rfc6750#section-3
		for _, c := range challenge.ResponseChallenges(resp) {
			if c.Scheme == "bearer" {
				var err errcode.Error
				// codes defined at https://tools.ietf.org/html/rfc6750#section-3.1
				switch c.Parameters["error"] {
				case "invalid_token":
					err.Code = errcode.ErrorCodeUnauthorized
				case "insufficient_scope":
					err.Code = errcode.ErrorCodeDenied
				default:
					continue
				}
				if description := c.Parameters["error_description"]; description != "" {
					err.Message = description
				} else {
					err.Message = err.Code.Message()
				}

				return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body))
			}
		}
		err := parseHTTPErrorResponse(resp.StatusCode, resp.Body)
		if uErr, ok := err.(*UnexpectedHTTPResponseError); ok && resp.StatusCode == 401 {
			return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response)
		}
		return err
	}
	return &UnexpectedHTTPStatusError{Status: resp.Status}
}

// SuccessStatus returns true if the argument is a successful HTTP response
// code (in the range 200 - 399 inclusive).
func SuccessStatus(status int) bool {
	return status >= 200 && status <= 399
}
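// Illustrative usage sketch, not part of the vendored file above: converting a
// non-success registry response into a typed error. The URL is a placeholder.
package main

import (
	"fmt"
	"net/http"

	"github.com/docker/distribution/registry/client"
)

// checkResponse returns nil for 2xx/3xx responses and a parsed registry
// error (or UnexpectedHTTPStatusError) otherwise.
func checkResponse(resp *http.Response) error {
	if client.SuccessStatus(resp.StatusCode) {
		return nil
	}
	return client.HandleErrorResponse(resp)
}

func main() {
	resp, err := http.Get("https://registry.example.com/v2/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(checkResponse(resp))
}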
9,353
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/client/blob_writer.go
package client

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"time"

	"github.com/docker/distribution"
)

type httpBlobUpload struct {
	statter distribution.BlobStatter
	client  *http.Client

	uuid      string
	startedAt time.Time

	location string // always the last value of the location header.
	offset   int64
	closed   bool
}

func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) {
	panic("Not implemented")
}

func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error {
	if resp.StatusCode == http.StatusNotFound {
		return distribution.ErrBlobUploadUnknown
	}
	return HandleErrorResponse(resp)
}

func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) {
	req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r))
	if err != nil {
		return 0, err
	}
	defer req.Body.Close()

	resp, err := hbu.client.Do(req)
	if err != nil {
		return 0, err
	}

	if !SuccessStatus(resp.StatusCode) {
		return 0, hbu.handleErrorResponse(resp)
	}

	hbu.uuid = resp.Header.Get("Docker-Upload-UUID")
	hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location)
	if err != nil {
		return 0, err
	}
	rng := resp.Header.Get("Range")
	var start, end int64
	if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil {
		return 0, err
	} else if n != 2 || end < start {
		return 0, fmt.Errorf("bad range format: %s", rng)
	}

	return (end - start + 1), nil
}

func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) {
	req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p))
	if err != nil {
		return 0, err
	}
	req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1)))
	req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p)))
	req.Header.Set("Content-Type", "application/octet-stream")

	resp, err := hbu.client.Do(req)
	if err != nil {
		return 0, err
	}

	if !SuccessStatus(resp.StatusCode) {
		return 0, hbu.handleErrorResponse(resp)
	}

	hbu.uuid = resp.Header.Get("Docker-Upload-UUID")
	hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location)
	if err != nil {
		return 0, err
	}
	rng := resp.Header.Get("Range")
	var start, end int
	if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil {
		return 0, err
	} else if n != 2 || end < start {
		return 0, fmt.Errorf("bad range format: %s", rng)
	}

	return (end - start + 1), nil
}

func (hbu *httpBlobUpload) Size() int64 {
	return hbu.offset
}

func (hbu *httpBlobUpload) ID() string {
	return hbu.uuid
}

func (hbu *httpBlobUpload) StartedAt() time.Time {
	return hbu.startedAt
}

func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
	// TODO(dmcgowan): Check if already finished, if so just fetch
	req, err := http.NewRequest("PUT", hbu.location, nil)
	if err != nil {
		return distribution.Descriptor{}, err
	}

	values := req.URL.Query()
	values.Set("digest", desc.Digest.String())
	req.URL.RawQuery = values.Encode()

	resp, err := hbu.client.Do(req)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	defer resp.Body.Close()

	if !SuccessStatus(resp.StatusCode) {
		return distribution.Descriptor{}, hbu.handleErrorResponse(resp)
	}

	return hbu.statter.Stat(ctx, desc.Digest)
}

func (hbu *httpBlobUpload) Cancel(ctx context.Context) error {
	req, err := http.NewRequest("DELETE", hbu.location, nil)
	if err != nil {
		return err
	}
	resp, err := hbu.client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) {
		return nil
	}
	return hbu.handleErrorResponse(resp)
}

func (hbu *httpBlobUpload) Close() error {
	hbu.closed = true
	return nil
}
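// Illustrative usage sketch, not part of the vendored file above: the
// create/write/commit flow against the exported interfaces, with Cancel
// deferred so a failed commit releases the upload session. The function name
// is invented; ingester is assumed to be any working BlobIngester.
package main

import (
	"context"

	"github.com/docker/distribution"
	digest "github.com/opencontainers/go-digest"
)

// uploadBlob writes p through a BlobWriter and commits it against a locally
// computed digest. Cancel after a successful Commit is documented to be a
// no-op, so deferring it is safe.
func uploadBlob(ctx context.Context, ingester distribution.BlobIngester, p []byte) (distribution.Descriptor, error) {
	bw, err := ingester.Create(ctx)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	defer bw.Cancel(ctx)

	if _, err := bw.Write(p); err != nil {
		return distribution.Descriptor{}, err
	}
	return bw.Commit(ctx, distribution.Descriptor{
		Digest: digest.FromBytes(p),
		Size:   int64(len(p)),
	})
}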
9,354
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/client
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/client/auth/api_version.go
package auth

import (
	"net/http"
	"strings"
)

// APIVersion represents a version of an API including its
// type and version number.
type APIVersion struct {
	// Type refers to the name of a specific API specification
	// such as "registry"
	Type string

	// Version is the version of the API specification implemented,
	// This may omit the revision number and only include
	// the major and minor version, such as "2.0"
	Version string
}

// String returns the string formatted API Version
func (v APIVersion) String() string {
	return v.Type + "/" + v.Version
}

// APIVersions gets the API versions out of an HTTP response using the provided
// version header as the key for the HTTP header.
func APIVersions(resp *http.Response, versionHeader string) []APIVersion {
	versions := []APIVersion{}
	if versionHeader != "" {
		for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey(versionHeader)] {
			for _, version := range strings.Fields(supportedVersions) {
				versions = append(versions, ParseAPIVersion(version))
			}
		}
	}
	return versions
}

// ParseAPIVersion parses an API version string into an APIVersion
// Format (Expected, not enforced):
// API version string = <API type> '/' <API version>
// API type = [a-z][a-z0-9]*
// API version = [0-9]+(\.[0-9]+)?
// TODO(dmcgowan): Enforce format, add error condition, remove unknown type
func ParseAPIVersion(versionStr string) APIVersion {
	idx := strings.IndexRune(versionStr, '/')
	if idx == -1 {
		return APIVersion{
			Type:    "unknown",
			Version: versionStr,
		}
	}
	return APIVersion{
		Type:    strings.ToLower(versionStr[:idx]),
		Version: versionStr[idx+1:],
	}
}
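// Illustrative usage sketch, not part of the vendored file above: parsing API
// versions from a simulated ping response. The header name below is the one
// conventionally used by v2 registries, but it is an assumption of this
// example rather than something defined in the file.
package main

import (
	"fmt"
	"net/http"

	"github.com/docker/distribution/registry/client/auth"
)

func main() {
	resp := &http.Response{Header: http.Header{}}
	resp.Header.Add("Docker-Distribution-API-Version", "registry/2.0")

	for _, v := range auth.APIVersions(resp, "Docker-Distribution-API-Version") {
		fmt.Println(v.Type, v.Version) // prints: registry 2.0
	}
}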
9,355
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/client
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/client/auth/session.go
package auth import ( "encoding/json" "errors" "fmt" "net/http" "net/url" "strings" "sync" "time" "github.com/docker/distribution/registry/client" "github.com/docker/distribution/registry/client/auth/challenge" "github.com/docker/distribution/registry/client/transport" ) var ( // ErrNoBasicAuthCredentials is returned if a request can't be authorized with // basic auth due to lack of credentials. ErrNoBasicAuthCredentials = errors.New("no basic auth credentials") // ErrNoToken is returned if a request is successful but the body does not // contain an authorization token. ErrNoToken = errors.New("authorization server did not include a token in the response") ) const defaultClientID = "registry-client" // AuthenticationHandler is an interface for authorizing a request from // params from a "WWW-Authenicate" header for a single scheme. type AuthenticationHandler interface { // Scheme returns the scheme as expected from the "WWW-Authenicate" header. Scheme() string // AuthorizeRequest adds the authorization header to a request (if needed) // using the parameters from "WWW-Authenticate" method. The parameters // values depend on the scheme. AuthorizeRequest(req *http.Request, params map[string]string) error } // CredentialStore is an interface for getting credentials for // a given URL type CredentialStore interface { // Basic returns basic auth for the given URL Basic(*url.URL) (string, string) // RefreshToken returns a refresh token for the // given URL and service RefreshToken(*url.URL, string) string // SetRefreshToken sets the refresh token if none // is provided for the given url and service SetRefreshToken(realm *url.URL, service, token string) } // NewAuthorizer creates an authorizer which can handle multiple authentication // schemes. The handlers are tried in order, the higher priority authentication // methods should be first. The challengeMap holds a list of challenges for // a given root API endpoint (for example "https://registry-1.docker.io/v2/"). func NewAuthorizer(manager challenge.Manager, handlers ...AuthenticationHandler) transport.RequestModifier { return &endpointAuthorizer{ challenges: manager, handlers: handlers, } } type endpointAuthorizer struct { challenges challenge.Manager handlers []AuthenticationHandler } func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error { pingPath := req.URL.Path if v2Root := strings.Index(req.URL.Path, "/v2/"); v2Root != -1 { pingPath = pingPath[:v2Root+4] } else if v1Root := strings.Index(req.URL.Path, "/v1/"); v1Root != -1 { pingPath = pingPath[:v1Root] + "/v2/" } else { return nil } ping := url.URL{ Host: req.URL.Host, Scheme: req.URL.Scheme, Path: pingPath, } challenges, err := ea.challenges.GetChallenges(ping) if err != nil { return err } if len(challenges) > 0 { for _, handler := range ea.handlers { for _, c := range challenges { if c.Scheme != handler.Scheme() { continue } if err := handler.AuthorizeRequest(req, c.Parameters); err != nil { return err } } } } return nil } // This is the minimum duration a token can last (in seconds). // A token must not live less than 60 seconds because older versions // of the Docker client didn't read their expiration from the token // response and assumed 60 seconds. So to remain compatible with // those implementations, a token must live at least this long. const minimumTokenLifetimeSeconds = 60 // Private interface for time used by this package to enable tests to provide their own implementation. 
type clock interface { Now() time.Time } type tokenHandler struct { creds CredentialStore transport http.RoundTripper clock clock offlineAccess bool forceOAuth bool clientID string scopes []Scope tokenLock sync.Mutex tokenCache string tokenExpiration time.Time logger Logger } // Scope is a type which is serializable to a string // using the allow scope grammar. type Scope interface { String() string } // RepositoryScope represents a token scope for access // to a repository. type RepositoryScope struct { Repository string Class string Actions []string } // String returns the string representation of the repository // using the scope grammar func (rs RepositoryScope) String() string { repoType := "repository" // Keep existing format for image class to maintain backwards compatibility // with authorization servers which do not support the expanded grammar. if rs.Class != "" && rs.Class != "image" { repoType = fmt.Sprintf("%s(%s)", repoType, rs.Class) } return fmt.Sprintf("%s:%s:%s", repoType, rs.Repository, strings.Join(rs.Actions, ",")) } // RegistryScope represents a token scope for access // to resources in the registry. type RegistryScope struct { Name string Actions []string } // String returns the string representation of the user // using the scope grammar func (rs RegistryScope) String() string { return fmt.Sprintf("registry:%s:%s", rs.Name, strings.Join(rs.Actions, ",")) } // Logger defines the injectable logging interface, used on TokenHandlers. type Logger interface { Debugf(format string, args ...interface{}) } func logDebugf(logger Logger, format string, args ...interface{}) { if logger == nil { return } logger.Debugf(format, args...) } // TokenHandlerOptions is used to configure a new token handler type TokenHandlerOptions struct { Transport http.RoundTripper Credentials CredentialStore OfflineAccess bool ForceOAuth bool ClientID string Scopes []Scope Logger Logger } // An implementation of clock for providing real time data. type realClock struct{} // Now implements clock func (realClock) Now() time.Time { return time.Now() } // NewTokenHandler creates a new AuthenicationHandler which supports // fetching tokens from a remote token server. func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler { // Create options... return NewTokenHandlerWithOptions(TokenHandlerOptions{ Transport: transport, Credentials: creds, Scopes: []Scope{ RepositoryScope{ Repository: scope, Actions: actions, }, }, }) } // NewTokenHandlerWithOptions creates a new token handler using the provided // options structure. func NewTokenHandlerWithOptions(options TokenHandlerOptions) AuthenticationHandler { handler := &tokenHandler{ transport: options.Transport, creds: options.Credentials, offlineAccess: options.OfflineAccess, forceOAuth: options.ForceOAuth, clientID: options.ClientID, scopes: options.Scopes, clock: realClock{}, logger: options.Logger, } return handler } func (th *tokenHandler) client() *http.Client { return &http.Client{ Transport: th.transport, Timeout: 15 * time.Second, } } func (th *tokenHandler) Scheme() string { return "bearer" } func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { var additionalScopes []string if fromParam := req.URL.Query().Get("from"); fromParam != "" { additionalScopes = append(additionalScopes, RepositoryScope{ Repository: fromParam, Actions: []string{"pull"}, }.String()) } token, err := th.getToken(params, additionalScopes...) 
if err != nil { return err } req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) return nil } func (th *tokenHandler) getToken(params map[string]string, additionalScopes ...string) (string, error) { th.tokenLock.Lock() defer th.tokenLock.Unlock() scopes := make([]string, 0, len(th.scopes)+len(additionalScopes)) for _, scope := range th.scopes { scopes = append(scopes, scope.String()) } var addedScopes bool for _, scope := range additionalScopes { if hasScope(scopes, scope) { continue } scopes = append(scopes, scope) addedScopes = true } now := th.clock.Now() if now.After(th.tokenExpiration) || addedScopes { token, expiration, err := th.fetchToken(params, scopes) if err != nil { return "", err } // do not update cache for added scope tokens if !addedScopes { th.tokenCache = token th.tokenExpiration = expiration } return token, nil } return th.tokenCache, nil } func hasScope(scopes []string, scope string) bool { for _, s := range scopes { if s == scope { return true } } return false } type postTokenResponse struct { AccessToken string `json:"access_token"` RefreshToken string `json:"refresh_token"` ExpiresIn int `json:"expires_in"` IssuedAt time.Time `json:"issued_at"` Scope string `json:"scope"` } func (th *tokenHandler) fetchTokenWithOAuth(realm *url.URL, refreshToken, service string, scopes []string) (token string, expiration time.Time, err error) { form := url.Values{} form.Set("scope", strings.Join(scopes, " ")) form.Set("service", service) clientID := th.clientID if clientID == "" { // Use default client, this is a required field clientID = defaultClientID } form.Set("client_id", clientID) if refreshToken != "" { form.Set("grant_type", "refresh_token") form.Set("refresh_token", refreshToken) } else if th.creds != nil { form.Set("grant_type", "password") username, password := th.creds.Basic(realm) form.Set("username", username) form.Set("password", password) // attempt to get a refresh token form.Set("access_type", "offline") } else { // refuse to do oauth without a grant type return "", time.Time{}, fmt.Errorf("no supported grant type") } resp, err := th.client().PostForm(realm.String(), form) if err != nil { return "", time.Time{}, err } defer resp.Body.Close() if !client.SuccessStatus(resp.StatusCode) { err := client.HandleErrorResponse(resp) return "", time.Time{}, err } decoder := json.NewDecoder(resp.Body) var tr postTokenResponse if err = decoder.Decode(&tr); err != nil { return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err) } if tr.RefreshToken != "" && tr.RefreshToken != refreshToken { th.creds.SetRefreshToken(realm, service, tr.RefreshToken) } if tr.ExpiresIn < minimumTokenLifetimeSeconds { // The default/minimum lifetime. tr.ExpiresIn = minimumTokenLifetimeSeconds logDebugf(th.logger, "Increasing token expiration to: %d seconds", tr.ExpiresIn) } if tr.IssuedAt.IsZero() { // issued_at is optional in the token response. 
		tr.IssuedAt = th.clock.Now().UTC()
	}

	return tr.AccessToken, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil
}

type getTokenResponse struct {
	Token        string    `json:"token"`
	AccessToken  string    `json:"access_token"`
	ExpiresIn    int       `json:"expires_in"`
	IssuedAt     time.Time `json:"issued_at"`
	RefreshToken string    `json:"refresh_token"`
}

func (th *tokenHandler) fetchTokenWithBasicAuth(realm *url.URL, service string, scopes []string) (token string, expiration time.Time, err error) {

	req, err := http.NewRequest("GET", realm.String(), nil)
	if err != nil {
		return "", time.Time{}, err
	}

	reqParams := req.URL.Query()

	if service != "" {
		reqParams.Add("service", service)
	}

	for _, scope := range scopes {
		reqParams.Add("scope", scope)
	}

	if th.offlineAccess {
		reqParams.Add("offline_token", "true")
		clientID := th.clientID
		if clientID == "" {
			clientID = defaultClientID
		}
		reqParams.Add("client_id", clientID)
	}

	if th.creds != nil {
		username, password := th.creds.Basic(realm)
		if username != "" && password != "" {
			reqParams.Add("account", username)
			req.SetBasicAuth(username, password)
		}
	}

	req.URL.RawQuery = reqParams.Encode()

	resp, err := th.client().Do(req)
	if err != nil {
		return "", time.Time{}, err
	}
	defer resp.Body.Close()

	if !client.SuccessStatus(resp.StatusCode) {
		err := client.HandleErrorResponse(resp)
		return "", time.Time{}, err
	}

	decoder := json.NewDecoder(resp.Body)

	var tr getTokenResponse
	if err = decoder.Decode(&tr); err != nil {
		return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err)
	}

	if tr.RefreshToken != "" && th.creds != nil {
		th.creds.SetRefreshToken(realm, service, tr.RefreshToken)
	}

	// `access_token` is equivalent to `token` and if both are specified
	// the choice is undefined. Canonicalize `access_token` by sticking
	// things in `token`.
	if tr.AccessToken != "" {
		tr.Token = tr.AccessToken
	}

	if tr.Token == "" {
		return "", time.Time{}, ErrNoToken
	}

	if tr.ExpiresIn < minimumTokenLifetimeSeconds {
		// The default/minimum lifetime.
		tr.ExpiresIn = minimumTokenLifetimeSeconds
		logDebugf(th.logger, "Increasing token expiration to: %d seconds", tr.ExpiresIn)
	}

	if tr.IssuedAt.IsZero() {
		// issued_at is optional in the token response.
		tr.IssuedAt = th.clock.Now().UTC()
	}

	return tr.Token, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil
}

func (th *tokenHandler) fetchToken(params map[string]string, scopes []string) (token string, expiration time.Time, err error) {
	realm, ok := params["realm"]
	if !ok {
		return "", time.Time{}, errors.New("no realm specified for token auth challenge")
	}

	// TODO(dmcgowan): Handle empty scheme and relative realm
	realmURL, err := url.Parse(realm)
	if err != nil {
		return "", time.Time{}, fmt.Errorf("invalid token auth challenge realm: %s", err)
	}

	service := params["service"]

	var refreshToken string

	if th.creds != nil {
		refreshToken = th.creds.RefreshToken(realmURL, service)
	}

	if refreshToken != "" || th.forceOAuth {
		return th.fetchTokenWithOAuth(realmURL, refreshToken, service, scopes)
	}

	return th.fetchTokenWithBasicAuth(realmURL, service, scopes)
}

type basicHandler struct {
	creds CredentialStore
}

// NewBasicHandler creates a new authentication handler which adds
// basic authentication credentials to a request.
func NewBasicHandler(creds CredentialStore) AuthenticationHandler { return &basicHandler{ creds: creds, } } func (*basicHandler) Scheme() string { return "basic" } func (bh *basicHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { if bh.creds != nil { username, password := bh.creds.Basic(req.URL) if username != "" && password != "" { req.SetBasicAuth(username, password) return nil } } return ErrNoBasicAuthCredentials }
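// Example: a minimal sketch of wiring this package's pieces together into an
// authorizing HTTP client. The credential store below is an assumed,
// illustrative implementation, and the registry URL, username, and password
// are placeholders, not real values.
package main

import (
	"fmt"
	"net/http"
	"net/url"

	"github.com/docker/distribution/registry/client/auth"
	"github.com/docker/distribution/registry/client/auth/challenge"
	"github.com/docker/distribution/registry/client/transport"
)

// staticCredentialStore serves a fixed username/password and keeps refresh
// tokens in memory; it exists only for this sketch.
type staticCredentialStore struct {
	username, password string
	refreshTokens      map[string]string
}

func (s *staticCredentialStore) Basic(*url.URL) (string, string) { return s.username, s.password }

func (s *staticCredentialStore) RefreshToken(_ *url.URL, service string) string {
	return s.refreshTokens[service]
}

func (s *staticCredentialStore) SetRefreshToken(_ *url.URL, service, token string) {
	s.refreshTokens[service] = token
}

func main() {
	creds := &staticCredentialStore{
		username:      "user",   // placeholder
		password:      "secret", // placeholder
		refreshTokens: map[string]string{},
	}

	// The challenge manager must be primed with a response from the /v2/
	// ping endpoint before the authorizer can pick a handler.
	manager := challenge.NewSimpleManager()
	if resp, err := http.Get("https://registry.example.com/v2/"); err == nil { // placeholder registry
		_ = manager.AddResponse(resp)
		resp.Body.Close()
	}

	authorizer := auth.NewAuthorizer(manager,
		auth.NewTokenHandler(http.DefaultTransport, creds, "library/ubuntu", "pull"),
		auth.NewBasicHandler(creds),
	)

	client := &http.Client{Transport: transport.NewTransport(http.DefaultTransport, authorizer)}
	resp, err := client.Get("https://registry.example.com/v2/library/ubuntu/tags/list")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}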
9,356
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/client/auth
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
package challenge

import (
	"fmt"
	"net/http"
	"net/url"
	"strings"
	"sync"
)

// Challenge carries information from a WWW-Authenticate response header.
// See RFC 2617.
type Challenge struct {
	// Scheme is the auth-scheme according to RFC 2617
	Scheme string

	// Parameters are the auth-params according to RFC 2617
	Parameters map[string]string
}

// Manager manages the challenges for endpoints.
// The challenges are pulled out of HTTP responses. Only
// responses which expect challenges should be added to
// the manager, since a non-unauthorized request will be
// viewed as not requiring challenges.
type Manager interface {
	// GetChallenges returns the challenges for the given
	// endpoint URL.
	GetChallenges(endpoint url.URL) ([]Challenge, error)

	// AddResponse adds the response to the challenge
	// manager. The challenges will be parsed out of
	// the WWW-Authenticate headers and added to the
	// URL which produced the response. If the
	// response was authorized, any challenges for the
	// endpoint will be cleared.
	AddResponse(resp *http.Response) error
}

// NewSimpleManager returns an instance of
// Manager which only maps endpoints to challenges
// based on the responses which have been added to the
// manager. The simple manager will make no attempt to
// perform requests on the endpoints or cache the responses
// to a backend.
func NewSimpleManager() Manager {
	return &simpleManager{
		Challenges: make(map[string][]Challenge),
	}
}

type simpleManager struct {
	sync.RWMutex
	Challenges map[string][]Challenge
}

func normalizeURL(endpoint *url.URL) {
	endpoint.Host = strings.ToLower(endpoint.Host)
	endpoint.Host = canonicalAddr(endpoint)
}

func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) {
	normalizeURL(&endpoint)

	m.RLock()
	defer m.RUnlock()
	challenges := m.Challenges[endpoint.String()]
	return challenges, nil
}

func (m *simpleManager) AddResponse(resp *http.Response) error {
	challenges := ResponseChallenges(resp)
	if resp.Request == nil {
		return fmt.Errorf("missing request reference")
	}
	urlCopy := url.URL{
		Path:   resp.Request.URL.Path,
		Host:   resp.Request.URL.Host,
		Scheme: resp.Request.URL.Scheme,
	}
	normalizeURL(&urlCopy)

	m.Lock()
	defer m.Unlock()
	m.Challenges[urlCopy.String()] = challenges
	return nil
}

// Octet types from RFC 2616.
type octetType byte

var octetTypes [256]octetType

const (
	isToken octetType = 1 << iota
	isSpace
)

func init() {
	// OCTET      = <any 8-bit sequence of data>
	// CHAR       = <any US-ASCII character (octets 0 - 127)>
	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
	// CR         = <US-ASCII CR, carriage return (13)>
	// LF         = <US-ASCII LF, linefeed (10)>
	// SP         = <US-ASCII SP, space (32)>
	// HT         = <US-ASCII HT, horizontal-tab (9)>
	// <">        = <US-ASCII double-quote mark (34)>
	// CRLF       = CR LF
	// LWS        = [CRLF] 1*( SP | HT )
	// TEXT       = <any OCTET except CTLs, but including LWS>
	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
	// token      = 1*<any CHAR except CTLs or separators>
	// qdtext     = <any TEXT except <">>
	for c := 0; c < 256; c++ {
		var t octetType
		isCtl := c <= 31 || c == 127
		isChar := 0 <= c && c <= 127
		isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
		if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
			t |= isSpace
		}
		if isChar && !isCtl && !isSeparator {
			t |= isToken
		}
		octetTypes[c] = t
	}
}

// ResponseChallenges returns a list of authorization challenges
// for the given http Response. Challenges are only checked if
// the response status code was a 401.
func ResponseChallenges(resp *http.Response) []Challenge { if resp.StatusCode == http.StatusUnauthorized { // Parse the WWW-Authenticate Header and store the challenges // on this endpoint object. return parseAuthHeader(resp.Header) } return nil } func parseAuthHeader(header http.Header) []Challenge { challenges := []Challenge{} for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { v, p := parseValueAndParams(h) if v != "" { challenges = append(challenges, Challenge{Scheme: v, Parameters: p}) } } return challenges } func parseValueAndParams(header string) (value string, params map[string]string) { params = make(map[string]string) value, s := expectToken(header) if value == "" { return } value = strings.ToLower(value) s = "," + skipSpace(s) for strings.HasPrefix(s, ",") { var pkey string pkey, s = expectToken(skipSpace(s[1:])) if pkey == "" { return } if !strings.HasPrefix(s, "=") { return } var pvalue string pvalue, s = expectTokenOrQuoted(s[1:]) if pvalue == "" { return } pkey = strings.ToLower(pkey) params[pkey] = pvalue s = skipSpace(s) } return } func skipSpace(s string) (rest string) { i := 0 for ; i < len(s); i++ { if octetTypes[s[i]]&isSpace == 0 { break } } return s[i:] } func expectToken(s string) (token, rest string) { i := 0 for ; i < len(s); i++ { if octetTypes[s[i]]&isToken == 0 { break } } return s[:i], s[i:] } func expectTokenOrQuoted(s string) (value string, rest string) { if !strings.HasPrefix(s, "\"") { return expectToken(s) } s = s[1:] for i := 0; i < len(s); i++ { switch s[i] { case '"': return s[:i], s[i+1:] case '\\': p := make([]byte, len(s)-1) j := copy(p, s[:i]) escape := true for i = i + 1; i < len(s); i++ { b := s[i] switch { case escape: escape = false p[j] = b j++ case b == '\\': escape = true case b == '"': return string(p[:j]), s[i+1:] default: p[j] = b j++ } } return "", "" } } return "", "" }
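// Example: a small sketch of how a client primes the Manager above. The
// endpoint URL is a placeholder; any 401 response carrying a
// WWW-Authenticate header will populate the manager.
package main

import (
	"fmt"
	"net/http"
	"net/url"

	"github.com/docker/distribution/registry/client/auth/challenge"
)

func main() {
	manager := challenge.NewSimpleManager()

	resp, err := http.Get("https://registry.example.com/v2/") // placeholder
	if err != nil {
		fmt.Println("ping failed:", err)
		return
	}
	defer resp.Body.Close()

	// AddResponse parses WWW-Authenticate headers on a 401 (and clears
	// stored challenges on an authorized response).
	if err := manager.AddResponse(resp); err != nil {
		fmt.Println("add response:", err)
		return
	}

	endpoint, _ := url.Parse("https://registry.example.com/v2/")
	challenges, _ := manager.GetChallenges(*endpoint)
	for _, c := range challenges {
		fmt.Printf("scheme=%s realm=%s service=%s\n",
			c.Scheme, c.Parameters["realm"], c.Parameters["service"])
	}
}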
9,357
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/client/auth
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go
package challenge import ( "net/url" "strings" ) // FROM: https://golang.org/src/net/http/http.go // Given a string of the form "host", "host:port", or "[ipv6::address]:port", // return true if the string includes a port. func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } // FROM: http://golang.org/src/net/http/transport.go var portMap = map[string]string{ "http": "80", "https": "443", } // canonicalAddr returns url.Host but always with a ":port" suffix // FROM: http://golang.org/src/net/http/transport.go func canonicalAddr(url *url.URL) string { addr := url.Host if !hasPort(addr) { return addr + ":" + portMap[url.Scheme] } return addr }
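// Example: hasPort and canonicalAddr are unexported, so this sketch has to
// live in the same package, e.g. in a test file (assumed name:
// addr_test.go). The hosts are placeholders.
package challenge

import (
	"net/url"
	"testing"
)

func TestCanonicalAddrSketch(t *testing.T) {
	cases := []struct{ in, want string }{
		{"https://registry.example.com", "registry.example.com:443"},      // default https port added
		{"http://registry.example.com:5000", "registry.example.com:5000"}, // explicit port kept
	}
	for _, c := range cases {
		u, err := url.Parse(c.in)
		if err != nil {
			t.Fatal(err)
		}
		if got := canonicalAddr(u); got != c.want {
			t.Errorf("canonicalAddr(%q) = %q, want %q", c.in, got, c.want)
		}
	}
}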
9,358
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/client
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/client/transport/transport.go
package transport

import (
	"io"
	"net/http"
	"sync"
)

// RequestModifier represents an object which will do an in-place
// modification of an HTTP request.
type RequestModifier interface {
	ModifyRequest(*http.Request) error
}

type headerModifier http.Header

// NewHeaderRequestModifier returns a new RequestModifier which will
// add the given headers to a request.
func NewHeaderRequestModifier(header http.Header) RequestModifier {
	return headerModifier(header)
}

func (h headerModifier) ModifyRequest(req *http.Request) error {
	for k, s := range http.Header(h) {
		req.Header[k] = append(req.Header[k], s...)
	}

	return nil
}

// NewTransport creates a new transport which will apply modifiers to
// the request on a RoundTrip call.
func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper {
	return &transport{
		Modifiers: modifiers,
		Base:      base,
	}
}

// transport is an http.RoundTripper that makes HTTP requests after
// copying and modifying the request
type transport struct {
	Modifiers []RequestModifier
	Base      http.RoundTripper

	mu     sync.Mutex                      // guards modReq
	modReq map[*http.Request]*http.Request // original -> modified
}

// RoundTrip applies the configured request modifiers to a copy of the
// incoming request and then performs the round trip using the base
// transport.
func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
	req2 := cloneRequest(req)
	for _, modifier := range t.Modifiers {
		if err := modifier.ModifyRequest(req2); err != nil {
			return nil, err
		}
	}

	t.setModReq(req, req2)
	res, err := t.base().RoundTrip(req2)
	if err != nil {
		t.setModReq(req, nil)
		return nil, err
	}
	res.Body = &onEOFReader{
		rc: res.Body,
		fn: func() { t.setModReq(req, nil) },
	}
	return res, nil
}

// CancelRequest cancels an in-flight request by closing its connection.
func (t *transport) CancelRequest(req *http.Request) {
	type canceler interface {
		CancelRequest(*http.Request)
	}
	if cr, ok := t.base().(canceler); ok {
		t.mu.Lock()
		modReq := t.modReq[req]
		delete(t.modReq, req)
		t.mu.Unlock()
		cr.CancelRequest(modReq)
	}
}

func (t *transport) base() http.RoundTripper {
	if t.Base != nil {
		return t.Base
	}
	return http.DefaultTransport
}

func (t *transport) setModReq(orig, mod *http.Request) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.modReq == nil {
		t.modReq = make(map[*http.Request]*http.Request)
	}
	if mod == nil {
		delete(t.modReq, orig)
	} else {
		t.modReq[orig] = mod
	}
}

// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
func cloneRequest(r *http.Request) *http.Request {
	// shallow copy of the struct
	r2 := new(http.Request)
	*r2 = *r
	// deep copy of the Header
	r2.Header = make(http.Header, len(r.Header))
	for k, s := range r.Header {
		r2.Header[k] = append([]string(nil), s...)
	}

	return r2
}

type onEOFReader struct {
	rc io.ReadCloser
	fn func()
}

func (r *onEOFReader) Read(p []byte) (n int, err error) {
	n, err = r.rc.Read(p)
	if err == io.EOF {
		r.runFunc()
	}
	return
}

func (r *onEOFReader) Close() error {
	err := r.rc.Close()
	r.runFunc()
	return err
}

func (r *onEOFReader) runFunc() {
	if fn := r.fn; fn != nil {
		fn()
		r.fn = nil
	}
}
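// Example: a minimal sketch composing the transport with a header modifier.
// The header value and URL are placeholders.
package main

import (
	"fmt"
	"net/http"

	"github.com/docker/distribution/registry/client/transport"
)

func main() {
	headers := http.Header{}
	headers.Set("Authorization", "Bearer placeholder-token")

	rt := transport.NewTransport(http.DefaultTransport,
		transport.NewHeaderRequestModifier(headers))

	client := &http.Client{Transport: rt}
	resp, err := client.Get("https://registry.example.com/v2/") // placeholder URL
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}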
9,359
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/client
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go
package transport

import (
	"errors"
	"fmt"
	"io"
	"net/http"
	"regexp"
	"strconv"
)

var (
	contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\*)`)

	// ErrWrongCodeForByteRange is returned if the client sends a request
	// with a Range header but the server returns a 2xx or 3xx code other
	// than 206 Partial Content.
	ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request")
)

// ReadSeekCloser combines io.ReadSeeker with io.Closer.
type ReadSeekCloser interface {
	io.ReadSeeker
	io.Closer
}

// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET
// request. When seeking and starting a read from a non-zero offset
// a "Range" header will be added which sets the offset.
// TODO(dmcgowan): Move this into a separate utility package
func NewHTTPReadSeeker(client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser {
	return &httpReadSeeker{
		client:       client,
		url:          url,
		errorHandler: errorHandler,
	}
}

type httpReadSeeker struct {
	client *http.Client
	url    string

	// errorHandler creates an error from an unsuccessful HTTP response.
	// This allows the error to be created with the HTTP response body
	// without leaking the body through a returned error.
	errorHandler func(*http.Response) error

	size int64

	// rc is the remote read closer.
	rc io.ReadCloser
	// readerOffset tracks the offset as of the last read.
	readerOffset int64
	// seekOffset allows Seek to override the offset. Seek changes
	// seekOffset instead of changing readerOffset directly so that
	// connection resets can be delayed and possibly avoided if the
	// seek is undone (i.e. seeking to the end and then back to the
	// beginning).
	seekOffset int64
	err        error
}

func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
	if hrs.err != nil {
		return 0, hrs.err
	}

	// If we sought to a different position, we need to reset the
	// connection. This logic is here instead of Seek so that if
	// a seek is undone before the next read, the connection doesn't
	// need to be closed and reopened. A common example of this is
	// seeking to the end to determine the length, and then seeking
	// back to the original position.
	if hrs.readerOffset != hrs.seekOffset {
		hrs.reset()
	}

	hrs.readerOffset = hrs.seekOffset

	rd, err := hrs.reader()
	if err != nil {
		return 0, err
	}

	n, err = rd.Read(p)
	hrs.seekOffset += int64(n)
	hrs.readerOffset += int64(n)

	return n, err
}

func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
	if hrs.err != nil {
		return 0, hrs.err
	}

	lastReaderOffset := hrs.readerOffset

	if whence == io.SeekStart && hrs.rc == nil {
		// If no request has been made yet, and we are seeking to an
		// absolute position, set the read offset as well to avoid an
		// unnecessary request.
		hrs.readerOffset = offset
	}

	_, err := hrs.reader()
	if err != nil {
		hrs.readerOffset = lastReaderOffset
		return 0, err
	}

	newOffset := hrs.seekOffset

	switch whence {
	case io.SeekCurrent:
		newOffset += offset
	case io.SeekEnd:
		if hrs.size < 0 {
			return 0, errors.New("content length not known")
		}
		newOffset = hrs.size + offset
	case io.SeekStart:
		newOffset = offset
	}

	if newOffset < 0 {
		err = errors.New("cannot seek to negative position")
	} else {
		hrs.seekOffset = newOffset
	}

	return hrs.seekOffset, err
}

func (hrs *httpReadSeeker) Close() error {
	if hrs.err != nil {
		return hrs.err
	}

	// close and release reader chain
	if hrs.rc != nil {
		hrs.rc.Close()
	}

	hrs.rc = nil

	hrs.err = errors.New("httpLayer: closed")

	return nil
}

func (hrs *httpReadSeeker) reset() {
	if hrs.err != nil {
		return
	}
	if hrs.rc != nil {
		hrs.rc.Close()
		hrs.rc = nil
	}
}

func (hrs *httpReadSeeker) reader() (io.Reader, error) {
	if hrs.err != nil {
		return nil, hrs.err
	}

	if hrs.rc != nil {
		return hrs.rc, nil
	}

	req, err := http.NewRequest("GET", hrs.url, nil)
	if err != nil {
		return nil, err
	}

	if hrs.readerOffset > 0 {
		// If we are at a different offset, issue a range request from there.
		req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.readerOffset))
		// TODO: get context in here
		// context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range"))
	}

	req.Header.Add("Accept-Encoding", "identity")
	resp, err := hrs.client.Do(req)
	if err != nil {
		return nil, err
	}

	// Normally would use client.SuccessStatus, but that would be a cyclic
	// import
	if resp.StatusCode >= 200 && resp.StatusCode <= 399 {
		if hrs.readerOffset > 0 {
			if resp.StatusCode != http.StatusPartialContent {
				return nil, ErrWrongCodeForByteRange
			}

			contentRange := resp.Header.Get("Content-Range")
			if contentRange == "" {
				return nil, errors.New("no Content-Range header found in HTTP 206 response")
			}

			submatches := contentRangeRegexp.FindStringSubmatch(contentRange)
			if len(submatches) < 4 {
				return nil, fmt.Errorf("could not parse Content-Range header: %s", contentRange)
			}

			startByte, err := strconv.ParseUint(submatches[1], 10, 64)
			if err != nil {
				return nil, fmt.Errorf("could not parse start of range in Content-Range header: %s", contentRange)
			}

			if startByte != uint64(hrs.readerOffset) {
				return nil, fmt.Errorf("received Content-Range starting at offset %d instead of requested %d", startByte, hrs.readerOffset)
			}

			endByte, err := strconv.ParseUint(submatches[2], 10, 64)
			if err != nil {
				return nil, fmt.Errorf("could not parse end of range in Content-Range header: %s", contentRange)
			}

			if submatches[3] == "*" {
				hrs.size = -1
			} else {
				size, err := strconv.ParseUint(submatches[3], 10, 64)
				if err != nil {
					return nil, fmt.Errorf("could not parse total size in Content-Range header: %s", contentRange)
				}

				if endByte+1 != size {
					return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange)
				}

				hrs.size = int64(size)
			}
		} else if resp.StatusCode == http.StatusOK {
			hrs.size = resp.ContentLength
		} else {
			hrs.size = -1
		}
		hrs.rc = resp.Body
	} else {
		defer resp.Body.Close()
		if hrs.errorHandler != nil {
			return nil, hrs.errorHandler(resp)
		}
		return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status)
	}

	return hrs.rc, nil
}
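// Example: a sketch of reading a blob through the seeker, including the
// seek-to-end trick described in the comments above. The URL is a
// placeholder and must point at a server that honors Range requests and
// sends a Content-Length.
package main

import (
	"fmt"
	"io"
	"net/http"

	"github.com/docker/distribution/registry/client/transport"
)

func main() {
	rsc := transport.NewHTTPReadSeeker(http.DefaultClient,
		"https://registry.example.com/v2/library/ubuntu/blobs/sha256:deadbeef", // placeholder
		nil) // no custom error handler
	defer rsc.Close()

	// Seeking to the end reports the size; seeking back to 0 before the
	// next Read avoids an unnecessary connection reset.
	size, err := rsc.Seek(0, io.SeekEnd)
	if err != nil {
		fmt.Println("seek:", err)
		return
	}
	if _, err := rsc.Seek(0, io.SeekStart); err != nil {
		fmt.Println("seek:", err)
		return
	}
	fmt.Println("blob size:", size)

	buf := make([]byte, 512)
	n, err := rsc.Read(buf)
	if err != nil && err != io.EOF {
		fmt.Println("read:", err)
		return
	}
	fmt.Printf("read %d bytes\n", n)
}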
9,360
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/storage
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/storage/cache/cache.go
// Package cache provides facilities to speed up access to the storage // backend. package cache import ( "fmt" "github.com/docker/distribution" ) // BlobDescriptorCacheProvider provides repository scoped // BlobDescriptorService cache instances and a global descriptor cache. type BlobDescriptorCacheProvider interface { distribution.BlobDescriptorService RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) } // ValidateDescriptor provides a helper function to ensure that caches have // common criteria for admitting descriptors. func ValidateDescriptor(desc distribution.Descriptor) error { if err := desc.Digest.Validate(); err != nil { return err } if desc.Size < 0 { return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size) } if desc.MediaType == "" { return fmt.Errorf("cache: empty mediatype on descriptor: %v", desc) } return nil }
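// Example: a quick sketch of the validation helper above; digest.FromBytes
// comes from the opencontainers/go-digest module already used by this tree.
package main

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/registry/storage/cache"
	"github.com/opencontainers/go-digest"
)

func main() {
	payload := []byte("hello blob")
	desc := distribution.Descriptor{
		MediaType: "application/octet-stream",
		Size:      int64(len(payload)),
		Digest:    digest.FromBytes(payload),
	}
	// ValidateDescriptor rejects descriptors with an invalid digest,
	// negative size, or empty media type.
	if err := cache.ValidateDescriptor(desc); err != nil {
		fmt.Println("rejected:", err)
		return
	}
	fmt.Println("descriptor accepted:", desc.Digest)
}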
9,361
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/storage
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
package cache

import (
	"context"

	"github.com/docker/distribution"
	prometheus "github.com/docker/distribution/metrics"
	"github.com/opencontainers/go-digest"
)

// Metrics is used to hold metric counters
// related to the number of times a cache was
// hit or missed.
type Metrics struct {
	Requests uint64
	Hits     uint64
	Misses   uint64
}

// Logger can be provided on the MetricsTracker to log errors.
//
// Usually, this is just a proxy to dcontext.GetLogger.
type Logger interface {
	Errorf(format string, args ...interface{})
}

// MetricsTracker represents a metric tracker
// which simply counts the number of hits and misses.
type MetricsTracker interface {
	Hit()
	Miss()
	Metrics() Metrics
	Logger(context.Context) Logger
}

type cachedBlobStatter struct {
	cache   distribution.BlobDescriptorService
	backend distribution.BlobDescriptorService
	tracker MetricsTracker
}

var (
	// cacheCount is the total number of cache requests received/hits/misses
	cacheCount = prometheus.StorageNamespace.NewLabeledCounter("cache", "The number of cache requests received", "type")
)

// NewCachedBlobStatter creates a new statter which prefers a cache and
// falls back to a backend.
func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService {
	return &cachedBlobStatter{
		cache:   cache,
		backend: backend,
	}
}

// NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and
// falls back to a backend. Hits and misses will send to the tracker.
func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter {
	return &cachedBlobStatter{
		cache:   cache,
		backend: backend,
		tracker: tracker,
	}
}

func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
	cacheCount.WithValues("Request").Inc(1)
	desc, err := cbds.cache.Stat(ctx, dgst)
	if err != nil {
		if err != distribution.ErrBlobUnknown {
			logErrorf(ctx, cbds.tracker, "error retrieving descriptor from cache: %v", err)
		}

		goto fallback
	}

	cacheCount.WithValues("Hit").Inc(1)
	if cbds.tracker != nil {
		cbds.tracker.Hit()
	}
	return desc, nil

fallback:
	cacheCount.WithValues("Miss").Inc(1)
	if cbds.tracker != nil {
		cbds.tracker.Miss()
	}

	desc, err = cbds.backend.Stat(ctx, dgst)
	if err != nil {
		return desc, err
	}

	if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
		logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err)
	}

	return desc, err
}

func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
	err := cbds.cache.Clear(ctx, dgst)
	if err != nil {
		return err
	}

	err = cbds.backend.Clear(ctx, dgst)
	if err != nil {
		return err
	}
	return nil
}

func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
	if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
		logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err)
	}
	return nil
}

func logErrorf(ctx context.Context, tracker MetricsTracker, format string, args ...interface{}) {
	if tracker == nil {
		return
	}

	logger := tracker.Logger(ctx)
	if logger == nil {
		return
	}
	logger.Errorf(format, args...)
}
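// Example: a sketch pairing the statter with the in-memory provider from the
// sibling memory package. In a real registry the backend would be the
// storage driver's descriptor service; a second in-memory cache stands in
// for it here purely for illustration.
package main

import (
	"context"
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/registry/storage/cache"
	"github.com/docker/distribution/registry/storage/cache/memory"
	"github.com/opencontainers/go-digest"
)

func main() {
	ctx := context.Background()
	fast := memory.NewInMemoryBlobDescriptorCacheProvider()
	slow := memory.NewInMemoryBlobDescriptorCacheProvider() // stand-in backend

	payload := []byte("layer data")
	desc := distribution.Descriptor{
		MediaType: "application/octet-stream",
		Size:      int64(len(payload)),
		Digest:    digest.FromBytes(payload),
	}
	if err := slow.SetDescriptor(ctx, desc.Digest, desc); err != nil {
		fmt.Println("seed backend:", err)
		return
	}

	statter := cache.NewCachedBlobStatter(fast, slow)

	// The first Stat misses the cache, falls back to the backend, and fills
	// the cache; the second Stat is served from the cache.
	for i := 0; i < 2; i++ {
		d, err := statter.Stat(ctx, desc.Digest)
		if err != nil {
			fmt.Println("stat:", err)
			return
		}
		fmt.Printf("stat %d: size=%d\n", i+1, d.Size)
	}
}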
9,362
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/storage/cache
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go
package memory

import (
	"context"
	"sync"

	"github.com/docker/distribution"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/storage/cache"
	"github.com/opencontainers/go-digest"
)

type inMemoryBlobDescriptorCacheProvider struct {
	global       *mapBlobDescriptorCache
	repositories map[string]*mapBlobDescriptorCache
	mu           sync.RWMutex
}

// NewInMemoryBlobDescriptorCacheProvider returns a new map-based cache for
// storing blob descriptor data.
func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider {
	return &inMemoryBlobDescriptorCacheProvider{
		global:       newMapBlobDescriptorCache(),
		repositories: make(map[string]*mapBlobDescriptorCache),
	}
}

func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) {
	if _, err := reference.ParseNormalizedNamed(repo); err != nil {
		return nil, err
	}

	imbdcp.mu.RLock()
	defer imbdcp.mu.RUnlock()

	return &repositoryScopedInMemoryBlobDescriptorCache{
		repo:       repo,
		parent:     imbdcp,
		repository: imbdcp.repositories[repo],
	}, nil
}

func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
	return imbdcp.global.Stat(ctx, dgst)
}

func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error {
	return imbdcp.global.Clear(ctx, dgst)
}

func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
	_, err := imbdcp.Stat(ctx, dgst)
	if err == distribution.ErrBlobUnknown {

		if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest {
			// if the digests differ, set the other canonical mapping
			if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil {
				return err
			}
		}

		// unknown, just set it
		return imbdcp.global.SetDescriptor(ctx, dgst, desc)
	}

	// we already know it, do nothing
	return err
}

// repositoryScopedInMemoryBlobDescriptorCache provides the request scoped
// repository cache. Instances are not thread-safe but the delegated
// operations are.
type repositoryScopedInMemoryBlobDescriptorCache struct {
	repo       string
	parent     *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map
	repository *mapBlobDescriptorCache
}

func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
	rsimbdcp.parent.mu.Lock()
	repo := rsimbdcp.repository
	rsimbdcp.parent.mu.Unlock()

	if repo == nil {
		return distribution.Descriptor{}, distribution.ErrBlobUnknown
	}

	return repo.Stat(ctx, dgst)
}

func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error {
	rsimbdcp.parent.mu.Lock()
	repo := rsimbdcp.repository
	rsimbdcp.parent.mu.Unlock()

	if repo == nil {
		return distribution.ErrBlobUnknown
	}

	return repo.Clear(ctx, dgst)
}

func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
	rsimbdcp.parent.mu.Lock()
	repo := rsimbdcp.repository
	if repo == nil {
		// allocate map since we are setting it now.
		var ok bool
		// have to read back value since we may have allocated elsewhere.
repo, ok = rsimbdcp.parent.repositories[rsimbdcp.repo] if !ok { repo = newMapBlobDescriptorCache() rsimbdcp.parent.repositories[rsimbdcp.repo] = repo } rsimbdcp.repository = repo } rsimbdcp.parent.mu.Unlock() if err := repo.SetDescriptor(ctx, dgst, desc); err != nil { return err } return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc) } // mapBlobDescriptorCache provides a simple map-based implementation of the // descriptor cache. type mapBlobDescriptorCache struct { descriptors map[digest.Digest]distribution.Descriptor mu sync.RWMutex } var _ distribution.BlobDescriptorService = &mapBlobDescriptorCache{} func newMapBlobDescriptorCache() *mapBlobDescriptorCache { return &mapBlobDescriptorCache{ descriptors: make(map[digest.Digest]distribution.Descriptor), } } func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { if err := dgst.Validate(); err != nil { return distribution.Descriptor{}, err } mbdc.mu.RLock() defer mbdc.mu.RUnlock() desc, ok := mbdc.descriptors[dgst] if !ok { return distribution.Descriptor{}, distribution.ErrBlobUnknown } return desc, nil } func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { mbdc.mu.Lock() defer mbdc.mu.Unlock() delete(mbdc.descriptors, dgst) return nil } func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { if err := dgst.Validate(); err != nil { return err } if err := cache.ValidateDescriptor(desc); err != nil { return err } mbdc.mu.Lock() defer mbdc.mu.Unlock() mbdc.descriptors[dgst] = desc return nil }
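// Example: a sketch of the repository-scoped view; the repository name must
// parse as a normalized reference.
package main

import (
	"context"
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/registry/storage/cache/memory"
	"github.com/opencontainers/go-digest"
)

func main() {
	ctx := context.Background()
	provider := memory.NewInMemoryBlobDescriptorCacheProvider()

	repoCache, err := provider.RepositoryScoped("library/ubuntu")
	if err != nil {
		fmt.Println("scope:", err)
		return
	}

	payload := []byte("layer data")
	desc := distribution.Descriptor{
		MediaType: "application/octet-stream",
		Size:      int64(len(payload)),
		Digest:    digest.FromBytes(payload),
	}
	if err := repoCache.SetDescriptor(ctx, desc.Digest, desc); err != nil {
		fmt.Println("set:", err)
		return
	}

	// Writes through the scoped cache also land in the provider's global map.
	if d, err := provider.Stat(ctx, desc.Digest); err == nil {
		fmt.Println("global cache has:", d.Digest)
	}
}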
9,363
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/api
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/api/errcode/register.go
package errcode import ( "fmt" "net/http" "sort" "sync" ) var ( errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} idToDescriptors = map[string]ErrorDescriptor{} groupToDescriptors = map[string][]ErrorDescriptor{} ) var ( // ErrorCodeUnknown is a generic error that can be used as a last // resort if there is no situation-specific error message that can be used ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ Value: "UNKNOWN", Message: "unknown error", Description: `Generic error returned when the error does not have an API classification.`, HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeUnsupported is returned when an operation is not supported. ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ Value: "UNSUPPORTED", Message: "The operation is unsupported.", Description: `The operation was unsupported due to a missing implementation or invalid set of parameters.`, HTTPStatusCode: http.StatusMethodNotAllowed, }) // ErrorCodeUnauthorized is returned if a request requires // authentication. ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ Value: "UNAUTHORIZED", Message: "authentication required", Description: `The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate.`, HTTPStatusCode: http.StatusUnauthorized, }) // ErrorCodeDenied is returned if a client does not have sufficient // permission to perform an action. ErrorCodeDenied = Register("errcode", ErrorDescriptor{ Value: "DENIED", Message: "requested access to the resource is denied", Description: `The access controller denied access for the operation on a resource.`, HTTPStatusCode: http.StatusForbidden, }) // ErrorCodeUnavailable provides a common error to report unavailability // of a service or endpoint. ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ Value: "UNAVAILABLE", Message: "service unavailable", Description: "Returned when a service is not available", HTTPStatusCode: http.StatusServiceUnavailable, }) // ErrorCodeTooManyRequests is returned if a client attempts too many // times to contact a service endpoint. 
ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{ Value: "TOOMANYREQUESTS", Message: "too many requests", Description: `Returned when a client attempts to contact a service too many times`, HTTPStatusCode: http.StatusTooManyRequests, }) ) var nextCode = 1000 var registerLock sync.Mutex // Register will make the passed-in error known to the environment and // return a new ErrorCode func Register(group string, descriptor ErrorDescriptor) ErrorCode { registerLock.Lock() defer registerLock.Unlock() descriptor.Code = ErrorCode(nextCode) if _, ok := idToDescriptors[descriptor.Value]; ok { panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) } if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) } groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) errorCodeToDescriptors[descriptor.Code] = descriptor idToDescriptors[descriptor.Value] = descriptor nextCode++ return descriptor.Code } type byValue []ErrorDescriptor func (a byValue) Len() int { return len(a) } func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } // GetGroupNames returns the list of Error group names that are registered func GetGroupNames() []string { keys := []string{} for k := range groupToDescriptors { keys = append(keys, k) } sort.Strings(keys) return keys } // GetErrorCodeGroup returns the named group of error descriptors func GetErrorCodeGroup(name string) []ErrorDescriptor { desc := groupToDescriptors[name] sort.Sort(byValue(desc)) return desc } // GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are // registered, irrespective of what group they're in func GetErrorAllDescriptors() []ErrorDescriptor { result := []ErrorDescriptor{} for _, group := range GetGroupNames() { result = append(result, GetErrorCodeGroup(group)...) } sort.Sort(byValue(result)) return result }
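// Example: a sketch registering an application-specific code and listing the
// registered groups; the group name and error value are made up for
// illustration.
package main

import (
	"fmt"
	"net/http"

	"github.com/docker/distribution/registry/api/errcode"
)

// errMyTeapot is registered at init time; Register panics on duplicate
// values, so registration normally happens once per process.
var errMyTeapot = errcode.Register("example.app", errcode.ErrorDescriptor{
	Value:          "TEAPOT",
	Message:        "server refuses to brew coffee",
	Description:    "Illustrative error code registered by this example.",
	HTTPStatusCode: http.StatusTeapot,
})

func main() {
	fmt.Println("groups:", errcode.GetGroupNames())
	for _, d := range errcode.GetErrorCodeGroup("example.app") {
		fmt.Printf("%s -> HTTP %d\n", d.Value, d.HTTPStatusCode)
	}
	fmt.Println("code string:", errMyTeapot.String())
}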
9,364
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/api
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/api/errcode/errors.go
package errcode

import (
	"encoding/json"
	"fmt"
	"strings"
)

// ErrorCoder is the base interface for ErrorCode and Error allowing
// users of each to just call ErrorCode to get the real ID of each
type ErrorCoder interface {
	ErrorCode() ErrorCode
}

// ErrorCode represents the error type. The errors are serialized via strings
// and the integer format may change and should *never* be exported.
type ErrorCode int

var _ error = ErrorCode(0)

// ErrorCode just returns itself
func (ec ErrorCode) ErrorCode() ErrorCode {
	return ec
}

// Error returns the ID/Value
func (ec ErrorCode) Error() string {
	// NOTE(stevvooe): Cannot use message here since it may have unpopulated args.
	return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1))
}

// Descriptor returns the descriptor for the error code.
func (ec ErrorCode) Descriptor() ErrorDescriptor {
	d, ok := errorCodeToDescriptors[ec]

	if !ok {
		return ErrorCodeUnknown.Descriptor()
	}

	return d
}

// String returns the canonical identifier for this error code.
func (ec ErrorCode) String() string {
	return ec.Descriptor().Value
}

// Message returns the human-readable error message for this error code.
func (ec ErrorCode) Message() string {
	return ec.Descriptor().Message
}

// MarshalText encodes the receiver into UTF-8-encoded text and returns the
// result.
func (ec ErrorCode) MarshalText() (text []byte, err error) {
	return []byte(ec.String()), nil
}

// UnmarshalText decodes the form generated by MarshalText.
func (ec *ErrorCode) UnmarshalText(text []byte) error {
	desc, ok := idToDescriptors[string(text)]

	if !ok {
		desc = ErrorCodeUnknown.Descriptor()
	}

	*ec = desc.Code

	return nil
}

// WithMessage creates a new Error struct based on the passed-in info and
// overrides the Message property.
func (ec ErrorCode) WithMessage(message string) Error {
	return Error{
		Code:    ec,
		Message: message,
	}
}

// WithDetail creates a new Error struct based on the passed-in info and
// sets the Detail property appropriately
func (ec ErrorCode) WithDetail(detail interface{}) Error {
	return Error{
		Code:    ec,
		Message: ec.Message(),
	}.WithDetail(detail)
}

// WithArgs creates a new Error struct and sets the Args slice
func (ec ErrorCode) WithArgs(args ...interface{}) Error {
	return Error{
		Code:    ec,
		Message: ec.Message(),
	}.WithArgs(args...)
}

// Error provides a wrapper around ErrorCode with extra Details provided.
type Error struct {
	Code    ErrorCode   `json:"code"`
	Message string      `json:"message"`
	Detail  interface{} `json:"detail,omitempty"`

	// TODO(duglin): See if we need an "args" property so we can do the
	// variable substitution right before showing the message to the user
}

var _ error = Error{}

// ErrorCode returns the ID/Value of this Error
func (e Error) ErrorCode() ErrorCode {
	return e.Code
}

// Error returns a human readable representation of the error.
func (e Error) Error() string {
	return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message)
}

// WithDetail will return a new Error, based on the current one, but with
// some Detail info added
func (e Error) WithDetail(detail interface{}) Error {
	return Error{
		Code:    e.Code,
		Message: e.Message,
		Detail:  detail,
	}
}

// WithArgs uses the passed-in list of interface{} as the substitution
// variables in the Error's Message string, but returns a new Error
func (e Error) WithArgs(args ...interface{}) Error {
	return Error{
		Code:    e.Code,
		Message: fmt.Sprintf(e.Code.Message(), args...),
		Detail:  e.Detail,
	}
}

// ErrorDescriptor provides relevant information about a given error code.
type ErrorDescriptor struct {
	// Code is the error code that this descriptor describes.
	Code ErrorCode

	// Value provides a unique, string key, often capitalized with
	// underscores, to identify the error code. This value is used as the
	// keyed value when serializing api errors.
	Value string

	// Message is a short, human-readable description of the error condition
	// included in API responses.
	Message string

	// Description provides a complete account of the error's purpose, suitable
	// for use in documentation.
	Description string

	// HTTPStatusCode provides the http status code that is associated with
	// this error condition.
	HTTPStatusCode int
}

// ParseErrorCode returns the value by the string error code.
// `ErrorCodeUnknown` will be returned if the error is not known.
func ParseErrorCode(value string) ErrorCode {
	ed, ok := idToDescriptors[value]
	if ok {
		return ed.Code
	}

	return ErrorCodeUnknown
}

// Errors provides the envelope for multiple errors and a few sugar methods
// for use within the application.
type Errors []error

var _ error = Errors{}

func (errs Errors) Error() string {
	switch len(errs) {
	case 0:
		return "<nil>"
	case 1:
		return errs[0].Error()
	default:
		msg := "errors:\n"
		for _, err := range errs {
			msg += err.Error() + "\n"
		}
		return msg
	}
}

// Len returns the current number of errors.
func (errs Errors) Len() int {
	return len(errs)
}

// MarshalJSON converts a slice of error, ErrorCode or Error into a
// slice of Error - then serializes
func (errs Errors) MarshalJSON() ([]byte, error) {
	var tmpErrs struct {
		Errors []Error `json:"errors,omitempty"`
	}

	for _, daErr := range errs {
		var err Error

		switch daErr.(type) {
		case ErrorCode:
			err = daErr.(ErrorCode).WithDetail(nil)
		case Error:
			err = daErr.(Error)
		default:
			err = ErrorCodeUnknown.WithDetail(daErr)
		}

		// If the Error struct was set up and they forgot to set the
		// Message field (meaning it's "") then grab it from the ErrCode
		msg := err.Message
		if msg == "" {
			msg = err.Code.Message()
		}

		tmpErrs.Errors = append(tmpErrs.Errors, Error{
			Code:    err.Code,
			Message: msg,
			Detail:  err.Detail,
		})
	}

	return json.Marshal(tmpErrs)
}

// UnmarshalJSON deserializes []Error and then converts it into a slice of
// Error or ErrorCode
func (errs *Errors) UnmarshalJSON(data []byte) error {
	var tmpErrs struct {
		Errors []Error
	}

	if err := json.Unmarshal(data, &tmpErrs); err != nil {
		return err
	}

	var newErrs Errors
	for _, daErr := range tmpErrs.Errors {
		// If Message is empty or exactly matches the Code's message string
		// then just use the Code, no need for a full Error struct
		if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) {
			// Errors w/o details get converted to ErrorCode
			newErrs = append(newErrs, daErr.Code)
		} else {
			// Errors w/ details are untouched
			newErrs = append(newErrs, Error{
				Code:    daErr.Code,
				Message: daErr.Message,
				Detail:  daErr.Detail,
			})
		}
	}

	*errs = newErrs
	return nil
}
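// Example: a sketch of building an error envelope and round-tripping it
// through JSON, using codes defined in this package.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/distribution/registry/api/errcode"
)

func main() {
	errs := errcode.Errors{
		errcode.ErrorCodeUnsupported, // bare code, no detail
		errcode.ErrorCodeUnknown.WithDetail(map[string]string{"cause": "example"}),
	}

	payload, err := json.Marshal(errs)
	if err != nil {
		fmt.Println("marshal:", err)
		return
	}
	fmt.Println(string(payload))

	var decoded errcode.Errors
	if err := json.Unmarshal(payload, &decoded); err != nil {
		fmt.Println("unmarshal:", err)
		return
	}
	// The detail-free entry comes back as a bare ErrorCode, the other as an
	// Error struct.
	fmt.Println("decoded", decoded.Len(), "errors:", decoded.Error())
}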
9,365
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/api
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/api/errcode/handler.go
package errcode import ( "encoding/json" "net/http" ) // ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err // and sets the content-type header to 'application/json'. It will handle // ErrorCoder and Errors, and if necessary will create an envelope. func ServeJSON(w http.ResponseWriter, err error) error { w.Header().Set("Content-Type", "application/json; charset=utf-8") var sc int switch errs := err.(type) { case Errors: if len(errs) < 1 { break } if err, ok := errs[0].(ErrorCoder); ok { sc = err.ErrorCode().Descriptor().HTTPStatusCode } case ErrorCoder: sc = errs.ErrorCode().Descriptor().HTTPStatusCode err = Errors{err} // create an envelope. default: // We just have an unhandled error type, so just place in an envelope // and move along. err = Errors{err} } if sc == 0 { sc = http.StatusInternalServerError } w.WriteHeader(sc) return json.NewEncoder(w).Encode(err) }
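// Example: a sketch of using ServeJSON from an HTTP handler; the route,
// listen address, and failure condition are placeholders.
package main

import (
	"net/http"

	"github.com/docker/distribution/registry/api/errcode"
)

func main() {
	http.HandleFunc("/v2/example", func(w http.ResponseWriter, r *http.Request) {
		if r.Header.Get("Authorization") == "" {
			// ServeJSON sets the status from the code's descriptor (401
			// here) and writes the JSON error envelope.
			_ = errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized)
			return
		}
		w.WriteHeader(http.StatusOK)
	})
	_ = http.ListenAndServe(":8080", nil) // placeholder address
}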
9,366
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/api
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go
package v2

import (
	"fmt"
	"regexp"
	"strings"
	"unicode"
)

var (
	// according to rfc7230
	reToken            = regexp.MustCompile(`^[^"(),/:;<=>?@[\]{}[:space:][:cntrl:]]+`)
	reQuotedValue      = regexp.MustCompile(`^[^\\"]+`)
	reEscapedCharacter = regexp.MustCompile(`^[[:blank:][:graph:]]`)
)

// parseForwardedHeader is a benevolent parser of the Forwarded header defined in rfc7239. The header contains
// a comma-separated list of forwarding key-value pairs. Each list element is set by a single proxy. The
// function parses only the first element of the list, which is set by the very first proxy. It returns a map
// of corresponding key-value pairs and an unparsed slice of the input string.
//
// Examples of Forwarded header values:
//
// 1. Forwarded: For=192.0.2.43; Proto=https,For="[2001:db8:cafe::17]",For=unknown
// 2. Forwarded: for="192.0.2.43:443"; host="registry.example.org", for="10.10.05.40:80"
//
// The first will be parsed into {"for": "192.0.2.43", "proto": "https"} while the second into
// {"for": "192.0.2.43:443", "host": "registry.example.org"}.
func parseForwardedHeader(forwarded string) (map[string]string, string, error) {
	// Following are states of forwarded header parser. Any state could transition to a failure.
	const (
		// terminating state; can transition to Parameter
		stateElement = iota
		// terminating state; can transition to KeyValueDelimiter
		stateParameter
		// can transition to Value
		stateKeyValueDelimiter
		// can transition to one of { QuotedValue, PairEnd }
		stateValue
		// can transition to one of { EscapedCharacter, PairEnd }
		stateQuotedValue
		// can transition to one of { QuotedValue }
		stateEscapedCharacter
		// terminating state; can transition to one of { Parameter, Element }
		statePairEnd
	)

	var (
		parameter string
		value     string
		parse     = forwarded[:]
		res       = map[string]string{}
		state     = stateElement
	)

Loop:
	for {
		// skip spaces unless in quoted value
		if state != stateQuotedValue && state != stateEscapedCharacter {
			parse = strings.TrimLeftFunc(parse, unicode.IsSpace)
		}

		if len(parse) == 0 {
			if state != stateElement && state != statePairEnd && state != stateParameter {
				return nil, parse, fmt.Errorf("unexpected end of input")
			}
			// terminating
			break
		}

		switch state {
		// terminate at list element delimiter
		case stateElement:
			if parse[0] == ',' {
				parse = parse[1:]
				break Loop
			}
			state = stateParameter

		// parse parameter (the key of key-value pair)
		case stateParameter:
			match := reToken.FindString(parse)
			if len(match) == 0 {
				return nil, parse, fmt.Errorf("failed to parse token at position %d", len(forwarded)-len(parse))
			}
			parameter = strings.ToLower(match)
			parse = parse[len(match):]
			state = stateKeyValueDelimiter

		// parse '='
		case stateKeyValueDelimiter:
			if parse[0] != '=' {
				return nil, parse, fmt.Errorf("expected '=', not '%c' at position %d", parse[0], len(forwarded)-len(parse))
			}
			parse = parse[1:]
			state = stateValue

		// parse value or quoted value
		case stateValue:
			if parse[0] == '"' {
				parse = parse[1:]
				state = stateQuotedValue
			} else {
				value = reToken.FindString(parse)
				if len(value) == 0 {
					return nil, parse, fmt.Errorf("failed to parse value at position %d", len(forwarded)-len(parse))
				}
				if _, exists := res[parameter]; exists {
					return nil, parse, fmt.Errorf("duplicate parameter %q at position %d", parameter, len(forwarded)-len(parse))
				}
				res[parameter] = value
				parse = parse[len(value):]
				value = ""
				state = statePairEnd
			}

		// parse a part of quoted value until the first backslash
		case stateQuotedValue:
			match := reQuotedValue.FindString(parse)
			value += match
			parse = parse[len(match):]
			switch {
			case len(parse) == 0:
return nil, parse, fmt.Errorf("unterminated quoted string") case parse[0] == '"': res[parameter] = value value = "" parse = parse[1:] state = statePairEnd case parse[0] == '\\': parse = parse[1:] state = stateEscapedCharacter } // parse escaped character in a quoted string, ignore the backslash // transition back to QuotedValue state case stateEscapedCharacter: c := reEscapedCharacter.FindString(parse) if len(c) == 0 { return nil, parse, fmt.Errorf("invalid escape sequence at position %d", len(forwarded)-len(parse)-1) } value += c parse = parse[1:] state = stateQuotedValue // expect either a new key-value pair, new list or end of input case statePairEnd: switch parse[0] { case ';': parse = parse[1:] state = stateParameter case ',': state = stateElement default: return nil, parse, fmt.Errorf("expected ',' or ';', not %c at position %d", parse[0], len(forwarded)-len(parse)) } } } return res, parse, nil }
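// Example: parseForwardedHeader is unexported, so this sketch belongs in a
// test file within package v2 (assumed name: headerparser_test.go). It uses
// the second example from the function's doc comment.
package v2

import "testing"

func TestParseForwardedHeaderSketch(t *testing.T) {
	params, rest, err := parseForwardedHeader(`for="192.0.2.43:443"; host="registry.example.org", for="10.10.05.40:80"`)
	if err != nil {
		t.Fatal(err)
	}
	// Only the first list element (set by the first proxy) is parsed; the
	// remainder of the list is returned unparsed.
	if params["for"] != "192.0.2.43:443" {
		t.Errorf(`for = %q, want "192.0.2.43:443"`, params["for"])
	}
	if params["host"] != "registry.example.org" {
		t.Errorf(`host = %q, want "registry.example.org"`, params["host"])
	}
	if rest == "" {
		t.Error("expected the second list element to remain unparsed")
	}
}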
9,367
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/api
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/api/v2/urls.go
package v2

import (
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"github.com/docker/distribution/reference"
	"github.com/gorilla/mux"
)

// URLBuilder creates registry API urls from a single base endpoint. It can be
// used to create urls for use in a registry client or server.
//
// All urls will be created from the given base, including the api version.
// For example, if a root of "/foo/" is provided, urls generated will fall
// under "/foo/v2/...". Most applications will only provide a schema, host and
// port, such as "https://localhost:5000/".
type URLBuilder struct {
	root     *url.URL // url root (ie http://localhost/)
	router   *mux.Router
	relative bool
}

// NewURLBuilder creates a URLBuilder with provided root url object.
func NewURLBuilder(root *url.URL, relative bool) *URLBuilder {
	return &URLBuilder{
		root:     root,
		router:   Router(),
		relative: relative,
	}
}

// NewURLBuilderFromString works identically to NewURLBuilder except it takes
// a string argument for the root, returning an error if it is not a valid
// url.
func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) {
	u, err := url.Parse(root)
	if err != nil {
		return nil, err
	}

	return NewURLBuilder(u, relative), nil
}

// NewURLBuilderFromRequest uses information from an *http.Request to
// construct the root url.
func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder {
	var (
		scheme = "http"
		host   = r.Host
	)

	if r.TLS != nil {
		scheme = "https"
	} else if len(r.URL.Scheme) > 0 {
		scheme = r.URL.Scheme
	}

	// Handle forwarded headers
	// Prefer "Forwarded" header as defined by rfc7239 if given
	// see https://tools.ietf.org/html/rfc7239
	if forwarded := r.Header.Get("Forwarded"); len(forwarded) > 0 {
		forwardedHeader, _, err := parseForwardedHeader(forwarded)
		if err == nil {
			if fproto := forwardedHeader["proto"]; len(fproto) > 0 {
				scheme = fproto
			}
			if fhost := forwardedHeader["host"]; len(fhost) > 0 {
				host = fhost
			}
		}
	} else {
		if forwardedProto := r.Header.Get("X-Forwarded-Proto"); len(forwardedProto) > 0 {
			scheme = forwardedProto
		}
		if forwardedHost := r.Header.Get("X-Forwarded-Host"); len(forwardedHost) > 0 {
			// According to the Apache mod_proxy docs, X-Forwarded-Host can be a
			// comma-separated list of hosts, to which each proxy appends the
			// requested host. We want to grab the first from this comma-separated
			// list.
			hosts := strings.SplitN(forwardedHost, ",", 2)
			host = strings.TrimSpace(hosts[0])
		}
	}

	basePath := routeDescriptorsMap[RouteNameBase].Path

	requestPath := r.URL.Path
	index := strings.Index(requestPath, basePath)

	u := &url.URL{
		Scheme: scheme,
		Host:   host,
	}

	if index > 0 {
		// N.B. index+1 is important because we want to include the trailing /
		u.Path = requestPath[0 : index+1]
	}

	return NewURLBuilder(u, relative)
}

// BuildBaseURL constructs a base url for the API, typically just "/v2/".
func (ub *URLBuilder) BuildBaseURL() (string, error) {
	route := ub.cloneRoute(RouteNameBase)

	baseURL, err := route.URL()
	if err != nil {
		return "", err
	}

	return baseURL.String(), nil
}

// BuildCatalogURL constructs a url to get a catalog of repositories
func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) {
	route := ub.cloneRoute(RouteNameCatalog)

	catalogURL, err := route.URL()
	if err != nil {
		return "", err
	}

	return appendValuesURL(catalogURL, values...).String(), nil
}

// BuildTagsURL constructs a url to list the tags in the named repository.
func (ub *URLBuilder) BuildTagsURL(name reference.Named) (string, error) {
	route := ub.cloneRoute(RouteNameTags)

	tagsURL, err := route.URL("name", name.Name())
	if err != nil {
		return "", err
	}

	return tagsURL.String(), nil
}

// BuildManifestURL constructs a url for the manifest identified by name and
// reference. The argument reference may be either a tag or digest.
func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) {
	route := ub.cloneRoute(RouteNameManifest)

	tagOrDigest := ""
	switch v := ref.(type) {
	case reference.Tagged:
		tagOrDigest = v.Tag()
	case reference.Digested:
		tagOrDigest = v.Digest().String()
	default:
		return "", fmt.Errorf("reference must have a tag or digest")
	}

	manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest)
	if err != nil {
		return "", err
	}

	return manifestURL.String(), nil
}

// BuildBlobURL constructs the url for the blob identified by name and dgst.
func (ub *URLBuilder) BuildBlobURL(ref reference.Canonical) (string, error) {
	route := ub.cloneRoute(RouteNameBlob)

	layerURL, err := route.URL("name", ref.Name(), "digest", ref.Digest().String())
	if err != nil {
		return "", err
	}

	return layerURL.String(), nil
}

// BuildBlobUploadURL constructs a url to begin a blob upload in the
// repository identified by name.
func (ub *URLBuilder) BuildBlobUploadURL(name reference.Named, values ...url.Values) (string, error) {
	route := ub.cloneRoute(RouteNameBlobUpload)

	uploadURL, err := route.URL("name", name.Name())
	if err != nil {
		return "", err
	}

	return appendValuesURL(uploadURL, values...).String(), nil
}

// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid,
// including any url values. This should generally not be used by clients, as
// this url is provided by server implementations during the blob upload
// process.
func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string, values ...url.Values) (string, error) {
	route := ub.cloneRoute(RouteNameBlobUploadChunk)

	uploadURL, err := route.URL("name", name.Name(), "uuid", uuid)
	if err != nil {
		return "", err
	}

	return appendValuesURL(uploadURL, values...).String(), nil
}

// cloneRoute returns a clone of the named route from the router. Routes
// must be cloned to avoid modifying them during url generation.
func (ub *URLBuilder) cloneRoute(name string) clonedRoute {
	route := new(mux.Route)
	root := new(url.URL)

	*route = *ub.router.GetRoute(name) // clone the route
	*root = *ub.root

	return clonedRoute{Route: route, root: root, relative: ub.relative}
}

type clonedRoute struct {
	*mux.Route
	root     *url.URL
	relative bool
}

func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) {
	routeURL, err := cr.Route.URL(pairs...)
	if err != nil {
		return nil, err
	}

	if cr.relative {
		return routeURL, nil
	}

	if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" {
		routeURL.Path = routeURL.Path[1:]
	}

	url := cr.root.ResolveReference(routeURL)
	url.Scheme = cr.root.Scheme
	return url, nil
}

// appendValuesURL appends the parameters to the url.
func appendValuesURL(u *url.URL, values ...url.Values) *url.URL {
	merged := u.Query()

	for _, v := range values {
		for k, vv := range v {
			merged[k] = append(merged[k], vv...)
		}
	}

	u.RawQuery = merged.Encode()
	return u
}

// appendValues appends the parameters to the url. Panics if the string is not
// a url.
func appendValues(u string, values ...url.Values) string {
	up, err := url.Parse(u)
	if err != nil {
		panic(err) // should never happen
	}

	return appendValuesURL(up, values...).String()
}
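// Example: a sketch of building API URLs from a fixed root; the registry
// host is a placeholder.
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
	v2 "github.com/docker/distribution/registry/api/v2"
)

func main() {
	ub, err := v2.NewURLBuilderFromString("https://registry.example.com", false)
	if err != nil {
		fmt.Println("builder:", err)
		return
	}

	named, err := reference.WithName("library/ubuntu")
	if err != nil {
		fmt.Println("name:", err)
		return
	}
	tagged, err := reference.WithTag(named, "14.04")
	if err != nil {
		fmt.Println("tag:", err)
		return
	}

	base, _ := ub.BuildBaseURL()
	manifest, _ := ub.BuildManifestURL(tagged)
	tags, _ := ub.BuildTagsURL(named)
	fmt.Println(base)     // https://registry.example.com/v2/
	fmt.Println(manifest) // .../v2/library/ubuntu/manifests/14.04
	fmt.Println(tags)     // .../v2/library/ubuntu/tags/list
}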
9,368
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/api
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/api/v2/errors.go
package v2

import (
	"net/http"

	"github.com/docker/distribution/registry/api/errcode"
)

const errGroup = "registry.api.v2"

var (
	// ErrorCodeDigestInvalid is returned when uploading a blob if the
	// provided digest does not match the blob contents.
	ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
		Value:   "DIGEST_INVALID",
		Message: "provided digest did not match uploaded content",
		Description: `When a blob is uploaded, the registry will check that
		the content matches the digest provided by the client. The error may
		include a detail structure with the key "digest", including the
		invalid digest string. This error may also be returned when a manifest
		includes an invalid layer digest.`,
		HTTPStatusCode: http.StatusBadRequest,
	})

	// ErrorCodeSizeInvalid is returned when uploading a blob if the provided
	// length does not match the content length.
	ErrorCodeSizeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
		Value:   "SIZE_INVALID",
		Message: "provided length did not match content length",
		Description: `When a layer is uploaded, the provided size will be
		checked against the uploaded content. If they do not match, this error
		will be returned.`,
		HTTPStatusCode: http.StatusBadRequest,
	})

	// ErrorCodeNameInvalid is returned when the name in the manifest does not
	// match the provided name.
	ErrorCodeNameInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
		Value:   "NAME_INVALID",
		Message: "invalid repository name",
		Description: `Invalid repository name encountered either during
		manifest validation or any API operation.`,
		HTTPStatusCode: http.StatusBadRequest,
	})

	// ErrorCodeTagInvalid is returned when the tag in the manifest does not
	// match the provided tag.
	ErrorCodeTagInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
		Value:   "TAG_INVALID",
		Message: "manifest tag did not match URI",
		Description: `During a manifest upload, if the tag in the manifest
		does not match the uri tag, this error will be returned.`,
		HTTPStatusCode: http.StatusBadRequest,
	})

	// ErrorCodeNameUnknown is returned when the repository name is not known.
	ErrorCodeNameUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
		Value:   "NAME_UNKNOWN",
		Message: "repository name not known to registry",
		Description: `This is returned if the name used during an operation is
		unknown to the registry.`,
		HTTPStatusCode: http.StatusNotFound,
	})

	// ErrorCodeManifestUnknown is returned when the image manifest is unknown.
	ErrorCodeManifestUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
		Value:   "MANIFEST_UNKNOWN",
		Message: "manifest unknown",
		Description: `This error is returned when the manifest, identified by
		name and tag is unknown to the repository.`,
		HTTPStatusCode: http.StatusNotFound,
	})

	// ErrorCodeManifestInvalid is returned when an image manifest is invalid,
	// typically during a PUT operation. This error encompasses all errors
	// encountered during manifest validation that aren't signature errors.
	ErrorCodeManifestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
		Value:   "MANIFEST_INVALID",
		Message: "manifest invalid",
		Description: `During upload, manifests undergo several checks ensuring
		validity. If those checks fail, this error may be returned, unless a
		more specific error is included. The detail will contain information
		about the failed validation.`,
		HTTPStatusCode: http.StatusBadRequest,
	})

	// ErrorCodeManifestUnverified is returned when the manifest fails
	// signature verification.
ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "MANIFEST_UNVERIFIED", Message: "manifest failed signature verification", Description: `During manifest upload, if the manifest fails signature verification, this error will be returned.`, HTTPStatusCode: http.StatusBadRequest, }) // ErrorCodeManifestBlobUnknown is returned when a manifest blob is // unknown to the registry. ErrorCodeManifestBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "MANIFEST_BLOB_UNKNOWN", Message: "blob unknown to registry", Description: `This error may be returned when a manifest blob is unknown to the registry.`, HTTPStatusCode: http.StatusBadRequest, }) // ErrorCodeBlobUnknown is returned when a blob is unknown to the // registry. This can happen when the manifest references a nonexistent // layer or the result is not found by a blob fetch. ErrorCodeBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "BLOB_UNKNOWN", Message: "blob unknown to registry", Description: `This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload.`, HTTPStatusCode: http.StatusNotFound, }) // ErrorCodeBlobUploadUnknown is returned when an upload is unknown. ErrorCodeBlobUploadUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "BLOB_UPLOAD_UNKNOWN", Message: "blob upload unknown to registry", Description: `If a blob upload has been cancelled or was never started, this error code may be returned.`, HTTPStatusCode: http.StatusNotFound, }) // ErrorCodeBlobUploadInvalid is returned when an upload is invalid. ErrorCodeBlobUploadInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "BLOB_UPLOAD_INVALID", Message: "blob upload invalid", Description: `The blob upload encountered an error and can no longer proceed.`, HTTPStatusCode: http.StatusNotFound, }) )
9,369
0
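A minimal sketch of how a registry handler might surface one of the error codes above. ServeJSON and WithDetail are helpers from the errcode package; the handler itself and its signature are illustrative assumptions, not code from this repository.

package main

import (
	"net/http"

	"github.com/docker/distribution/registry/api/errcode"
	v2 "github.com/docker/distribution/registry/api/v2"
)

// serveManifest is a hypothetical handler fragment: when the manifest is not
// found, it returns the registered MANIFEST_UNKNOWN error, whose descriptor
// already carries the canonical HTTP status (404).
func serveManifest(w http.ResponseWriter, r *http.Request, found bool) {
	if !found {
		// Attach the requested path as detail; ServeJSON writes the JSON
		// error envelope and the status from the error's descriptor.
		_ = errcode.ServeJSON(w, v2.ErrorCodeManifestUnknown.WithDetail(r.URL.Path))
		return
	}
	w.WriteHeader(http.StatusOK)
}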
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/api
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go
package v2 import ( "net/http" "regexp" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" "github.com/opencontainers/go-digest" ) var ( nameParameterDescriptor = ParameterDescriptor{ Name: "name", Type: "string", Format: reference.NameRegexp.String(), Required: true, Description: `Name of the target repository.`, } referenceParameterDescriptor = ParameterDescriptor{ Name: "reference", Type: "string", Format: reference.TagRegexp.String(), Required: true, Description: `Tag or digest of the target manifest.`, } uuidParameterDescriptor = ParameterDescriptor{ Name: "uuid", Type: "opaque", Required: true, Description: "A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.", } digestPathParameter = ParameterDescriptor{ Name: "digest", Type: "path", Required: true, Format: digest.DigestRegexp.String(), Description: `Digest of desired blob.`, } hostHeader = ParameterDescriptor{ Name: "Host", Type: "string", Description: "Standard HTTP Host Header. Should be set to the registry host.", Format: "<registry host>", Examples: []string{"registry-1.docker.io"}, } authHeader = ParameterDescriptor{ Name: "Authorization", Type: "string", Description: "An RFC7235 compliant authorization header.", Format: "<scheme> <token>", Examples: []string{"Bearer dGhpcyBpcyBhIGZha2UgYmVhcmVyIHRva2VuIQ=="}, } authChallengeHeader = ParameterDescriptor{ Name: "WWW-Authenticate", Type: "string", Description: "An RFC7235 compliant authentication challenge header.", Format: `<scheme> realm="<realm>", ..."`, Examples: []string{ `Bearer realm="https://auth.docker.com/", service="registry.docker.com", scopes="repository:library/ubuntu:pull"`, }, } contentLengthZeroHeader = ParameterDescriptor{ Name: "Content-Length", Description: "The `Content-Length` header must be zero and the body must be empty.", Type: "integer", Format: "0", } dockerUploadUUIDHeader = ParameterDescriptor{ Name: "Docker-Upload-UUID", Description: "Identifies the docker upload uuid for the current request.", Type: "uuid", Format: "<uuid>", } digestHeader = ParameterDescriptor{ Name: "Docker-Content-Digest", Description: "Digest of the targeted content for the request.", Type: "digest", Format: "<digest>", } linkHeader = ParameterDescriptor{ Name: "Link", Type: "link", Description: "RFC5988 compliant rel='next' with URL to next result set, if available", Format: `<<url>?n=<last n value>&last=<last entry from response>>; rel="next"`, } paginationParameters = []ParameterDescriptor{ { Name: "n", Type: "integer", Description: "Limit the number of entries in each response. 
It not present, all entries will be returned.", Format: "<integer>", Required: false, }, { Name: "last", Type: "string", Description: "Result set will include values lexically after last.", Format: "<integer>", Required: false, }, } unauthorizedResponseDescriptor = ResponseDescriptor{ Name: "Authentication Required", StatusCode: http.StatusUnauthorized, Description: "The client is not authenticated.", Headers: []ParameterDescriptor{ authChallengeHeader, { Name: "Content-Length", Type: "integer", Description: "Length of the JSON response body.", Format: "<length>", }, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ errcode.ErrorCodeUnauthorized, }, } repositoryNotFoundResponseDescriptor = ResponseDescriptor{ Name: "No Such Repository Error", StatusCode: http.StatusNotFound, Description: "The repository is not known to the registry.", Headers: []ParameterDescriptor{ { Name: "Content-Length", Type: "integer", Description: "Length of the JSON response body.", Format: "<length>", }, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameUnknown, }, } deniedResponseDescriptor = ResponseDescriptor{ Name: "Access Denied", StatusCode: http.StatusForbidden, Description: "The client does not have required access to the repository.", Headers: []ParameterDescriptor{ { Name: "Content-Length", Type: "integer", Description: "Length of the JSON response body.", Format: "<length>", }, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ errcode.ErrorCodeDenied, }, } tooManyRequestsDescriptor = ResponseDescriptor{ Name: "Too Many Requests", StatusCode: http.StatusTooManyRequests, Description: "The client made too many requests within a time interval.", Headers: []ParameterDescriptor{ { Name: "Content-Length", Type: "integer", Description: "Length of the JSON response body.", Format: "<length>", }, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ errcode.ErrorCodeTooManyRequests, }, } ) const ( manifestBody = `{ "name": <name>, "tag": <tag>, "fsLayers": [ { "blobSum": "<digest>" }, ... ] ], "history": <v1 images>, "signature": <JWS> }` errorsBody = `{ "errors:" [ { "code": <error code>, "message": "<error message>", "detail": ... }, ... ] }` ) // APIDescriptor exports descriptions of the layout of the v2 registry API. var APIDescriptor = struct { // RouteDescriptors provides a list of the routes available in the API. RouteDescriptors []RouteDescriptor }{ RouteDescriptors: routeDescriptors, } // RouteDescriptor describes a route specified by name. type RouteDescriptor struct { // Name is the name of the route, as specified in RouteNameXXX exports. // These names a should be considered a unique reference for a route. If // the route is registered with gorilla, this is the name that will be // used. Name string // Path is a gorilla/mux-compatible regexp that can be used to match the // route. For any incoming method and path, only one route descriptor // should match. Path string // Entity should be a short, human-readalbe description of the object // targeted by the endpoint. Entity string // Description should provide an accurate overview of the functionality // provided by the route. 
Description string

	// Methods should describe the various HTTP methods that may be used on
	// this route, including request and response formats.
	Methods []MethodDescriptor
}

// MethodDescriptor provides a description of the requests that may be
// conducted with the target method.
type MethodDescriptor struct {
	// Method is an HTTP method, such as GET, PUT or POST.
	Method string

	// Description should provide an overview of the functionality provided by
	// the covered method, suitable for use in documentation. Use of markdown
	// here is encouraged.
	Description string

	// Requests is a slice of request descriptors enumerating how this
	// endpoint may be used.
	Requests []RequestDescriptor
}

// RequestDescriptor covers a particular set of headers and parameters that
// can be carried out with the parent method. It's most helpful to have one
// RequestDescriptor per API use case.
type RequestDescriptor struct {
	// Name provides a short identifier for the request, usable as a title or
	// to provide quick context for the particular request.
	Name string

	// Description should cover the request's purpose, covering any details for
	// this particular use case.
	Description string

	// Headers describes headers that must be used with the HTTP request.
	Headers []ParameterDescriptor

	// PathParameters enumerate the parameterized path components for the
	// given request, as defined in the route's regular expression.
	PathParameters []ParameterDescriptor

	// QueryParameters provides a list of query parameters for the given
	// request.
	QueryParameters []ParameterDescriptor

	// Body describes the format of the request body.
	Body BodyDescriptor

	// Successes enumerates the possible responses that are considered to be
	// the result of a successful request.
	Successes []ResponseDescriptor

	// Failures covers the possible failures from this particular request.
	Failures []ResponseDescriptor
}

// ResponseDescriptor describes the components of an API response.
type ResponseDescriptor struct {
	// Name provides a short identifier for the response, usable as a title or
	// to provide quick context for the particular response.
	Name string

	// Description should provide a brief overview of the role of the
	// response.
	Description string

	// StatusCode specifies the status received by this particular response.
	StatusCode int

	// Headers covers any headers that may be returned from the response.
	Headers []ParameterDescriptor

	// Fields describes any fields that may be present in the response.
	Fields []ParameterDescriptor

	// ErrorCodes enumerates the error codes that may be returned along with
	// the response.
	ErrorCodes []errcode.ErrorCode

	// Body describes the body of the response, if any.
	Body BodyDescriptor
}

// BodyDescriptor describes a request body and its expected content type. For
// the most part, it should be example json or some placeholder for body
// data in documentation.
type BodyDescriptor struct {
	ContentType string
	Format      string
}

// ParameterDescriptor describes the format of a request parameter, which may
// be a header, path parameter or query parameter.
type ParameterDescriptor struct {
	// Name is the name of the parameter, either of the path component or
	// query parameter.
	Name string

	// Type specifies the type of the parameter, such as string, integer, etc.
	Type string

	// Description provides a human-readable description of the parameter.
	Description string

	// Required means the field is required when set.
	Required bool

	// Format specifies the string format accepted by this parameter.
Format string // Regexp is a compiled regular expression that can be used to validate // the contents of the parameter. Regexp *regexp.Regexp // Examples provides multiple examples for the values that might be valid // for this parameter. Examples []string } var routeDescriptors = []RouteDescriptor{ { Name: RouteNameBase, Path: "/v2/", Entity: "Base", Description: `Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authentication.`, Methods: []MethodDescriptor{ { Method: "GET", Description: "Check that the endpoint implements Docker Registry API V2.", Requests: []RequestDescriptor{ { Headers: []ParameterDescriptor{ hostHeader, authHeader, }, Successes: []ResponseDescriptor{ { Description: "The API implements V2 protocol and is accessible.", StatusCode: http.StatusOK, }, }, Failures: []ResponseDescriptor{ { Description: "The registry does not implement the V2 API.", StatusCode: http.StatusNotFound, }, unauthorizedResponseDescriptor, tooManyRequestsDescriptor, }, }, }, }, }, }, { Name: RouteNameTags, Path: "/v2/{name:" + reference.NameRegexp.String() + "}/tags/list", Entity: "Tags", Description: "Retrieve information about tags.", Methods: []MethodDescriptor{ { Method: "GET", Description: "Fetch the tags under the repository identified by `name`.", Requests: []RequestDescriptor{ { Name: "Tags", Description: "Return all tags for the repository", Headers: []ParameterDescriptor{ hostHeader, authHeader, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, }, Successes: []ResponseDescriptor{ { StatusCode: http.StatusOK, Description: "A list of tags for the named repository.", Headers: []ParameterDescriptor{ { Name: "Content-Length", Type: "integer", Description: "Length of the JSON response body.", Format: "<length>", }, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: `{ "name": <name>, "tags": [ <tag>, ... ] }`, }, }, }, Failures: []ResponseDescriptor{ unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, tooManyRequestsDescriptor, }, }, { Name: "Tags Paginated", Description: "Return a portion of the tags for the specified repository.", PathParameters: []ParameterDescriptor{nameParameterDescriptor}, QueryParameters: paginationParameters, Successes: []ResponseDescriptor{ { StatusCode: http.StatusOK, Description: "A list of tags for the named repository.", Headers: []ParameterDescriptor{ { Name: "Content-Length", Type: "integer", Description: "Length of the JSON response body.", Format: "<length>", }, linkHeader, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: `{ "name": <name>, "tags": [ <tag>, ... ], }`, }, }, }, Failures: []ResponseDescriptor{ unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, tooManyRequestsDescriptor, }, }, }, }, }, }, { Name: RouteNameManifest, Path: "/v2/{name:" + reference.NameRegexp.String() + "}/manifests/{reference:" + reference.TagRegexp.String() + "|" + digest.DigestRegexp.String() + "}", Entity: "Manifest", Description: "Create, update, delete and retrieve manifests.", Methods: []MethodDescriptor{ { Method: "GET", Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. 
A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", Requests: []RequestDescriptor{ { Headers: []ParameterDescriptor{ hostHeader, authHeader, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, referenceParameterDescriptor, }, Successes: []ResponseDescriptor{ { Description: "The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image.", StatusCode: http.StatusOK, Headers: []ParameterDescriptor{ digestHeader, }, Body: BodyDescriptor{ ContentType: "<media type of manifest>", Format: manifestBody, }, }, }, Failures: []ResponseDescriptor{ { Description: "The name or reference was invalid.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameInvalid, ErrorCodeTagInvalid, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, tooManyRequestsDescriptor, }, }, }, }, { Method: "PUT", Description: "Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", Requests: []RequestDescriptor{ { Headers: []ParameterDescriptor{ hostHeader, authHeader, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, referenceParameterDescriptor, }, Body: BodyDescriptor{ ContentType: "<media type of manifest>", Format: manifestBody, }, Successes: []ResponseDescriptor{ { Description: "The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.", StatusCode: http.StatusCreated, Headers: []ParameterDescriptor{ { Name: "Location", Type: "url", Description: "The canonical location url of the uploaded manifest.", Format: "<url>", }, contentLengthZeroHeader, digestHeader, }, }, }, Failures: []ResponseDescriptor{ { Name: "Invalid Manifest", Description: "The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request.", StatusCode: http.StatusBadRequest, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameInvalid, ErrorCodeTagInvalid, ErrorCodeManifestInvalid, ErrorCodeManifestUnverified, ErrorCodeBlobUnknown, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, tooManyRequestsDescriptor, { Name: "Missing Layer(s)", Description: "One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeBlobUnknown, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: `{ "errors:" [{ "code": "BLOB_UNKNOWN", "message": "blob unknown to registry", "detail": { "digest": "<digest>" } }, ... ] }`, }, }, { Name: "Not allowed", Description: "Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason", StatusCode: http.StatusMethodNotAllowed, ErrorCodes: []errcode.ErrorCode{ errcode.ErrorCodeUnsupported, }, }, }, }, }, }, { Method: "DELETE", Description: "Delete the manifest identified by `name` and `reference`. 
Note that a manifest can _only_ be deleted by `digest`.", Requests: []RequestDescriptor{ { Headers: []ParameterDescriptor{ hostHeader, authHeader, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, referenceParameterDescriptor, }, Successes: []ResponseDescriptor{ { StatusCode: http.StatusAccepted, }, }, Failures: []ResponseDescriptor{ { Name: "Invalid Name or Reference", Description: "The specified `name` or `reference` were invalid and the delete was unable to proceed.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameInvalid, ErrorCodeTagInvalid, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, tooManyRequestsDescriptor, { Name: "Unknown Manifest", Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.", StatusCode: http.StatusNotFound, ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameUnknown, ErrorCodeManifestUnknown, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, { Name: "Not allowed", Description: "Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled.", StatusCode: http.StatusMethodNotAllowed, ErrorCodes: []errcode.ErrorCode{ errcode.ErrorCodeUnsupported, }, }, }, }, }, }, }, }, { Name: RouteNameBlob, Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}", Entity: "Blob", Description: "Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest.", Methods: []MethodDescriptor{ { Method: "GET", Description: "Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", Requests: []RequestDescriptor{ { Name: "Fetch Blob", Headers: []ParameterDescriptor{ hostHeader, authHeader, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, digestPathParameter, }, Successes: []ResponseDescriptor{ { Description: "The blob identified by `digest` is available. 
The blob content will be present in the body of the request.", StatusCode: http.StatusOK, Headers: []ParameterDescriptor{ { Name: "Content-Length", Type: "integer", Description: "The length of the requested blob content.", Format: "<length>", }, digestHeader, }, Body: BodyDescriptor{ ContentType: "application/octet-stream", Format: "<blob binary data>", }, }, { Description: "The blob identified by `digest` is available at the provided location.", StatusCode: http.StatusTemporaryRedirect, Headers: []ParameterDescriptor{ { Name: "Location", Type: "url", Description: "The location where the layer should be accessible.", Format: "<blob location>", }, digestHeader, }, }, }, Failures: []ResponseDescriptor{ { Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameInvalid, ErrorCodeDigestInvalid, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, { Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", StatusCode: http.StatusNotFound, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameUnknown, ErrorCodeBlobUnknown, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, tooManyRequestsDescriptor, }, }, { Name: "Fetch Blob Part", Description: "This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content.", Headers: []ParameterDescriptor{ hostHeader, authHeader, { Name: "Range", Type: "string", Description: "HTTP Range header specifying blob chunk.", Format: "bytes=<start>-<end>", }, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, digestPathParameter, }, Successes: []ResponseDescriptor{ { Description: "The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the request.", StatusCode: http.StatusPartialContent, Headers: []ParameterDescriptor{ { Name: "Content-Length", Type: "integer", Description: "The length of the requested blob chunk.", Format: "<length>", }, { Name: "Content-Range", Type: "byte range", Description: "Content range of blob chunk.", Format: "bytes <start>-<end>/<size>", }, }, Body: BodyDescriptor{ ContentType: "application/octet-stream", Format: "<blob binary data>", }, }, }, Failures: []ResponseDescriptor{ { Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameInvalid, ErrorCodeDigestInvalid, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, { StatusCode: http.StatusNotFound, ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameUnknown, ErrorCodeBlobUnknown, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, { Description: "The range specification cannot be satisfied for the requested content. 
This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.", StatusCode: http.StatusRequestedRangeNotSatisfiable, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, tooManyRequestsDescriptor, }, }, }, }, { Method: "DELETE", Description: "Delete the blob identified by `name` and `digest`", Requests: []RequestDescriptor{ { Headers: []ParameterDescriptor{ hostHeader, authHeader, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, digestPathParameter, }, Successes: []ResponseDescriptor{ { StatusCode: http.StatusAccepted, Headers: []ParameterDescriptor{ { Name: "Content-Length", Type: "integer", Description: "0", Format: "0", }, digestHeader, }, }, }, Failures: []ResponseDescriptor{ { Name: "Invalid Name or Digest", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, }, }, { Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", StatusCode: http.StatusNotFound, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameUnknown, ErrorCodeBlobUnknown, }, }, { Description: "Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled", StatusCode: http.StatusMethodNotAllowed, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ errcode.ErrorCodeUnsupported, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, tooManyRequestsDescriptor, }, }, }, }, // TODO(stevvooe): We may want to add a PUT request here to // kickoff an upload of a blob, integrated with the blob upload // API. }, }, { Name: RouteNameBlobUpload, Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/", Entity: "Initiate Blob Upload", Description: "Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.", Methods: []MethodDescriptor{ { Method: "POST", Description: "Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.", Requests: []RequestDescriptor{ { Name: "Initiate Monolithic Blob Upload", Description: "Upload a blob identified by the `digest` parameter in single request. This upload will not be resumable unless a recoverable error is returned.", Headers: []ParameterDescriptor{ hostHeader, authHeader, { Name: "Content-Length", Type: "integer", Format: "<length of blob>", }, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, }, QueryParameters: []ParameterDescriptor{ { Name: "digest", Type: "query", Format: "<digest>", Regexp: digest.DigestRegexp, Description: `Digest of uploaded blob. 
If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.`, }, }, Body: BodyDescriptor{ ContentType: "application/octect-stream", Format: "<binary data>", }, Successes: []ResponseDescriptor{ { Description: "The blob has been created in the registry and is available at the provided location.", StatusCode: http.StatusCreated, Headers: []ParameterDescriptor{ { Name: "Location", Type: "url", Format: "<blob location>", }, contentLengthZeroHeader, dockerUploadUUIDHeader, }, }, }, Failures: []ResponseDescriptor{ { Name: "Invalid Name or Digest", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, }, }, { Name: "Not allowed", Description: "Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason", StatusCode: http.StatusMethodNotAllowed, ErrorCodes: []errcode.ErrorCode{ errcode.ErrorCodeUnsupported, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, tooManyRequestsDescriptor, }, }, { Name: "Initiate Resumable Blob Upload", Description: "Initiate a resumable blob upload with an empty request body.", Headers: []ParameterDescriptor{ hostHeader, authHeader, contentLengthZeroHeader, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, }, Successes: []ResponseDescriptor{ { Description: "The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.", StatusCode: http.StatusAccepted, Headers: []ParameterDescriptor{ contentLengthZeroHeader, { Name: "Location", Type: "url", Format: "/v2/<name>/blobs/uploads/<uuid>", Description: "The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.", }, { Name: "Range", Format: "0-0", Description: "Range header indicating the progress of the upload. 
When starting an upload, it will return an empty range, since no content has been received.", }, dockerUploadUUIDHeader, }, }, }, Failures: []ResponseDescriptor{ { Name: "Invalid Name or Digest", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, tooManyRequestsDescriptor, }, }, { Name: "Mount Blob", Description: "Mount a blob identified by the `mount` parameter from another repository.", Headers: []ParameterDescriptor{ hostHeader, authHeader, contentLengthZeroHeader, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, }, QueryParameters: []ParameterDescriptor{ { Name: "mount", Type: "query", Format: "<digest>", Regexp: digest.DigestRegexp, Description: `Digest of blob to mount from the source repository.`, }, { Name: "from", Type: "query", Format: "<repository name>", Regexp: reference.NameRegexp, Description: `Name of the source repository.`, }, }, Successes: []ResponseDescriptor{ { Description: "The blob has been mounted in the repository and is available at the provided location.", StatusCode: http.StatusCreated, Headers: []ParameterDescriptor{ { Name: "Location", Type: "url", Format: "<blob location>", }, contentLengthZeroHeader, dockerUploadUUIDHeader, }, }, }, Failures: []ResponseDescriptor{ { Name: "Invalid Name or Digest", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, }, }, { Name: "Not allowed", Description: "Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason", StatusCode: http.StatusMethodNotAllowed, ErrorCodes: []errcode.ErrorCode{ errcode.ErrorCodeUnsupported, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, tooManyRequestsDescriptor, }, }, }, }, }, }, { Name: RouteNameBlobUploadChunk, Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}", Entity: "Blob Upload", Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.", Methods: []MethodDescriptor{ { Method: "GET", Description: "Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload.", Requests: []RequestDescriptor{ { Description: "Retrieve the progress of the current upload, as reported by the `Range` header.", Headers: []ParameterDescriptor{ hostHeader, authHeader, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, uuidParameterDescriptor, }, Successes: []ResponseDescriptor{ { Name: "Upload Progress", Description: "The upload is known and in progress. 
The last received offset is available in the `Range` header.", StatusCode: http.StatusNoContent, Headers: []ParameterDescriptor{ { Name: "Range", Type: "header", Format: "0-<offset>", Description: "Range indicating the current progress of the upload.", }, contentLengthZeroHeader, dockerUploadUUIDHeader, }, }, }, Failures: []ResponseDescriptor{ { Description: "There was an error processing the upload and it must be restarted.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, ErrorCodeBlobUploadInvalid, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, { Description: "The upload is unknown to the registry. The upload must be restarted.", StatusCode: http.StatusNotFound, ErrorCodes: []errcode.ErrorCode{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, tooManyRequestsDescriptor, }, }, }, }, { Method: "PATCH", Description: "Upload a chunk of data for the specified upload.", Requests: []RequestDescriptor{ { Name: "Stream upload", Description: "Upload a stream of data to upload without completing the upload.", PathParameters: []ParameterDescriptor{ nameParameterDescriptor, uuidParameterDescriptor, }, Headers: []ParameterDescriptor{ hostHeader, authHeader, }, Body: BodyDescriptor{ ContentType: "application/octet-stream", Format: "<binary data>", }, Successes: []ResponseDescriptor{ { Name: "Data Accepted", Description: "The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", StatusCode: http.StatusNoContent, Headers: []ParameterDescriptor{ { Name: "Location", Type: "url", Format: "/v2/<name>/blobs/uploads/<uuid>", Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", }, { Name: "Range", Type: "header", Format: "0-<offset>", Description: "Range indicating the current progress of the upload.", }, contentLengthZeroHeader, dockerUploadUUIDHeader, }, }, }, Failures: []ResponseDescriptor{ { Description: "There was an error processing the upload and it must be restarted.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, ErrorCodeBlobUploadInvalid, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, { Description: "The upload is unknown to the registry. The upload must be restarted.", StatusCode: http.StatusNotFound, ErrorCodes: []errcode.ErrorCode{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, tooManyRequestsDescriptor, }, }, { Name: "Chunked upload", Description: "Upload a chunk of data to specified upload without completing the upload. 
The data will be uploaded to the specified Content Range.", PathParameters: []ParameterDescriptor{ nameParameterDescriptor, uuidParameterDescriptor, }, Headers: []ParameterDescriptor{ hostHeader, authHeader, { Name: "Content-Range", Type: "header", Format: "<start of range>-<end of range, inclusive>", Required: true, Description: "Range of bytes identifying the desired block of content represented by the body. Start must the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header.", }, { Name: "Content-Length", Type: "integer", Format: "<length of chunk>", Description: "Length of the chunk being uploaded, corresponding the length of the request body.", }, }, Body: BodyDescriptor{ ContentType: "application/octet-stream", Format: "<binary chunk>", }, Successes: []ResponseDescriptor{ { Name: "Chunk Accepted", Description: "The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", StatusCode: http.StatusNoContent, Headers: []ParameterDescriptor{ { Name: "Location", Type: "url", Format: "/v2/<name>/blobs/uploads/<uuid>", Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", }, { Name: "Range", Type: "header", Format: "0-<offset>", Description: "Range indicating the current progress of the upload.", }, contentLengthZeroHeader, dockerUploadUUIDHeader, }, }, }, Failures: []ResponseDescriptor{ { Description: "There was an error processing the upload and it must be restarted.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, ErrorCodeBlobUploadInvalid, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, { Description: "The upload is unknown to the registry. The upload must be restarted.", StatusCode: http.StatusNotFound, ErrorCodes: []errcode.ErrorCode{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, { Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.", StatusCode: http.StatusRequestedRangeNotSatisfiable, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, tooManyRequestsDescriptor, }, }, }, }, { Method: "PUT", Description: "Complete the upload specified by `uuid`, optionally appending the body as the final chunk.", Requests: []RequestDescriptor{ { Description: "Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content.", Headers: []ParameterDescriptor{ hostHeader, authHeader, { Name: "Content-Length", Type: "integer", Format: "<length of data>", Description: "Length of the data being uploaded, corresponding to the length of the request body. 
May be zero if no data is provided.", }, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, uuidParameterDescriptor, }, QueryParameters: []ParameterDescriptor{ { Name: "digest", Type: "string", Format: "<digest>", Regexp: digest.DigestRegexp, Required: true, Description: `Digest of uploaded blob.`, }, }, Body: BodyDescriptor{ ContentType: "application/octet-stream", Format: "<binary data>", }, Successes: []ResponseDescriptor{ { Name: "Upload Complete", Description: "The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header.", StatusCode: http.StatusNoContent, Headers: []ParameterDescriptor{ { Name: "Location", Type: "url", Format: "<blob location>", Description: "The canonical location of the blob for retrieval", }, { Name: "Content-Range", Type: "header", Format: "<start of range>-<end of range, inclusive>", Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.", }, contentLengthZeroHeader, digestHeader, }, }, }, Failures: []ResponseDescriptor{ { Description: "There was an error processing the upload and it must be restarted.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, ErrorCodeBlobUploadInvalid, errcode.ErrorCodeUnsupported, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, { Description: "The upload is unknown to the registry. The upload must be restarted.", StatusCode: http.StatusNotFound, ErrorCodes: []errcode.ErrorCode{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, tooManyRequestsDescriptor, }, }, }, }, { Method: "DELETE", Description: "Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout.", Requests: []RequestDescriptor{ { Description: "Cancel the upload specified by `uuid`.", PathParameters: []ParameterDescriptor{ nameParameterDescriptor, uuidParameterDescriptor, }, Headers: []ParameterDescriptor{ hostHeader, authHeader, contentLengthZeroHeader, }, Successes: []ResponseDescriptor{ { Name: "Upload Deleted", Description: "The upload has been successfully deleted.", StatusCode: http.StatusNoContent, Headers: []ParameterDescriptor{ contentLengthZeroHeader, }, }, }, Failures: []ResponseDescriptor{ { Description: "An error was encountered processing the delete. The client may ignore this error.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameInvalid, ErrorCodeBlobUploadInvalid, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, { Description: "The upload is unknown to the registry. 
The client may ignore this error and assume the upload has been deleted.", StatusCode: http.StatusNotFound, ErrorCodes: []errcode.ErrorCode{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, tooManyRequestsDescriptor, }, }, }, }, }, }, { Name: RouteNameCatalog, Path: "/v2/_catalog", Entity: "Catalog", Description: "List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can only determine if a repository is available but not if it is not available.", Methods: []MethodDescriptor{ { Method: "GET", Description: "Retrieve a sorted, json list of repositories available in the registry.", Requests: []RequestDescriptor{ { Name: "Catalog Fetch", Description: "Request an unabridged list of repositories available. The implementation may impose a maximum limit and return a partial set with pagination links.", Successes: []ResponseDescriptor{ { Description: "Returns the unabridged list of repositories as a json response.", StatusCode: http.StatusOK, Headers: []ParameterDescriptor{ { Name: "Content-Length", Type: "integer", Description: "Length of the JSON response body.", Format: "<length>", }, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: `{ "repositories": [ <name>, ... ] }`, }, }, }, }, { Name: "Catalog Fetch Paginated", Description: "Return the specified portion of repositories.", QueryParameters: paginationParameters, Successes: []ResponseDescriptor{ { StatusCode: http.StatusOK, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: `{ "repositories": [ <name>, ... ] "next": "<url>?last=<name>&n=<last value of n>" }`, }, Headers: []ParameterDescriptor{ { Name: "Content-Length", Type: "integer", Description: "Length of the JSON response body.", Format: "<length>", }, linkHeader, }, }, }, }, }, }, }, }, } var routeDescriptorsMap map[string]RouteDescriptor func init() { routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors)) for _, descriptor := range routeDescriptors { routeDescriptorsMap[descriptor.Name] = descriptor } }
9,370
0
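The file above notes that APIDescriptor exists so implementations can generate documentation from the route metadata. A short sketch of that use case, walking every route, method, and success status; the printing format is my own choice, the types and fields all come from the file:

package main

import (
	"fmt"

	v2 "github.com/docker/distribution/registry/api/v2"
)

func main() {
	// Each RouteDescriptor carries its gorilla-compatible path template and
	// the methods it supports; each request enumerates its success responses.
	for _, route := range v2.APIDescriptor.RouteDescriptors {
		fmt.Printf("%s (%s)\n", route.Name, route.Path)
		for _, method := range route.Methods {
			for _, req := range method.Requests {
				for _, success := range req.Successes {
					fmt.Printf("  %s %q -> %d\n", method.Method, req.Name, success.StatusCode)
				}
			}
		}
	}
}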
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/api
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/api/v2/doc.go
// Package v2 describes routes, urls and the error codes used in the Docker
// Registry JSON HTTP API V2. In addition to declarations, descriptors are
// provided for routes and error codes that can be used for implementation and
// automatically generating documentation.
//
// Definitions here are considered to be locked down for the V2 registry api.
// Any changes must be considered carefully and should not proceed without a
// change proposal in docker core.
package v2
9,371
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/api
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/registry/api/v2/routes.go
package v2

import "github.com/gorilla/mux"

// The following are definitions of the name under which all V2 routes are
// registered. These symbols can be used to look up a route based on the name.
const (
	RouteNameBase            = "base"
	RouteNameManifest        = "manifest"
	RouteNameTags            = "tags"
	RouteNameBlob            = "blob"
	RouteNameBlobUpload      = "blob-upload"
	RouteNameBlobUploadChunk = "blob-upload-chunk"
	RouteNameCatalog         = "catalog"
)

// Router builds a gorilla router with named routes for the various API
// methods. This can be used directly by both server implementations and
// clients.
func Router() *mux.Router {
	return RouterWithPrefix("")
}

// RouterWithPrefix builds a gorilla router with a configured prefix
// on all routes.
func RouterWithPrefix(prefix string) *mux.Router {
	rootRouter := mux.NewRouter()
	router := rootRouter
	if prefix != "" {
		router = router.PathPrefix(prefix).Subrouter()
	}

	router.StrictSlash(true)

	for _, descriptor := range routeDescriptors {
		router.Path(descriptor.Path).Name(descriptor.Name)
	}

	return rootRouter
}
9,372
0
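An illustrative client-side use of the named router: resolving the manifest route back into a concrete URL via gorilla/mux's Route.URL. The variable names ("name", "reference") come from the path template in descriptors.go; the example values are hypothetical.

package main

import (
	"fmt"

	v2 "github.com/docker/distribution/registry/api/v2"
)

func main() {
	router := v2.Router()

	// Look up the route registered under RouteNameManifest and fill in its
	// path variables to build a request URL.
	u, err := router.Get(v2.RouteNameManifest).URL("name", "library/ubuntu", "reference", "latest")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Path) // /v2/library/ubuntu/manifests/latest
}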
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/metrics/prometheus.go
package metrics

import "github.com/docker/go-metrics"

const (
	// NamespacePrefix is the namespace of prometheus metrics
	NamespacePrefix = "registry"
)

var (
	// StorageNamespace is the prometheus namespace of blob/cache related operations
	StorageNamespace = metrics.NewNamespace(NamespacePrefix, "storage", nil)
)
9,373
0
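A hedged sketch of how a storage component might hang a metric off StorageNamespace. The NewCounter/Register calls are the docker/go-metrics API as I understand it, and the metric name and helper function are assumptions for illustration; verify the signatures against the vendored go-metrics package before relying on this.

package main

import (
	dmetrics "github.com/docker/distribution/metrics"
	gometrics "github.com/docker/go-metrics"
)

// blobGets is a hypothetical counter declared on the shared storage
// namespace, so it is emitted as registry_storage_blob_gets.
var blobGets = dmetrics.StorageNamespace.NewCounter("blob_gets", "number of blob get requests")

func init() {
	// Expose every metric declared on the namespace to Prometheus.
	gometrics.Register(dmetrics.StorageNamespace)
}

func recordBlobGet() {
	blobGets.Inc(1)
}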
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/reference/regexp.go
package reference

import "regexp"

var (
	// alphaNumericRegexp defines the alpha numeric atom, typically a
	// component of names. This only allows lower case characters and digits.
	alphaNumericRegexp = match(`[a-z0-9]+`)

	// separatorRegexp defines the separators allowed to be embedded in name
	// components. This allows one period, one or two underscores, and
	// multiple dashes.
	separatorRegexp = match(`(?:[._]|__|[-]*)`)

	// nameComponentRegexp restricts registry path component names to start
	// with at least one letter or number, with following parts able to be
	// separated by one period, one or two underscores, and multiple dashes.
	nameComponentRegexp = expression(
		alphaNumericRegexp,
		optional(repeated(separatorRegexp, alphaNumericRegexp)))

	// domainComponentRegexp restricts the registry domain component of a
	// repository name to start with a component as defined by DomainRegexp
	// and followed by an optional port.
	domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)

	// DomainRegexp defines the structure of potential domain components
	// that may be part of image names. This is purposely a subset of what is
	// allowed by DNS to ensure backwards compatibility with Docker image
	// names.
	DomainRegexp = expression(
		domainComponentRegexp,
		optional(repeated(literal(`.`), domainComponentRegexp)),
		optional(literal(`:`), match(`[0-9]+`)))

	// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
	TagRegexp = match(`[\w][\w.-]{0,127}`)

	// anchoredTagRegexp matches valid tag names, anchored at the start and
	// end of the matched string.
	anchoredTagRegexp = anchored(TagRegexp)

	// DigestRegexp matches valid digests.
	DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)

	// anchoredDigestRegexp matches valid digests, anchored at the start and
	// end of the matched string.
	anchoredDigestRegexp = anchored(DigestRegexp)

	// NameRegexp is the format for the name component of references. The
	// regexp has capturing groups for the domain and name part omitting
	// the separating forward slash from either.
	NameRegexp = expression(
		optional(DomainRegexp, literal(`/`)),
		nameComponentRegexp,
		optional(repeated(literal(`/`), nameComponentRegexp)))

	// anchoredNameRegexp is used to parse a name value, capturing the
	// domain and trailing components.
	anchoredNameRegexp = anchored(
		optional(capture(DomainRegexp), literal(`/`)),
		capture(nameComponentRegexp,
			optional(repeated(literal(`/`), nameComponentRegexp))))

	// ReferenceRegexp is the full supported format of a reference. The regexp
	// is anchored and has capturing groups for name, tag, and digest
	// components.
	ReferenceRegexp = anchored(capture(NameRegexp),
		optional(literal(":"), capture(TagRegexp)),
		optional(literal("@"), capture(DigestRegexp)))

	// IdentifierRegexp is the format for string identifier used as a
	// content addressable identifier using sha256. These identifiers
	// are like digests without the algorithm, since sha256 is used.
	IdentifierRegexp = match(`([a-f0-9]{64})`)

	// ShortIdentifierRegexp is the format used to represent a prefix
	// of an identifier. A prefix may be used to match a sha256 identifier
	// within a list of trusted identifiers.
	ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)

	// anchoredIdentifierRegexp is used to check or match an
	// identifier value, anchored at start and end of string.
	anchoredIdentifierRegexp = anchored(IdentifierRegexp)

	// anchoredShortIdentifierRegexp is used to check if a value
	// is a possible identifier prefix, anchored at start and end
	// of string.
	anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp)
)

// match compiles the string to a regular expression.
var match = regexp.MustCompile

// literal compiles s into a literal regular expression, escaping any regexp
// reserved characters.
func literal(s string) *regexp.Regexp {
	re := match(regexp.QuoteMeta(s))

	if _, complete := re.LiteralPrefix(); !complete {
		panic("must be a literal")
	}

	return re
}

// expression defines a full expression, where each regular expression must
// follow the previous.
func expression(res ...*regexp.Regexp) *regexp.Regexp {
	var s string
	for _, re := range res {
		s += re.String()
	}

	return match(s)
}

// optional wraps the expression in a non-capturing group and makes the
// production optional.
func optional(res ...*regexp.Regexp) *regexp.Regexp {
	return match(group(expression(res...)).String() + `?`)
}

// repeated wraps the regexp in a non-capturing group to get one or more
// matches.
func repeated(res ...*regexp.Regexp) *regexp.Regexp {
	return match(group(expression(res...)).String() + `+`)
}

// group wraps the regexp in a non-capturing group.
func group(res ...*regexp.Regexp) *regexp.Regexp {
	return match(`(?:` + expression(res...).String() + `)`)
}

// capture wraps the expression in a capturing group.
func capture(res ...*regexp.Regexp) *regexp.Regexp {
	return match(`(` + expression(res...).String() + `)`)
}

// anchored anchors the regular expression by adding start and end delimiters.
func anchored(res ...*regexp.Regexp) *regexp.Regexp {
	return match(`^` + expression(res...).String() + `$`)
}
9,374
0
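A quick demonstration (not part of the vendored file) of the exported regexps above: ReferenceRegexp's capturing groups split a reference string into name, tag, and digest, as its doc comment states.

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	m := reference.ReferenceRegexp.FindStringSubmatch("docker.io/library/ubuntu:18.04")
	if m == nil {
		panic("reference did not match")
	}
	// Per the capturing groups documented on ReferenceRegexp:
	// m[1] is the name, m[2] the tag, m[3] the digest (empty when absent).
	fmt.Println(m[1], m[2], m[3])
}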
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/reference/normalize.go
package reference

import (
	"errors"
	"fmt"
	"strings"

	"github.com/docker/distribution/digestset"
	"github.com/opencontainers/go-digest"
)

var (
	legacyDefaultDomain = "index.docker.io"
	defaultDomain       = "docker.io"
	officialRepoName    = "library"
	defaultTag          = "latest"
)

// normalizedNamed represents a name which has been
// normalized and has a familiar form. A familiar name
// is what is used in Docker UI. An example normalized
// name is "docker.io/library/ubuntu" and corresponding
// familiar name of "ubuntu".
type normalizedNamed interface {
	Named
	Familiar() Named
}

// ParseNormalizedNamed parses a string into a named reference
// transforming a familiar name from Docker UI to a fully
// qualified reference. If the value may be an identifier
// use ParseAnyReference.
func ParseNormalizedNamed(s string) (Named, error) {
	if ok := anchoredIdentifierRegexp.MatchString(s); ok {
		return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
	}
	domain, remainder := splitDockerDomain(s)
	var remoteName string
	if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
		remoteName = remainder[:tagSep]
	} else {
		remoteName = remainder
	}
	if strings.ToLower(remoteName) != remoteName {
		return nil, errors.New("invalid reference format: repository name must be lowercase")
	}

	ref, err := Parse(domain + "/" + remainder)
	if err != nil {
		return nil, err
	}
	named, isNamed := ref.(Named)
	if !isNamed {
		return nil, fmt.Errorf("reference %s has no name", ref.String())
	}
	return named, nil
}

// splitDockerDomain splits a repository name to domain and remotename string.
// If no valid domain is found, the default domain is used. The repository
// name must already be validated before calling this function.
func splitDockerDomain(name string) (domain, remainder string) {
	i := strings.IndexRune(name, '/')
	if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
		domain, remainder = defaultDomain, name
	} else {
		domain, remainder = name[:i], name[i+1:]
	}
	if domain == legacyDefaultDomain {
		domain = defaultDomain
	}
	if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
		remainder = officialRepoName + "/" + remainder
	}
	return
}

// familiarizeName returns a shortened version of the name familiar
// to the Docker UI. Familiar names have the default domain
// "docker.io" and "library/" repository prefix removed.
// For example, "docker.io/library/redis" will have the familiar
// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
// Returns a familiarized named only reference.
func familiarizeName(named namedRepository) repository {
	repo := repository{
		domain: named.Domain(),
		path:   named.Path(),
	}

	if repo.domain == defaultDomain {
		repo.domain = ""
		// Handle official repositories which have the pattern "library/<official repo name>"
		if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
			repo.path = split[1]
		}
	}
	return repo
}

func (r reference) Familiar() Named {
	return reference{
		namedRepository: familiarizeName(r.namedRepository),
		tag:             r.tag,
		digest:          r.digest,
	}
}

func (r repository) Familiar() Named {
	return familiarizeName(r)
}

func (t taggedReference) Familiar() Named {
	return taggedReference{
		namedRepository: familiarizeName(t.namedRepository),
		tag:             t.tag,
	}
}

func (c canonicalReference) Familiar() Named {
	return canonicalReference{
		namedRepository: familiarizeName(c.namedRepository),
		digest:          c.digest,
	}
}

// TagNameOnly adds the default tag "latest" to a reference if it only has
// a repo name.
func TagNameOnly(ref Named) Named {
	if IsNameOnly(ref) {
		namedTagged, err := WithTag(ref, defaultTag)
		if err != nil {
			// Default tag must be valid, to create a NamedTagged
			// type with non-validated input the WithTag function
			// should be used instead
			panic(err)
		}
		return namedTagged
	}
	return ref
}

// ParseAnyReference parses a reference string as a possible identifier,
// full digest, or familiar name.
func ParseAnyReference(ref string) (Reference, error) {
	if ok := anchoredIdentifierRegexp.MatchString(ref); ok {
		return digestReference("sha256:" + ref), nil
	}
	if dgst, err := digest.Parse(ref); err == nil {
		return digestReference(dgst), nil
	}

	return ParseNormalizedNamed(ref)
}

// ParseAnyReferenceWithSet parses a reference string as a possible short
// identifier to be matched in a digest set, a full digest, or familiar name.
func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) {
	if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok {
		dgst, err := ds.Lookup(ref)
		if err == nil {
			return digestReference(dgst), nil
		}
	} else {
		if dgst, err := digest.Parse(ref); err == nil {
			return digestReference(dgst), nil
		}
	}

	return ParseNormalizedNamed(ref)
}
9,375
0
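A usage sketch of the normalization round trip described above: a familiar name expands to a fully qualified reference, gains a default tag, and can be shortened back to its familiar form (FamiliarString is defined in helpers.go below).

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	named, err := reference.ParseNormalizedNamed("ubuntu")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.String())                        // docker.io/library/ubuntu
	fmt.Println(reference.TagNameOnly(named).String()) // docker.io/library/ubuntu:latest
	fmt.Println(reference.FamiliarString(named))       // ubuntu
}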
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/reference/helpers.go
package reference

import "path"

// IsNameOnly returns true if reference only contains a repo name.
func IsNameOnly(ref Named) bool {
	if _, ok := ref.(NamedTagged); ok {
		return false
	}
	if _, ok := ref.(Canonical); ok {
		return false
	}
	return true
}

// FamiliarName returns the familiar name string
// for the given named, familiarizing if needed.
func FamiliarName(ref Named) string {
	if nn, ok := ref.(normalizedNamed); ok {
		return nn.Familiar().Name()
	}
	return ref.Name()
}

// FamiliarString returns the familiar string representation
// for the given reference, familiarizing if needed.
func FamiliarString(ref Reference) string {
	if nn, ok := ref.(normalizedNamed); ok {
		return nn.Familiar().String()
	}
	return ref.String()
}

// FamiliarMatch reports whether ref matches the specified pattern.
// See https://godoc.org/path#Match for supported patterns.
func FamiliarMatch(pattern string, ref Reference) (bool, error) {
	matched, err := path.Match(pattern, FamiliarString(ref))
	if namedRef, isNamed := ref.(Named); isNamed && !matched {
		matched, _ = path.Match(pattern, FamiliarName(namedRef))
	}
	return matched, err
}
9,376
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution
kubeflow_public_repos/fate-operator/vendor/github.com/docker/distribution/reference/reference.go
// Package reference provides a general type to represent any way of referencing images within the registry.
// Its main purpose is to abstract tags and digests (content-addressable hash).
//
// Grammar
//
//	reference                       := name [ ":" tag ] [ "@" digest ]
//	name                            := [domain '/'] path-component ['/' path-component]*
//	domain                          := domain-component ['.' domain-component]* [':' port-number]
//	domain-component                := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
//	port-number                     := /[0-9]+/
//	path-component                  := alpha-numeric [separator alpha-numeric]*
//	alpha-numeric                   := /[a-z0-9]+/
//	separator                       := /[_.]|__|[-]*/
//
//	tag                             := /[\w][\w.-]{0,127}/
//
//	digest                          := digest-algorithm ":" digest-hex
//	digest-algorithm                := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]*
//	digest-algorithm-separator      := /[+.-_]/
//	digest-algorithm-component      := /[A-Za-z][A-Za-z0-9]*/
//	digest-hex                      := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
//
//	identifier                      := /[a-f0-9]{64}/
//	short-identifier                := /[a-f0-9]{6,64}/
package reference

import (
	"errors"
	"fmt"
	"strings"

	"github.com/opencontainers/go-digest"
)

const (
	// NameTotalLengthMax is the maximum total number of characters in a repository name.
	NameTotalLengthMax = 255
)

var (
	// ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
	ErrReferenceInvalidFormat = errors.New("invalid reference format")

	// ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
	ErrTagInvalidFormat = errors.New("invalid tag format")

	// ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
	ErrDigestInvalidFormat = errors.New("invalid digest format")

	// ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
	ErrNameContainsUppercase = errors.New("repository name must be lowercase")

	// ErrNameEmpty is returned for empty, invalid repository names.
	ErrNameEmpty = errors.New("repository name must have at least one component")

	// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
	ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)

	// ErrNameNotCanonical is returned when a name is not canonical.
	ErrNameNotCanonical = errors.New("repository name must be canonical")
)

// Reference is an opaque object reference identifier that may include
// modifiers such as a hostname, name, tag, and digest.
type Reference interface {
	// String returns the full reference
	String() string
}

// Field provides a wrapper type for resolving correct reference types when
// working with encoding.
type Field struct {
	reference Reference
}

// AsField wraps a reference in a Field for encoding.
func AsField(reference Reference) Field {
	return Field{reference}
}

// Reference unwraps the reference type from the field to
// return the Reference object. This object should be
// of the appropriate type to further check for different
// reference types.
func (f Field) Reference() Reference {
	return f.reference
}

// MarshalText serializes the field to byte text which
// is the string of the reference.
func (f Field) MarshalText() (p []byte, err error) {
	return []byte(f.reference.String()), nil
}

// UnmarshalText parses text bytes by invoking the
// reference parser to ensure the appropriately
// typed reference object is wrapped by field.
func (f *Field) UnmarshalText(p []byte) error {
	r, err := Parse(string(p))
	if err != nil {
		return err
	}

	f.reference = r
	return nil
}

// Named is an object with a full name
type Named interface {
	Reference
	Name() string
}

// Tagged is an object which has a tag
type Tagged interface {
	Reference
	Tag() string
}

// NamedTagged is an object including a name and tag.
type NamedTagged interface {
	Named
	Tag() string
}

// Digested is an object which has a digest
// by which it can be referenced
type Digested interface {
	Reference
	Digest() digest.Digest
}

// Canonical reference is an object with a fully unique
// name including a name with domain and digest
type Canonical interface {
	Named
	Digest() digest.Digest
}

// namedRepository is a reference to a repository with a name.
// A namedRepository has both domain and path components.
type namedRepository interface {
	Named
	Domain() string
	Path() string
}

// Domain returns the domain part of the Named reference
func Domain(named Named) string {
	if r, ok := named.(namedRepository); ok {
		return r.Domain()
	}
	domain, _ := splitDomain(named.Name())
	return domain
}

// Path returns the name without the domain part of the Named reference
func Path(named Named) (name string) {
	if r, ok := named.(namedRepository); ok {
		return r.Path()
	}
	_, path := splitDomain(named.Name())
	return path
}

func splitDomain(name string) (string, string) {
	match := anchoredNameRegexp.FindStringSubmatch(name)
	if len(match) != 3 {
		return "", name
	}
	return match[1], match[2]
}

// SplitHostname splits a named reference into a
// hostname and name string. If no valid hostname is
// found, the hostname is empty and the full value
// is returned as name
// DEPRECATED: Use Domain or Path
func SplitHostname(named Named) (string, string) {
	if r, ok := named.(namedRepository); ok {
		return r.Domain(), r.Path()
	}
	return splitDomain(named.Name())
}

// Parse parses s and returns a syntactically valid Reference.
// If an error was encountered it is returned, along with a nil Reference.
// NOTE: Parse will not handle short digests.
func Parse(s string) (Reference, error) {
	matches := ReferenceRegexp.FindStringSubmatch(s)
	if matches == nil {
		if s == "" {
			return nil, ErrNameEmpty
		}
		if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
			return nil, ErrNameContainsUppercase
		}
		return nil, ErrReferenceInvalidFormat
	}

	if len(matches[1]) > NameTotalLengthMax {
		return nil, ErrNameTooLong
	}

	var repo repository

	nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
	if nameMatch != nil && len(nameMatch) == 3 {
		repo.domain = nameMatch[1]
		repo.path = nameMatch[2]
	} else {
		repo.domain = ""
		repo.path = matches[1]
	}

	ref := reference{
		namedRepository: repo,
		tag:             matches[2],
	}
	if matches[3] != "" {
		var err error
		ref.digest, err = digest.Parse(matches[3])
		if err != nil {
			return nil, err
		}
	}

	r := getBestReferenceType(ref)
	if r == nil {
		return nil, ErrNameEmpty
	}

	return r, nil
}

// ParseNamed parses s and returns a syntactically valid reference implementing
// the Named interface. The reference must have a name and be in the canonical
// form, otherwise an error is returned.
// If an error was encountered it is returned, along with a nil Reference.
// NOTE: ParseNamed will not handle short digests.
func ParseNamed(s string) (Named, error) {
	named, err := ParseNormalizedNamed(s)
	if err != nil {
		return nil, err
	}
	if named.String() != s {
		return nil, ErrNameNotCanonical
	}
	return named, nil
}

// WithName returns a named object representing the given string. If the input
// is invalid ErrReferenceInvalidFormat will be returned.
func WithName(name string) (Named, error) {
	if len(name) > NameTotalLengthMax {
		return nil, ErrNameTooLong
	}

	match := anchoredNameRegexp.FindStringSubmatch(name)
	if match == nil || len(match) != 3 {
		return nil, ErrReferenceInvalidFormat
	}
	return repository{
		domain: match[1],
		path:   match[2],
	}, nil
}

// WithTag combines the name from "name" and the tag from "tag" to form a
// reference incorporating both the name and the tag.
func WithTag(name Named, tag string) (NamedTagged, error) {
	if !anchoredTagRegexp.MatchString(tag) {
		return nil, ErrTagInvalidFormat
	}
	var repo repository
	if r, ok := name.(namedRepository); ok {
		repo.domain = r.Domain()
		repo.path = r.Path()
	} else {
		repo.path = name.Name()
	}
	if canonical, ok := name.(Canonical); ok {
		return reference{
			namedRepository: repo,
			tag:             tag,
			digest:          canonical.Digest(),
		}, nil
	}
	return taggedReference{
		namedRepository: repo,
		tag:             tag,
	}, nil
}

// WithDigest combines the name from "name" and the digest from "digest" to form
// a reference incorporating both the name and the digest.
func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
	if !anchoredDigestRegexp.MatchString(digest.String()) {
		return nil, ErrDigestInvalidFormat
	}
	var repo repository
	if r, ok := name.(namedRepository); ok {
		repo.domain = r.Domain()
		repo.path = r.Path()
	} else {
		repo.path = name.Name()
	}
	if tagged, ok := name.(Tagged); ok {
		return reference{
			namedRepository: repo,
			tag:             tagged.Tag(),
			digest:          digest,
		}, nil
	}
	return canonicalReference{
		namedRepository: repo,
		digest:          digest,
	}, nil
}

// TrimNamed removes any tag or digest from the named reference.
func TrimNamed(ref Named) Named {
	domain, path := SplitHostname(ref)
	return repository{
		domain: domain,
		path:   path,
	}
}

func getBestReferenceType(ref reference) Reference {
	if ref.Name() == "" {
		// Allow digest only references
		if ref.digest != "" {
			return digestReference(ref.digest)
		}
		return nil
	}
	if ref.tag == "" {
		if ref.digest != "" {
			return canonicalReference{
				namedRepository: ref.namedRepository,
				digest:          ref.digest,
			}
		}
		return ref.namedRepository
	}
	if ref.digest == "" {
		return taggedReference{
			namedRepository: ref.namedRepository,
			tag:             ref.tag,
		}
	}

	return ref
}

type reference struct {
	namedRepository
	tag    string
	digest digest.Digest
}

func (r reference) String() string {
	return r.Name() + ":" + r.tag + "@" + r.digest.String()
}

func (r reference) Tag() string {
	return r.tag
}

func (r reference) Digest() digest.Digest {
	return r.digest
}

type repository struct {
	domain string
	path   string
}

func (r repository) String() string {
	return r.Name()
}

func (r repository) Name() string {
	if r.domain == "" {
		return r.path
	}
	return r.domain + "/" + r.path
}

func (r repository) Domain() string {
	return r.domain
}

func (r repository) Path() string {
	return r.path
}

type digestReference digest.Digest

func (d digestReference) String() string {
	return digest.Digest(d).String()
}

func (d digestReference) Digest() digest.Digest {
	return digest.Digest(d)
}

type taggedReference struct {
	namedRepository
	tag string
}

func (t taggedReference) String() string {
	return t.Name() + ":" + t.tag
}

func (t taggedReference) Tag() string {
	return t.tag
}

type canonicalReference struct {
	namedRepository
	digest digest.Digest
}

func (c canonicalReference) String() string {
	return c.Name() + "@" + c.digest.String()
}

func (c canonicalReference) Digest() digest.Digest {
	return c.digest
}
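A sketch of composing references with the constructors above, assuming the vendored import paths; the repository name and the synthetic all-"a" digest are made-up illustrative values.

package main

import (
	"fmt"
	"strings"

	"github.com/docker/distribution/reference"
	"github.com/opencontainers/go-digest"
)

func main() {
	// WithName expects the already-normalized (canonical) name form.
	named, err := reference.WithName("docker.io/library/busybox")
	if err != nil {
		panic(err)
	}

	// Attach a tag, then a digest; the combined result carries both.
	tagged, _ := reference.WithTag(named, "1.36")
	dgst := digest.Digest("sha256:" + strings.Repeat("a", 64)) // synthetic placeholder digest
	full, _ := reference.WithDigest(tagged, dgst)
	fmt.Println(full.String()) // prints the name:tag@digest form

	// TrimNamed strips tag and digest back to the bare repository.
	fmt.Println(reference.TrimNamed(full).String()) // docker.io/library/busybox
}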
9,377
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/NOTICE
Docker
Copyright 2012-2017 Docker, Inc.

This product includes software developed at Docker, Inc. (https://www.docker.com).

This product contains software (https://github.com/creack/pty) developed
by Keith Rarick, licensed under the MIT License.

The following is courtesy of our legal counsel:

Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.

For more information, please see https://www.bis.doc.gov

See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
9,378
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/AUTHORS
# This file lists all individuals having contributed content to the repository. # For how it is generated, see `hack/generate-authors.sh`. Aanand Prasad <aanand.prasad@gmail.com> Aaron Davidson <aaron@databricks.com> Aaron Feng <aaron.feng@gmail.com> Aaron Hnatiw <aaron@griddio.com> Aaron Huslage <huslage@gmail.com> Aaron L. Xu <liker.xu@foxmail.com> Aaron Lehmann <aaron.lehmann@docker.com> Aaron Welch <welch@packet.net> Aaron.L.Xu <likexu@harmonycloud.cn> Abel Muiño <amuino@gmail.com> Abhijeet Kasurde <akasurde@redhat.com> Abhinandan Prativadi <abhi@docker.com> Abhinav Ajgaonkar <abhinav316@gmail.com> Abhishek Chanda <abhishek.becs@gmail.com> Abhishek Sharma <abhishek@asharma.me> Abin Shahab <ashahab@altiscale.com> Adam Avilla <aavilla@yp.com> Adam Eijdenberg <adam.eijdenberg@gmail.com> Adam Kunk <adam.kunk@tiaa-cref.org> Adam Miller <admiller@redhat.com> Adam Mills <adam@armills.info> Adam Pointer <adam.pointer@skybettingandgaming.com> Adam Singer <financeCoding@gmail.com> Adam Walz <adam@adamwalz.net> Addam Hardy <addam.hardy@gmail.com> Aditi Rajagopal <arajagopal@us.ibm.com> Aditya <aditya@netroy.in> Adnan Khan <adnkha@amazon.com> Adolfo Ochagavía <aochagavia92@gmail.com> Adria Casas <adriacasas88@gmail.com> Adrian Moisey <adrian@changeover.za.net> Adrian Mouat <adrian.mouat@gmail.com> Adrian Oprea <adrian@codesi.nz> Adrien Folie <folie.adrien@gmail.com> Adrien Gallouët <adrien@gallouet.fr> Ahmed Kamal <email.ahmedkamal@googlemail.com> Ahmet Alp Balkan <ahmetb@microsoft.com> Aidan Feldman <aidan.feldman@gmail.com> Aidan Hobson Sayers <aidanhs@cantab.net> AJ Bowen <aj@soulshake.net> Ajey Charantimath <ajey.charantimath@gmail.com> ajneu <ajneu@users.noreply.github.com> Akash Gupta <akagup@microsoft.com> Akihiro Matsushima <amatsusbit@gmail.com> Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> Akim Demaille <akim.demaille@docker.com> Akira Koyasu <mail@akirakoyasu.net> Akshay Karle <akshay.a.karle@gmail.com> Al Tobey <al@ooyala.com> alambike <alambike@gmail.com> Alan Scherger <flyinprogrammer@gmail.com> Alan Thompson <cloojure@gmail.com> Albert Callarisa <shark234@gmail.com> Albert Zhang <zhgwenming@gmail.com> Alejandro González Hevia <alejandrgh11@gmail.com> Aleksa Sarai <asarai@suse.de> Aleksandrs Fadins <aleks@s-ko.net> Alena Prokharchyk <alena@rancher.com> Alessandro Boch <aboch@tetrationanalytics.com> Alessio Biancalana <dottorblaster@gmail.com> Alex Chan <alex@alexwlchan.net> Alex Chen <alexchenunix@gmail.com> Alex Coventry <alx@empirical.com> Alex Crawford <alex.crawford@coreos.com> Alex Ellis <alexellis2@gmail.com> Alex Gaynor <alex.gaynor@gmail.com> Alex Goodman <wagoodman@gmail.com> Alex Olshansky <i@creagenics.com> Alex Samorukov <samm@os2.kiev.ua> Alex Warhawk <ax.warhawk@gmail.com> Alexander Artemenko <svetlyak.40wt@gmail.com> Alexander Boyd <alex@opengroove.org> Alexander Larsson <alexl@redhat.com> Alexander Midlash <amidlash@docker.com> Alexander Morozov <lk4d4@docker.com> Alexander Shopov <ash@kambanaria.org> Alexandre Beslic <alexandre.beslic@gmail.com> Alexandre Garnier <zigarn@gmail.com> Alexandre González <agonzalezro@gmail.com> Alexandre Jomin <alexandrejomin@gmail.com> Alexandru Sfirlogea <alexandru.sfirlogea@gmail.com> Alexei Margasov <alexei38@yandex.ru> Alexey Guskov <lexag@mail.ru> Alexey Kotlyarov <alexey@infoxchange.net.au> Alexey Shamrin <shamrin@gmail.com> Alexis THOMAS <fr.alexisthomas@gmail.com> Alfred Landrum <alfred.landrum@docker.com> Ali Dehghani <ali.dehghani.g@gmail.com> Alicia Lauerman <alicia@eta.im> Alihan Demir <alihan_6153@hotmail.com> Allen Madsen 
<blatyo@gmail.com> Allen Sun <allensun.shl@alibaba-inc.com> almoehi <almoehi@users.noreply.github.com> Alvaro Saurin <alvaro.saurin@gmail.com> Alvin Deng <alvin.q.deng@utexas.edu> Alvin Richards <alvin.richards@docker.com> amangoel <amangoel@gmail.com> Amen Belayneh <amenbelayneh@gmail.com> Amir Goldstein <amir73il@aquasec.com> Amit Bakshi <ambakshi@gmail.com> Amit Krishnan <amit.krishnan@oracle.com> Amit Shukla <amit.shukla@docker.com> Amr Gawish <amr.gawish@gmail.com> Amy Lindburg <amy.lindburg@docker.com> Anand Patil <anand.prabhakar.patil@gmail.com> AnandkumarPatel <anandkumarpatel@gmail.com> Anatoly Borodin <anatoly.borodin@gmail.com> Anchal Agrawal <aagrawa4@illinois.edu> Anda Xu <anda.xu@docker.com> Anders Janmyr <anders@janmyr.com> Andre Dublin <81dublin@gmail.com> Andre Granovsky <robotciti@live.com> Andrea Luzzardi <aluzzardi@gmail.com> Andrea Turli <andrea.turli@gmail.com> Andreas Elvers <andreas@work.de> Andreas Köhler <andi5.py@gmx.net> Andreas Savvides <andreas@editd.com> Andreas Tiefenthaler <at@an-ti.eu> Andrei Gherzan <andrei@resin.io> Andrei Vagin <avagin@gmail.com> Andrew C. Bodine <acbodine@us.ibm.com> Andrew Clay Shafer <andrewcshafer@gmail.com> Andrew Duckworth <grillopress@gmail.com> Andrew France <andrew@avito.co.uk> Andrew Gerrand <adg@golang.org> Andrew Guenther <guenther.andrew.j@gmail.com> Andrew He <he.andrew.mail@gmail.com> Andrew Hsu <andrewhsu@docker.com> Andrew Kuklewicz <kookster@gmail.com> Andrew Macgregor <andrew.macgregor@agworld.com.au> Andrew Macpherson <hopscotch23@gmail.com> Andrew Martin <sublimino@gmail.com> Andrew McDonnell <bugs@andrewmcdonnell.net> Andrew Munsell <andrew@wizardapps.net> Andrew Pennebaker <andrew.pennebaker@gmail.com> Andrew Po <absourd.noise@gmail.com> Andrew Weiss <andrew.weiss@docker.com> Andrew Williams <williams.andrew@gmail.com> Andrews Medina <andrewsmedina@gmail.com> Andrey Kolomentsev <andrey.kolomentsev@docker.com> Andrey Petrov <andrey.petrov@shazow.net> Andrey Stolbovsky <andrey.stolbovsky@gmail.com> André Martins <aanm90@gmail.com> andy <ztao@tibco-support.com> Andy Chambers <anchambers@paypal.com> andy diller <dillera@gmail.com> Andy Goldstein <agoldste@redhat.com> Andy Kipp <andy@rstudio.com> Andy Rothfusz <github@developersupport.net> Andy Smith <github@anarkystic.com> Andy Wilson <wilson.andrew.j+github@gmail.com> Anes Hasicic <anes.hasicic@gmail.com> Anil Belur <askb23@gmail.com> Anil Madhavapeddy <anil@recoil.org> Ankit Jain <ajatkj@yahoo.co.in> Ankush Agarwal <ankushagarwal11@gmail.com> Anonmily <michelle@michelleliu.io> Anran Qiao <anran.qiao@daocloud.io> Anshul Pundir <anshul.pundir@docker.com> Anthon van der Neut <anthon@mnt.org> Anthony Baire <Anthony.Baire@irisa.fr> Anthony Bishopric <git@anthonybishopric.com> Anthony Dahanne <anthony.dahanne@gmail.com> Anthony Sottile <asottile@umich.edu> Anton Löfgren <anton.lofgren@gmail.com> Anton Nikitin <anton.k.nikitin@gmail.com> Anton Polonskiy <anton.polonskiy@gmail.com> Anton Tiurin <noxiouz@yandex.ru> Antonio Murdaca <antonio.murdaca@gmail.com> Antonis Kalipetis <akalipetis@gmail.com> Antony Messerli <amesserl@rackspace.com> Anuj Bahuguna <anujbahuguna.dev@gmail.com> Anusha Ragunathan <anusha.ragunathan@docker.com> apocas <petermdias@gmail.com> Arash Deshmeh <adeshmeh@ca.ibm.com> ArikaChen <eaglesora@gmail.com> Arnaud Lefebvre <a.lefebvre@outlook.fr> Arnaud Porterie <arnaud.porterie@docker.com> Arthur Barr <arthur.barr@uk.ibm.com> Arthur Gautier <baloo@gandi.net> Artur Meyster <arthurfbi@yahoo.com> Arun Gupta <arun.gupta@gmail.com> Asad Saeeduddin 
<masaeedu@gmail.com> Asbjørn Enge <asbjorn@hanafjedle.net> averagehuman <averagehuman@users.noreply.github.com> Avi Das <andas222@gmail.com> Avi Kivity <avi@scylladb.com> Avi Miller <avi.miller@oracle.com> Avi Vaid <avaid1996@gmail.com> ayoshitake <airandfingers@gmail.com> Azat Khuyiyakhmetov <shadow_uz@mail.ru> Bardia Keyoumarsi <bkeyouma@ucsc.edu> Barnaby Gray <barnaby@pickle.me.uk> Barry Allard <barry.allard@gmail.com> Bartłomiej Piotrowski <b@bpiotrowski.pl> Bastiaan Bakker <bbakker@xebia.com> bdevloed <boris.de.vloed@gmail.com> Ben Bonnefoy <frenchben@docker.com> Ben Firshman <ben@firshman.co.uk> Ben Golub <ben.golub@dotcloud.com> Ben Gould <ben@bengould.co.uk> Ben Hall <ben@benhall.me.uk> Ben Sargent <ben@brokendigits.com> Ben Severson <BenSeverson@users.noreply.github.com> Ben Toews <mastahyeti@gmail.com> Ben Wiklund <ben@daisyowl.com> Benjamin Atkin <ben@benatkin.com> Benjamin Baker <Benjamin.baker@utexas.edu> Benjamin Boudreau <boudreau.benjamin@gmail.com> Benjamin Yolken <yolken@stripe.com> Benoit Chesneau <bchesneau@gmail.com> Bernerd Schaefer <bj.schaefer@gmail.com> Bernhard M. Wiedemann <bwiedemann@suse.de> Bert Goethals <bert@bertg.be> Bevisy Zhang <binbin36520@gmail.com> Bharath Thiruveedula <bharath_ves@hotmail.com> Bhiraj Butala <abhiraj.butala@gmail.com> Bhumika Bayani <bhumikabayani@gmail.com> Bilal Amarni <bilal.amarni@gmail.com> Bill Wang <ozbillwang@gmail.com> Bily Zhang <xcoder@tenxcloud.com> Bin Liu <liubin0329@gmail.com> Bingshen Wang <bingshen.wbs@alibaba-inc.com> Blake Geno <blakegeno@gmail.com> Boaz Shuster <ripcurld.github@gmail.com> bobby abbott <ttobbaybbob@gmail.com> Boris Pruessmann <boris@pruessmann.org> Boshi Lian <farmer1992@gmail.com> Bouke Haarsma <bouke@webatoom.nl> Boyd Hemphill <boyd@feedmagnet.com> boynux <boynux@gmail.com> Bradley Cicenas <bradley.cicenas@gmail.com> Bradley Wright <brad@intranation.com> Brandon Liu <bdon@bdon.org> Brandon Philips <brandon.philips@coreos.com> Brandon Rhodes <brandon@rhodesmill.org> Brendan Dixon <brendand@microsoft.com> Brent Salisbury <brent.salisbury@docker.com> Brett Higgins <brhiggins@arbor.net> Brett Kochendorfer <brett.kochendorfer@gmail.com> Brett Randall <javabrett@gmail.com> Brian (bex) Exelbierd <bexelbie@redhat.com> Brian Bland <brian.bland@docker.com> Brian DeHamer <brian@dehamer.com> Brian Dorsey <brian@dorseys.org> Brian Flad <bflad417@gmail.com> Brian Goff <cpuguy83@gmail.com> Brian McCallister <brianm@skife.org> Brian Olsen <brian@maven-group.org> Brian Schwind <brianmschwind@gmail.com> Brian Shumate <brian@couchbase.com> Brian Torres-Gil <brian@dralth.com> Brian Trump <btrump@yelp.com> Brice Jaglin <bjaglin@teads.tv> Briehan Lombaard <briehan.lombaard@gmail.com> Brielle Broder <bbroder@google.com> Bruno Bigras <bigras.bruno@gmail.com> Bruno Binet <bruno.binet@gmail.com> Bruno Gazzera <bgazzera@paginar.com> Bruno Renié <brutasse@gmail.com> Bruno Tavares <btavare@thoughtworks.com> Bryan Bess <squarejaw@bsbess.com> Bryan Boreham <bjboreham@gmail.com> Bryan Matsuo <bryan.matsuo@gmail.com> Bryan Murphy <bmurphy1976@gmail.com> Burke Libbey <burke@libbey.me> Byung Kang <byung.kang.ctr@amrdec.army.mil> Caleb Spare <cespare@gmail.com> Calen Pennington <cale@edx.org> Cameron Boehmer <cameron.boehmer@gmail.com> Cameron Spear <cameronspear@gmail.com> Campbell Allen <campbell.allen@gmail.com> Candid Dauth <cdauth@cdauth.eu> Cao Weiwei <cao.weiwei30@zte.com.cn> Carl Henrik Lunde <chlunde@ping.uio.no> Carl Loa Odin <carlodin@gmail.com> Carl X. 
Su <bcbcarl@gmail.com> Carlo Mion <mion00@gmail.com> Carlos Alexandro Becker <caarlos0@gmail.com> Carlos Sanchez <carlos@apache.org> Carol Fager-Higgins <carol.fager-higgins@docker.com> Cary <caryhartline@users.noreply.github.com> Casey Bisson <casey.bisson@joyent.com> Catalin Pirvu <pirvu.catalin94@gmail.com> Ce Gao <ce.gao@outlook.com> Cedric Davies <cedricda@microsoft.com> Cezar Sa Espinola <cezarsa@gmail.com> Chad Swenson <chadswen@gmail.com> Chance Zibolski <chance.zibolski@gmail.com> Chander Govindarajan <chandergovind@gmail.com> Chanhun Jeong <keyolk@gmail.com> Chao Wang <wangchao.fnst@cn.fujitsu.com> Charles Chan <charleswhchan@users.noreply.github.com> Charles Hooper <charles.hooper@dotcloud.com> Charles Law <claw@conduce.com> Charles Lindsay <chaz@chazomatic.us> Charles Merriam <charles.merriam@gmail.com> Charles Sarrazin <charles@sarraz.in> Charles Smith <charles.smith@docker.com> Charlie Drage <charlie@charliedrage.com> Charlie Lewis <charliel@lab41.org> Chase Bolt <chase.bolt@gmail.com> ChaYoung You <yousbe@gmail.com> Chen Chao <cc272309126@gmail.com> Chen Chuanliang <chen.chuanliang@zte.com.cn> Chen Hanxiao <chenhanxiao@cn.fujitsu.com> Chen Min <chenmin46@huawei.com> Chen Mingjie <chenmingjie0828@163.com> Chen Qiu <cheney-90@hotmail.com> Cheng-mean Liu <soccerl@microsoft.com> Chengfei Shang <cfshang@alauda.io> Chengguang Xu <cgxu519@gmx.com> chenyuzhu <chenyuzhi@oschina.cn> Chetan Birajdar <birajdar.chetan@gmail.com> Chewey <prosto-chewey@users.noreply.github.com> Chia-liang Kao <clkao@clkao.org> chli <chli@freewheel.tv> Cholerae Hu <choleraehyq@gmail.com> Chris Alfonso <calfonso@redhat.com> Chris Armstrong <chris@opdemand.com> Chris Dias <cdias@microsoft.com> Chris Dituri <csdituri@gmail.com> Chris Fordham <chris@fordham-nagy.id.au> Chris Gavin <chris@chrisgavin.me> Chris Gibson <chris@chrisg.io> Chris Khoo <chris.khoo@gmail.com> Chris McKinnel <chris.mckinnel@tangentlabs.co.uk> Chris McKinnel <chrismckinnel@gmail.com> Chris Seto <chriskseto@gmail.com> Chris Snow <chsnow123@gmail.com> Chris St. Pierre <chris.a.st.pierre@gmail.com> Chris Stivers <chris@stivers.us> Chris Swan <chris.swan@iee.org> Chris Telfer <ctelfer@docker.com> Chris Wahl <github@wahlnetwork.com> Chris Weyl <cweyl@alumni.drew.edu> Chris White <me@cwprogram.com> Christian Berendt <berendt@b1-systems.de> Christian Brauner <christian.brauner@ubuntu.com> Christian Böhme <developement@boehme3d.de> Christian Muehlhaeuser <muesli@gmail.com> Christian Persson <saser@live.se> Christian Rotzoll <ch.rotzoll@gmail.com> Christian Simon <simon@swine.de> Christian Stefanescu <st.chris@gmail.com> Christophe Mehay <cmehay@online.net> Christophe Troestler <christophe.Troestler@umons.ac.be> Christophe Vidal <kriss@krizalys.com> Christopher Biscardi <biscarch@sketcht.com> Christopher Crone <christopher.crone@docker.com> Christopher Currie <codemonkey+github@gmail.com> Christopher Jones <tophj@linux.vnet.ibm.com> Christopher Latham <sudosurootdev@gmail.com> Christopher Rigor <crigor@gmail.com> Christy Perez <christy@linux.vnet.ibm.com> Chun Chen <ramichen@tencent.com> Ciro S. 
Costa <ciro.costa@usp.br> Clayton Coleman <ccoleman@redhat.com> Clinton Kitson <clintonskitson@gmail.com> Cody Roseborough <crrosebo@amazon.com> Coenraad Loubser <coenraad@wish.org.za> Colin Dunklau <colin.dunklau@gmail.com> Colin Hebert <hebert.colin@gmail.com> Colin Panisset <github@clabber.com> Colin Rice <colin@daedrum.net> Colin Walters <walters@verbum.org> Collin Guarino <collin.guarino@gmail.com> Colm Hally <colmhally@gmail.com> companycy <companycy@gmail.com> Corbin Coleman <corbin.coleman@docker.com> Corey Farrell <git@cfware.com> Cory Forsyth <cory.forsyth@gmail.com> cressie176 <github@stephen-cresswell.net> CrimsonGlory <CrimsonGlory@users.noreply.github.com> Cristian Staretu <cristian.staretu@gmail.com> cristiano balducci <cristiano.balducci@gmail.com> Cruceru Calin-Cristian <crucerucalincristian@gmail.com> CUI Wei <ghostplant@qq.com> Cyprian Gracz <cyprian.gracz@micro-jumbo.eu> Cyril F <cyrilf7x@gmail.com> Daan van Berkel <daan.v.berkel.1980@gmail.com> Daehyeok Mun <daehyeok@gmail.com> Dafydd Crosby <dtcrsby@gmail.com> dalanlan <dalanlan925@gmail.com> Damian Smyth <damian@dsau.co> Damien Nadé <github@livna.org> Damien Nozay <damien.nozay@gmail.com> Damjan Georgievski <gdamjan@gmail.com> Dan Anolik <dan@anolik.net> Dan Buch <d.buch@modcloth.com> Dan Cotora <dan@bluevision.ro> Dan Feldman <danf@jfrog.com> Dan Griffin <dgriffin@peer1.com> Dan Hirsch <thequux@upstandinghackers.com> Dan Keder <dan.keder@gmail.com> Dan Levy <dan@danlevy.net> Dan McPherson <dmcphers@redhat.com> Dan Stine <sw@stinemail.com> Dan Williams <me@deedubs.com> Dani Hodovic <dani.hodovic@gmail.com> Dani Louca <dani.louca@docker.com> Daniel Antlinger <d.antlinger@gmx.at> Daniel Dao <dqminh@cloudflare.com> Daniel Exner <dex@dragonslave.de> Daniel Farrell <dfarrell@redhat.com> Daniel Garcia <daniel@danielgarcia.info> Daniel Gasienica <daniel@gasienica.ch> Daniel Grunwell <mwgrunny@gmail.com> Daniel Hiltgen <daniel.hiltgen@docker.com> Daniel J Walsh <dwalsh@redhat.com> Daniel Menet <membership@sontags.ch> Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com> Daniel Nephin <dnephin@docker.com> Daniel Norberg <dano@spotify.com> Daniel Nordberg <dnordberg@gmail.com> Daniel Robinson <gottagetmac@gmail.com> Daniel S <dan.streby@gmail.com> Daniel Von Fange <daniel@leancoder.com> Daniel Watkins <daniel@daniel-watkins.co.uk> Daniel X Moore <yahivin@gmail.com> Daniel YC Lin <dlin.tw@gmail.com> Daniel Zhang <jmzwcn@gmail.com> Danny Berger <dpb587@gmail.com> Danny Yates <danny@codeaholics.org> Danyal Khaliq <danyal.khaliq@tenpearls.com> Darren Coxall <darren@darrencoxall.com> Darren Shepherd <darren.s.shepherd@gmail.com> Darren Stahl <darst@microsoft.com> Dattatraya Kumbhar <dattatraya.kumbhar@gslab.com> Davanum Srinivas <davanum@gmail.com> Dave Barboza <dbarboza@datto.com> Dave Goodchild <buddhamagnet@gmail.com> Dave Henderson <dhenderson@gmail.com> Dave MacDonald <mindlapse@gmail.com> Dave Tucker <dt@docker.com> David Anderson <dave@natulte.net> David Calavera <david.calavera@gmail.com> David Chung <david.chung@docker.com> David Corking <dmc-source@dcorking.com> David Cramer <davcrame@cisco.com> David Currie <david_currie@uk.ibm.com> David Davis <daviddavis@redhat.com> David Dooling <dooling@gmail.com> David Gageot <david@gageot.net> David Gebler <davidgebler@gmail.com> David Glasser <glasser@davidglasser.net> David Lawrence <david.lawrence@docker.com> David Lechner <david@lechnology.com> David M. 
Karr <davidmichaelkarr@gmail.com> David Mackey <tdmackey@booleanhaiku.com> David Mat <david@davidmat.com> David Mcanulty <github@hellspark.com> David McKay <david@rawkode.com> David P Hilton <david.hilton.p@gmail.com> David Pelaez <pelaez89@gmail.com> David R. Jenni <david.r.jenni@gmail.com> David Röthlisberger <david@rothlis.net> David Sheets <dsheets@docker.com> David Sissitka <me@dsissitka.com> David Trott <github@davidtrott.com> David Wang <00107082@163.com> David Williamson <david.williamson@docker.com> David Xia <dxia@spotify.com> David Young <yangboh@cn.ibm.com> Davide Ceretti <davide.ceretti@hogarthww.com> Dawn Chen <dawnchen@google.com> dbdd <wangtong2712@gmail.com> dcylabs <dcylabs@gmail.com> Debayan De <debayande@users.noreply.github.com> Deborah Gertrude Digges <deborah.gertrude.digges@gmail.com> deed02392 <georgehafiz@gmail.com> Deep Debroy <ddebroy@docker.com> Deng Guangxing <dengguangxing@huawei.com> Deni Bertovic <deni@kset.org> Denis Defreyne <denis@soundcloud.com> Denis Gladkikh <denis@gladkikh.email> Denis Ollier <larchunix@users.noreply.github.com> Dennis Chen <barracks510@gmail.com> Dennis Chen <dennis.chen@arm.com> Dennis Docter <dennis@d23.nl> Derek <crq@kernel.org> Derek <crquan@gmail.com> Derek Ch <denc716@gmail.com> Derek McGowan <derek@mcgstyle.net> Deric Crago <deric.crago@gmail.com> Deshi Xiao <dxiao@redhat.com> devmeyster <arthurfbi@yahoo.com> Devvyn Murphy <devvyn@devvyn.com> Dharmit Shah <shahdharmit@gmail.com> Dhawal Yogesh Bhanushali <dbhanushali@vmware.com> Diego Romero <idiegoromero@gmail.com> Diego Siqueira <dieg0@live.com> Dieter Reuter <dieter.reuter@me.com> Dillon Dixon <dillondixon@gmail.com> Dima Stopel <dima@twistlock.com> Dimitri John Ledkov <dimitri.j.ledkov@intel.com> Dimitris Mandalidis <dimitris.mandalidis@gmail.com> Dimitris Rozakis <dimrozakis@gmail.com> Dimitry Andric <d.andric@activevideo.com> Dinesh Subhraveti <dineshs@altiscale.com> Ding Fei <dingfei@stars.org.cn> Diogo Monica <diogo@docker.com> DiuDiugirl <sophia.wang@pku.edu.cn> Djibril Koné <kone.djibril@gmail.com> dkumor <daniel@dkumor.com> Dmitri Logvinenko <dmitri.logvinenko@gmail.com> Dmitri Shuralyov <shurcooL@gmail.com> Dmitry Demeshchuk <demeshchuk@gmail.com> Dmitry Gusev <dmitry.gusev@gmail.com> Dmitry Kononenko <d@dm42.ru> Dmitry Sharshakov <d3dx12.xx@gmail.com> Dmitry Shyshkin <dmitry@shyshkin.org.ua> Dmitry Smirnov <onlyjob@member.fsf.org> Dmitry V. 
Krivenok <krivenok.dmitry@gmail.com> Dmitry Vorobev <dimahabr@gmail.com> Dolph Mathews <dolph.mathews@gmail.com> Dominik Dingel <dingel@linux.vnet.ibm.com> Dominik Finkbeiner <finkes93@gmail.com> Dominik Honnef <dominik@honnef.co> Don Kirkby <donkirkby@users.noreply.github.com> Don Kjer <don.kjer@gmail.com> Don Spaulding <donspauldingii@gmail.com> Donald Huang <don.hcd@gmail.com> Dong Chen <dongluo.chen@docker.com> Donghwa Kim <shanytt@gmail.com> Donovan Jones <git@gamma.net.nz> Doron Podoleanu <doronp@il.ibm.com> Doug Davis <dug@us.ibm.com> Doug MacEachern <dougm@vmware.com> Doug Tangren <d.tangren@gmail.com> Douglas Curtis <dougcurtis1@gmail.com> Dr Nic Williams <drnicwilliams@gmail.com> dragon788 <dragon788@users.noreply.github.com> Dražen Lučanin <kermit666@gmail.com> Drew Erny <drew.erny@docker.com> Drew Hubl <drew.hubl@gmail.com> Dustin Sallings <dustin@spy.net> Ed Costello <epc@epcostello.com> Edmund Wagner <edmund-wagner@web.de> Eiichi Tsukata <devel@etsukata.com> Eike Herzbach <eike@herzbach.net> Eivin Giske Skaaren <eivinsn@axis.com> Eivind Uggedal <eivind@uggedal.com> Elan Ruusamäe <glen@pld-linux.org> Elango Sivanandam <elango.siva@docker.com> Elena Morozova <lelenanam@gmail.com> Eli Uriegas <eli.uriegas@docker.com> Elias Faxö <elias.faxo@tre.se> Elias Probst <mail@eliasprobst.eu> Elijah Zupancic <elijah@zupancic.name> eluck <mail@eluck.me> Elvir Kuric <elvirkuric@gmail.com> Emil Davtyan <emil2k@gmail.com> Emil Hernvall <emil@quench.at> Emily Maier <emily@emilymaier.net> Emily Rose <emily@contactvibe.com> Emir Ozer <emirozer@yandex.com> Enguerran <engcolson@gmail.com> Eohyung Lee <liquidnuker@gmail.com> epeterso <epeterson@breakpoint-labs.com> Eric Barch <barch@tomesoftware.com> Eric Curtin <ericcurtin17@gmail.com> Eric G. Noriega <enoriega@vizuri.com> Eric Hanchrow <ehanchrow@ine.com> Eric Lee <thenorthsecedes@gmail.com> Eric Myhre <hash@exultant.us> Eric Paris <eparis@redhat.com> Eric Rafaloff <erafaloff@gmail.com> Eric Rosenberg <ehaydenr@gmail.com> Eric Sage <eric.david.sage@gmail.com> Eric Soderstrom <ericsoderstrom@gmail.com> Eric Yang <windfarer@gmail.com> Eric-Olivier Lamey <eo@lamey.me> Erica Windisch <erica@windisch.us> Erik Bray <erik.m.bray@gmail.com> Erik Dubbelboer <erik@dubbelboer.com> Erik Hollensbe <github@hollensbe.org> Erik Inge Bolsø <knan@redpill-linpro.com> Erik Kristensen <erik@erikkristensen.com> Erik St. 
Martin <alakriti@gmail.com> Erik Weathers <erikdw@gmail.com> Erno Hopearuoho <erno.hopearuoho@gmail.com> Erwin van der Koogh <info@erronis.nl> Ethan Bell <ebgamer29@gmail.com> Euan Kemp <euan.kemp@coreos.com> Eugen Krizo <eugen.krizo@gmail.com> Eugene Yakubovich <eugene.yakubovich@coreos.com> Evan Allrich <evan@unguku.com> Evan Carmi <carmi@users.noreply.github.com> Evan Hazlett <ejhazlett@gmail.com> Evan Krall <krall@yelp.com> Evan Phoenix <evan@fallingsnow.net> Evan Wies <evan@neomantra.net> Evelyn Xu <evelynhsu21@gmail.com> Everett Toews <everett.toews@rackspace.com> Evgeny Shmarnev <shmarnev@gmail.com> Evgeny Vereshchagin <evvers@ya.ru> Ewa Czechowska <ewa@ai-traders.com> Eystein Måløy Stenberg <eystein.maloy.stenberg@cfengine.com> ezbercih <cem.ezberci@gmail.com> Ezra Silvera <ezra@il.ibm.com> Fabian Kramm <kramm@covexo.com> Fabian Lauer <kontakt@softwareschmiede-saar.de> Fabian Raetz <fabian.raetz@gmail.com> Fabiano Rosas <farosas@br.ibm.com> Fabio Falci <fabiofalci@gmail.com> Fabio Kung <fabio.kung@gmail.com> Fabio Rapposelli <fabio@vmware.com> Fabio Rehm <fgrehm@gmail.com> Fabrizio Regini <freegenie@gmail.com> Fabrizio Soppelsa <fsoppelsa@mirantis.com> Faiz Khan <faizkhan00@gmail.com> falmp <chico.lopes@gmail.com> Fangming Fang <fangming.fang@arm.com> Fangyuan Gao <21551127@zju.edu.cn> fanjiyun <fan.jiyun@zte.com.cn> Fareed Dudhia <fareeddudhia@googlemail.com> Fathi Boudra <fathi.boudra@linaro.org> Federico Gimenez <fgimenez@coit.es> Felipe Oliveira <felipeweb.programador@gmail.com> Felix Abecassis <fabecassis@nvidia.com> Felix Geisendörfer <felix@debuggable.com> Felix Hupfeld <felix@quobyte.com> Felix Rabe <felix@rabe.io> Felix Ruess <felix.ruess@gmail.com> Felix Schindler <fschindler@weluse.de> Feng Yan <fy2462@gmail.com> Fengtu Wang <wangfengtu@huawei.com> Ferenc Szabo <pragmaticfrank@gmail.com> Fernando <fermayo@gmail.com> Fero Volar <alian@alian.info> Ferran Rodenas <frodenas@gmail.com> Filipe Brandenburger <filbranden@google.com> Filipe Oliveira <contato@fmoliveira.com.br> Flavio Castelli <fcastelli@suse.com> Flavio Crisciani <flavio.crisciani@docker.com> Florian <FWirtz@users.noreply.github.com> Florian Klein <florian.klein@free.fr> Florian Maier <marsmensch@users.noreply.github.com> Florian Noeding <noeding@adobe.com> Florian Weingarten <flo@hackvalue.de> Florin Asavoaie <florin.asavoaie@gmail.com> Florin Patan <florinpatan@gmail.com> fonglh <fonglh@gmail.com> Foysal Iqbal <foysal.iqbal.fb@gmail.com> Francesc Campoy <campoy@google.com> Francesco Mari <mari.francesco@gmail.com> Francis Chuang <francis.chuang@boostport.com> Francisco Carriedo <fcarriedo@gmail.com> Francisco Souza <f@souza.cc> Frank Groeneveld <frank@ivaldi.nl> Frank Herrmann <fgh@4gh.tv> Frank Macreery <frank@macreery.com> Frank Rosquin <frank.rosquin+github@gmail.com> Fred Lifton <fred.lifton@docker.com> Frederick F. 
Kautz IV <fkautz@redhat.com> Frederik Loeffert <frederik@zitrusmedia.de> Frederik Nordahl Jul Sabroe <frederikns@gmail.com> Freek Kalter <freek@kalteronline.org> Frieder Bluemle <frieder.bluemle@gmail.com> Fu JinLin <withlin@yeah.net> Félix Baylac-Jacqué <baylac.felix@gmail.com> Félix Cantournet <felix.cantournet@cloudwatt.com> Gabe Rosenhouse <gabe@missionst.com> Gabor Nagy <mail@aigeruth.hu> Gabriel Linder <linder.gabriel@gmail.com> Gabriel Monroy <gabriel@opdemand.com> Gabriel Nicolas Avellaneda <avellaneda.gabriel@gmail.com> Gaetan de Villele <gdevillele@gmail.com> Galen Sampson <galen.sampson@gmail.com> Gang Qiao <qiaohai8866@gmail.com> Gareth Rushgrove <gareth@morethanseven.net> Garrett Barboza <garrett@garrettbarboza.com> Gary Schaetz <gary@schaetzkc.com> Gaurav <gaurav.gosec@gmail.com> gautam, prasanna <prasannagautam@gmail.com> Gaël PORTAY <gael.portay@savoirfairelinux.com> Genki Takiuchi <genki@s21g.com> GennadySpb <lipenkov@gmail.com> Geoffrey Bachelet <grosfrais@gmail.com> Geon Kim <geon0250@gmail.com> George Kontridze <george@bugsnag.com> George MacRorie <gmacr31@gmail.com> George Xie <georgexsh@gmail.com> Georgi Hristozov <georgi@forkbomb.nl> Gereon Frey <gereon.frey@dynport.de> German DZ <germ@ndz.com.ar> Gert van Valkenhoef <g.h.m.van.valkenhoef@rug.nl> Gerwim Feiken <g.feiken@tfe.nl> Ghislain Bourgeois <ghislain.bourgeois@gmail.com> Giampaolo Mancini <giampaolo@trampolineup.com> Gianluca Borello <g.borello@gmail.com> Gildas Cuisinier <gildas.cuisinier@gcuisinier.net> Giovan Isa Musthofa <giovanism@outlook.co.id> gissehel <public-devgit-dantus@gissehel.org> Giuseppe Mazzotta <gdm85@users.noreply.github.com> Gleb Fotengauer-Malinovskiy <glebfm@altlinux.org> Gleb M Borisov <borisov.gleb@gmail.com> Glyn Normington <gnormington@gopivotal.com> GoBella <caili_welcome@163.com> Goffert van Gool <goffert@phusion.nl> Gopikannan Venugopalsamy <gopikannan.venugopalsamy@gmail.com> Gosuke Miyashita <gosukenator@gmail.com> Gou Rao <gou@portworx.com> Govinda Fichtner <govinda.fichtner@googlemail.com> Grant Millar <grant@cylo.io> Grant Reaber <grant.reaber@gmail.com> Graydon Hoare <graydon@pobox.com> Greg Fausak <greg@tacodata.com> Greg Pflaum <gpflaum@users.noreply.github.com> Greg Stephens <greg@udon.org> Greg Thornton <xdissent@me.com> Grzegorz Jaśkiewicz <gj.jaskiewicz@gmail.com> Guilhem Lettron <guilhem+github@lettron.fr> Guilherme Salgado <gsalgado@gmail.com> Guillaume Dufour <gdufour.prestataire@voyages-sncf.com> Guillaume J. 
Charmes <guillaume.charmes@docker.com> guoxiuyan <guoxiuyan@huawei.com> Guri <odg0318@gmail.com> Gurjeet Singh <gurjeet@singh.im> Guruprasad <lgp171188@gmail.com> Gustav Sinder <gustav.sinder@gmail.com> gwx296173 <gaojing3@huawei.com> Günter Zöchbauer <guenter@gzoechbauer.com> haikuoliu <haikuo@amazon.com> Hakan Özler <hakan.ozler@kodcu.com> Hamish Hutchings <moredhel@aoeu.me> Hans Kristian Flaatten <hans@starefossen.com> Hans Rødtang <hansrodtang@gmail.com> Hao Shu Wei <haosw@cn.ibm.com> Hao Zhang <21521210@zju.edu.cn> Harald Albers <github@albersweb.de> Harley Laue <losinggeneration@gmail.com> Harold Cooper <hrldcpr@gmail.com> Harrison Turton <harrisonturton@gmail.com> Harry Zhang <harryz@hyper.sh> Harshal Patil <harshal.patil@in.ibm.com> Harshal Patil <harshalp@linux.vnet.ibm.com> He Simei <hesimei@zju.edu.cn> He Xiaoxi <tossmilestone@gmail.com> He Xin <he_xinworld@126.com> heartlock <21521209@zju.edu.cn> Hector Castro <hectcastro@gmail.com> Helen Xie <chenjg@harmonycloud.cn> Henning Sprang <henning.sprang@gmail.com> Hiroshi Hatake <hatake@clear-code.com> Hiroyuki Sasagawa <hs19870702@gmail.com> Hobofan <goisser94@gmail.com> Hollie Teal <hollie@docker.com> Hong Xu <hong@topbug.net> Hongbin Lu <hongbin034@gmail.com> hsinko <21551195@zju.edu.cn> Hu Keping <hukeping@huawei.com> Hu Tao <hutao@cn.fujitsu.com> Huanzhong Zhang <zhanghuanzhong90@gmail.com> Huayi Zhang <irachex@gmail.com> Hugo Duncan <hugo@hugoduncan.org> Hugo Marisco <0x6875676f@gmail.com> Hunter Blanks <hunter@twilio.com> huqun <huqun@zju.edu.cn> Huu Nguyen <huu@prismskylabs.com> hyeongkyu.lee <hyeongkyu.lee@navercorp.com> Hyzhou Zhy <hyzhou.zhy@alibaba-inc.com> Iago López Galeiras <iago@kinvolk.io> Ian Babrou <ibobrik@gmail.com> Ian Bishop <ianbishop@pace7.com> Ian Bull <irbull@gmail.com> Ian Calvert <ianjcalvert@gmail.com> Ian Campbell <ian.campbell@docker.com> Ian Chen <ianre657@gmail.com> Ian Lee <IanLee1521@gmail.com> Ian Main <imain@redhat.com> Ian Philpot <ian.philpot@microsoft.com> Ian Truslove <ian.truslove@gmail.com> Iavael <iavaelooeyt@gmail.com> Icaro Seara <icaro.seara@gmail.com> Ignacio Capurro <icapurrofagian@gmail.com> Igor Dolzhikov <bluesriverz@gmail.com> Igor Karpovich <i.karpovich@currencysolutions.com> Iliana Weller <iweller@amazon.com> Ilkka Laukkanen <ilkka@ilkka.io> Ilya Dmitrichenko <errordeveloper@gmail.com> Ilya Gusev <mail@igusev.ru> Ilya Khlopotov <ilya.khlopotov@gmail.com> imre Fitos <imre.fitos+github@gmail.com> inglesp <peter.inglesby@gmail.com> Ingo Gottwald <in.gottwald@gmail.com> Innovimax <innovimax@gmail.com> Isaac Dupree <antispam@idupree.com> Isabel Jimenez <contact.isabeljimenez@gmail.com> Isao Jonas <isao.jonas@gmail.com> Iskander Sharipov <quasilyte@gmail.com> Ivan Babrou <ibobrik@gmail.com> Ivan Fraixedes <ifcdev@gmail.com> Ivan Grcic <igrcic@gmail.com> Ivan Markin <sw@nogoegst.net> J Bruni <joaohbruni@yahoo.com.br> J. 
Nunn <jbnunn@gmail.com> Jack Danger Canty <jackdanger@squareup.com> Jack Laxson <jackjrabbit@gmail.com> Jacob Atzen <jacob@jacobatzen.dk> Jacob Edelman <edelman.jd@gmail.com> Jacob Tomlinson <jacob@tom.linson.uk> Jacob Vallejo <jakeev@amazon.com> Jacob Wen <jian.w.wen@oracle.com> Jaivish Kothari <janonymous.codevulture@gmail.com> Jake Champlin <jake.champlin.27@gmail.com> Jake Moshenko <jake@devtable.com> Jake Sanders <jsand@google.com> jakedt <jake@devtable.com> James Allen <jamesallen0108@gmail.com> James Carey <jecarey@us.ibm.com> James Carr <james.r.carr@gmail.com> James DeFelice <james.defelice@ishisystems.com> James Harrison Fisher <jameshfisher@gmail.com> James Kyburz <james.kyburz@gmail.com> James Kyle <james@jameskyle.org> James Lal <james@lightsofapollo.com> James Mills <prologic@shortcircuit.net.au> James Nesbitt <james.nesbitt@wunderkraut.com> James Nugent <james@jen20.com> James Turnbull <james@lovedthanlost.net> James Watkins-Harvey <jwatkins@progi-media.com> Jamie Hannaford <jamie@limetree.org> Jamshid Afshar <jafshar@yahoo.com> Jan Keromnes <janx@linux.com> Jan Koprowski <jan.koprowski@gmail.com> Jan Pazdziora <jpazdziora@redhat.com> Jan Toebes <jan@toebes.info> Jan-Gerd Tenberge <janten@gmail.com> Jan-Jaap Driessen <janjaapdriessen@gmail.com> Jana Radhakrishnan <mrjana@docker.com> Jannick Fahlbusch <git@jf-projects.de> Januar Wayong <januar@gmail.com> Jared Biel <jared.biel@bolderthinking.com> Jared Hocutt <jaredh@netapp.com> Jaroslaw Zabiello <hipertracker@gmail.com> jaseg <jaseg@jaseg.net> Jasmine Hegman <jasmine@jhegman.com> Jason Divock <jdivock@gmail.com> Jason Giedymin <jasong@apache.org> Jason Green <Jason.Green@AverInformatics.Com> Jason Hall <imjasonh@gmail.com> Jason Heiss <jheiss@aput.net> Jason Livesay <ithkuil@gmail.com> Jason McVetta <jason.mcvetta@gmail.com> Jason Plum <jplum@devonit.com> Jason Shepherd <jason@jasonshepherd.net> Jason Smith <jasonrichardsmith@gmail.com> Jason Sommer <jsdirv@gmail.com> Jason Stangroome <jason@codeassassin.com> jaxgeller <jacksongeller@gmail.com> Jay <imjching@hotmail.com> Jay <teguhwpurwanto@gmail.com> Jay Kamat <github@jgkamat.33mail.com> Jean Rouge <rougej+github@gmail.com> Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com> Jean-Baptiste Dalido <jeanbaptiste@appgratis.com> Jean-Christophe Berthon <huygens@berthon.eu> Jean-Paul Calderone <exarkun@twistedmatrix.com> Jean-Pierre Huynh <jean-pierre.huynh@ounet.fr> Jean-Tiare Le Bigot <jt@yadutaf.fr> Jeeva S. 
Chelladhurai <sjeeva@gmail.com> Jeff Anderson <jeff@docker.com> Jeff Hajewski <jeff.hajewski@gmail.com> Jeff Johnston <jeff.johnston.mn@gmail.com> Jeff Lindsay <progrium@gmail.com> Jeff Mickey <j@codemac.net> Jeff Minard <jeff@creditkarma.com> Jeff Nickoloff <jeff.nickoloff@gmail.com> Jeff Silberman <jsilberm@gmail.com> Jeff Welch <whatthejeff@gmail.com> Jeffrey Bolle <jeffreybolle@gmail.com> Jeffrey Morgan <jmorganca@gmail.com> Jeffrey van Gogh <jvg@google.com> Jenny Gebske <jennifer@gebske.de> Jeremy Chambers <jeremy@thehipbot.com> Jeremy Grosser <jeremy@synack.me> Jeremy Price <jprice.rhit@gmail.com> Jeremy Qian <vanpire110@163.com> Jeremy Unruh <jeremybunruh@gmail.com> Jeremy Yallop <yallop@docker.com> Jeroen Franse <jeroenfranse@gmail.com> Jeroen Jacobs <github@jeroenj.be> Jesse Dearing <jesse.dearing@gmail.com> Jesse Dubay <jesse@thefortytwo.net> Jessica Frazelle <acidburn@microsoft.com> Jezeniel Zapanta <jpzapanta22@gmail.com> Jhon Honce <jhonce@redhat.com> Ji.Zhilong <zhilongji@gmail.com> Jian Liao <jliao@alauda.io> Jian Zhang <zhangjian.fnst@cn.fujitsu.com> Jiang Jinyang <jjyruby@gmail.com> Jie Luo <luo612@zju.edu.cn> Jihyun Hwang <jhhwang@telcoware.com> Jilles Oldenbeuving <ojilles@gmail.com> Jim Alateras <jima@comware.com.au> Jim Galasyn <jim.galasyn@docker.com> Jim Minter <jminter@redhat.com> Jim Perrin <jperrin@centos.org> Jimmy Cuadra <jimmy@jimmycuadra.com> Jimmy Puckett <jimmy.puckett@spinen.com> Jimmy Song <rootsongjc@gmail.com> Jinsoo Park <cellpjs@gmail.com> Jintao Zhang <zhangjintao9020@gmail.com> Jiri Appl <jiria@microsoft.com> Jiri Popelka <jpopelka@redhat.com> Jiuyue Ma <majiuyue@huawei.com> Jiří Župka <jzupka@redhat.com> Joao Fernandes <joao.fernandes@docker.com> Joao Trindade <trindade.joao@gmail.com> Joe Beda <joe.github@bedafamily.com> Joe Doliner <jdoliner@pachyderm.io> Joe Ferguson <joe@infosiftr.com> Joe Gordon <joe.gordon0@gmail.com> Joe Shaw <joe@joeshaw.org> Joe Van Dyk <joe@tanga.com> Joel Friedly <joelfriedly@gmail.com> Joel Handwell <joelhandwell@gmail.com> Joel Hansson <joel.hansson@ecraft.com> Joel Wurtz <jwurtz@jolicode.com> Joey Geiger <jgeiger@gmail.com> Joey Geiger <jgeiger@users.noreply.github.com> Joey Gibson <joey@joeygibson.com> Joffrey F <joffrey@docker.com> Johan Euphrosine <proppy@google.com> Johan Rydberg <johan.rydberg@gmail.com> Johanan Lieberman <johanan.lieberman@gmail.com> Johannes 'fish' Ziemke <github@freigeist.org> John Costa <john.costa@gmail.com> John Feminella <jxf@jxf.me> John Gardiner Myers <jgmyers@proofpoint.com> John Gossman <johngos@microsoft.com> John Harris <john@johnharris.io> John Howard (VM) <John.Howard@microsoft.com> John Laswell <john.n.laswell@gmail.com> John Maguire <jmaguire@duosecurity.com> John Mulhausen <john@docker.com> John OBrien III <jobrieniii@yahoo.com> John Starks <jostarks@microsoft.com> John Stephens <johnstep@docker.com> John Tims <john.k.tims@gmail.com> John V. Martinez <jvmatl@gmail.com> John Warwick <jwarwick@gmail.com> John Willis <john.willis@docker.com> Jon Johnson <jonjohnson@google.com> Jon Surrell <jon.surrell@gmail.com> Jon Wedaman <jweede@gmail.com> Jonas Pfenniger <jonas@pfenniger.name> Jonathan A. Schweder <jonathanschweder@gmail.com> Jonathan A. 
Sternberg <jonathansternberg@gmail.com> Jonathan Boulle <jonathanboulle@gmail.com> Jonathan Camp <jonathan@irondojo.com> Jonathan Choy <jonathan.j.choy@gmail.com> Jonathan Dowland <jon+github@alcopop.org> Jonathan Lebon <jlebon@redhat.com> Jonathan Lomas <jonathan@floatinglomas.ca> Jonathan McCrohan <jmccrohan@gmail.com> Jonathan Mueller <j.mueller@apoveda.ch> Jonathan Pares <jonathanpa@users.noreply.github.com> Jonathan Rudenberg <jonathan@titanous.com> Jonathan Stoppani <jonathan.stoppani@divio.com> Jonh Wendell <jonh.wendell@redhat.com> Joni Sar <yoni@cocycles.com> Joost Cassee <joost@cassee.net> Jordan Arentsen <blissdev@gmail.com> Jordan Jennings <jjn2009@gmail.com> Jordan Sissel <jls@semicomplete.com> Jorge Marin <chipironcin@users.noreply.github.com> Jorit Kleine-Möllhoff <joppich@bricknet.de> Jose Diaz-Gonzalez <email@josediazgonzalez.com> Joseph Anthony Pasquale Holsten <joseph@josephholsten.com> Joseph Hager <ajhager@gmail.com> Joseph Kern <jkern@semafour.net> Joseph Rothrock <rothrock@rothrock.org> Josh <jokajak@gmail.com> Josh Bodah <jb3689@yahoo.com> Josh Bonczkowski <josh.bonczkowski@gmail.com> Josh Chorlton <jchorlton@gmail.com> Josh Eveleth <joshe@opendns.com> Josh Hawn <josh.hawn@docker.com> Josh Horwitz <horwitz@addthis.com> Josh Poimboeuf <jpoimboe@redhat.com> Josh Soref <jsoref@gmail.com> Josh Wilson <josh.wilson@fivestars.com> Josiah Kiehl <jkiehl@riotgames.com> José Tomás Albornoz <jojo@eljojo.net> Joyce Jang <mail@joycejang.com> JP <jpellerin@leapfrogonline.com> Julian Taylor <jtaylor.debian@googlemail.com> Julien Barbier <write0@gmail.com> Julien Bisconti <veggiemonk@users.noreply.github.com> Julien Bordellier <julienbordellier@gmail.com> Julien Dubois <julien.dubois@gmail.com> Julien Kassar <github@kassisol.com> Julien Maitrehenry <julien.maitrehenry@me.com> Julien Pervillé <julien.perville@perfect-memory.com> Julio Montes <imc.coder@gmail.com> Jun-Ru Chang <jrjang@gmail.com> Jussi Nummelin <jussi.nummelin@gmail.com> Justas Brazauskas <brazauskasjustas@gmail.com> Justin Cormack <justin.cormack@docker.com> Justin Force <justin.force@gmail.com> Justin Menga <justin.menga@gmail.com> Justin Plock <jplock@users.noreply.github.com> Justin Simonelis <justin.p.simonelis@gmail.com> Justin Terry <juterry@microsoft.com> Justyn Temme <justyntemme@gmail.com> Jyrki Puttonen <jyrkiput@gmail.com> Jérôme Petazzoni <jerome.petazzoni@docker.com> Jörg Thalheim <joerg@higgsboson.tk> K. 
Heller <pestophagous@gmail.com> Kai Blin <kai@samba.org> Kai Qiang Wu (Kennan) <wkq5325@gmail.com> Kamil Domański <kamil@domanski.co> Kamjar Gerami <kami.gerami@gmail.com> Kanstantsin Shautsou <kanstantsin.sha@gmail.com> Kara Alexandra <kalexandra@us.ibm.com> Karan Lyons <karan@karanlyons.com> Kareem Khazem <karkhaz@karkhaz.com> kargakis <kargakis@users.noreply.github.com> Karl Grzeszczak <karlgrz@gmail.com> Karol Duleba <mr.fuxi@gmail.com> Karthik Karanth <karanth.karthik@gmail.com> Karthik Nayak <karthik.188@gmail.com> Kasper Fabæch Brandt <poizan@poizan.dk> Kate Heddleston <kate.heddleston@gmail.com> Katie McLaughlin <katie@glasnt.com> Kato Kazuyoshi <kato.kazuyoshi@gmail.com> Katrina Owen <katrina.owen@gmail.com> Kawsar Saiyeed <kawsar.saiyeed@projiris.com> Kay Yan <kay.yan@daocloud.io> kayrus <kay.diam@gmail.com> Kazuhiro Sera <seratch@gmail.com> Ke Li <kel@splunk.com> Ke Xu <leonhartx.k@gmail.com> Kei Ohmura <ohmura.kei@gmail.com> Keith Hudgins <greenman@greenman.org> Keli Hu <dev@keli.hu> Ken Cochrane <kencochrane@gmail.com> Ken Herner <kherner@progress.com> Ken ICHIKAWA <ichikawa.ken@jp.fujitsu.com> Ken Reese <krrgithub@gmail.com> Kenfe-Mickaël Laventure <mickael.laventure@gmail.com> Kenjiro Nakayama <nakayamakenjiro@gmail.com> Kent Johnson <kentoj@gmail.com> Kevin "qwazerty" Houdebert <kevin.houdebert@gmail.com> Kevin Burke <kev@inburke.com> Kevin Clark <kevin.clark@gmail.com> Kevin Feyrer <kevin.feyrer@btinternet.com> Kevin J. Lynagh <kevin@keminglabs.com> Kevin Jing Qiu <kevin@idempotent.ca> Kevin Kern <kaiwentan@harmonycloud.cn> Kevin Menard <kevin@nirvdrum.com> Kevin Meredith <kevin.m.meredith@gmail.com> Kevin P. Kucharczyk <kevinkucharczyk@gmail.com> Kevin Richardson <kevin@kevinrichardson.co> Kevin Shi <kshi@andrew.cmu.edu> Kevin Wallace <kevin@pentabarf.net> Kevin Yap <me@kevinyap.ca> Keyvan Fatehi <keyvanfatehi@gmail.com> kies <lleelm@gmail.com> Kim BKC Carlbacker <kim.carlbacker@gmail.com> Kim Eik <kim@heldig.org> Kimbro Staken <kstaken@kstaken.com> Kir Kolyshkin <kolyshkin@gmail.com> Kiran Gangadharan <kiran.daredevil@gmail.com> Kirill SIbirev <l0kix2@gmail.com> knappe <tyler.knappe@gmail.com> Kohei Tsuruta <coheyxyz@gmail.com> Koichi Shiraishi <k@zchee.io> Konrad Kleine <konrad.wilhelm.kleine@gmail.com> Konstantin Gribov <grossws@gmail.com> Konstantin L <sw.double@gmail.com> Konstantin Pelykh <kpelykh@zettaset.com> Krasi Georgiev <krasi@vip-consult.solutions> Krasimir Georgiev <support@vip-consult.co.uk> Kris-Mikael Krister <krismikael@protonmail.com> Kristian Haugene <kristian.haugene@capgemini.com> Kristina Zabunova <triara.xiii@gmail.com> Krystian Wojcicki <kwojcicki@sympatico.ca> Kun Zhang <zkazure@gmail.com> Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp> Kunal Tyagi <tyagi.kunal@live.com> Kyle Conroy <kyle.j.conroy@gmail.com> Kyle Linden <linden.kyle@gmail.com> Kyle Wuolle <kyle.wuolle@gmail.com> kyu <leehk1227@gmail.com> Lachlan Coote <lcoote@vmware.com> Lai Jiangshan <jiangshanlai@gmail.com> Lajos Papp <lajos.papp@sequenceiq.com> Lakshan Perera <lakshan@laktek.com> Lalatendu Mohanty <lmohanty@redhat.com> Lance Chen <cyen0312@gmail.com> Lance Kinley <lkinley@loyaltymethods.com> Lars Butler <Lars.Butler@gmail.com> Lars Kellogg-Stedman <lars@redhat.com> Lars R. 
Damerow <lars@pixar.com> Lars-Magnus Skog <ralphtheninja@riseup.net> Laszlo Meszaros <lacienator@gmail.com> Laura Frank <ljfrank@gmail.com> Laurent Erignoux <lerignoux@gmail.com> Laurie Voss <github@seldo.com> Leandro Siqueira <leandro.siqueira@gmail.com> Lee Chao <932819864@qq.com> Lee, Meng-Han <sunrisedm4@gmail.com> leeplay <hyeongkyu.lee@navercorp.com> Lei Gong <lgong@alauda.io> Lei Jitang <leijitang@huawei.com> Len Weincier <len@cloudafrica.net> Lennie <github@consolejunkie.net> Leo Gallucci <elgalu3@gmail.com> Leszek Kowalski <github@leszekkowalski.pl> Levi Blackstone <levi.blackstone@rackspace.com> Levi Gross <levi@levigross.com> Lewis Daly <lewisdaly@me.com> Lewis Marshall <lewis@lmars.net> Lewis Peckover <lew+github@lew.io> Li Yi <denverdino@gmail.com> Liam Macgillavry <liam@kumina.nl> Liana Lo <liana.lixia@gmail.com> Liang Mingqiang <mqliang.zju@gmail.com> Liang-Chi Hsieh <viirya@gmail.com> Liao Qingwei <liaoqingwei@huawei.com> Lifubang <lifubang@acmcoder.com> Lihua Tang <lhtang@alauda.io> Lily Guo <lily.guo@docker.com> limsy <seongyeol37@gmail.com> Lin Lu <doraalin@163.com> LingFaKe <lingfake@huawei.com> Linus Heckemann <lheckemann@twig-world.com> Liran Tal <liran.tal@gmail.com> Liron Levin <liron@twistlock.com> Liu Bo <bo.li.liu@oracle.com> Liu Hua <sdu.liu@huawei.com> liwenqi <vikilwq@zju.edu.cn> lixiaobing10051267 <li.xiaobing1@zte.com.cn> Liz Zhang <lizzha@microsoft.com> LIZAO LI <lzlarryli@gmail.com> Lizzie Dixon <_@lizzie.io> Lloyd Dewolf <foolswisdom@gmail.com> Lokesh Mandvekar <lsm5@fedoraproject.org> longliqiang88 <394564827@qq.com> Lorenz Leutgeb <lorenz.leutgeb@gmail.com> Lorenzo Fontana <fontanalorenz@gmail.com> Lotus Fenn <fenn.lotus@gmail.com> Louis Opter <kalessin@kalessin.fr> Luca Favatella <luca.favatella@erlang-solutions.com> Luca Marturana <lucamarturana@gmail.com> Luca Orlandi <luca.orlandi@gmail.com> Luca-Bogdan Grigorescu <Luca-Bogdan Grigorescu> Lucas Chan <lucas-github@lucaschan.com> Lucas Chi <lucas@teacherspayteachers.com> Lucas Molas <lmolas@fundacionsadosky.org.ar> Lucas Silvestre <lukas.silvestre@gmail.com> Luciano Mores <leslau@gmail.com> Luis Martínez de Bartolomé Izquierdo <lmartinez@biicode.com> Luiz Svoboda <luizek@gmail.com> Lukas Waslowski <cr7pt0gr4ph7@gmail.com> lukaspustina <lukas.pustina@centerdevice.com> Lukasz Zajaczkowski <Lukasz.Zajaczkowski@ts.fujitsu.com> Luke Marsden <me@lukemarsden.net> Lyn <energylyn@zju.edu.cn> Lynda O'Leary <lyndaoleary29@gmail.com> Lénaïc Huard <lhuard@amadeus.com> Ma Müller <mueller-ma@users.noreply.github.com> Ma Shimiao <mashimiao.fnst@cn.fujitsu.com> Mabin <bin.ma@huawei.com> Madhan Raj Mookkandy <MadhanRaj.Mookkandy@microsoft.com> Madhav Puri <madhav.puri@gmail.com> Madhu Venugopal <madhu@socketplane.io> Mageee <fangpuyi@foxmail.com> Mahesh Tiyyagura <tmahesh@gmail.com> malnick <malnick@gmail..com> Malte Janduda <mail@janduda.net> Manfred Touron <m@42.am> Manfred Zabarauskas <manfredas@zabarauskas.com> Manjunath A Kumatagi <mkumatag@in.ibm.com> Mansi Nahar <mmn4185@rit.edu> Manuel Meurer <manuel@krautcomputing.com> Manuel Rüger <manuel@rueg.eu> Manuel Woelker <github@manuel.woelker.org> mapk0y <mapk0y@gmail.com> Marc Abramowitz <marc@marc-abramowitz.com> Marc Kuo <kuomarc2@gmail.com> Marc Tamsky <mtamsky@gmail.com> Marcel Edmund Franke <marcel.edmund.franke@gmail.com> Marcelo Horacio Fortino <info@fortinux.com> Marcelo Salazar <chelosalazar@gmail.com> Marco Hennings <marco.hennings@freiheit.com> Marcus Cobden <mcobden@cisco.com> Marcus Farkas <toothlessgear@finitebox.com> Marcus Linke <marcus.linke@gmx.de> 
Marcus Martins <marcus@docker.com> Marcus Ramberg <marcus@nordaaker.com> Marek Goldmann <marek.goldmann@gmail.com> Marian Marinov <mm@yuhu.biz> Marianna Tessel <mtesselh@gmail.com> Mario Loriedo <mario.loriedo@gmail.com> Marius Gundersen <me@mariusgundersen.net> Marius Sturm <marius@graylog.com> Marius Voila <marius.voila@gmail.com> Mark Allen <mrallen1@yahoo.com> Mark Jeromin <mark.jeromin@sysfrog.net> Mark McGranaghan <mmcgrana@gmail.com> Mark McKinstry <mmckinst@umich.edu> Mark Milstein <mark@epiloque.com> Mark Oates <fl0yd@me.com> Mark Parker <godefroi@users.noreply.github.com> Mark West <markewest@gmail.com> Markan Patel <mpatel678@gmail.com> Marko Mikulicic <mmikulicic@gmail.com> Marko Tibold <marko@tibold.nl> Markus Fix <lispmeister@gmail.com> Markus Kortlang <hyp3rdino@googlemail.com> Martijn Dwars <ikben@martijndwars.nl> Martijn van Oosterhout <kleptog@svana.org> Martin Honermeyer <maze@strahlungsfrei.de> Martin Kelly <martin@surround.io> Martin Mosegaard Amdisen <martin.amdisen@praqma.com> Martin Muzatko <martin@happy-css.com> Martin Redmond <redmond.martin@gmail.com> Mary Anthony <mary.anthony@docker.com> Masahito Zembutsu <zembutsu@users.noreply.github.com> Masato Ohba <over.rye@gmail.com> Masayuki Morita <minamijoyo@gmail.com> Mason Malone <mason.malone@gmail.com> Mateusz Sulima <sulima.mateusz@gmail.com> Mathias Monnerville <mathias@monnerville.com> Mathieu Champlon <mathieu.champlon@docker.com> Mathieu Le Marec - Pasquet <kiorky@cryptelium.net> Mathieu Parent <math.parent@gmail.com> Matt Apperson <me@mattapperson.com> Matt Bachmann <bachmann.matt@gmail.com> Matt Bentley <matt.bentley@docker.com> Matt Haggard <haggardii@gmail.com> Matt Hoyle <matt@deployable.co> Matt McCormick <matt.mccormick@kitware.com> Matt Moore <mattmoor@google.com> Matt Richardson <matt@redgumtech.com.au> Matt Rickard <mrick@google.com> Matt Robenolt <matt@ydekproductions.com> Matt Schurenko <matt.schurenko@gmail.com> Matt Williams <mattyw@me.com> Matthew Heon <mheon@redhat.com> Matthew Lapworth <matthewl@bit-shift.net> Matthew Mayer <matthewkmayer@gmail.com> Matthew Mosesohn <raytrac3r@gmail.com> Matthew Mueller <mattmuelle@gmail.com> Matthew Riley <mattdr@google.com> Matthias Klumpp <matthias@tenstral.net> Matthias Kühnle <git.nivoc@neverbox.com> Matthias Rampke <mr@soundcloud.com> Matthieu Hauglustaine <matt.hauglustaine@gmail.com> Mattias Jernberg <nostrad@gmail.com> Mauricio Garavaglia <mauricio@medallia.com> mauriyouth <mauriyouth@gmail.com> Max Shytikov <mshytikov@gmail.com> Maxim Fedchyshyn <sevmax@gmail.com> Maxim Ivanov <ivanov.maxim@gmail.com> Maxim Kulkin <mkulkin@mirantis.com> Maxim Treskin <zerthurd@gmail.com> Maxime Petazzoni <max@signalfuse.com> Maximiliano Maccanti <maccanti@amazon.com> Maxwell <csuhp007@gmail.com> Meaglith Ma <genedna@gmail.com> meejah <meejah@meejah.ca> Megan Kostick <mkostick@us.ibm.com> Mehul Kar <mehul.kar@gmail.com> Mei ChunTao <mei.chuntao@zte.com.cn> Mengdi Gao <usrgdd@gmail.com> Mert Yazıcıoğlu <merty@users.noreply.github.com> mgniu <mgniu@dataman-inc.com> Micah Zoltu <micah@newrelic.com> Michael A. 
Smith <michael@smith-li.com> Michael Bridgen <mikeb@squaremobius.net> Michael Brown <michael@netdirect.ca> Michael Chiang <mchiang@docker.com> Michael Crosby <michael@docker.com> Michael Currie <mcurrie@bruceforceresearch.com> Michael Friis <friism@gmail.com> Michael Gorsuch <gorsuch@github.com> Michael Grauer <michael.grauer@kitware.com> Michael Holzheu <holzheu@linux.vnet.ibm.com> Michael Hudson-Doyle <michael.hudson@canonical.com> Michael Huettermann <michael@huettermann.net> Michael Irwin <mikesir87@gmail.com> Michael Käufl <docker@c.michael-kaeufl.de> Michael Neale <michael.neale@gmail.com> Michael Nussbaum <michael.nussbaum@getbraintree.com> Michael Prokop <github@michael-prokop.at> Michael Scharf <github@scharf.gr> Michael Spetsiotis <michael_spets@hotmail.com> Michael Stapelberg <michael+gh@stapelberg.de> Michael Steinert <mike.steinert@gmail.com> Michael Thies <michaelthies78@gmail.com> Michael West <mwest@mdsol.com> Michal Fojtik <mfojtik@redhat.com> Michal Gebauer <mishak@mishak.net> Michal Jemala <michal.jemala@gmail.com> Michal Minář <miminar@redhat.com> Michal Wieczorek <wieczorek-michal@wp.pl> Michaël Pailloncy <mpapo.dev@gmail.com> Michał Czeraszkiewicz <czerasz@gmail.com> Michał Gryko <github@odkurzacz.org> Michiel de Jong <michiel@unhosted.org> Mickaël Fortunato <morsi.morsicus@gmail.com> Mickaël Remars <mickael@remars.com> Miguel Angel Fernández <elmendalerenda@gmail.com> Miguel Morales <mimoralea@gmail.com> Mihai Borobocea <MihaiBorob@gmail.com> Mihuleacc Sergiu <mihuleac.sergiu@gmail.com> Mike Brown <brownwm@us.ibm.com> Mike Casas <mkcsas0@gmail.com> Mike Chelen <michael.chelen@gmail.com> Mike Danese <mikedanese@google.com> Mike Dillon <mike@embody.org> Mike Dougherty <mike.dougherty@docker.com> Mike Estes <mike.estes@logos.com> Mike Gaffney <mike@uberu.com> Mike Goelzer <mike.goelzer@docker.com> Mike Leone <mleone896@gmail.com> Mike Lundy <mike@fluffypenguin.org> Mike MacCana <mike.maccana@gmail.com> Mike Naberezny <mike@naberezny.com> Mike Snitzer <snitzer@redhat.com> mikelinjie <294893458@qq.com> Mikhail Sobolev <mss@mawhrin.net> Miklos Szegedi <miklos.szegedi@cloudera.com> Milind Chawre <milindchawre@gmail.com> Miloslav Trmač <mitr@redhat.com> mingqing <limingqing@cyou-inc.com> Mingzhen Feng <fmzhen@zju.edu.cn> Misty Stanley-Jones <misty@docker.com> Mitch Capper <mitch.capper@gmail.com> Mizuki Urushida <z11111001011@gmail.com> mlarcher <github@ringabell.org> Mohammad Banikazemi <mb@us.ibm.com> Mohammad Nasirifar <farnasirim@gmail.com> Mohammed Aaqib Ansari <maaquib@gmail.com> Mohit Soni <mosoni@ebay.com> Moorthy RS <rsmoorthy@gmail.com> Morgan Bauer <mbauer@us.ibm.com> Morgante Pell <morgante.pell@morgante.net> Morgy93 <thomas@ulfertsprygoda.de> Morten Siebuhr <sbhr@sbhr.dk> Morton Fox <github@qslw.com> Moysés Borges <moysesb@gmail.com> mrfly <mr.wrfly@gmail.com> Mrunal Patel <mrunalp@gmail.com> Muayyad Alsadi <alsadi@gmail.com> Mustafa Akın <mustafa91@gmail.com> Muthukumar R <muthur@gmail.com> Máximo Cuadros <mcuadros@gmail.com> Médi-Rémi Hashim <medimatrix@users.noreply.github.com> Nace Oroz <orkica@gmail.com> Nahum Shalman <nshalman@omniti.com> Nakul Pathak <nakulpathak3@hotmail.com> Nalin Dahyabhai <nalin@redhat.com> Nan Monnand Deng <monnand@gmail.com> Naoki Orii <norii@cs.cmu.edu> Natalie Parker <nparker@omnifone.com> Natanael Copa <natanael.copa@docker.com> Natasha Jarus <linuxmercedes@gmail.com> Nate Brennand <nate.brennand@clever.com> Nate Eagleson <nate@nateeag.com> Nate Jones <nate@endot.org> Nathan Hsieh <hsieh.nathan@gmail.com> Nathan Kleyn 
<nathan@nathankleyn.com> Nathan LeClaire <nathan.leclaire@docker.com> Nathan McCauley <nathan.mccauley@docker.com> Nathan Williams <nathan@teamtreehouse.com> Naveed Jamil <naveed.jamil@tenpearls.com> Neal McBurnett <neal@mcburnett.org> Neil Horman <nhorman@tuxdriver.com> Neil Peterson <neilpeterson@outlook.com> Nelson Chen <crazysim@gmail.com> Neyazul Haque <nuhaque@gmail.com> Nghia Tran <nghia@google.com> Niall O'Higgins <niallo@unworkable.org> Nicholas E. Rabenau <nerab@gmx.at> Nick DeCoursin <n.decoursin@foodpanda.com> Nick Irvine <nfirvine@nfirvine.com> Nick Neisen <nwneisen@gmail.com> Nick Parker <nikaios@gmail.com> Nick Payne <nick@kurai.co.uk> Nick Russo <nicholasjamesrusso@gmail.com> Nick Stenning <nick.stenning@digital.cabinet-office.gov.uk> Nick Stinemates <nick@stinemates.org> NickrenREN <yuquan.ren@easystack.cn> Nicola Kabar <nicolaka@gmail.com> Nicolas Borboën <ponsfrilus@gmail.com> Nicolas De Loof <nicolas.deloof@gmail.com> Nicolas Dudebout <nicolas.dudebout@gatech.edu> Nicolas Goy <kuon@goyman.com> Nicolas Kaiser <nikai@nikai.net> Nicolas Sterchele <sterchele.nicolas@gmail.com> Nicolas V Castet <nvcastet@us.ibm.com> Nicolás Hock Isaza <nhocki@gmail.com> Nigel Poulton <nigelpoulton@hotmail.com> Nik Nyby <nikolas@gnu.org> Nikhil Chawla <chawlanikhil24@gmail.com> NikolaMandic <mn080202@gmail.com> Nikolas Garofil <nikolas.garofil@uantwerpen.be> Nikolay Milovanov <nmil@itransformers.net> Nirmal Mehta <nirmalkmehta@gmail.com> Nishant Totla <nishanttotla@gmail.com> NIWA Hideyuki <niwa.niwa@nifty.ne.jp> Noah Meyerhans <nmeyerha@amazon.com> Noah Treuhaft <noah.treuhaft@docker.com> NobodyOnSE <ich@sektor.selfip.com> noducks <onemannoducks@gmail.com> Nolan Darilek <nolan@thewordnerd.info> Noriki Nakamura <noriki.nakamura@miraclelinux.com> nponeccop <andy.melnikov@gmail.com> Nuutti Kotivuori <naked@iki.fi> nzwsch <hi@nzwsch.com> O.S. Tezer <ostezer@gmail.com> objectified <objectified@gmail.com> Oguz Bilgic <fisyonet@gmail.com> Oh Jinkyun <tintypemolly@gmail.com> Ohad Schneider <ohadschn@users.noreply.github.com> ohmystack <jun.jiang02@ele.me> Ole Reifschneider <mail@ole-reifschneider.de> Oliver Neal <ItsVeryWindy@users.noreply.github.com> Oliver Reason <oli@overrateddev.co> Olivier Gambier <dmp42@users.noreply.github.com> Olle Jonsson <olle.jonsson@gmail.com> Olli Janatuinen <olli.janatuinen@gmail.com> Omri Shiv <Omri.Shiv@teradata.com> Oriol Francès <oriolfa@gmail.com> Oskar Niburski <oskarniburski@gmail.com> Otto Kekäläinen <otto@seravo.fi> Ouyang Liduo <oyld0210@163.com> Ovidio Mallo <ovidio.mallo@gmail.com> Panagiotis Moustafellos <pmoust@elastic.co> Paolo G. 
Giarrusso <p.giarrusso@gmail.com> Pascal <pascalgn@users.noreply.github.com> Pascal Borreli <pascal@borreli.com> Pascal Hartig <phartig@rdrei.net> Patrick Böänziger <patrick.baenziger@bsi-software.com> Patrick Devine <patrick.devine@docker.com> Patrick Hemmer <patrick.hemmer@gmail.com> Patrick Stapleton <github@gdi2290.com> Patrik Cyvoct <patrik@ptrk.io> pattichen <craftsbear@gmail.com> Paul <paul9869@gmail.com> paul <paul@inkling.com> Paul Annesley <paul@annesley.cc> Paul Bellamy <paul.a.bellamy@gmail.com> Paul Bowsher <pbowsher@globalpersonals.co.uk> Paul Furtado <pfurtado@hubspot.com> Paul Hammond <paul@paulhammond.org> Paul Jimenez <pj@place.org> Paul Kehrer <paul.l.kehrer@gmail.com> Paul Lietar <paul@lietar.net> Paul Liljenberg <liljenberg.paul@gmail.com> Paul Morie <pmorie@gmail.com> Paul Nasrat <pnasrat@gmail.com> Paul Weaver <pauweave@cisco.com> Paulo Ribeiro <paigr.io@gmail.com> Pavel Lobashov <ShockwaveNN@gmail.com> Pavel Pletenev <cpp.create@gmail.com> Pavel Pospisil <pospispa@gmail.com> Pavel Sutyrin <pavel.sutyrin@gmail.com> Pavel Tikhomirov <ptikhomirov@virtuozzo.com> Pavlos Ratis <dastergon@gentoo.org> Pavol Vargovcik <pallly.vargovcik@gmail.com> Pawel Konczalski <mail@konczalski.de> Peeyush Gupta <gpeeyush@linux.vnet.ibm.com> Peggy Li <peggyli.224@gmail.com> Pei Su <sillyousu@gmail.com> Peng Tao <bergwolf@gmail.com> Penghan Wang <ph.wang@daocloud.io> Per Weijnitz <per.weijnitz@gmail.com> perhapszzy@sina.com <perhapszzy@sina.com> Peter Bourgon <peter@bourgon.org> Peter Braden <peterbraden@peterbraden.co.uk> Peter Bücker <peter.buecker@pressrelations.de> Peter Choi <phkchoi89@gmail.com> Peter Dave Hello <hsu@peterdavehello.org> Peter Edge <peter.edge@gmail.com> Peter Ericson <pdericson@gmail.com> Peter Esbensen <pkesbensen@gmail.com> Peter Jaffe <pjaffe@nevo.com> Peter Kang <peter@spell.run> Peter Malmgren <ptmalmgren@gmail.com> Peter Salvatore <peter@psftw.com> Peter Volpe <petervo@redhat.com> Peter Waller <p@pwaller.net> Petr Švihlík <svihlik.petr@gmail.com> Phil <underscorephil@gmail.com> Phil Estes <estesp@linux.vnet.ibm.com> Phil Spitler <pspitler@gmail.com> Philip Alexander Etling <paetling@gmail.com> Philip Monroe <phil@philmonroe.com> Philipp Gillé <philipp.gille@gmail.com> Philipp Wahala <philipp.wahala@gmail.com> Philipp Weissensteiner <mail@philippweissensteiner.com> Phillip Alexander <git@phillipalexander.io> phineas <phin@phineas.io> pidster <pid@pidster.com> Piergiuliano Bossi <pgbossi@gmail.com> Pierre <py@poujade.org> Pierre Carrier <pierre@meteor.com> Pierre Dal-Pra <dalpra.pierre@gmail.com> Pierre Wacrenier <pierre.wacrenier@gmail.com> Pierre-Alain RIVIERE <pariviere@ippon.fr> Piotr Bogdan <ppbogdan@gmail.com> pixelistik <pixelistik@users.noreply.github.com> Porjo <porjo38@yahoo.com.au> Poul Kjeldager Sørensen <pks@s-innovations.net> Pradeep Chhetri <pradeep@indix.com> Pradip Dhara <pradipd@microsoft.com> Prasanna Gautam <prasannagautam@gmail.com> Pratik Karki <prertik@outlook.com> Prayag Verma <prayag.verma@gmail.com> Priya Wadhwa <priyawadhwa@google.com> Projjol Banerji <probaner23@gmail.com> Przemek Hejman <przemyslaw.hejman@gmail.com> Pure White <daniel48@126.com> pysqz <randomq@126.com> Qiang Huang <h.huangqiang@huawei.com> Qinglan Peng <qinglanpeng@zju.edu.cn> qudongfang <qudongfang@gmail.com> Quentin Brossard <qbrossard@gmail.com> Quentin Perez <qperez@ocs.online.net> Quentin Tayssier <qtayssier@gmail.com> r0n22 <cameron.regan@gmail.com> Radostin Stoyanov <rstoyanov1@gmail.com> Rafal Jeczalik <rjeczalik@gmail.com> Rafe Colton <rafael.colton@gmail.com> 
Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com> Raghuram Devarakonda <draghuram@gmail.com> Raja Sami <raja.sami@tenpearls.com> Rajat Pandit <rp@rajatpandit.com> Rajdeep Dua <dua_rajdeep@yahoo.com> Ralf Sippl <ralf.sippl@gmail.com> Ralle <spam@rasmusa.net> Ralph Bean <rbean@redhat.com> Ramkumar Ramachandra <artagnon@gmail.com> Ramon Brooker <rbrooker@aetherealmind.com> Ramon van Alteren <ramon@vanalteren.nl> RaviTeja Pothana <ravi-teja@live.com> Ray Tsang <rayt@google.com> ReadmeCritic <frankensteinbot@gmail.com> Recursive Madman <recursive.madman@gmx.de> Reficul <xuzhenglun@gmail.com> Regan McCooey <rmccooey27@aol.com> Remi Rampin <remirampin@gmail.com> Remy Suen <remy.suen@gmail.com> Renato Riccieri Santos Zannon <renato.riccieri@gmail.com> Renaud Gaubert <rgaubert@nvidia.com> Rhys Hiltner <rhys@twitch.tv> Ri Xu <xuri.me@gmail.com> Ricardo N Feliciano <FelicianoTech@gmail.com> Rich Moyse <rich@moyse.us> Rich Seymour <rseymour@gmail.com> Richard <richard.scothern@gmail.com> Richard Burnison <rburnison@ebay.com> Richard Harvey <richard@squarecows.com> Richard Mathie <richard.mathie@amey.co.uk> Richard Metzler <richard@paadee.com> Richard Scothern <richard.scothern@gmail.com> Richo Healey <richo@psych0tik.net> Rick Bradley <rick@users.noreply.github.com> Rick van de Loo <rickvandeloo@gmail.com> Rick Wieman <git@rickw.nl> Rik Nijessen <rik@keefo.nl> Riku Voipio <riku.voipio@linaro.org> Riley Guerin <rileytg.dev@gmail.com> Ritesh H Shukla <sritesh@vmware.com> Riyaz Faizullabhoy <riyaz.faizullabhoy@docker.com> Rob Vesse <rvesse@dotnetrdf.org> Robert Bachmann <rb@robertbachmann.at> Robert Bittle <guywithnose@gmail.com> Robert Obryk <robryk@gmail.com> Robert Schneider <mail@shakeme.info> Robert Stern <lexandro2000@gmail.com> Robert Terhaar <rterhaar@atlanticdynamic.com> Robert Wallis <smilingrob@gmail.com> Roberto G. 
Hashioka <roberto.hashioka@docker.com> Roberto Muñoz Fernández <robertomf@gmail.com> Robin Naundorf <r.naundorf@fh-muenster.de> Robin Schneider <ypid@riseup.net> Robin Speekenbrink <robin@kingsquare.nl> robpc <rpcann@gmail.com> Rodolfo Carvalho <rhcarvalho@gmail.com> Rodrigo Vaz <rodrigo.vaz@gmail.com> Roel Van Nyen <roel.vannyen@gmail.com> Roger Peppe <rogpeppe@gmail.com> Rohit Jnagal <jnagal@google.com> Rohit Kadam <rohit.d.kadam@gmail.com> Rohit Kapur <rkapur@flatiron.com> Rojin George <rojingeorge@huawei.com> Roland Huß <roland@jolokia.org> Roland Kammerer <roland.kammerer@linbit.com> Roland Moriz <rmoriz@users.noreply.github.com> Roma Sokolov <sokolov.r.v@gmail.com> Roman Dudin <katrmr@gmail.com> Roman Strashkin <roman.strashkin@gmail.com> Ron Smits <ron.smits@gmail.com> Ron Williams <ron.a.williams@gmail.com> Rong Gao <gaoronggood@163.com> Rong Zhang <rongzhang@alauda.io> Rongxiang Song <tinysong1226@gmail.com> root <docker-dummy@example.com> root <root@lxdebmas.marist.edu> root <root@ubuntu-14.04-amd64-vbox> root <root@webm215.cluster016.ha.ovh.net> Rory Hunter <roryhunter2@gmail.com> Rory McCune <raesene@gmail.com> Ross Boucher <rboucher@gmail.com> Rovanion Luckey <rovanion.luckey@gmail.com> Royce Remer <royceremer@gmail.com> Rozhnov Alexandr <nox73@ya.ru> Rudolph Gottesheim <r.gottesheim@loot.at> Rui Cao <ruicao@alauda.io> Rui Lopes <rgl@ruilopes.com> Runshen Zhu <runshen.zhu@gmail.com> Russ Magee <rmagee@gmail.com> Ryan Abrams <rdabrams@gmail.com> Ryan Anderson <anderson.ryanc@gmail.com> Ryan Aslett <github@mixologic.com> Ryan Belgrave <rmb1993@gmail.com> Ryan Detzel <ryan.detzel@gmail.com> Ryan Fowler <rwfowler@gmail.com> Ryan Liu <ryanlyy@me.com> Ryan McLaughlin <rmclaughlin@insidesales.com> Ryan O'Donnell <odonnellryanc@gmail.com> Ryan Seto <ryanseto@yak.net> Ryan Simmen <ryan.simmen@gmail.com> Ryan Stelly <ryan.stelly@live.com> Ryan Thomas <rthomas@atlassian.com> Ryan Trauntvein <rtrauntvein@novacoast.com> Ryan Wallner <ryan.wallner@clusterhq.com> Ryan Zhang <ryan.zhang@docker.com> ryancooper7 <ryan.cooper7@gmail.com> RyanDeng <sheldon.d1018@gmail.com> Ryo Nakao <nakabonne@gmail.com> Rémy Greinhofer <remy.greinhofer@livelovely.com> s. 
rannou <mxs@sbrk.org> s00318865 <sunyuan3@huawei.com> Sabin Basyal <sabin.basyal@gmail.com> Sachin Joshi <sachin_jayant_joshi@hotmail.com> Sagar Hani <sagarhani33@gmail.com> Sainath Grandhi <sainath.grandhi@intel.com> Sakeven Jiang <jc5930@sina.cn> Salahuddin Khan <salah@docker.com> Sally O'Malley <somalley@redhat.com> Sam Abed <sam.abed@gmail.com> Sam Alba <sam.alba@gmail.com> Sam Bailey <cyprix@cyprix.com.au> Sam J Sharpe <sam.sharpe@digital.cabinet-office.gov.uk> Sam Neirinck <sam@samneirinck.com> Sam Reis <sreis@atlassian.com> Sam Rijs <srijs@airpost.net> Sambuddha Basu <sambuddhabasu1@gmail.com> Sami Wagiaalla <swagiaal@redhat.com> Samuel Andaya <samuel@andaya.net> Samuel Dion-Girardeau <samuel.diongirardeau@gmail.com> Samuel Karp <skarp@amazon.com> Samuel PHAN <samuel-phan@users.noreply.github.com> Sandeep Bansal <sabansal@microsoft.com> Sankar சங்கர் <sankar.curiosity@gmail.com> Sanket Saurav <sanketsaurav@gmail.com> Santhosh Manohar <santhosh@docker.com> sapphiredev <se.imas.kr@gmail.com> Sargun Dhillon <sargun@netflix.com> Sascha Andres <sascha.andres@outlook.com> Sascha Grunert <sgrunert@suse.com> Satnam Singh <satnam@raintown.org> Satoshi Amemiya <satoshi_amemiya@voyagegroup.com> Satoshi Tagomori <tagomoris@gmail.com> Scott Bessler <scottbessler@gmail.com> Scott Collier <emailscottcollier@gmail.com> Scott Johnston <scott@docker.com> Scott Stamp <scottstamp851@gmail.com> Scott Walls <sawalls@umich.edu> sdreyesg <sdreyesg@gmail.com> Sean Christopherson <sean.j.christopherson@intel.com> Sean Cronin <seancron@gmail.com> Sean Lee <seanlee@tw.ibm.com> Sean McIntyre <s.mcintyre@xverba.ca> Sean OMeara <sean@chef.io> Sean P. Kane <skane@newrelic.com> Sean Rodman <srodman7689@gmail.com> Sebastiaan van Steenis <mail@superseb.nl> Sebastiaan van Stijn <github@gone.nl> Senthil Kumar Selvaraj <senthil.thecoder@gmail.com> Senthil Kumaran <senthil@uthcode.com> SeongJae Park <sj38.park@gmail.com> Seongyeol Lim <seongyeol37@gmail.com> Serge Hallyn <serge.hallyn@ubuntu.com> Sergey Alekseev <sergey.alekseev.minsk@gmail.com> Sergey Evstifeev <sergey.evstifeev@gmail.com> Sergii Kabashniuk <skabashnyuk@codenvy.com> Sergio Lopez <slp@redhat.com> Serhat Gülçiçek <serhat25@gmail.com> SeungUkLee <lsy931106@gmail.com> Sevki Hasirci <s@sevki.org> Shane Canon <scanon@lbl.gov> Shane da Silva <shane@dasilva.io> Shaun Kaasten <shaunk@gmail.com> shaunol <shaunol@gmail.com> Shawn Landden <shawn@churchofgit.com> Shawn Siefkas <shawn.siefkas@meredith.com> shawnhe <shawnhe@shawnhedeMacBook-Pro.local> Shayne Wang <shaynexwang@gmail.com> Shekhar Gulati <shekhargulati84@gmail.com> Sheng Yang <sheng@yasker.org> Shengbo Song <thomassong@tencent.com> Shev Yan <yandong_8212@163.com> Shih-Yuan Lee <fourdollars@gmail.com> Shijiang Wei <mountkin@gmail.com> Shijun Qin <qinshijun16@mails.ucas.ac.cn> Shishir Mahajan <shishir.mahajan@redhat.com> Shoubhik Bose <sbose78@gmail.com> Shourya Sarcar <shourya.sarcar@gmail.com> shuai-z <zs.broccoli@gmail.com> Shukui Yang <yangshukui@huawei.com> Shuwei Hao <haosw@cn.ibm.com> Sian Lerk Lau <kiawin@gmail.com> Sidhartha Mani <sidharthamn@gmail.com> sidharthamani <sid@rancher.com> Silas Sewell <silas@sewell.org> Silvan Jegen <s.jegen@gmail.com> Simão Reis <smnrsti@gmail.com> Simei He <hesimei@zju.edu.cn> Simon Eskildsen <sirup@sirupsen.com> Simon Ferquel <simon.ferquel@docker.com> Simon Leinen <simon.leinen@gmail.com> Simon Menke <simon.menke@gmail.com> Simon Taranto <simon.taranto@gmail.com> Simon Vikstrom <pullreq@devsn.se> Sindhu S <sindhus@live.in> Sjoerd Langkemper 
<sjoerd-github@linuxonly.nl> Solganik Alexander <solganik@gmail.com> Solomon Hykes <solomon@docker.com> Song Gao <song@gao.io> Soshi Katsuta <soshi.katsuta@gmail.com> Soulou <leo@unbekandt.eu> Spencer Brown <spencer@spencerbrown.org> Spencer Smith <robertspencersmith@gmail.com> Sridatta Thatipamala <sthatipamala@gmail.com> Sridhar Ratnakumar <sridharr@activestate.com> Srini Brahmaroutu <srbrahma@us.ibm.com> Srinivasan Srivatsan <srinivasan.srivatsan@hpe.com> Stanislav Bondarenko <stanislav.bondarenko@gmail.com> Steeve Morin <steeve.morin@gmail.com> Stefan Berger <stefanb@linux.vnet.ibm.com> Stefan J. Wernli <swernli@microsoft.com> Stefan Praszalowicz <stefan@greplin.com> Stefan S. <tronicum@user.github.com> Stefan Scherer <stefan.scherer@docker.com> Stefan Staudenmeyer <doerte@instana.com> Stefan Weil <sw@weilnetz.de> Stephan Spindler <shutefan@gmail.com> Stephen Crosby <stevecrozz@gmail.com> Stephen Day <stephen.day@docker.com> Stephen Drake <stephen@xenolith.net> Stephen Rust <srust@blockbridge.com> Steve Desmond <steve@vtsv.ca> Steve Dougherty <steve@asksteved.com> Steve Durrheimer <s.durrheimer@gmail.com> Steve Francia <steve.francia@gmail.com> Steve Koch <stevekochscience@gmail.com> Steven Burgess <steven.a.burgess@hotmail.com> Steven Erenst <stevenerenst@gmail.com> Steven Hartland <steven.hartland@multiplay.co.uk> Steven Iveson <sjiveson@outlook.com> Steven Merrill <steven.merrill@gmail.com> Steven Richards <steven@axiomzen.co> Steven Taylor <steven.taylor@me.com> Subhajit Ghosh <isubuz.g@gmail.com> Sujith Haridasan <sujith.h@gmail.com> Sun Gengze <690388648@qq.com> Sun Jianbo <wonderflow.sun@gmail.com> Sunny Gogoi <indiasuny000@gmail.com> Suryakumar Sudar <surya.trunks@gmail.com> Sven Dowideit <SvenDowideit@home.org.au> Swapnil Daingade <swapnil.daingade@gmail.com> Sylvain Baubeau <sbaubeau@redhat.com> Sylvain Bellemare <sylvain@ascribe.io> Sébastien <sebastien@yoozio.com> Sébastien HOUZÉ <cto@verylastroom.com> Sébastien Luttringer <seblu@seblu.net> Sébastien Stormacq <sebsto@users.noreply.github.com> Tabakhase <mail@tabakhase.com> Tadej Janež <tadej.j@nez.si> TAGOMORI Satoshi <tagomoris@gmail.com> tang0th <tang0th@gmx.com> Tangi Colin <tangicolin@gmail.com> Tatsuki Sugiura <sugi@nemui.org> Tatsushi Inagaki <e29253@jp.ibm.com> Taylan Isikdemir <taylani@google.com> Taylor Jones <monitorjbl@gmail.com> Ted M. 
Young <tedyoung@gmail.com> Tehmasp Chaudhri <tehmasp@gmail.com> Tejaswini Duggaraju <naduggar@microsoft.com> Tejesh Mehta <tejesh.mehta@gmail.com> terryding77 <550147740@qq.com> tgic <farmer1992@gmail.com> Thatcher Peskens <thatcher@docker.com> theadactyl <thea.lamkin@gmail.com> Thell 'Bo' Fowler <thell@tbfowler.name> Thermionix <bond711@gmail.com> Thijs Terlouw <thijsterlouw@gmail.com> Thomas Bikeev <thomas.bikeev@mac.com> Thomas Frössman <thomasf@jossystem.se> Thomas Gazagnaire <thomas@gazagnaire.org> Thomas Grainger <tagrain@gmail.com> Thomas Hansen <thomas.hansen@gmail.com> Thomas Leonard <thomas.leonard@docker.com> Thomas Léveil <thomasleveil@gmail.com> Thomas Orozco <thomas@orozco.fr> Thomas Riccardi <riccardi@systran.fr> Thomas Schroeter <thomas@cliqz.com> Thomas Sjögren <konstruktoid@users.noreply.github.com> Thomas Swift <tgs242@gmail.com> Thomas Tanaka <thomas.tanaka@oracle.com> Thomas Texier <sharkone@en-mousse.org> Ti Zhou <tizhou1986@gmail.com> Tianon Gravi <admwiggin@gmail.com> Tianyi Wang <capkurmagati@gmail.com> Tibor Vass <teabee89@gmail.com> Tiffany Jernigan <tiffany.f.j@gmail.com> Tiffany Low <tiffany@box.com> Tim Bart <tim@fewagainstmany.com> Tim Bosse <taim@bosboot.org> Tim Dettrick <t.dettrick@uq.edu.au> Tim Düsterhus <tim@bastelstu.be> Tim Hockin <thockin@google.com> Tim Potter <tpot@hpe.com> Tim Ruffles <oi@truffles.me.uk> Tim Smith <timbot@google.com> Tim Terhorst <mynamewastaken+git@gmail.com> Tim Wang <timwangdev@gmail.com> Tim Waugh <twaugh@redhat.com> Tim Wraight <tim.wraight@tangentlabs.co.uk> Tim Zju <21651152@zju.edu.cn> timfeirg <kkcocogogo@gmail.com> Timothy Hobbs <timothyhobbs@seznam.cz> tjwebb123 <tjwebb123@users.noreply.github.com> tobe <tobegit3hub@gmail.com> Tobias Bieniek <Tobias.Bieniek@gmx.de> Tobias Bradtke <webwurst@gmail.com> Tobias Gesellchen <tobias@gesellix.de> Tobias Klauser <tklauser@distanz.ch> Tobias Munk <schmunk@usrbin.de> Tobias Schmidt <ts@soundcloud.com> Tobias Schwab <tobias.schwab@dynport.de> Todd Crane <todd@toddcrane.com> Todd Lunter <tlunter@gmail.com> Todd Whiteman <todd.whiteman@joyent.com> Toli Kuznets <toli@docker.com> Tom Barlow <tomwbarlow@gmail.com> Tom Booth <tombooth@gmail.com> Tom Denham <tom@tomdee.co.uk> Tom Fotherby <tom+github@peopleperhour.com> Tom Howe <tom.howe@enstratius.com> Tom Hulihan <hulihan.tom159@gmail.com> Tom Maaswinkel <tom.maaswinkel@12wiki.eu> Tom Sweeney <tsweeney@redhat.com> Tom Wilkie <tom.wilkie@gmail.com> Tom X. 
Tobin <tomxtobin@tomxtobin.com> Tomas Tomecek <ttomecek@redhat.com> Tomasz Kopczynski <tomek@kopczynski.net.pl> Tomasz Lipinski <tlipinski@users.noreply.github.com> Tomasz Nurkiewicz <nurkiewicz@gmail.com> Tommaso Visconti <tommaso.visconti@gmail.com> Tomáš Hrčka <thrcka@redhat.com> Tonny Xu <tonny.xu@gmail.com> Tony Abboud <tdabboud@hotmail.com> Tony Daws <tony@daws.ca> Tony Miller <mcfiredrill@gmail.com> toogley <toogley@mailbox.org> Torstein Husebø <torstein@huseboe.net> Tõnis Tiigi <tonistiigi@gmail.com> tpng <benny.tpng@gmail.com> tracylihui <793912329@qq.com> Trapier Marshall <trapier.marshall@docker.com> Travis Cline <travis.cline@gmail.com> Travis Thieman <travis.thieman@gmail.com> Trent Ogren <tedwardo2@gmail.com> Trevor <trevinwoodstock@gmail.com> Trevor Pounds <trevor.pounds@gmail.com> Trevor Sullivan <pcgeek86@gmail.com> Trishna Guha <trishnaguha17@gmail.com> Tristan Carel <tristan@cogniteev.com> Troy Denton <trdenton@gmail.com> Tycho Andersen <tycho@docker.com> Tyler Brock <tyler.brock@gmail.com> Tyler Brown <tylers.pile@gmail.com> Tzu-Jung Lee <roylee17@gmail.com> uhayate <uhayate.gong@daocloud.io> Ulysse Carion <ulyssecarion@gmail.com> Umesh Yadav <umesh4257@gmail.com> Utz Bacher <utz.bacher@de.ibm.com> vagrant <vagrant@ubuntu-14.04-amd64-vbox> Vaidas Jablonskis <jablonskis@gmail.com> vanderliang <lansheng@meili-inc.com> Veres Lajos <vlajos@gmail.com> Victor Algaze <valgaze@gmail.com> Victor Coisne <victor.coisne@dotcloud.com> Victor Costan <costan@gmail.com> Victor I. Wood <viw@t2am.com> Victor Lyuboslavsky <victor@victoreda.com> Victor Marmol <vmarmol@google.com> Victor Palma <palma.victor@gmail.com> Victor Vieux <victor.vieux@docker.com> Victoria Bialas <victoria.bialas@docker.com> Vijaya Kumar K <vijayak@caviumnetworks.com> Viktor Stanchev <me@viktorstanchev.com> Viktor Vojnovski <viktor.vojnovski@amadeus.com> VinayRaghavanKS <raghavan.vinay@gmail.com> Vincent Batts <vbatts@redhat.com> Vincent Bernat <Vincent.Bernat@exoscale.ch> Vincent Demeester <vincent.demeester@docker.com> Vincent Giersch <vincent.giersch@ovh.net> Vincent Mayers <vincent.mayers@inbloom.org> Vincent Woo <me@vincentwoo.com> Vinod Kulkarni <vinod.kulkarni@gmail.com> Vishal Doshi <vishal.doshi@gmail.com> Vishnu Kannan <vishnuk@google.com> Vitaly Ostrosablin <vostrosablin@virtuozzo.com> Vitor Monteiro <vmrmonteiro@gmail.com> Vivek Agarwal <me@vivek.im> Vivek Dasgupta <vdasgupt@redhat.com> Vivek Goyal <vgoyal@redhat.com> Vladimir Bulyga <xx@ccxx.cc> Vladimir Kirillov <proger@wilab.org.ua> Vladimir Pouzanov <farcaller@google.com> Vladimir Rutsky <altsysrq@gmail.com> Vladimir Varankin <nek.narqo+git@gmail.com> VladimirAus <v_roudakov@yahoo.com> Vlastimil Zeman <vlastimil.zeman@diffblue.com> Vojtech Vitek (V-Teq) <vvitek@redhat.com> waitingkuo <waitingkuo0527@gmail.com> Walter Leibbrandt <github@wrl.co.za> Walter Stanish <walter@pratyeka.org> Wang Chao <chao.wang@ucloud.cn> Wang Guoliang <liangcszzu@163.com> Wang Jie <wangjie5@chinaskycloud.com> Wang Long <long.wanglong@huawei.com> Wang Ping <present.wp@icloud.com> Wang Xing <hzwangxing@corp.netease.com> Wang Yuexiao <wang.yuexiao@zte.com.cn> Ward Vandewege <ward@jhvc.com> WarheadsSE <max@warheads.net> Wassim Dhif <wassimdhif@gmail.com> Wayne Chang <wayne@neverfear.org> Wayne Song <wsong@docker.com> Weerasak Chongnguluam <singpor@gmail.com> Wei Fu <fuweid89@gmail.com> Wei Wu <wuwei4455@gmail.com> Wei-Ting Kuo <waitingkuo0527@gmail.com> weipeng <weipeng@tuscloud.io> weiyan <weiyan3@huawei.com> Weiyang Zhu <cnresonant@gmail.com> Wen Cheng Ma 
<wenchma@cn.ibm.com> Wendel Fleming <wfleming@usc.edu> Wenjun Tang <tangwj2@lenovo.com> Wenkai Yin <yinw@vmware.com> Wentao Zhang <zhangwentao234@huawei.com> Wenxuan Zhao <viz@linux.com> Wenyu You <21551128@zju.edu.cn> Wenzhi Liang <wenzhi.liang@gmail.com> Wes Morgan <cap10morgan@gmail.com> Wewang Xiaorenfine <wang.xiaoren@zte.com.cn> Will Dietz <w@wdtz.org> Will Rouesnel <w.rouesnel@gmail.com> Will Weaver <monkey@buildingbananas.com> willhf <willhf@gmail.com> William Delanoue <william.delanoue@gmail.com> William Henry <whenry@redhat.com> William Hubbs <w.d.hubbs@gmail.com> William Martin <wmartin@pivotal.io> William Riancho <wr.wllm@gmail.com> William Thurston <thurstw@amazon.com> WiseTrem <shepelyov.g@gmail.com> Wolfgang Powisch <powo@powo.priv.at> Wonjun Kim <wonjun.kim@navercorp.com> xamyzhao <x.amy.zhao@gmail.com> Xian Chaobo <xianchaobo@huawei.com> Xianglin Gao <xlgao@zju.edu.cn> Xianlu Bird <xianlubird@gmail.com> Xiao YongBiao <xyb4638@gmail.com> XiaoBing Jiang <s7v7nislands@gmail.com> Xiaodong Zhang <a4012017@sina.com> Xiaoxi He <xxhe@alauda.io> Xiaoxu Chen <chenxiaoxu14@otcaix.iscas.ac.cn> Xiaoyu Zhang <zhang.xiaoyu33@zte.com.cn> xichengliudui <1693291525@qq.com> xiekeyang <xiekeyang@huawei.com> Ximo Guanter Gonzálbez <joaquin.guantergonzalbez@telefonica.com> Xinbo Weng <xihuanbo_0521@zju.edu.cn> Xinzi Zhou <imdreamrunner@gmail.com> Xiuming Chen <cc@cxm.cc> Xuecong Liao <satorulogic@gmail.com> xuzhaokui <cynicholas@gmail.com> Yadnyawalkya Tale <ytale@redhat.com> Yahya <ya7yaz@gmail.com> YAMADA Tsuyoshi <tyamada@minimum2scp.org> Yamasaki Masahide <masahide.y@gmail.com> Yan Feng <yanfeng2@huawei.com> Yang Bai <hamo.by@gmail.com> Yang Pengfei <yangpengfei4@huawei.com> yangchenliang <yangchenliang@huawei.com> Yanqiang Miao <miao.yanqiang@zte.com.cn> Yao Zaiyong <yaozaiyong@hotmail.com> Yassine Tijani <yasstij11@gmail.com> Yasunori Mahata <nori@mahata.net> Yazhong Liu <yorkiefixer@gmail.com> Yestin Sun <sunyi0804@gmail.com> Yi EungJun <eungjun.yi@navercorp.com> Yibai Zhang <xm1994@gmail.com> Yihang Ho <hoyihang5@gmail.com> Ying Li <ying.li@docker.com> Yohei Ueda <yohei@jp.ibm.com> Yong Tang <yong.tang.github@outlook.com> Yongxin Li <yxli@alauda.io> Yongzhi Pan <panyongzhi@gmail.com> Yosef Fertel <yfertel@gmail.com> You-Sheng Yang (楊有勝) <vicamo@gmail.com> Youcef YEKHLEF <yyekhlef@gmail.com> Yu Changchun <yuchangchun1@huawei.com> Yu Chengxia <yuchengxia@huawei.com> Yu Peng <yu.peng36@zte.com.cn> Yu-Ju Hong <yjhong@google.com> Yuan Sun <sunyuan3@huawei.com> Yuanhong Peng <pengyuanhong@huawei.com> Yue Zhang <zy675793960@yeah.net> Yuhao Fang <fangyuhao@gmail.com> Yuichiro Kaneko <spiketeika@gmail.com> Yunxiang Huang <hyxqshk@vip.qq.com> Yurii Rashkovskii <yrashk@gmail.com> Yusuf Tarık Günaydın <yusuf_tarik@hotmail.com> Yves Junqueira <yves.junqueira@gmail.com> Zac Dover <zdover@redhat.com> Zach Borboa <zachborboa@gmail.com> Zachary Jaffee <zjaffee@us.ibm.com> Zain Memon <zain@inzain.net> Zaiste! 
<oh@zaiste.net> Zane DeGraffenried <zane.deg@gmail.com> Zefan Li <lizefan@huawei.com> Zen Lin(Zhinan Lin) <linzhinan@huawei.com> Zhang Kun <zkazure@gmail.com> Zhang Wei <zhangwei555@huawei.com> Zhang Wentao <zhangwentao234@huawei.com> ZhangHang <stevezhang2014@gmail.com> zhangxianwei <xianwei.zw@alibaba-inc.com> Zhenan Ye <21551168@zju.edu.cn> zhenghenghuo <zhenghenghuo@zju.edu.cn> Zhenhai Gao <gaozh1988@live.com> Zhenkun Bi <bi.zhenkun@zte.com.cn> Zhou Hao <zhouhao@cn.fujitsu.com> Zhoulin Xie <zhoulin.xie@daocloud.io> Zhu Guihua <zhugh.fnst@cn.fujitsu.com> Zhu Kunjia <zhu.kunjia@zte.com.cn> Zhuoyun Wei <wzyboy@wzyboy.org> Zilin Du <zilin.du@gmail.com> zimbatm <zimbatm@zimbatm.com> Ziming Dong <bnudzm@foxmail.com> ZJUshuaizhou <21551191@zju.edu.cn> zmarouf <zeid.marouf@gmail.com> Zoltan Tombol <zoltan.tombol@gmail.com> Zou Yu <zouyu7@huawei.com> zqh <zqhxuyuan@gmail.com> Zuhayr Elahi <elahi.zuhayr@gmail.com> Zunayed Ali <zunayed@gmail.com> Álex González <agonzalezro@gmail.com> Álvaro Lázaro <alvaro.lazaro.g@gmail.com> Átila Camurça Alves <camurca.home@gmail.com> 尹吉峰 <jifeng.yin@gmail.com> 徐俊杰 <paco.xu@daocloud.io> 慕陶 <jihui.xjh@alibaba-inc.com> 搏通 <yufeng.pyf@alibaba-inc.com> 黄艳红00139573 <huang.yanhong@zte.com.cn>
9,379
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/LICENSE
Apache License Version 2.0, January 2004 https://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS Copyright 2013-2018 Docker, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
9,380
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/stringid/README.md
This package provides helper functions for dealing with string identifiers.
9,381
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/stringid/stringid.go
// Package stringid provides helper functions for dealing with string identifiers
package stringid // import "github.com/docker/docker/pkg/stringid"

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"regexp"
	"strconv"
	"strings"
)

const shortLen = 12

var (
	validShortID = regexp.MustCompile("^[a-f0-9]{12}$")
	validHex     = regexp.MustCompile(`^[a-f0-9]{64}$`)
)

// IsShortID determines if an arbitrary string *looks like* a short ID.
func IsShortID(id string) bool {
	return validShortID.MatchString(id)
}

// TruncateID returns a shorthand version of a string identifier for convenience.
// A collision with other shorthands is very unlikely, but possible.
// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
// will need to use a longer prefix, or the full-length Id.
func TruncateID(id string) string {
	if i := strings.IndexRune(id, ':'); i >= 0 {
		id = id[i+1:]
	}
	if len(id) > shortLen {
		id = id[:shortLen]
	}
	return id
}

// GenerateRandomID returns a unique id.
func GenerateRandomID() string {
	b := make([]byte, 32)
	for {
		if _, err := rand.Read(b); err != nil {
			panic(err) // This shouldn't happen
		}
		id := hex.EncodeToString(b)
		// if we try to parse the truncated form as an int and we don't have
		// an error then the value is all numeric and causes issues when
		// used as a hostname. ref #3869
		if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil {
			continue
		}
		return id
	}
}

// ValidateID checks whether an ID string is a valid image ID.
func ValidateID(id string) error {
	if ok := validHex.MatchString(id); !ok {
		return fmt.Errorf("image ID %q is invalid", id)
	}
	return nil
}
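For orientation, here is a minimal usage sketch of the stringid helpers above. It is illustrative only and not part of the vendored package; the main wrapper and printed values are assumptions.

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/stringid"
)

func main() {
	// GenerateRandomID returns a 64-char hex ID whose truncated form is
	// never purely numeric (see the retry loop above).
	id := stringid.GenerateRandomID()

	// TruncateID shortens it to the conventional 12-char short form.
	short := stringid.TruncateID(id)

	fmt.Println(stringid.IsShortID(short))      // true
	fmt.Println(stringid.ValidateID(id) == nil) // true for a valid 64-char hex ID
}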
9,382
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/term/termios_bsd.go
// +build darwin freebsd openbsd netbsd

package term // import "github.com/docker/docker/pkg/term"

import (
	"unsafe"

	"golang.org/x/sys/unix"
)

const (
	getTermios = unix.TIOCGETA
	setTermios = unix.TIOCSETA
)

// Termios is the Unix API for terminal I/O.
type Termios unix.Termios

// MakeRaw puts the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd uintptr) (*State, error) {
	var oldState State
	if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
		return nil, err
	}

	newState := oldState.termios
	newState.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON)
	newState.Oflag &^= unix.OPOST
	newState.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
	newState.Cflag &^= (unix.CSIZE | unix.PARENB)
	newState.Cflag |= unix.CS8
	newState.Cc[unix.VMIN] = 1
	newState.Cc[unix.VTIME] = 0

	if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 {
		return nil, err
	}

	return &oldState, nil
}
9,383
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/term/winsize.go
// +build !windows

package term // import "github.com/docker/docker/pkg/term"

import (
	"golang.org/x/sys/unix"
)

// GetWinsize returns the window size based on the specified file descriptor.
func GetWinsize(fd uintptr) (*Winsize, error) {
	uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ)
	ws := &Winsize{Height: uws.Row, Width: uws.Col, x: uws.Xpixel, y: uws.Ypixel}
	return ws, err
}

// SetWinsize tries to set the specified window size for the specified file descriptor.
func SetWinsize(fd uintptr, ws *Winsize) error {
	uws := &unix.Winsize{Row: ws.Height, Col: ws.Width, Xpixel: ws.x, Ypixel: ws.y}
	return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, uws)
}
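A short, hypothetical caller for the two helpers above (the main wrapper is an assumption, not part of the package):

package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/term"
)

func main() {
	// Query the current window size from stdout's file descriptor.
	ws, err := term.GetWinsize(os.Stdout.Fd())
	if err != nil {
		fmt.Fprintln(os.Stderr, "not a terminal:", err)
		return
	}
	fmt.Printf("%d columns x %d rows\n", ws.Width, ws.Height)
}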
9,384
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/term/term_windows.go
package term // import "github.com/docker/docker/pkg/term"

import (
	"io"
	"os"
	"os/signal"
	"syscall" // used for STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and STD_ERROR_HANDLE

	"github.com/Azure/go-ansiterm/winterm"
	"github.com/docker/docker/pkg/term/windows"
)

// State holds the console mode for the terminal.
type State struct {
	mode uint32
}

// Winsize is used for window size.
type Winsize struct {
	Height uint16
	Width  uint16
}

// vtInputSupported is true if winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported by the console
var vtInputSupported bool

// StdStreams returns the standard streams (stdin, stdout, stderr).
func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
	// Turn on VT handling on all std handles, if possible. This might
	// fail, in which case we will fall back to terminal emulation.
	var emulateStdin, emulateStdout, emulateStderr bool
	fd := os.Stdin.Fd()
	if mode, err := winterm.GetConsoleMode(fd); err == nil {
		// Validate that winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it.
		if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_INPUT); err != nil {
			emulateStdin = true
		} else {
			vtInputSupported = true
		}
		// Unconditionally set the console mode back even on failure because SetConsoleMode
		// remembers invalid bits on input handles.
		winterm.SetConsoleMode(fd, mode)
	}

	fd = os.Stdout.Fd()
	if mode, err := winterm.GetConsoleMode(fd); err == nil {
		// Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it.
		if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil {
			emulateStdout = true
		} else {
			winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING)
		}
	}

	fd = os.Stderr.Fd()
	if mode, err := winterm.GetConsoleMode(fd); err == nil {
		// Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it.
		if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil {
			emulateStderr = true
		} else {
			winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING)
		}
	}

	// Temporarily use STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and
	// STD_ERROR_HANDLE from syscall rather than x/sys/windows as long as
	// go-ansiterm hasn't switched to x/sys/windows.
	// TODO: switch back to x/sys/windows once go-ansiterm has switched
	if emulateStdin {
		stdIn = windowsconsole.NewAnsiReader(syscall.STD_INPUT_HANDLE)
	} else {
		stdIn = os.Stdin
	}

	if emulateStdout {
		stdOut = windowsconsole.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE)
	} else {
		stdOut = os.Stdout
	}

	if emulateStderr {
		stdErr = windowsconsole.NewAnsiWriter(syscall.STD_ERROR_HANDLE)
	} else {
		stdErr = os.Stderr
	}

	return
}

// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal.
func GetFdInfo(in interface{}) (uintptr, bool) {
	return windowsconsole.GetHandleInfo(in)
}

// GetWinsize returns the window size based on the specified file descriptor.
func GetWinsize(fd uintptr) (*Winsize, error) {
	info, err := winterm.GetConsoleScreenBufferInfo(fd)
	if err != nil {
		return nil, err
	}

	winsize := &Winsize{
		Width:  uint16(info.Window.Right - info.Window.Left + 1),
		Height: uint16(info.Window.Bottom - info.Window.Top + 1),
	}

	return winsize, nil
}

// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal(fd uintptr) bool {
	return windowsconsole.IsConsole(fd)
}

// RestoreTerminal restores the terminal connected to the given file descriptor
// to a previous state.
func RestoreTerminal(fd uintptr, state *State) error {
	return winterm.SetConsoleMode(fd, state.mode)
}

// SaveState saves the state of the terminal connected to the given file descriptor.
func SaveState(fd uintptr) (*State, error) {
	mode, e := winterm.GetConsoleMode(fd)
	if e != nil {
		return nil, e
	}

	return &State{mode: mode}, nil
}

// DisableEcho disables echo for the terminal connected to the given file descriptor.
// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
func DisableEcho(fd uintptr, state *State) error {
	mode := state.mode
	mode &^= winterm.ENABLE_ECHO_INPUT
	mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT

	err := winterm.SetConsoleMode(fd, mode)
	if err != nil {
		return err
	}

	// Register an interrupt handler to catch and restore prior state
	restoreAtInterrupt(fd, state)
	return nil
}

// SetRawTerminal puts the terminal connected to the given file descriptor into
// raw mode and returns the previous state. On UNIX, this puts both the input
// and output into raw mode. On Windows, it only puts the input into raw mode.
func SetRawTerminal(fd uintptr) (*State, error) {
	state, err := MakeRaw(fd)
	if err != nil {
		return nil, err
	}

	// Register an interrupt handler to catch and restore prior state
	restoreAtInterrupt(fd, state)
	return state, err
}

// SetRawTerminalOutput puts the output of terminal connected to the given file
// descriptor into raw mode. On UNIX, this does nothing and returns nil for the
// state. On Windows, it disables LF -> CRLF translation.
func SetRawTerminalOutput(fd uintptr) (*State, error) {
	state, err := SaveState(fd)
	if err != nil {
		return nil, err
	}

	// Ignore failures, since winterm.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this
	// version of Windows.
	winterm.SetConsoleMode(fd, state.mode|winterm.DISABLE_NEWLINE_AUTO_RETURN)
	return state, err
}

// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be restored.
func MakeRaw(fd uintptr) (*State, error) {
	state, err := SaveState(fd)
	if err != nil {
		return nil, err
	}

	mode := state.mode

	// See
	// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
	// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx

	// Disable these modes
	mode &^= winterm.ENABLE_ECHO_INPUT
	mode &^= winterm.ENABLE_LINE_INPUT
	mode &^= winterm.ENABLE_MOUSE_INPUT
	mode &^= winterm.ENABLE_WINDOW_INPUT
	mode &^= winterm.ENABLE_PROCESSED_INPUT

	// Enable these modes
	mode |= winterm.ENABLE_EXTENDED_FLAGS
	mode |= winterm.ENABLE_INSERT_MODE
	mode |= winterm.ENABLE_QUICK_EDIT_MODE
	if vtInputSupported {
		mode |= winterm.ENABLE_VIRTUAL_TERMINAL_INPUT
	}

	err = winterm.SetConsoleMode(fd, mode)
	if err != nil {
		return nil, err
	}
	return state, nil
}

func restoreAtInterrupt(fd uintptr, state *State) {
	sigchan := make(chan os.Signal, 1)
	signal.Notify(sigchan, os.Interrupt)

	go func() {
		_ = <-sigchan
		RestoreTerminal(fd, state)
		os.Exit(0)
	}()
}
9,385
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/term/proxy.go
package term // import "github.com/docker/docker/pkg/term"

import (
	"io"
)

// EscapeError is a special error returned by a TTY proxy reader's Read()
// method in case its detach escape sequence is read.
type EscapeError struct{}

func (EscapeError) Error() string {
	return "read escape sequence"
}

// escapeProxy is used only for attaches with a TTY. It is used to proxy
// stdin keypresses from the underlying reader and look for the passed in
// escape key sequence to signal a detach.
type escapeProxy struct {
	escapeKeys   []byte
	escapeKeyPos int
	r            io.Reader
}

// NewEscapeProxy returns a new TTY proxy reader which wraps the given reader
// and detects when the specified escape keys are read, in which case the Read
// method will return an error of type EscapeError.
func NewEscapeProxy(r io.Reader, escapeKeys []byte) io.Reader {
	return &escapeProxy{
		escapeKeys: escapeKeys,
		r:          r,
	}
}

func (r *escapeProxy) Read(buf []byte) (int, error) {
	nr, err := r.r.Read(buf)

	if len(r.escapeKeys) == 0 {
		return nr, err
	}

	preserve := func() {
		// this preserves the original key presses in the passed in buffer
		nr += r.escapeKeyPos
		preserve := make([]byte, 0, r.escapeKeyPos+len(buf))
		preserve = append(preserve, r.escapeKeys[:r.escapeKeyPos]...)
		preserve = append(preserve, buf...)
		r.escapeKeyPos = 0
		copy(buf[0:nr], preserve)
	}

	if nr != 1 || err != nil {
		if r.escapeKeyPos > 0 {
			preserve()
		}
		return nr, err
	}

	if buf[0] != r.escapeKeys[r.escapeKeyPos] {
		if r.escapeKeyPos > 0 {
			preserve()
		}
		return nr, nil
	}

	if r.escapeKeyPos == len(r.escapeKeys)-1 {
		return 0, EscapeError{}
	}

	// Looks like we've got an escape key, but we need to match again on the next
	// read.
	// Store the current escape key we found so we can look for the next one on
	// the next read.
	// Since this is an escape key, make sure we don't let the caller read it.
	// If later on we find that this is not the escape sequence, we'll add the
	// keys back.
	r.escapeKeyPos++
	return nr - r.escapeKeyPos, nil
}
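To show how the escape proxy is meant to be driven, here is a hedged sketch. The main wrapper and read loop are illustrative assumptions; NewEscapeProxy and EscapeError come from the file above, and ToBytes from ascii.go below.

package main

import (
	"os"

	"github.com/docker/docker/pkg/term"
)

func main() {
	// ctrl-p,ctrl-q is the sequence Docker conventionally uses for detach.
	keys, err := term.ToBytes("ctrl-p,ctrl-q")
	if err != nil {
		panic(err)
	}

	// Wrap stdin; Read returns EscapeError once the full sequence is seen.
	in := term.NewEscapeProxy(os.Stdin, keys)

	buf := make([]byte, 1024)
	for {
		if _, err := in.Read(buf); err != nil {
			if _, ok := err.(term.EscapeError); ok {
				return // detach sequence was read
			}
			return // EOF or a real read error
		}
	}
}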
9,386
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/term/tc.go
// +build !windows

package term // import "github.com/docker/docker/pkg/term"

import (
	"syscall"
	"unsafe"

	"golang.org/x/sys/unix"
)

func tcget(fd uintptr, p *Termios) syscall.Errno {
	_, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p)))
	return err
}

func tcset(fd uintptr, p *Termios) syscall.Errno {
	_, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p)))
	return err
}
9,387
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/term/ascii.go
package term // import "github.com/docker/docker/pkg/term"

import (
	"fmt"
	"strings"
)

// ASCII lists the possible supported ASCII key sequences
var ASCII = []string{
	"ctrl-@",
	"ctrl-a",
	"ctrl-b",
	"ctrl-c",
	"ctrl-d",
	"ctrl-e",
	"ctrl-f",
	"ctrl-g",
	"ctrl-h",
	"ctrl-i",
	"ctrl-j",
	"ctrl-k",
	"ctrl-l",
	"ctrl-m",
	"ctrl-n",
	"ctrl-o",
	"ctrl-p",
	"ctrl-q",
	"ctrl-r",
	"ctrl-s",
	"ctrl-t",
	"ctrl-u",
	"ctrl-v",
	"ctrl-w",
	"ctrl-x",
	"ctrl-y",
	"ctrl-z",
	"ctrl-[",
	"ctrl-\\",
	"ctrl-]",
	"ctrl-^",
	"ctrl-_",
}

// ToBytes converts a string representing a suite of key sequences to the corresponding ASCII codes.
func ToBytes(keys string) ([]byte, error) {
	codes := []byte{}
next:
	for _, key := range strings.Split(keys, ",") {
		if len(key) != 1 {
			for code, ctrl := range ASCII {
				if ctrl == key {
					codes = append(codes, byte(code))
					continue next
				}
			}
			if key == "DEL" {
				codes = append(codes, 127)
			} else {
				return nil, fmt.Errorf("Unknown character: '%s'", key)
			}
		} else {
			codes = append(codes, key[0])
		}
	}
	return codes, nil
}
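To make the key-string mapping concrete, here is a small sketch of what ToBytes produces; the expected values follow from the table above, where a control key's code equals its index. The main wrapper is an assumption.

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/term"
)

func main() {
	// "ctrl-c" is index 3 in the ASCII table, "a" is kept as a literal
	// byte, and "DEL" is special-cased to 127.
	codes, err := term.ToBytes("ctrl-c,a,DEL")
	if err != nil {
		panic(err)
	}
	fmt.Println(codes) // [3 97 127]
}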
9,388
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/term/term.go
// +build !windows

// Package term provides structures and helper functions to work with
// terminal (state, sizes).
package term // import "github.com/docker/docker/pkg/term"

import (
	"errors"
	"fmt"
	"io"
	"os"
	"os/signal"

	"golang.org/x/sys/unix"
)

var (
	// ErrInvalidState is returned if the state of the terminal is invalid.
	ErrInvalidState = errors.New("Invalid terminal state")
)

// State represents the state of the terminal.
type State struct {
	termios Termios
}

// Winsize represents the size of the terminal window.
type Winsize struct {
	Height uint16
	Width  uint16
	x      uint16
	y      uint16
}

// StdStreams returns the standard streams (stdin, stdout, stderr).
func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
	return os.Stdin, os.Stdout, os.Stderr
}

// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal.
func GetFdInfo(in interface{}) (uintptr, bool) {
	var inFd uintptr
	var isTerminalIn bool
	if file, ok := in.(*os.File); ok {
		inFd = file.Fd()
		isTerminalIn = IsTerminal(inFd)
	}
	return inFd, isTerminalIn
}

// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal(fd uintptr) bool {
	var termios Termios
	return tcget(fd, &termios) == 0
}

// RestoreTerminal restores the terminal connected to the given file descriptor
// to a previous state.
func RestoreTerminal(fd uintptr, state *State) error {
	if state == nil {
		return ErrInvalidState
	}
	if err := tcset(fd, &state.termios); err != 0 {
		return err
	}
	return nil
}

// SaveState saves the state of the terminal connected to the given file descriptor.
func SaveState(fd uintptr) (*State, error) {
	var oldState State
	if err := tcget(fd, &oldState.termios); err != 0 {
		return nil, err
	}
	return &oldState, nil
}

// DisableEcho applies the specified state to the terminal connected to the file
// descriptor, with echo disabled.
func DisableEcho(fd uintptr, state *State) error {
	newState := state.termios
	newState.Lflag &^= unix.ECHO

	if err := tcset(fd, &newState); err != 0 {
		return err
	}
	handleInterrupt(fd, state)
	return nil
}

// SetRawTerminal puts the terminal connected to the given file descriptor into
// raw mode and returns the previous state. On UNIX, this puts both the input
// and output into raw mode. On Windows, it only puts the input into raw mode.
func SetRawTerminal(fd uintptr) (*State, error) {
	oldState, err := MakeRaw(fd)
	if err != nil {
		return nil, err
	}
	handleInterrupt(fd, oldState)
	return oldState, err
}

// SetRawTerminalOutput puts the output of terminal connected to the given file
// descriptor into raw mode. On UNIX, this does nothing and returns nil for the
// state. On Windows, it disables LF -> CRLF translation.
func SetRawTerminalOutput(fd uintptr) (*State, error) {
	return nil, nil
}

func handleInterrupt(fd uintptr, state *State) {
	sigchan := make(chan os.Signal, 1)
	signal.Notify(sigchan, os.Interrupt)
	go func() {
		for range sigchan {
			// quit cleanly and the new terminal item is on a new line
			fmt.Println()
			signal.Stop(sigchan)
			close(sigchan)

			RestoreTerminal(fd, state)
			os.Exit(1)
		}
	}()
}
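A typical raw-mode round trip using the functions above, sketched under the assumption of an interactive stdin (the main wrapper is illustrative, not part of the package):

package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/term"
)

func main() {
	// Only attempt raw mode when stdin is really a terminal.
	fd, isTerm := term.GetFdInfo(os.Stdin)
	if !isTerm {
		fmt.Fprintln(os.Stderr, "stdin is not a terminal")
		return
	}

	// Enter raw mode; the returned state restores the terminal later.
	state, err := term.SetRawTerminal(fd)
	if err != nil {
		panic(err)
	}
	defer term.RestoreTerminal(fd, state)

	// ... interactive I/O would happen here ...
}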
9,389
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/term/termios_linux.go
package term // import "github.com/docker/docker/pkg/term"

import (
	"golang.org/x/sys/unix"
)

const (
	getTermios = unix.TCGETS
	setTermios = unix.TCSETS
)

// Termios is the Unix API for terminal I/O.
type Termios unix.Termios

// MakeRaw put the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd uintptr) (*State, error) {
	termios, err := unix.IoctlGetTermios(int(fd), getTermios)
	if err != nil {
		return nil, err
	}

	var oldState State
	oldState.termios = Termios(*termios)

	termios.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON)
	termios.Oflag &^= unix.OPOST
	termios.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
	termios.Cflag &^= (unix.CSIZE | unix.PARENB)
	termios.Cflag |= unix.CS8
	termios.Cc[unix.VMIN] = 1
	termios.Cc[unix.VTIME] = 0

	if err := unix.IoctlSetTermios(int(fd), setTermios, termios); err != nil {
		return nil, err
	}
	return &oldState, nil
}
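// Illustrative sketch (not part of the vendored file): MakeRaw clears, among
// others, ICANON (line buffering) and ECHO, and sets VMIN=1/VTIME=0 so each
// read blocks until exactly one byte arrives. A minimal Linux-only check of
// the live termios flags on stdin, assuming stdin is a terminal:
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	fd := int(os.Stdin.Fd())
	tio, err := unix.IoctlGetTermios(fd, unix.TCGETS)
	if err != nil {
		panic(err) // not a terminal
	}
	fmt.Println("canonical mode:", tio.Lflag&unix.ICANON != 0) // false after MakeRaw
	fmt.Println("echo enabled:  ", tio.Lflag&unix.ECHO != 0)   // false after MakeRaw
}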
9,390
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/term
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go
// +build windows

package windowsconsole // import "github.com/docker/docker/pkg/term/windows"

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"os"
	"strings"
	"unsafe"

	ansiterm "github.com/Azure/go-ansiterm"
	"github.com/Azure/go-ansiterm/winterm"
)

const (
	escapeSequence = ansiterm.KEY_ESC_CSI
)

// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation.
type ansiReader struct {
	file     *os.File
	fd       uintptr
	buffer   []byte
	cbBuffer int
	command  []byte
}

// NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a
// Windows console input handle.
func NewAnsiReader(nFile int) io.ReadCloser {
	initLogger()
	file, fd := winterm.GetStdFile(nFile)
	return &ansiReader{
		file:    file,
		fd:      fd,
		command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH),
		buffer:  make([]byte, 0),
	}
}

// Close closes the wrapped file.
func (ar *ansiReader) Close() (err error) {
	return ar.file.Close()
}

// Fd returns the file descriptor of the wrapped file.
func (ar *ansiReader) Fd() uintptr {
	return ar.fd
}

// Read reads up to len(p) bytes of translated input events into p.
func (ar *ansiReader) Read(p []byte) (int, error) {
	if len(p) == 0 {
		return 0, nil
	}

	// Previously read bytes exist, read as much as we can and return
	if len(ar.buffer) > 0 {
		logger.Debugf("Reading previously cached bytes")

		originalLength := len(ar.buffer)
		copiedLength := copy(p, ar.buffer)

		if copiedLength == originalLength {
			ar.buffer = make([]byte, 0, len(p))
		} else {
			ar.buffer = ar.buffer[copiedLength:]
		}

		logger.Debugf("Read from cache p[%d]: % x", copiedLength, p)
		return copiedLength, nil
	}

	// Read and translate key events
	events, err := readInputEvents(ar.fd, len(p))
	if err != nil {
		return 0, err
	} else if len(events) == 0 {
		logger.Debug("No input events detected")
		return 0, nil
	}

	keyBytes := translateKeyEvents(events, []byte(escapeSequence))

	// Save excess bytes and right-size keyBytes
	if len(keyBytes) > len(p) {
		logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p))
		ar.buffer = keyBytes[len(p):]
		keyBytes = keyBytes[:len(p)]
	} else if len(keyBytes) == 0 {
		logger.Debug("No key bytes returned from the translator")
		return 0, nil
	}

	copiedLength := copy(p, keyBytes)
	if copiedLength != len(keyBytes) {
		return 0, errors.New("unexpected copy length encountered")
	}

	logger.Debugf("Read p[%d]: % x", copiedLength, p)
	logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes)
	return copiedLength, nil
}

// readInputEvents polls until at least one event is available.
func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) {
	// Determine the maximum number of records to retrieve
	// -- Cast around the type system to obtain the size of a single INPUT_RECORD.
	//    unsafe.Sizeof requires an expression vs. a type-reference; the casting
	//    tricks the type system into believing it has such an expression.
	recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes)))))
	countRecords := maxBytes / recordSize
	if countRecords > ansiterm.MAX_INPUT_EVENTS {
		countRecords = ansiterm.MAX_INPUT_EVENTS
	} else if countRecords == 0 {
		countRecords = 1
	}
	logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize)

	// Wait for and read input events
	events := make([]winterm.INPUT_RECORD, countRecords)
	nEvents := uint32(0)
	eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE)
	if err != nil {
		return nil, err
	}

	if eventsExist {
		err = winterm.ReadConsoleInput(fd, events, &nEvents)
		if err != nil {
			return nil, err
		}
	}

	// Return a slice restricted to the number of returned records
	logger.Debugf("[windows] readInputEvents: Read %v events", nEvents)
	return events[:nEvents], nil
}

// KeyEvent Translation Helpers

var arrowKeyMapPrefix = map[uint16]string{
	winterm.VK_UP:    "%s%sA",
	winterm.VK_DOWN:  "%s%sB",
	winterm.VK_RIGHT: "%s%sC",
	winterm.VK_LEFT:  "%s%sD",
}

var keyMapPrefix = map[uint16]string{
	winterm.VK_UP:     "\x1B[%sA",
	winterm.VK_DOWN:   "\x1B[%sB",
	winterm.VK_RIGHT:  "\x1B[%sC",
	winterm.VK_LEFT:   "\x1B[%sD",
	winterm.VK_HOME:   "\x1B[1%s~", // showkey shows ^[[1
	winterm.VK_END:    "\x1B[4%s~", // showkey shows ^[[4
	winterm.VK_INSERT: "\x1B[2%s~",
	winterm.VK_DELETE: "\x1B[3%s~",
	winterm.VK_PRIOR:  "\x1B[5%s~",
	winterm.VK_NEXT:   "\x1B[6%s~",
	winterm.VK_F1:     "",
	winterm.VK_F2:     "",
	winterm.VK_F3:     "\x1B[13%s~",
	winterm.VK_F4:     "\x1B[14%s~",
	winterm.VK_F5:     "\x1B[15%s~",
	winterm.VK_F6:     "\x1B[17%s~",
	winterm.VK_F7:     "\x1B[18%s~",
	winterm.VK_F8:     "\x1B[19%s~",
	winterm.VK_F9:     "\x1B[20%s~",
	winterm.VK_F10:    "\x1B[21%s~",
	winterm.VK_F11:    "\x1B[23%s~",
	winterm.VK_F12:    "\x1B[24%s~",
}

// translateKeyEvents converts the input events into the appropriate ANSI string.
func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte {
	var buffer bytes.Buffer
	for _, event := range events {
		if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 {
			buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence))
		}
	}
	return buffer.Bytes()
}

// keyToString maps the given input event record to the corresponding string.
func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string {
	if keyEvent.UnicodeChar == 0 {
		return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence)
	}

	_, alt, control := getControlKeys(keyEvent.ControlKeyState)
	if control {
		// TODO(azlinux): Implement following control sequences
		// <Ctrl>-D  Signals the end of input from the keyboard; also exits current shell.
		// <Ctrl>-H  Deletes the first character to the left of the cursor. Also called the ERASE key.
		// <Ctrl>-Q  Restarts printing after it has been stopped with <Ctrl>-s.
		// <Ctrl>-S  Suspends printing on the screen (does not stop the program).
		// <Ctrl>-U  Deletes all characters on the current line. Also called the KILL key.
		// <Ctrl>-E  Quits current command and creates a core
	}

	// <Alt>+Key generates ESC N Key
	if !control && alt {
		return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar))
	}

	return string(keyEvent.UnicodeChar)
}

// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string.
func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string {
	shift, alt, control := getControlKeys(controlState)
	modifier := getControlKeysModifier(shift, alt, control)

	if format, ok := arrowKeyMapPrefix[key]; ok {
		return fmt.Sprintf(format, escapeSequence, modifier)
	}

	if format, ok := keyMapPrefix[key]; ok {
		return fmt.Sprintf(format, modifier)
	}

	return ""
}

// getControlKeys extracts the shift, alt, and ctrl key states.
func getControlKeys(controlState uint32) (shift, alt, control bool) {
	shift = 0 != (controlState & winterm.SHIFT_PRESSED)
	alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED))
	control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED))
	return shift, alt, control
}

// getControlKeysModifier returns the ANSI modifier for the given combination of control keys.
func getControlKeysModifier(shift, alt, control bool) string {
	if shift && alt && control {
		return ansiterm.KEY_CONTROL_PARAM_8
	}
	if alt && control {
		return ansiterm.KEY_CONTROL_PARAM_7
	}
	if shift && control {
		return ansiterm.KEY_CONTROL_PARAM_6
	}
	if control {
		return ansiterm.KEY_CONTROL_PARAM_5
	}
	if shift && alt {
		return ansiterm.KEY_CONTROL_PARAM_4
	}
	if alt {
		return ansiterm.KEY_CONTROL_PARAM_3
	}
	if shift {
		return ansiterm.KEY_CONTROL_PARAM_2
	}
	return ""
}
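// Illustrative usage sketch (not part of the vendored file): NewAnsiReader
// wraps a Windows console input handle so virtual keys arrive as VT100 escape
// sequences, giving cross-platform line editors a single input format. The
// syscall.STD_INPUT_HANDLE argument is an assumption here; treat the exact
// handle constant winterm.GetStdFile expects as something to verify.
// +build windows

package main

import (
	"fmt"
	"syscall"

	windowsconsole "github.com/docker/docker/pkg/term/windows"
)

func main() {
	stdin := windowsconsole.NewAnsiReader(syscall.STD_INPUT_HANDLE)
	buf := make([]byte, 16)
	n, err := stdin.Read(buf) // an up-arrow key press reads back as ESC [ A
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", buf[:n])
}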
9,391
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/term
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/term/windows/console.go
// +build windows

package windowsconsole // import "github.com/docker/docker/pkg/term/windows"

import (
	"os"

	"github.com/Azure/go-ansiterm/winterm"
)

// GetHandleInfo returns file descriptor and bool indicating whether the file is a console.
func GetHandleInfo(in interface{}) (uintptr, bool) {
	switch t := in.(type) {
	case *ansiReader:
		return t.Fd(), true
	case *ansiWriter:
		return t.Fd(), true
	}

	var inFd uintptr
	var isTerminal bool

	if file, ok := in.(*os.File); ok {
		inFd = file.Fd()
		isTerminal = IsConsole(inFd)
	}
	return inFd, isTerminal
}

// IsConsole returns true if the given file descriptor is a Windows Console.
// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console.
func IsConsole(fd uintptr) bool {
	_, e := winterm.GetConsoleMode(fd)
	return e == nil
}
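// Illustrative sketch (not part of the vendored file): IsConsole is how
// callers distinguish a real console from a redirected pipe or file, and
// therefore whether ANSI emulation is needed at all.
// +build windows

package main

import (
	"fmt"
	"os"

	windowsconsole "github.com/docker/docker/pkg/term/windows"
)

func main() {
	fd, isConsole := windowsconsole.GetHandleInfo(os.Stdout)
	fmt.Printf("fd=%d console=%v\n", fd, isConsole) // console=false when stdout is piped
}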
9,392
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/term
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/term/windows/windows.go
// These files implement ANSI-aware input and output streams for use by the Docker Windows client.
// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create
// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls.

package windowsconsole // import "github.com/docker/docker/pkg/term/windows"

import (
	"io/ioutil"
	"os"
	"sync"

	"github.com/Azure/go-ansiterm"
	"github.com/sirupsen/logrus"
)

var logger *logrus.Logger
var initOnce sync.Once

func initLogger() {
	initOnce.Do(func() {
		logFile := ioutil.Discard

		if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" {
			logFile, _ = os.Create("ansiReaderWriter.log")
		}

		logger = &logrus.Logger{
			Out:       logFile,
			Formatter: new(logrus.TextFormatter),
			Level:     logrus.DebugLevel,
		}
	})
}
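// Illustrative sketch (not part of the vendored file): tracing is opt-in.
// Setting the environment variable named by ansiterm.LogEnv to "1" before the
// first reader/writer is constructed routes debug output to
// ansiReaderWriter.log in the working directory; otherwise it goes to
// ioutil.Discard.
package main

import (
	"os"

	ansiterm "github.com/Azure/go-ansiterm"
)

func main() {
	os.Setenv(ansiterm.LogEnv, "1") // must happen before initLogger's sync.Once fires
	// ... construct NewAnsiReader / NewAnsiWriter afterwards ...
}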
9,393
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/term
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go
// +build windows

package windowsconsole // import "github.com/docker/docker/pkg/term/windows"

import (
	"io"
	"os"

	ansiterm "github.com/Azure/go-ansiterm"
	"github.com/Azure/go-ansiterm/winterm"
)

// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation.
type ansiWriter struct {
	file           *os.File
	fd             uintptr
	infoReset      *winterm.CONSOLE_SCREEN_BUFFER_INFO
	command        []byte
	escapeSequence []byte
	inAnsiSequence bool
	parser         *ansiterm.AnsiParser
}

// NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a
// Windows console output handle.
func NewAnsiWriter(nFile int) io.Writer {
	initLogger()
	file, fd := winterm.GetStdFile(nFile)
	info, err := winterm.GetConsoleScreenBufferInfo(fd)
	if err != nil {
		return nil
	}

	parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file))
	logger.Infof("newAnsiWriter: parser %p", parser)

	aw := &ansiWriter{
		file:           file,
		fd:             fd,
		infoReset:      info,
		command:        make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH),
		escapeSequence: []byte(ansiterm.KEY_ESC_CSI),
		parser:         parser,
	}

	logger.Infof("newAnsiWriter: aw.parser %p", aw.parser)
	logger.Infof("newAnsiWriter: %v", aw)
	return aw
}

func (aw *ansiWriter) Fd() uintptr {
	return aw.fd
}

// Write writes len(p) bytes from p to the underlying data stream.
func (aw *ansiWriter) Write(p []byte) (total int, err error) {
	if len(p) == 0 {
		return 0, nil
	}

	logger.Infof("Write: % x", p)
	logger.Infof("Write: %s", string(p))
	return aw.parser.Parse(p)
}
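// Illustrative usage sketch (not part of the vendored file): an ansiWriter
// lets ANSI-colored output work on legacy Windows consoles by translating
// escape sequences into Console API calls as they are parsed. As above, the
// syscall handle constant is an assumption about what GetStdFile expects.
// +build windows

package main

import (
	"fmt"
	"syscall"

	windowsconsole "github.com/docker/docker/pkg/term/windows"
)

func main() {
	stdout := windowsconsole.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE)
	if stdout == nil {
		return // not a console (e.g. output is redirected)
	}
	// SGR color codes become SetConsoleTextAttribute-style calls.
	fmt.Fprintf(stdout, "\x1b[32mgreen text\x1b[0m\r\n")
}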
9,394
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
package jsonmessage // import "github.com/docker/docker/pkg/jsonmessage"

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
	"time"

	"github.com/docker/docker/pkg/term"
	"github.com/docker/go-units"
	"github.com/morikuni/aec"
)

// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to
// ensure the formatted time is always the same number of characters.
const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"

// JSONError wraps a concrete Code and Message. Code is an integer error code;
// Message is the error message.
type JSONError struct {
	Code    int    `json:"code,omitempty"`
	Message string `json:"message,omitempty"`
}

func (e *JSONError) Error() string {
	return e.Message
}

// JSONProgress describes a Progress. terminalFd is the fd of the current terminal,
// Start is the initial value for the operation. Current is the current status and
// value of the progress made towards Total. Total is the end value describing when
// we made 100% progress for an operation.
type JSONProgress struct {
	terminalFd uintptr
	Current    int64 `json:"current,omitempty"`
	Total      int64 `json:"total,omitempty"`
	Start      int64 `json:"start,omitempty"`
	// If true, don't show xB/yB
	HideCounts bool   `json:"hidecounts,omitempty"`
	Units      string `json:"units,omitempty"`
	nowFunc    func() time.Time
	winSize    int
}

func (p *JSONProgress) String() string {
	var (
		width       = p.width()
		pbBox       string
		numbersBox  string
		timeLeftBox string
	)
	if p.Current <= 0 && p.Total <= 0 {
		return ""
	}
	if p.Total <= 0 {
		switch p.Units {
		case "":
			current := units.HumanSize(float64(p.Current))
			return fmt.Sprintf("%8v", current)
		default:
			return fmt.Sprintf("%d %s", p.Current, p.Units)
		}
	}

	percentage := int(float64(p.Current)/float64(p.Total)*100) / 2
	if percentage > 50 {
		percentage = 50
	}
	if width > 110 {
		// this number can't be negative gh#7136
		numSpaces := 0
		if 50-percentage > 0 {
			numSpaces = 50 - percentage
		}
		pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces))
	}

	switch {
	case p.HideCounts:
	case p.Units == "": // no units, use bytes
		current := units.HumanSize(float64(p.Current))
		total := units.HumanSize(float64(p.Total))

		numbersBox = fmt.Sprintf("%8v/%v", current, total)

		if p.Current > p.Total {
			// remove total display if the reported current is wonky.
			numbersBox = fmt.Sprintf("%8v", current)
		}
	default:
		numbersBox = fmt.Sprintf("%d/%d %s", p.Current, p.Total, p.Units)

		if p.Current > p.Total {
			// remove total display if the reported current is wonky.
			numbersBox = fmt.Sprintf("%d %s", p.Current, p.Units)
		}
	}

	if p.Current > 0 && p.Start > 0 && percentage < 50 {
		fromStart := p.now().Sub(time.Unix(p.Start, 0))
		perEntry := fromStart / time.Duration(p.Current)
		left := time.Duration(p.Total-p.Current) * perEntry
		left = (left / time.Second) * time.Second

		if width > 50 {
			timeLeftBox = " " + left.String()
		}
	}
	return pbBox + numbersBox + timeLeftBox
}

// shim for testing
func (p *JSONProgress) now() time.Time {
	if p.nowFunc == nil {
		p.nowFunc = func() time.Time {
			return time.Now().UTC()
		}
	}
	return p.nowFunc()
}

// shim for testing
func (p *JSONProgress) width() int {
	if p.winSize != 0 {
		return p.winSize
	}
	ws, err := term.GetWinsize(p.terminalFd)
	if err == nil {
		return int(ws.Width)
	}
	return 200
}

// JSONMessage defines a message struct. It describes
// the creation time, source, status, and ID of the
// message. It's used for docker events.
type JSONMessage struct {
	Stream          string        `json:"stream,omitempty"`
	Status          string        `json:"status,omitempty"`
	Progress        *JSONProgress `json:"progressDetail,omitempty"`
	ProgressMessage string        `json:"progress,omitempty"` // deprecated
	ID              string        `json:"id,omitempty"`
	From            string        `json:"from,omitempty"`
	Time            int64         `json:"time,omitempty"`
	TimeNano        int64         `json:"timeNano,omitempty"`
	Error           *JSONError    `json:"errorDetail,omitempty"`
	ErrorMessage    string        `json:"error,omitempty"` // deprecated
	// Aux contains out-of-band data, such as digests for push signing and image id after building.
	Aux *json.RawMessage `json:"aux,omitempty"`
}

func clearLine(out io.Writer) {
	eraseMode := aec.EraseModes.All
	cl := aec.EraseLine(eraseMode)
	fmt.Fprint(out, cl)
}

func cursorUp(out io.Writer, l uint) {
	fmt.Fprint(out, aec.Up(l))
}

func cursorDown(out io.Writer, l uint) {
	fmt.Fprint(out, aec.Down(l))
}

// Display displays the JSONMessage to `out`. If `isTerminal` is true, it will erase the
// entire current line when displaying the progressbar.
func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error {
	if jm.Error != nil {
		if jm.Error.Code == 401 {
			return fmt.Errorf("authentication is required")
		}
		return jm.Error
	}
	var endl string
	if isTerminal && jm.Stream == "" && jm.Progress != nil {
		clearLine(out)
		endl = "\r"
		fmt.Fprintf(out, endl)
	} else if jm.Progress != nil && jm.Progress.String() != "" { // disable progressbar in non-terminal
		return nil
	}
	if jm.TimeNano != 0 {
		fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed))
	} else if jm.Time != 0 {
		fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed))
	}
	if jm.ID != "" {
		fmt.Fprintf(out, "%s: ", jm.ID)
	}
	if jm.From != "" {
		fmt.Fprintf(out, "(from %s) ", jm.From)
	}
	if jm.Progress != nil && isTerminal {
		fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl)
	} else if jm.ProgressMessage != "" { // deprecated
		fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl)
	} else if jm.Stream != "" {
		fmt.Fprintf(out, "%s%s", jm.Stream, endl)
	} else {
		fmt.Fprintf(out, "%s%s\n", jm.Status, endl)
	}
	return nil
}

// DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal`
// describes if `out` is a terminal. If this is the case, it will print `\n` at the end of
// each line and move the cursor while displaying.
func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(JSONMessage)) error {
	var (
		dec = json.NewDecoder(in)
		ids = make(map[string]uint)
	)

	for {
		var diff uint
		var jm JSONMessage
		if err := dec.Decode(&jm); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}

		if jm.Aux != nil {
			if auxCallback != nil {
				auxCallback(jm)
			}
			continue
		}

		if jm.Progress != nil {
			jm.Progress.terminalFd = terminalFd
		}
		if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") {
			line, ok := ids[jm.ID]
			if !ok {
				// NOTE: This approach of using len(id) to
				// figure out the number of lines of history
				// only works as long as we clear the history
				// when we output something that's not
				// accounted for in the map, such as a line
				// with no ID.
				line = uint(len(ids))
				ids[jm.ID] = line
				if isTerminal {
					fmt.Fprintf(out, "\n")
				}
			}
			diff = uint(len(ids)) - line
			if isTerminal {
				cursorUp(out, diff)
			}
		} else {
			// When outputting something that isn't progress
			// output, clear the history of previous lines. We
			// don't want progress entries from some previous
			// operation to be updated (for example, pull -a
			// with multiple tags).
			ids = make(map[string]uint)
		}
		err := jm.Display(out, isTerminal)
		if jm.ID != "" && isTerminal {
			cursorDown(out, diff)
		}
		if err != nil {
			return err
		}
	}
	return nil
}

type stream interface {
	io.Writer
	FD() uintptr
	IsTerminal() bool
}

// DisplayJSONMessagesToStream prints json messages to the output stream
func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(JSONMessage)) error {
	return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback)
}
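// Illustrative usage sketch (not part of the vendored file): rendering a JSON
// progress stream (the format the Docker daemon emits during pulls/pushes) to
// the local terminal. The input here is a hand-rolled sample, not real daemon
// output.
package main

import (
	"os"
	"strings"

	"github.com/docker/docker/pkg/jsonmessage"
	"github.com/docker/docker/pkg/term"
)

func main() {
	stream := strings.NewReader(
		`{"id":"abc123","status":"Downloading","progressDetail":{"current":10,"total":100}}` + "\n" +
			`{"id":"abc123","status":"Download complete"}` + "\n")

	fd, isTerm := term.GetFdInfo(os.Stdout)
	// On a terminal this redraws the abc123 line in place; on a pipe the
	// progress bar is suppressed and only plain status lines are printed.
	if err := jsonmessage.DisplayJSONMessagesStream(stream, os.Stdout, fd, isTerm, nil); err != nil {
		panic(err)
	}
}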
9,395
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go
package ioutils // import "github.com/docker/docker/pkg/ioutils"

import (
	"io"
	"sync"
)

// WriteFlusher wraps the Write and Flush operation ensuring that every write
// is a flush. In addition, the Close method can be called to intercept
// Read/Write calls if the target's lifecycle has already ended.
type WriteFlusher struct {
	w           io.Writer
	flusher     flusher
	flushed     chan struct{}
	flushedOnce sync.Once
	closed      chan struct{}
	closeLock   sync.Mutex
}

type flusher interface {
	Flush()
}

var errWriteFlusherClosed = io.EOF

func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
	select {
	case <-wf.closed:
		return 0, errWriteFlusherClosed
	default:
	}

	n, err = wf.w.Write(b)
	wf.Flush() // every write is a flush.
	return n, err
}

// Flush the stream immediately.
func (wf *WriteFlusher) Flush() {
	select {
	case <-wf.closed:
		return
	default:
	}

	wf.flushedOnce.Do(func() {
		close(wf.flushed)
	})
	wf.flusher.Flush()
}

// Flushed returns true if Flush has been called at least once,
// and false otherwise.
func (wf *WriteFlusher) Flushed() bool {
	// BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to
	// be used to detect whether or not a response code has been issued.
	// Another hook should be used instead.
	var flushed bool
	select {
	case <-wf.flushed:
		flushed = true
	default:
	}
	return flushed
}

// Close closes the write flusher, disallowing any further writes to the
// target. After the flusher is closed, all calls to write or flush will
// result in an error.
func (wf *WriteFlusher) Close() error {
	wf.closeLock.Lock()
	defer wf.closeLock.Unlock()

	select {
	case <-wf.closed:
		return errWriteFlusherClosed
	default:
		close(wf.closed)
	}
	return nil
}

// NewWriteFlusher returns a new WriteFlusher.
func NewWriteFlusher(w io.Writer) *WriteFlusher {
	var fl flusher
	if f, ok := w.(flusher); ok {
		fl = f
	} else {
		fl = &NopFlusher{}
	}
	return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})}
}
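// Illustrative usage sketch (not part of the vendored file): wrapping an
// http.ResponseWriter, whose concrete type implements Flush(), so each chunk
// of a streaming response is pushed to the client immediately instead of
// sitting in the server's buffer.
package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	http.HandleFunc("/events", func(w http.ResponseWriter, r *http.Request) {
		wf := ioutils.NewWriteFlusher(w)
		defer wf.Close()
		for i := 0; i < 3; i++ {
			fmt.Fprintf(wf, "tick %d\n", i) // each Write flushes to the client
			time.Sleep(time.Second)
		}
	})
	http.ListenAndServe(":8080", nil)
}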
9,396
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/ioutils/buffer.go
package ioutils // import "github.com/docker/docker/pkg/ioutils"

import (
	"errors"
	"io"
)

var errBufferFull = errors.New("buffer is full")

type fixedBuffer struct {
	buf      []byte
	pos      int
	lastRead int
}

func (b *fixedBuffer) Write(p []byte) (int, error) {
	n := copy(b.buf[b.pos:cap(b.buf)], p)
	b.pos += n

	if n < len(p) {
		if b.pos == cap(b.buf) {
			return n, errBufferFull
		}
		return n, io.ErrShortWrite
	}
	return n, nil
}

func (b *fixedBuffer) Read(p []byte) (int, error) {
	n := copy(p, b.buf[b.lastRead:b.pos])
	b.lastRead += n
	return n, nil
}

func (b *fixedBuffer) Len() int {
	return b.pos - b.lastRead
}

func (b *fixedBuffer) Cap() int {
	return cap(b.buf)
}

func (b *fixedBuffer) Reset() {
	b.pos = 0
	b.lastRead = 0
	b.buf = b.buf[:0]
}

func (b *fixedBuffer) String() string {
	return string(b.buf[b.lastRead:b.pos])
}
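// Illustrative sketch (not part of the vendored file): fixedBuffer is a
// write-once/read-once window over a fixed-capacity slice. It never grows;
// Write reports errBufferFull at capacity, which is what lets BytesPipe chain
// buffers and recycle them through sync.Pool. Because the type is unexported,
// this sketch would have to live inside package ioutils (e.g. in a test).
func exampleFixedBuffer() {
	b := &fixedBuffer{buf: make([]byte, 0, 4)}

	n, err := b.Write([]byte("hello")) // n == 4, err == errBufferFull; the 'o' is dropped
	_ = b.String()                     // "hell": only what fit is readable
	_, _ = n, err
}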
9,397
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go
package ioutils // import "github.com/docker/docker/pkg/ioutils"

import (
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
)

// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a
// temporary file and closing it atomically changes the temporary file to
// destination path. Writing and closing concurrently is not allowed.
func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) {
	f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
	if err != nil {
		return nil, err
	}

	abspath, err := filepath.Abs(filename)
	if err != nil {
		return nil, err
	}
	return &atomicFileWriter{
		f:    f,
		fn:   abspath,
		perm: perm,
	}, nil
}

// AtomicWriteFile atomically writes data to a file named by filename.
func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
	f, err := NewAtomicFileWriter(filename, perm)
	if err != nil {
		return err
	}
	n, err := f.Write(data)
	if err == nil && n < len(data) {
		err = io.ErrShortWrite
		f.(*atomicFileWriter).writeErr = err
	}
	if err1 := f.Close(); err == nil {
		err = err1
	}
	return err
}

type atomicFileWriter struct {
	f        *os.File
	fn       string
	writeErr error
	perm     os.FileMode
}

func (w *atomicFileWriter) Write(dt []byte) (int, error) {
	n, err := w.f.Write(dt)
	if err != nil {
		w.writeErr = err
	}
	return n, err
}

func (w *atomicFileWriter) Close() (retErr error) {
	defer func() {
		if retErr != nil || w.writeErr != nil {
			os.Remove(w.f.Name())
		}
	}()
	if err := w.f.Sync(); err != nil {
		w.f.Close()
		return err
	}
	if err := w.f.Close(); err != nil {
		return err
	}
	if err := os.Chmod(w.f.Name(), w.perm); err != nil {
		return err
	}
	if w.writeErr == nil {
		return os.Rename(w.f.Name(), w.fn)
	}
	return nil
}

// AtomicWriteSet is used to atomically write a set
// of files and ensure they are visible at the same time.
// Must be committed to a new directory.
type AtomicWriteSet struct {
	root string
}

// NewAtomicWriteSet creates a new atomic write set to
// atomically create a set of files. The given directory
// is used as the base directory for storing files before
// commit. If no temporary directory is given the system
// default is used.
func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) {
	td, err := ioutil.TempDir(tmpDir, "write-set-")
	if err != nil {
		return nil, err
	}

	return &AtomicWriteSet{
		root: td,
	}, nil
}

// WriteFile writes a file to the set, guaranteeing the file
// has been synced.
func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error {
	f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
	if err != nil {
		return err
	}
	n, err := f.Write(data)
	if err == nil && n < len(data) {
		err = io.ErrShortWrite
	}
	if err1 := f.Close(); err == nil {
		err = err1
	}
	return err
}

type syncFileCloser struct {
	*os.File
}

func (w syncFileCloser) Close() error {
	err := w.File.Sync()
	if err1 := w.File.Close(); err == nil {
		err = err1
	}
	return err
}

// FileWriter opens a file writer inside the set. The file
// should be synced and closed before calling commit.
func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) {
	f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm)
	if err != nil {
		return nil, err
	}
	return syncFileCloser{f}, nil
}

// Cancel cancels the set and removes all temporary data
// created in the set.
func (ws *AtomicWriteSet) Cancel() error {
	return os.RemoveAll(ws.root)
}

// Commit moves all created files to the target directory. The
// target directory must not exist and the parent of the target
// directory must exist.
func (ws *AtomicWriteSet) Commit(target string) error {
	return os.Rename(ws.root, target)
}

// String returns the location the set is writing to.
func (ws *AtomicWriteSet) String() string {
	return ws.root
}
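// Illustrative usage sketch (not part of the vendored file): AtomicWriteFile
// ensures readers never observe a half-written file; data goes to a synced
// temp file in the same directory, which is chmod'ed and renamed into place
// only on success. The path and contents below are made up for illustration.
package main

import (
	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	data := []byte(`{"debug": true}`)
	// Readers of /tmp/daemon.json see either the old contents or the new
	// contents, never a truncated mix.
	if err := ioutils.AtomicWriteFile("/tmp/daemon.json", data, 0644); err != nil {
		panic(err)
	}
}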
9,398
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go
package ioutils // import "github.com/docker/docker/pkg/ioutils"

import (
	"errors"
	"io"
	"sync"
)

// maxCap is the highest capacity to use in byte slices that buffer data.
const maxCap = 1e6

// minCap is the lowest capacity to use in byte slices that buffer data
const minCap = 64

// blockThreshold is the minimum number of bytes in the buffer which will cause
// a write to BytesPipe to block when allocating a new slice.
const blockThreshold = 1e6

var (
	// ErrClosed is returned when Write is called on a closed BytesPipe.
	ErrClosed = errors.New("write to closed BytesPipe")

	bufPools     = make(map[int]*sync.Pool)
	bufPoolsLock sync.Mutex
)

// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue).
// All written data may be read at most once. Also, BytesPipe allocates
// and releases new byte slices to adjust to current needs, so the buffer
// won't be overgrown after peak loads.
type BytesPipe struct {
	mu       sync.Mutex
	wait     *sync.Cond
	buf      []*fixedBuffer
	bufLen   int
	closeErr error // error to return from next Read. set to nil if not closed.
}

// NewBytesPipe creates a new BytesPipe, initialized with an empty buffer of
// capacity minCap (64 bytes).
func NewBytesPipe() *BytesPipe {
	bp := &BytesPipe{}
	bp.buf = append(bp.buf, getBuffer(minCap))
	bp.wait = sync.NewCond(&bp.mu)
	return bp
}

// Write writes p to BytesPipe.
// It can allocate new []byte slices in a process of writing.
func (bp *BytesPipe) Write(p []byte) (int, error) {
	bp.mu.Lock()

	written := 0
loop0:
	for {
		if bp.closeErr != nil {
			bp.mu.Unlock()
			return written, ErrClosed
		}

		if len(bp.buf) == 0 {
			bp.buf = append(bp.buf, getBuffer(64))
		}
		// get the last buffer
		b := bp.buf[len(bp.buf)-1]

		n, err := b.Write(p)
		written += n
		bp.bufLen += n

		// errBufferFull is an error we expect to get if the buffer is full
		if err != nil && err != errBufferFull {
			bp.wait.Broadcast()
			bp.mu.Unlock()
			return written, err
		}

		// if there was enough room to write all then break
		if len(p) == n {
			break
		}

		// more data: write to the next slice
		p = p[n:]

		// make sure the buffer doesn't grow too big from this write
		for bp.bufLen >= blockThreshold {
			bp.wait.Wait()
			if bp.closeErr != nil {
				continue loop0
			}
		}

		// add new byte slice to the buffers slice and continue writing
		nextCap := b.Cap() * 2
		if nextCap > maxCap {
			nextCap = maxCap
		}
		bp.buf = append(bp.buf, getBuffer(nextCap))
	}
	bp.wait.Broadcast()
	bp.mu.Unlock()
	return written, nil
}

// CloseWithError causes further reads from a BytesPipe to return immediately.
func (bp *BytesPipe) CloseWithError(err error) error {
	bp.mu.Lock()
	if err != nil {
		bp.closeErr = err
	} else {
		bp.closeErr = io.EOF
	}
	bp.wait.Broadcast()
	bp.mu.Unlock()
	return nil
}

// Close causes further reads from a BytesPipe to return immediately.
func (bp *BytesPipe) Close() error {
	return bp.CloseWithError(nil)
}

// Read reads bytes from BytesPipe.
// Data can be read only once.
func (bp *BytesPipe) Read(p []byte) (n int, err error) {
	bp.mu.Lock()
	if bp.bufLen == 0 {
		if bp.closeErr != nil {
			err := bp.closeErr
			bp.mu.Unlock()
			return 0, err
		}
		bp.wait.Wait()
		if bp.bufLen == 0 && bp.closeErr != nil {
			err := bp.closeErr
			bp.mu.Unlock()
			return 0, err
		}
	}

	for bp.bufLen > 0 {
		b := bp.buf[0]
		read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error
		n += read
		bp.bufLen -= read

		if b.Len() == 0 {
			// it's empty so return it to the pool and move to the next one
			returnBuffer(b)
			bp.buf[0] = nil
			bp.buf = bp.buf[1:]
		}

		if len(p) == read {
			break
		}

		p = p[read:]
	}

	bp.wait.Broadcast()
	bp.mu.Unlock()
	return
}

func returnBuffer(b *fixedBuffer) {
	b.Reset()
	bufPoolsLock.Lock()
	pool := bufPools[b.Cap()]
	bufPoolsLock.Unlock()
	if pool != nil {
		pool.Put(b)
	}
}

func getBuffer(size int) *fixedBuffer {
	bufPoolsLock.Lock()
	pool, ok := bufPools[size]
	if !ok {
		pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }}
		bufPools[size] = pool
	}
	bufPoolsLock.Unlock()
	return pool.Get().(*fixedBuffer)
}
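// Illustrative usage sketch (not part of the vendored file): BytesPipe as an
// in-memory pipe between goroutines. Unlike io.Pipe, writes don't block until
// roughly blockThreshold (~1MB) is buffered, and the internal slices are
// recycled through sync.Pool.
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	bp := ioutils.NewBytesPipe()

	go func() {
		bp.Write([]byte("hello "))
		bp.Write([]byte("world"))
		bp.Close() // readers drain remaining data, then get io.EOF
	}()

	out, err := ioutil.ReadAll(bp) // ReadAll treats io.EOF as a clean end
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // "hello world"
}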
9,399