index
int64
0
0
repo_id
stringlengths
21
232
file_path
stringlengths
34
259
content
stringlengths
1
14.1M
__index_level_0__
int64
0
10k
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/port.go
package types // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command // Port An open port on a container // swagger:model Port type Port struct { // Host IP address that the container's port is mapped to IP string `json:"IP,omitempty"` // Port on the container // Required: true PrivatePort uint16 `json:"PrivatePort"` // Port exposed on the host PublicPort uint16 `json:"PublicPort,omitempty"` // type // Required: true Type string `json:"Type"` }
9,500
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/plugin.go
package types

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command

// Plugin A plugin for the Engine API
// swagger:model Plugin
type Plugin struct {

	// config
	// Required: true
	Config PluginConfig `json:"Config"`

	// True if the plugin is running. False if the plugin is not running, only installed.
	// Required: true
	Enabled bool `json:"Enabled"`

	// Id
	ID string `json:"Id,omitempty"`

	// name
	// Required: true
	Name string `json:"Name"`

	// plugin remote reference used to push/pull the plugin
	PluginReference string `json:"PluginReference,omitempty"`

	// settings
	// Required: true
	Settings PluginSettings `json:"Settings"`
}

// PluginConfig The config of a plugin.
// swagger:model PluginConfig
type PluginConfig struct {

	// args
	// Required: true
	Args PluginConfigArgs `json:"Args"`

	// description
	// Required: true
	Description string `json:"Description"`

	// Docker Version used to create the plugin
	DockerVersion string `json:"DockerVersion,omitempty"`

	// documentation
	// Required: true
	Documentation string `json:"Documentation"`

	// entrypoint
	// Required: true
	Entrypoint []string `json:"Entrypoint"`

	// env
	// Required: true
	Env []PluginEnv `json:"Env"`

	// interface
	// Required: true
	Interface PluginConfigInterface `json:"Interface"`

	// ipc host
	// Required: true
	IpcHost bool `json:"IpcHost"`

	// linux
	// Required: true
	Linux PluginConfigLinux `json:"Linux"`

	// mounts
	// Required: true
	Mounts []PluginMount `json:"Mounts"`

	// network
	// Required: true
	Network PluginConfigNetwork `json:"Network"`

	// pid host
	// Required: true
	PidHost bool `json:"PidHost"`

	// propagated mount
	// Required: true
	PropagatedMount string `json:"PropagatedMount"`

	// user
	User PluginConfigUser `json:"User,omitempty"`

	// work dir
	// Required: true
	WorkDir string `json:"WorkDir"`

	// rootfs
	Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"`
}

// PluginConfigArgs plugin config args
// swagger:model PluginConfigArgs
type PluginConfigArgs struct {

	// description
	// Required: true
	Description string `json:"Description"`

	// name
	// Required: true
	Name string `json:"Name"`

	// settable
	// Required: true
	Settable []string `json:"Settable"`

	// value
	// Required: true
	Value []string `json:"Value"`
}

// PluginConfigInterface The interface between Docker and the plugin
// swagger:model PluginConfigInterface
type PluginConfigInterface struct {

	// Protocol to use for clients connecting to the plugin.
	ProtocolScheme string `json:"ProtocolScheme,omitempty"`

	// socket
	// Required: true
	Socket string `json:"Socket"`

	// types
	// Required: true
	Types []PluginInterfaceType `json:"Types"`
}

// PluginConfigLinux plugin config linux
// swagger:model PluginConfigLinux
type PluginConfigLinux struct {

	// allow all devices
	// Required: true
	AllowAllDevices bool `json:"AllowAllDevices"`

	// capabilities
	// Required: true
	Capabilities []string `json:"Capabilities"`

	// devices
	// Required: true
	Devices []PluginDevice `json:"Devices"`
}

// PluginConfigNetwork plugin config network
// swagger:model PluginConfigNetwork
type PluginConfigNetwork struct {

	// type
	// Required: true
	Type string `json:"Type"`
}

// PluginConfigRootfs plugin config rootfs
// swagger:model PluginConfigRootfs
type PluginConfigRootfs struct {

	// diff ids
	DiffIds []string `json:"diff_ids"`

	// type
	Type string `json:"type,omitempty"`
}

// PluginConfigUser plugin config user
// swagger:model PluginConfigUser
type PluginConfigUser struct {

	// g ID
	GID uint32 `json:"GID,omitempty"`

	// UID
	UID uint32 `json:"UID,omitempty"`
}

// PluginSettings Settings that can be modified by users.
// swagger:model PluginSettings
type PluginSettings struct {

	// args
	// Required: true
	Args []string `json:"Args"`

	// devices
	// Required: true
	Devices []PluginDevice `json:"Devices"`

	// env
	// Required: true
	Env []string `json:"Env"`

	// mounts
	// Required: true
	Mounts []PluginMount `json:"Mounts"`
}
9,501
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/auth.go
package types // import "github.com/docker/docker/api/types" // AuthConfig contains authorization information for connecting to a Registry type AuthConfig struct { Username string `json:"username,omitempty"` Password string `json:"password,omitempty"` Auth string `json:"auth,omitempty"` // Email is an optional value associated with the username. // This field is deprecated and will be removed in a later // version of docker. Email string `json:"email,omitempty"` ServerAddress string `json:"serveraddress,omitempty"` // IdentityToken is used to authenticate the user and get // an access token for the registry. IdentityToken string `json:"identitytoken,omitempty"` // RegistryToken is a bearer token to be sent to a registry RegistryToken string `json:"registrytoken,omitempty"` }
9,502
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/configs.go
package types // import "github.com/docker/docker/api/types" import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/network" ) // configs holds structs used for internal communication between the // frontend (such as an http server) and the backend (such as the // docker daemon). // ContainerCreateConfig is the parameter set to ContainerCreate() type ContainerCreateConfig struct { Name string Config *container.Config HostConfig *container.HostConfig NetworkingConfig *network.NetworkingConfig AdjustCPUShares bool } // ContainerRmConfig holds arguments for the container remove // operation. This struct is used to tell the backend what operations // to perform. type ContainerRmConfig struct { ForceRemove, RemoveVolume, RemoveLink bool } // ExecConfig is a small subset of the Config struct that holds the configuration // for the exec feature of docker. type ExecConfig struct { User string // User that will run the command Privileged bool // Is the container in privileged mode Tty bool // Attach standard streams to a tty. AttachStdin bool // Attach the standard input, makes possible user interaction AttachStderr bool // Attach the standard error AttachStdout bool // Attach the standard output Detach bool // Execute in detach mode DetachKeys string // Escape keys for detach Env []string // Environment variables WorkingDir string // Working directory Cmd []string // Execution commands and args } // PluginRmConfig holds arguments for plugin remove. type PluginRmConfig struct { ForceRemove bool } // PluginEnableConfig holds arguments for plugin enable type PluginEnableConfig struct { Timeout int } // PluginDisableConfig holds arguments for plugin disable. 
type PluginDisableConfig struct { ForceDisable bool } // NetworkListConfig stores the options available for listing networks type NetworkListConfig struct { // TODO(@cpuguy83): naming is hard, this is pulled from what was being used in the router before moving here Detailed bool Verbose bool }
9,503
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/types.go
package types // import "github.com/docker/docker/api/types"

import (
	"errors"
	"fmt"
	"io"
	"os"
	"strings"
	"time"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/mount"
	"github.com/docker/docker/api/types/network"
	"github.com/docker/docker/api/types/registry"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/go-connections/nat"
)

// RootFS returns Image's RootFS description including the layer IDs.
type RootFS struct {
	Type      string
	Layers    []string `json:",omitempty"`
	BaseLayer string   `json:",omitempty"`
}

// ImageInspect contains response of Engine API:
// GET "/images/{name:.*}/json"
type ImageInspect struct {
	ID              string `json:"Id"`
	RepoTags        []string
	RepoDigests     []string
	Parent          string
	Comment         string
	Created         string
	Container       string
	ContainerConfig *container.Config
	DockerVersion   string
	Author          string
	Config          *container.Config
	Architecture    string
	Os              string
	OsVersion       string `json:",omitempty"`
	Size            int64
	VirtualSize     int64
	GraphDriver     GraphDriverData
	RootFS          RootFS
	Metadata        ImageMetadata
}

// ImageMetadata contains engine-local data about the image
type ImageMetadata struct {
	LastTagTime time.Time `json:",omitempty"`
}

// Container contains response of Engine API:
// GET "/containers/json"
type Container struct {
	ID         string `json:"Id"`
	Names      []string
	Image      string
	ImageID    string
	Command    string
	Created    int64
	Ports      []Port
	SizeRw     int64 `json:",omitempty"`
	SizeRootFs int64 `json:",omitempty"`
	Labels     map[string]string
	State      string
	Status     string
	HostConfig struct {
		NetworkMode string `json:",omitempty"`
	}
	NetworkSettings *SummaryNetworkSettings
	Mounts          []MountPoint
}

// CopyConfig contains request body of Engine API:
// POST "/containers/"+containerID+"/copy"
type CopyConfig struct {
	Resource string
}

// ContainerPathStat is used to encode the header from
// GET "/containers/{name:.*}/archive"
// "Name" is the file or directory name.
type ContainerPathStat struct {
	Name       string      `json:"name"`
	Size       int64       `json:"size"`
	Mode       os.FileMode `json:"mode"`
	Mtime      time.Time   `json:"mtime"`
	LinkTarget string      `json:"linkTarget"`
}

// ContainerStats contains response of Engine API:
// GET "/stats"
type ContainerStats struct {
	Body   io.ReadCloser `json:"body"`
	OSType string        `json:"ostype"`
}

// Ping contains response of Engine API:
// GET "/_ping"
type Ping struct {
	APIVersion     string
	OSType         string
	Experimental   bool
	BuilderVersion BuilderVersion
}

// ComponentVersion describes the version information for a specific component.
type ComponentVersion struct {
	Name    string
	Version string
	Details map[string]string `json:",omitempty"`
}

// Version contains response of Engine API:
// GET "/version"
type Version struct {
	Platform   struct{ Name string } `json:",omitempty"`
	Components []ComponentVersion    `json:",omitempty"`

	// The following fields are deprecated, they relate to the Engine component and are kept for backwards compatibility

	Version       string
	APIVersion    string `json:"ApiVersion"`
	MinAPIVersion string `json:"MinAPIVersion,omitempty"`
	GitCommit     string
	GoVersion     string
	Os            string
	Arch          string
	KernelVersion string `json:",omitempty"`
	Experimental  bool   `json:",omitempty"`
	BuildTime     string `json:",omitempty"`
}

// Commit holds the Git-commit (SHA1) that a binary was built from, as reported
// in the version-string of external tools, such as containerd, or runC.
type Commit struct {
	ID       string // ID is the actual commit ID of external tool.
	Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time.
}

// Info contains response of Engine API:
// GET "/info"
type Info struct {
	ID                 string
	Containers         int
	ContainersRunning  int
	ContainersPaused   int
	ContainersStopped  int
	Images             int
	Driver             string
	DriverStatus       [][2]string
	SystemStatus       [][2]string
	Plugins            PluginsInfo
	MemoryLimit        bool
	SwapLimit          bool
	KernelMemory       bool
	KernelMemoryTCP    bool
	CPUCfsPeriod       bool `json:"CpuCfsPeriod"`
	CPUCfsQuota        bool `json:"CpuCfsQuota"`
	CPUShares          bool
	CPUSet             bool
	PidsLimit          bool
	IPv4Forwarding     bool
	BridgeNfIptables   bool
	BridgeNfIP6tables  bool `json:"BridgeNfIp6tables"`
	Debug              bool
	NFd                int
	OomKillDisable     bool
	NGoroutines        int
	SystemTime         string
	LoggingDriver      string
	CgroupDriver       string
	NEventsListener    int
	KernelVersion      string
	OperatingSystem    string
	OSVersion          string
	OSType             string
	Architecture       string
	IndexServerAddress string
	RegistryConfig     *registry.ServiceConfig
	NCPU               int
	MemTotal           int64
	GenericResources   []swarm.GenericResource
	DockerRootDir      string
	HTTPProxy          string `json:"HttpProxy"`
	HTTPSProxy         string `json:"HttpsProxy"`
	NoProxy            string
	Name               string
	Labels             []string
	ExperimentalBuild  bool
	ServerVersion      string
	ClusterStore       string
	ClusterAdvertise   string
	Runtimes           map[string]Runtime
	DefaultRuntime     string
	Swarm              swarm.Info
	// LiveRestoreEnabled determines whether containers should be kept
	// running when the daemon is shutdown or upon daemon start if
	// running containers are detected
	LiveRestoreEnabled bool
	Isolation          container.Isolation
	InitBinary         string
	ContainerdCommit   Commit
	RuncCommit         Commit
	InitCommit         Commit
	SecurityOptions    []string
	ProductLicense     string `json:",omitempty"`
	Warnings           []string
}

// KeyValue holds a key/value pair
type KeyValue struct {
	Key, Value string
}

// SecurityOpt contains the name and options of a security option
type SecurityOpt struct {
	Name    string
	Options []KeyValue
}

// DecodeSecurityOptions decodes a security options string slice to a type safe
// SecurityOpt
func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) {
	so := []SecurityOpt{}
	for _, opt := range opts {
		// support output from a < 1.13 docker daemon
		if !strings.Contains(opt, "=") {
			so = append(so, SecurityOpt{Name: opt})
			continue
		}
		secopt := SecurityOpt{}
		split := strings.Split(opt, ",")
		for _, s := range split {
			kv := strings.SplitN(s, "=", 2)
			if len(kv) != 2 {
				return nil, fmt.Errorf("invalid security option %q", s)
			}
			if kv[0] == "" || kv[1] == "" {
				return nil, errors.New("invalid empty security option")
			}
			if kv[0] == "name" {
				secopt.Name = kv[1]
				continue
			}
			secopt.Options = append(secopt.Options, KeyValue{Key: kv[0], Value: kv[1]})
		}
		so = append(so, secopt)
	}
	return so, nil
}

// PluginsInfo is a temp struct holding Plugins name
// registered with docker daemon. It is used by Info struct
type PluginsInfo struct {
	// List of Volume plugins registered
	Volume []string
	// List of Network plugins registered
	Network []string
	// List of Authorization plugins registered
	Authorization []string
	// List of Log plugins registered
	Log []string
}

// ExecStartCheck is a temp struct used by execStart
// Config fields is part of ExecConfig in runconfig package
type ExecStartCheck struct {
	// ExecStart will first check if it's detached
	Detach bool
	// Check if there's a tty
	Tty bool
}

// HealthcheckResult stores information about a single run of a healthcheck probe
type HealthcheckResult struct {
	Start    time.Time // Start is the time this check started
	End      time.Time // End is the time this check ended
	ExitCode int       // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe
	Output   string    // Output from last check
}

// Health states
const (
	NoHealthcheck = "none"      // Indicates there is no healthcheck
	Starting      = "starting"  // Starting indicates that the container is not yet ready
	Healthy       = "healthy"   // Healthy indicates that the container is running correctly
	Unhealthy     = "unhealthy" // Unhealthy indicates that the container has a problem
)

// Health stores information about the container's healthcheck results
type Health struct {
	Status        string               // Status is one of Starting, Healthy or Unhealthy
	FailingStreak int                  // FailingStreak is the number of consecutive failures
	Log           []*HealthcheckResult // Log contains the last few results (oldest first)
}

// ContainerState stores container's running state
// it's part of ContainerJSONBase and will return by "inspect" command
type ContainerState struct {
	Status     string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead"
	Running    bool
	Paused     bool
	Restarting bool
	OOMKilled  bool
	Dead       bool
	Pid        int
	ExitCode   int
	Error      string
	StartedAt  string
	FinishedAt string
	Health     *Health `json:",omitempty"`
}

// ContainerNode stores information about the node that a container
// is running on. It's only available in Docker Swarm
type ContainerNode struct {
	ID        string
	IPAddress string `json:"IP"`
	Addr      string
	Name      string
	Cpus      int
	Memory    int64
	Labels    map[string]string
}

// ContainerJSONBase contains response of Engine API:
// GET "/containers/{name:.*}/json"
type ContainerJSONBase struct {
	ID              string `json:"Id"`
	Created         string
	Path            string
	Args            []string
	State           *ContainerState
	Image           string
	ResolvConfPath  string
	HostnamePath    string
	HostsPath       string
	LogPath         string
	Node            *ContainerNode `json:",omitempty"`
	Name            string
	RestartCount    int
	Driver          string
	Platform        string
	MountLabel      string
	ProcessLabel    string
	AppArmorProfile string
	ExecIDs         []string
	HostConfig      *container.HostConfig
	GraphDriver     GraphDriverData
	SizeRw          *int64 `json:",omitempty"`
	SizeRootFs      *int64 `json:",omitempty"`
}

// ContainerJSON is newly used struct along with MountPoint
type ContainerJSON struct {
	*ContainerJSONBase
	Mounts          []MountPoint
	Config          *container.Config
	NetworkSettings *NetworkSettings
}

// NetworkSettings exposes the network settings in the api
type NetworkSettings struct {
	NetworkSettingsBase
	DefaultNetworkSettings
	Networks map[string]*network.EndpointSettings
}

// SummaryNetworkSettings provides a summary of container's networks
// in /containers/json
type SummaryNetworkSettings struct {
	Networks map[string]*network.EndpointSettings
}

// NetworkSettingsBase holds basic information about networks
type NetworkSettingsBase struct {
	Bridge                 string      // Bridge is the Bridge name the network uses(e.g. `docker0`)
	SandboxID              string      // SandboxID uniquely represents a container's network stack
	HairpinMode            bool        // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface
	LinkLocalIPv6Address   string      // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix
	LinkLocalIPv6PrefixLen int         // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address
	Ports                  nat.PortMap // Ports is a collection of PortBinding indexed by Port
	SandboxKey             string      // SandboxKey identifies the sandbox
	SecondaryIPAddresses   []network.Address
	SecondaryIPv6Addresses []network.Address
}

// DefaultNetworkSettings holds network information
// during the 2 release deprecation period.
// It will be removed in Docker 1.11.
type DefaultNetworkSettings struct {
	EndpointID          string // EndpointID uniquely represents a service endpoint in a Sandbox
	Gateway             string // Gateway holds the gateway address for the network
	GlobalIPv6Address   string // GlobalIPv6Address holds network's global IPv6 address
	GlobalIPv6PrefixLen int    // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address
	IPAddress           string // IPAddress holds the IPv4 address for the network
	IPPrefixLen         int    // IPPrefixLen represents mask length of network's IPv4 address
	IPv6Gateway         string // IPv6Gateway holds gateway address specific for IPv6
	MacAddress          string // MacAddress holds the MAC address for the network
}

// MountPoint represents a mount point configuration inside the container.
// This is used for reporting the mountpoints in use by a container.
type MountPoint struct {
	Type        mount.Type `json:",omitempty"`
	Name        string     `json:",omitempty"`
	Source      string
	Destination string
	Driver      string `json:",omitempty"`
	Mode        string
	RW          bool
	Propagation mount.Propagation
}

// NetworkResource is the body of the "get network" http response message
type NetworkResource struct {
	Name       string                  // Name is the requested name of the network
	ID         string                  `json:"Id"` // ID uniquely identifies a network on a single machine
	Created    time.Time               // Created is the time the network created
	Scope      string                  // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level)
	Driver     string                  // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`)
	EnableIPv6 bool                    // EnableIPv6 represents whether to enable IPv6
	IPAM       network.IPAM            // IPAM is the network's IP Address Management
	Internal   bool                    // Internal represents if the network is used internal only
	Attachable bool                    // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
	Ingress    bool                    // Ingress indicates the network is providing the routing-mesh for the swarm cluster.
	ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network.
	ConfigOnly bool                    // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services.
	Containers map[string]EndpointResource    // Containers contains endpoints belonging to the network
	Options    map[string]string              // Options holds the network specific options to use for when creating the network
	Labels     map[string]string              // Labels holds metadata specific to the network being created
	Peers      []network.PeerInfo             `json:",omitempty"` // List of peer nodes for an overlay network
	Services   map[string]network.ServiceInfo `json:",omitempty"`
}

// EndpointResource contains network resources allocated and used for a container in a network
type EndpointResource struct {
	Name        string
	EndpointID  string
	MacAddress  string
	IPv4Address string
	IPv6Address string
}

// NetworkCreate is the expected body of the "create network" http request message
type NetworkCreate struct {
	// Check for networks with duplicate names.
	// Network is primarily keyed based on a random ID and not on the name.
	// Network name is strictly a user-friendly alias to the network
	// which is uniquely identified using ID.
	// And there is no guaranteed way to check for duplicates.
	// Option CheckDuplicate is there to provide a best effort checking of any networks
	// which has the same name but it is not guaranteed to catch all name collisions.
	CheckDuplicate bool
	Driver         string
	Scope          string
	EnableIPv6     bool
	IPAM           *network.IPAM
	Internal       bool
	Attachable     bool
	Ingress        bool
	ConfigOnly     bool
	ConfigFrom     *network.ConfigReference
	Options        map[string]string
	Labels         map[string]string
}

// NetworkCreateRequest is the request message sent to the server for network create call.
type NetworkCreateRequest struct {
	NetworkCreate
	Name string
}

// NetworkCreateResponse is the response message sent by the server for network create call
type NetworkCreateResponse struct {
	ID      string `json:"Id"`
	Warning string
}

// NetworkConnect represents the data to be used to connect a container to the network
type NetworkConnect struct {
	Container      string
	EndpointConfig *network.EndpointSettings `json:",omitempty"`
}

// NetworkDisconnect represents the data to be used to disconnect a container from the network
type NetworkDisconnect struct {
	Container string
	Force     bool
}

// NetworkInspectOptions holds parameters to inspect network
type NetworkInspectOptions struct {
	Scope   string
	Verbose bool
}

// Checkpoint represents the details of a checkpoint
type Checkpoint struct {
	Name string // Name is the name of the checkpoint
}

// Runtime describes an OCI runtime
type Runtime struct {
	Path string   `json:"path"`
	Args []string `json:"runtimeArgs,omitempty"`
}

// DiskUsage contains response of Engine API:
// GET "/system/df"
type DiskUsage struct {
	LayersSize  int64
	Images      []*ImageSummary
	Containers  []*Container
	Volumes     []*Volume
	BuildCache  []*BuildCache
	BuilderSize int64 // deprecated
}

// ContainersPruneReport contains the response for Engine API:
// POST "/containers/prune"
type ContainersPruneReport struct {
	ContainersDeleted []string
	SpaceReclaimed    uint64
}

// VolumesPruneReport contains the response for Engine API:
// POST "/volumes/prune"
type VolumesPruneReport struct {
	VolumesDeleted []string
	SpaceReclaimed uint64
}

// ImagesPruneReport contains the response for Engine API:
// POST "/images/prune"
type ImagesPruneReport struct {
	ImagesDeleted  []ImageDeleteResponseItem
	SpaceReclaimed uint64
}

// BuildCachePruneReport contains the response for Engine API:
// POST "/build/prune"
type BuildCachePruneReport struct {
	CachesDeleted  []string
	SpaceReclaimed uint64
}

// NetworksPruneReport contains the response for Engine API:
// POST "/networks/prune"
type NetworksPruneReport struct {
	NetworksDeleted []string
}

// SecretCreateResponse contains the information returned to a client
// on the creation of a new secret.
type SecretCreateResponse struct {
	// ID is the id of the created secret.
	ID string
}

// SecretListOptions holds parameters to list secrets
type SecretListOptions struct {
	Filters filters.Args
}

// ConfigCreateResponse contains the information returned to a client
// on the creation of a new config.
type ConfigCreateResponse struct {
	// ID is the id of the created config.
	ID string
}

// ConfigListOptions holds parameters to list configs
type ConfigListOptions struct {
	Filters filters.Args
}

// PushResult contains the tag, manifest digest, and manifest size from the
// push. It's used to signal this information to the trust code in the client
// so it can sign the manifest if necessary.
type PushResult struct {
	Tag    string
	Digest string
	Size   int
}

// BuildResult contains the image id of a successful build
type BuildResult struct {
	ID string
}

// BuildCache contains information about a build cache record
type BuildCache struct {
	ID          string
	Parent      string
	Type        string
	Description string
	InUse       bool
	Shared      bool
	Size        int64
	CreatedAt   time.Time
	LastUsedAt  *time.Time
	UsageCount  int
}

// BuildCachePruneOptions hold parameters to prune the build cache
type BuildCachePruneOptions struct {
	All         bool
	KeepStorage int64
	Filters     filters.Args
}
9,504
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/id_response.go
package types // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command // IDResponse Response to an API call that returns just an Id // swagger:model IdResponse type IDResponse struct { // The id of the newly created object. // Required: true ID string `json:"Id"` }
9,505
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/graph_driver_data.go
package types // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command // GraphDriverData Information about a container's graph driver. // swagger:model GraphDriverData type GraphDriverData struct { // data // Required: true Data map[string]string `json:"Data"` // name // Required: true Name string `json:"Name"` }
9,506
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/image_delete_response_item.go
package types // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command // ImageDeleteResponseItem image delete response item // swagger:model ImageDeleteResponseItem type ImageDeleteResponseItem struct { // The image ID of an image that was deleted Deleted string `json:"Deleted,omitempty"` // The image ID of an image that was untagged Untagged string `json:"Untagged,omitempty"` }
9,507
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/error_response.go
package types // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command // ErrorResponse Represents an error. // swagger:model ErrorResponse type ErrorResponse struct { // The error message. // Required: true Message string `json:"message"` }
9,508
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/plugin_responses.go
package types // import "github.com/docker/docker/api/types"

import (
	"encoding/json"
	"fmt"
	"sort"
)

// PluginsListResponse contains the response for the Engine API
type PluginsListResponse []*Plugin

// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType.
// The wire form is a quoted string shaped like "prefix.capability/version";
// the "/version" suffix may be absent, in which case Version is left unset.
func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error {
	// Anything that is not a quoted JSON string is rejected outright.
	if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' {
		return fmt.Errorf("%q is not a plugin interface type", p)
	}
	raw := p[1 : len(p)-1]

	dot := 0          // index of the last '.' seen before any '/'
	slash := len(raw) // index of the first '/', or len(raw) when absent
scan:
	for i, c := range raw {
		switch c {
		case '.':
			dot = i
		case '/':
			slash = i
			break scan
		}
	}

	// NOTE(review): when no '.' is present, dot stays 0 and the first
	// character of the capability is skipped — preserved here as-is since
	// callers always use a dotted prefix (e.g. "docker.volumedriver/1.0").
	t.Prefix = string(raw[:dot])
	t.Capability = string(raw[dot+1 : slash])
	if slash < len(raw) {
		t.Version = string(raw[slash+1:])
	}
	return nil
}

// MarshalJSON implements json.Marshaler for PluginInterfaceType
func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) {
	return json.Marshal(t.String())
}

// String implements fmt.Stringer for PluginInterfaceType
func (t PluginInterfaceType) String() string {
	return t.Prefix + "." + t.Capability + "/" + t.Version
}

// PluginPrivilege describes a permission the user has to accept
// upon installing a plugin.
type PluginPrivilege struct {
	Name        string
	Description string
	Value       []string
}

// PluginPrivileges is a list of PluginPrivilege
type PluginPrivileges []PluginPrivilege

// Len reports the number of privileges (sort.Interface).
func (s PluginPrivileges) Len() int {
	return len(s)
}

// Less orders privileges by Name (sort.Interface).
func (s PluginPrivileges) Less(i, j int) bool {
	return s[i].Name < s[j].Name
}

// Swap exchanges two entries, normalizing each entry's Value slice into
// sorted order along the way (sort.Interface).
func (s PluginPrivileges) Swap(i, j int) {
	sort.Strings(s[i].Value)
	sort.Strings(s[j].Value)
	s[i], s[j] = s[j], s[i]
}
9,509
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/error_response_ext.go
package types // Error returns the error message func (e ErrorResponse) Error() string { return e.Message }
9,510
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/service_update_response.go
package types // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command // ServiceUpdateResponse service update response // swagger:model ServiceUpdateResponse type ServiceUpdateResponse struct { // Optional warning messages Warnings []string `json:"Warnings"` }
9,511
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/plugin_env.go
package types // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command // PluginEnv plugin env // swagger:model PluginEnv type PluginEnv struct { // description // Required: true Description string `json:"Description"` // name // Required: true Name string `json:"Name"` // settable // Required: true Settable []string `json:"Settable"` // value // Required: true Value *string `json:"Value"` }
9,512
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/volume.go
package types // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command // Volume volume // swagger:model Volume type Volume struct { // Date/Time the volume was created. CreatedAt string `json:"CreatedAt,omitempty"` // Name of the volume driver used by the volume. // Required: true Driver string `json:"Driver"` // User-defined key/value metadata. // Required: true Labels map[string]string `json:"Labels"` // Mount path of the volume on the host. // Required: true Mountpoint string `json:"Mountpoint"` // Name of the volume. // Required: true Name string `json:"Name"` // The driver specific options used when creating the volume. // Required: true Options map[string]string `json:"Options"` // The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level. // Required: true Scope string `json:"Scope"` // Low-level details about the volume, provided by the volume driver. // Details are returned as a map with key/value pairs: // `{"key":"value","key2":"value2"}`. // // The `Status` field is optional, and is omitted if the volume driver // does not support this feature. // Status map[string]interface{} `json:"Status,omitempty"` // usage data UsageData *VolumeUsageData `json:"UsageData,omitempty"` } // VolumeUsageData Usage details about the volume. This information is used by the // `GET /system/df` endpoint, and omitted in other endpoints. // // swagger:model VolumeUsageData type VolumeUsageData struct { // The number of containers referencing this volume. This field // is set to `-1` if the reference-count is not available. // // Required: true RefCount int64 `json:"RefCount"` // Amount of disk space used by the volume (in bytes). This information // is only available for volumes created with the `"local"` volume // driver. 
For volumes created with other volume drivers, this field // is set to `-1` ("not available") // // Required: true Size int64 `json:"Size"` }
9,513
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/plugin_interface_type.go
package types // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command // PluginInterfaceType plugin interface type // swagger:model PluginInterfaceType type PluginInterfaceType struct { // capability // Required: true Capability string `json:"Capability"` // prefix // Required: true Prefix string `json:"Prefix"` // version // Required: true Version string `json:"Version"` }
9,514
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/stats.go
// Package types is used for API stability in the types and response to the // consumers of the API stats endpoint. package types // import "github.com/docker/docker/api/types" import "time" // ThrottlingData stores CPU throttling stats of one running container. // Not used on Windows. type ThrottlingData struct { // Number of periods with throttling active Periods uint64 `json:"periods"` // Number of periods when the container hits its throttling limit. ThrottledPeriods uint64 `json:"throttled_periods"` // Aggregate time the container was throttled for in nanoseconds. ThrottledTime uint64 `json:"throttled_time"` } // CPUUsage stores All CPU stats aggregated since container inception. type CPUUsage struct { // Total CPU time consumed. // Units: nanoseconds (Linux) // Units: 100's of nanoseconds (Windows) TotalUsage uint64 `json:"total_usage"` // Total CPU time consumed per core (Linux). Not used on Windows. // Units: nanoseconds. PercpuUsage []uint64 `json:"percpu_usage,omitempty"` // Time spent by tasks of the cgroup in kernel mode (Linux). // Time spent by all container processes in kernel mode (Windows). // Units: nanoseconds (Linux). // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers. UsageInKernelmode uint64 `json:"usage_in_kernelmode"` // Time spent by tasks of the cgroup in user mode (Linux). // Time spent by all container processes in user mode (Windows). // Units: nanoseconds (Linux). // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers UsageInUsermode uint64 `json:"usage_in_usermode"` } // CPUStats aggregates and wraps all CPU related info of container type CPUStats struct { // CPU Usage. Linux and Windows. CPUUsage CPUUsage `json:"cpu_usage"` // System Usage. Linux only. SystemUsage uint64 `json:"system_cpu_usage,omitempty"` // Online CPUs. Linux only. OnlineCPUs uint32 `json:"online_cpus,omitempty"` // Throttling Data. Linux only. 
ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` } // MemoryStats aggregates all memory stats since container inception on Linux. // Windows returns stats for commit and private working set only. type MemoryStats struct { // Linux Memory Stats // current res_counter usage for memory Usage uint64 `json:"usage,omitempty"` // maximum usage ever recorded. MaxUsage uint64 `json:"max_usage,omitempty"` // TODO(vishh): Export these as stronger types. // all the stats exported via memory.stat. Stats map[string]uint64 `json:"stats,omitempty"` // number of times memory usage hits limits. Failcnt uint64 `json:"failcnt,omitempty"` Limit uint64 `json:"limit,omitempty"` // Windows Memory Stats // See https://technet.microsoft.com/en-us/magazine/ff382715.aspx // committed bytes Commit uint64 `json:"commitbytes,omitempty"` // peak committed bytes CommitPeak uint64 `json:"commitpeakbytes,omitempty"` // private working set PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"` } // BlkioStatEntry is one small entity to store a piece of Blkio stats // Not used on Windows. type BlkioStatEntry struct { Major uint64 `json:"major"` Minor uint64 `json:"minor"` Op string `json:"op"` Value uint64 `json:"value"` } // BlkioStats stores All IO service stats for data read and write. // This is a Linux specific structure as the differences between expressing // block I/O on Windows and Linux are sufficiently significant to make // little sense attempting to morph into a combined structure. 
type BlkioStats struct { // number of bytes transferred to and from the block device IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` } // StorageStats is the disk I/O stats for read/write on Windows. type StorageStats struct { ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"` ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"` WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"` WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"` } // NetworkStats aggregates the network stats of one container type NetworkStats struct { // Bytes received. Windows and Linux. RxBytes uint64 `json:"rx_bytes"` // Packets received. Windows and Linux. RxPackets uint64 `json:"rx_packets"` // Received errors. Not used on Windows. Note that we don't `omitempty` this // field as it is expected in the >=v1.21 API stats structure. RxErrors uint64 `json:"rx_errors"` // Incoming packets dropped. Windows and Linux. RxDropped uint64 `json:"rx_dropped"` // Bytes sent. Windows and Linux. TxBytes uint64 `json:"tx_bytes"` // Packets sent. Windows and Linux. TxPackets uint64 `json:"tx_packets"` // Sent errors. Not used on Windows. Note that we don't `omitempty` this // field as it is expected in the >=v1.21 API stats structure. TxErrors uint64 `json:"tx_errors"` // Outgoing packets dropped. Windows and Linux. TxDropped uint64 `json:"tx_dropped"` // Endpoint ID. Not used on Linux. EndpointID string `json:"endpoint_id,omitempty"` // Instance ID. Not used on Linux. 
InstanceID string `json:"instance_id,omitempty"` } // PidsStats contains the stats of a container's pids type PidsStats struct { // Current is the number of pids in the cgroup Current uint64 `json:"current,omitempty"` // Limit is the hard limit on the number of pids in the cgroup. // A "Limit" of 0 means that there is no limit. Limit uint64 `json:"limit,omitempty"` } // Stats is Ultimate struct aggregating all types of stats of one container type Stats struct { // Common stats Read time.Time `json:"read"` PreRead time.Time `json:"preread"` // Linux specific stats, not populated on Windows. PidsStats PidsStats `json:"pids_stats,omitempty"` BlkioStats BlkioStats `json:"blkio_stats,omitempty"` // Windows specific stats, not populated on Linux. NumProcs uint32 `json:"num_procs"` StorageStats StorageStats `json:"storage_stats,omitempty"` // Shared stats CPUStats CPUStats `json:"cpu_stats,omitempty"` PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous" MemoryStats MemoryStats `json:"memory_stats,omitempty"` } // StatsJSON is newly used Networks type StatsJSON struct { Stats Name string `json:"name,omitempty"` ID string `json:"id,omitempty"` // Networks request version >=1.21 Networks map[string]NetworkStats `json:"networks,omitempty"` }
9,515
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/image_summary.go
package types // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command // ImageSummary image summary // swagger:model ImageSummary type ImageSummary struct { // containers // Required: true Containers int64 `json:"Containers"` // created // Required: true Created int64 `json:"Created"` // Id // Required: true ID string `json:"Id"` // labels // Required: true Labels map[string]string `json:"Labels"` // parent Id // Required: true ParentID string `json:"ParentId"` // repo digests // Required: true RepoDigests []string `json:"RepoDigests"` // repo tags // Required: true RepoTags []string `json:"RepoTags"` // shared size // Required: true SharedSize int64 `json:"SharedSize"` // size // Required: true Size int64 `json:"Size"` // virtual size // Required: true VirtualSize int64 `json:"VirtualSize"` }
9,516
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/plugin_mount.go
package types // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command // PluginMount plugin mount // swagger:model PluginMount type PluginMount struct { // description // Required: true Description string `json:"Description"` // destination // Required: true Destination string `json:"Destination"` // name // Required: true Name string `json:"Name"` // options // Required: true Options []string `json:"Options"` // settable // Required: true Settable []string `json:"Settable"` // source // Required: true Source *string `json:"Source"` // type // Required: true Type string `json:"Type"` }
9,517
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/filters/parse.go
/*Package filters provides tools for encoding a mapping of keys to a set of multiple values. */ package filters // import "github.com/docker/docker/api/types/filters" import ( "encoding/json" "regexp" "strings" "github.com/docker/docker/api/types/versions" ) // Args stores a mapping of keys to a set of multiple values. type Args struct { fields map[string]map[string]bool } // KeyValuePair are used to initialize a new Args type KeyValuePair struct { Key string Value string } // Arg creates a new KeyValuePair for initializing Args func Arg(key, value string) KeyValuePair { return KeyValuePair{Key: key, Value: value} } // NewArgs returns a new Args populated with the initial args func NewArgs(initialArgs ...KeyValuePair) Args { args := Args{fields: map[string]map[string]bool{}} for _, arg := range initialArgs { args.Add(arg.Key, arg.Value) } return args } // MarshalJSON returns a JSON byte representation of the Args func (args Args) MarshalJSON() ([]byte, error) { if len(args.fields) == 0 { return []byte{}, nil } return json.Marshal(args.fields) } // ToJSON returns the Args as a JSON encoded string func ToJSON(a Args) (string, error) { if a.Len() == 0 { return "", nil } buf, err := json.Marshal(a) return string(buf), err } // ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22 // then the encoded format will use an older legacy format where the values are a // list of strings, instead of a set. 
// // Deprecated: Use ToJSON func ToParamWithVersion(version string, a Args) (string, error) { if a.Len() == 0 { return "", nil } if version != "" && versions.LessThan(version, "1.22") { buf, err := json.Marshal(convertArgsToSlice(a.fields)) return string(buf), err } return ToJSON(a) } // FromJSON decodes a JSON encoded string into Args func FromJSON(p string) (Args, error) { args := NewArgs() if p == "" { return args, nil } raw := []byte(p) err := json.Unmarshal(raw, &args) if err == nil { return args, nil } // Fallback to parsing arguments in the legacy slice format deprecated := map[string][]string{} if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil { return args, err } args.fields = deprecatedArgs(deprecated) return args, nil } // UnmarshalJSON populates the Args from JSON encode bytes func (args Args) UnmarshalJSON(raw []byte) error { if len(raw) == 0 { return nil } return json.Unmarshal(raw, &args.fields) } // Get returns the list of values associated with the key func (args Args) Get(key string) []string { values := args.fields[key] if values == nil { return make([]string, 0) } slice := make([]string, 0, len(values)) for key := range values { slice = append(slice, key) } return slice } // Add a new value to the set of values func (args Args) Add(key, value string) { if _, ok := args.fields[key]; ok { args.fields[key][value] = true } else { args.fields[key] = map[string]bool{value: true} } } // Del removes a value from the set func (args Args) Del(key, value string) { if _, ok := args.fields[key]; ok { delete(args.fields[key], value) if len(args.fields[key]) == 0 { delete(args.fields, key) } } } // Len returns the number of keys in the mapping func (args Args) Len() int { return len(args.fields) } // MatchKVList returns true if all the pairs in sources exist as key=value // pairs in the mapping at key, or if there are no values at key. 
func (args Args) MatchKVList(key string, sources map[string]string) bool { fieldValues := args.fields[key] //do not filter if there is no filter set or cannot determine filter if len(fieldValues) == 0 { return true } if len(sources) == 0 { return false } for value := range fieldValues { testKV := strings.SplitN(value, "=", 2) v, ok := sources[testKV[0]] if !ok { return false } if len(testKV) == 2 && testKV[1] != v { return false } } return true } // Match returns true if any of the values at key match the source string func (args Args) Match(field, source string) bool { if args.ExactMatch(field, source) { return true } fieldValues := args.fields[field] for name2match := range fieldValues { match, err := regexp.MatchString(name2match, source) if err != nil { continue } if match { return true } } return false } // ExactMatch returns true if the source matches exactly one of the values. func (args Args) ExactMatch(key, source string) bool { fieldValues, ok := args.fields[key] //do not filter if there is no filter set or cannot determine filter if !ok || len(fieldValues) == 0 { return true } // try to match full name value to avoid O(N) regular expression matching return fieldValues[source] } // UniqueExactMatch returns true if there is only one value and the source // matches exactly the value. func (args Args) UniqueExactMatch(key, source string) bool { fieldValues := args.fields[key] //do not filter if there is no filter set or cannot determine filter if len(fieldValues) == 0 { return true } if len(args.fields[key]) != 1 { return false } // try to match full name value to avoid O(N) regular expression matching return fieldValues[source] } // FuzzyMatch returns true if the source matches exactly one value, or the // source has one of the values as a prefix. 
func (args Args) FuzzyMatch(key, source string) bool { if args.ExactMatch(key, source) { return true } fieldValues := args.fields[key] for prefix := range fieldValues { if strings.HasPrefix(source, prefix) { return true } } return false } // Contains returns true if the key exists in the mapping func (args Args) Contains(field string) bool { _, ok := args.fields[field] return ok } type invalidFilter string func (e invalidFilter) Error() string { return "Invalid filter '" + string(e) + "'" } func (invalidFilter) InvalidParameter() {} // Validate compared the set of accepted keys against the keys in the mapping. // An error is returned if any mapping keys are not in the accepted set. func (args Args) Validate(accepted map[string]bool) error { for name := range args.fields { if !accepted[name] { return invalidFilter(name) } } return nil } // WalkValues iterates over the list of values for a key in the mapping and calls // op() for each value. If op returns an error the iteration stops and the // error is returned. func (args Args) WalkValues(field string, op func(value string) error) error { if _, ok := args.fields[field]; !ok { return nil } for v := range args.fields[field] { if err := op(v); err != nil { return err } } return nil } // Clone returns a copy of args. 
func (args Args) Clone() (newArgs Args) { newArgs.fields = make(map[string]map[string]bool, len(args.fields)) for k, m := range args.fields { var mm map[string]bool if m != nil { mm = make(map[string]bool, len(m)) for kk, v := range m { mm[kk] = v } } newArgs.fields[k] = mm } return newArgs } func deprecatedArgs(d map[string][]string) map[string]map[string]bool { m := map[string]map[string]bool{} for k, v := range d { values := map[string]bool{} for _, vv := range v { values[vv] = true } m[k] = values } return m } func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { m := map[string][]string{} for k, v := range f { values := []string{} for kk := range v { if v[kk] { values = append(values, kk) } } m[k] = values } return m }
9,518
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/versions/README.md
# Legacy API type versions This package includes types for legacy API versions. The stable version of the API types live in `api/types/*.go`. Consider moving a type here when you need to keep backwards compatibility in the API. This legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`. ## Package name conventions The package name convention is to use `v` as a prefix for the version number and `p`(patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention: 1. We cannot use `.` because it's interpreted by the language, think of `v1.20.CallFunction`. 2. We cannot use `_` because golint complains about it. The code is actually valid, but it looks probably more weird: `v1_20.CallFunction`. For instance, if you want to modify a type that was available in the version `1.21` of the API but it will have different fields in the version `1.22`, you want to create a new package under `api/types/versions/v1p21`.
9,519
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/versions/compare.go
package versions // import "github.com/docker/docker/api/types/versions" import ( "strconv" "strings" ) // compare compares two version strings // returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise. func compare(v1, v2 string) int { var ( currTab = strings.Split(v1, ".") otherTab = strings.Split(v2, ".") ) max := len(currTab) if len(otherTab) > max { max = len(otherTab) } for i := 0; i < max; i++ { var currInt, otherInt int if len(currTab) > i { currInt, _ = strconv.Atoi(currTab[i]) } if len(otherTab) > i { otherInt, _ = strconv.Atoi(otherTab[i]) } if currInt > otherInt { return 1 } if otherInt > currInt { return -1 } } return 0 } // LessThan checks if a version is less than another func LessThan(v, other string) bool { return compare(v, other) == -1 } // LessThanOrEqualTo checks if a version is less than or equal to another func LessThanOrEqualTo(v, other string) bool { return compare(v, other) <= 0 } // GreaterThan checks if a version is greater than another func GreaterThan(v, other string) bool { return compare(v, other) == 1 } // GreaterThanOrEqualTo checks if a version is greater than or equal to another func GreaterThanOrEqualTo(v, other string) bool { return compare(v, other) >= 0 } // Equal checks if a version is equal to another func Equal(v, other string) bool { return compare(v, other) == 0 }
9,520
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/registry/registry.go
package registry // import "github.com/docker/docker/api/types/registry" import ( "encoding/json" "net" "github.com/opencontainers/image-spec/specs-go/v1" ) // ServiceConfig stores daemon registry services configuration. type ServiceConfig struct { AllowNondistributableArtifactsCIDRs []*NetIPNet AllowNondistributableArtifactsHostnames []string InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` Mirrors []string } // NetIPNet is the net.IPNet type, which can be marshalled and // unmarshalled to JSON type NetIPNet net.IPNet // String returns the CIDR notation of ipnet func (ipnet *NetIPNet) String() string { return (*net.IPNet)(ipnet).String() } // MarshalJSON returns the JSON representation of the IPNet func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { return json.Marshal((*net.IPNet)(ipnet).String()) } // UnmarshalJSON sets the IPNet from a byte array of JSON func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { var ipnetStr string if err = json.Unmarshal(b, &ipnetStr); err == nil { var cidr *net.IPNet if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { *ipnet = NetIPNet(*cidr) } } return } // IndexInfo contains information about a registry // // RepositoryInfo Examples: // { // "Index" : { // "Name" : "docker.io", // "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], // "Secure" : true, // "Official" : true, // }, // "RemoteName" : "library/debian", // "LocalName" : "debian", // "CanonicalName" : "docker.io/debian" // "Official" : true, // } // // { // "Index" : { // "Name" : "127.0.0.1:5000", // "Mirrors" : [], // "Secure" : false, // "Official" : false, // }, // "RemoteName" : "user/repo", // "LocalName" : "127.0.0.1:5000/user/repo", // "CanonicalName" : "127.0.0.1:5000/user/repo", // "Official" : false, // } type IndexInfo struct { // Name is the name of the registry, such as "docker.io" Name string // Mirrors is a list of mirrors, expressed 
as URIs Mirrors []string // Secure is set to false if the registry is part of the list of // insecure registries. Insecure registries accept HTTP and/or accept // HTTPS with certificates from unknown CAs. Secure bool // Official indicates whether this is an official registry Official bool } // SearchResult describes a search result returned from a registry type SearchResult struct { // StarCount indicates the number of stars this repository has StarCount int `json:"star_count"` // IsOfficial is true if the result is from an official repository. IsOfficial bool `json:"is_official"` // Name is the name of the repository Name string `json:"name"` // IsAutomated indicates whether the result is automated IsAutomated bool `json:"is_automated"` // Description is a textual description of the repository Description string `json:"description"` } // SearchResults lists a collection search results returned from a registry type SearchResults struct { // Query contains the query string that generated the search results Query string `json:"query"` // NumResults indicates the number of results the query returned NumResults int `json:"num_results"` // Results is a slice containing the actual results for the search Results []SearchResult `json:"results"` } // DistributionInspect describes the result obtained from contacting the // registry to retrieve image metadata type DistributionInspect struct { // Descriptor contains information about the manifest, including // the content addressable digest Descriptor v1.Descriptor // Platforms contains the list of platforms supported by the image, // obtained by parsing the manifest Platforms []v1.Platform }
9,521
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/registry/authenticate.go
package registry // import "github.com/docker/docker/api/types/registry" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE // This file was generated by `swagger generate operation` // // See hack/generate-swagger-api.sh // ---------------------------------------------------------------------------- // AuthenticateOKBody authenticate o k body // swagger:model AuthenticateOKBody type AuthenticateOKBody struct { // An opaque token used to authenticate a user after a successful login // Required: true IdentityToken string `json:"IdentityToken"` // The status of the authentication // Required: true Status string `json:"Status"` }
9,522
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/strslice/strslice.go
package strslice // import "github.com/docker/docker/api/types/strslice" import "encoding/json" // StrSlice represents a string or an array of strings. // We need to override the json decoder to accept both options. type StrSlice []string // UnmarshalJSON decodes the byte slice whether it's a string or an array of // strings. This method is needed to implement json.Unmarshaler. func (e *StrSlice) UnmarshalJSON(b []byte) error { if len(b) == 0 { // With no input, we preserve the existing value by returning nil and // leaving the target alone. This allows defining default values for // the type. return nil } p := make([]string, 0, 1) if err := json.Unmarshal(b, &p); err != nil { var s string if err := json.Unmarshal(b, &s); err != nil { return err } p = append(p, s) } *e = p return nil }
9,523
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/container/config.go
package container // import "github.com/docker/docker/api/types/container" import ( "time" "github.com/docker/docker/api/types/strslice" "github.com/docker/go-connections/nat" ) // MinimumDuration puts a minimum on user configured duration. // This is to prevent API error on time unit. For example, API may // set 3 as healthcheck interval with intention of 3 seconds, but // Docker interprets it as 3 nanoseconds. const MinimumDuration = 1 * time.Millisecond // HealthConfig holds configuration settings for the HEALTHCHECK feature. type HealthConfig struct { // Test is the test to perform to check that the container is healthy. // An empty slice means to inherit the default. // The options are: // {} : inherit healthcheck // {"NONE"} : disable healthcheck // {"CMD", args...} : exec arguments directly // {"CMD-SHELL", command} : run command with system's default shell Test []string `json:",omitempty"` // Zero means to inherit. Durations are expressed as integer nanoseconds. Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. // Retries is the number of consecutive failures needed to consider a container as unhealthy. // Zero means inherit. Retries int `json:",omitempty"` } // Config contains the configuration data about a container. // It should hold only portable information about the container. // Here, "portable" means "independent from the host we are running on". // Non-portable information *should* appear in HostConfig. // All fields added to this struct must be marked `omitempty` to keep getting // predictable hashes from the old `v1Compatibility` configuration. 
type Config struct { Hostname string // Hostname Domainname string // Domainname User string // User that will run the command(s) inside the container, also support user:group AttachStdin bool // Attach the standard input, makes possible user interaction AttachStdout bool // Attach the standard output AttachStderr bool // Attach the standard error ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports Tty bool // Attach standard streams to a tty, including stdin if it is not closed. OpenStdin bool // Open stdin StdinOnce bool // If true, close stdin after the 1 attached client disconnects. Env []string // List of environment variable to set in the container Cmd strslice.StrSlice // Command to run when starting the container Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (meaning treat as a command line) (Windows specific). Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) Volumes map[string]struct{} // List of volumes (mounts) used for the container WorkingDir string // Current directory (PWD) in the command will be launched Entrypoint strslice.StrSlice // Entrypoint to run when starting the container NetworkDisabled bool `json:",omitempty"` // Is network disabled MacAddress string `json:",omitempty"` // Mac Address of the container OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile Labels map[string]string // List of labels set to this container StopSignal string `json:",omitempty"` // Signal to stop a container StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT }
9,524
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/container/container_create.go
package container // import "github.com/docker/docker/api/types/container"

// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------

// ContainerCreateCreatedBody OK response to ContainerCreate operation
// swagger:model ContainerCreateCreatedBody
type ContainerCreateCreatedBody struct {

	// The ID of the created container
	// Required: true
	ID string `json:"Id"`

	// Warnings encountered when creating the container
	// Required: true
	Warnings []string `json:"Warnings"`
}
9,525
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/container/container_top.go
package container // import "github.com/docker/docker/api/types/container"

// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------

// ContainerTopOKBody OK response to ContainerTop operation
// swagger:model ContainerTopOKBody
type ContainerTopOKBody struct {

	// Each process running in the container, where each is process is an array of values corresponding to the titles
	// Required: true
	Processes [][]string `json:"Processes"`

	// The ps column titles
	// Required: true
	Titles []string `json:"Titles"`
}
9,526
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/container/host_config.go
package container // import "github.com/docker/docker/api/types/container" import ( "strings" "github.com/docker/docker/api/types/blkiodev" "github.com/docker/docker/api/types/mount" "github.com/docker/docker/api/types/strslice" "github.com/docker/go-connections/nat" "github.com/docker/go-units" ) // CgroupnsMode represents the cgroup namespace mode of the container type CgroupnsMode string // IsPrivate indicates whether the container uses its own private cgroup namespace func (c CgroupnsMode) IsPrivate() bool { return c == "private" } // IsHost indicates whether the container shares the host's cgroup namespace func (c CgroupnsMode) IsHost() bool { return c == "host" } // IsEmpty indicates whether the container cgroup namespace mode is unset func (c CgroupnsMode) IsEmpty() bool { return c == "" } // Valid indicates whether the cgroup namespace mode is valid func (c CgroupnsMode) Valid() bool { return c.IsEmpty() || c.IsPrivate() || c.IsHost() } // Isolation represents the isolation technology of a container. The supported // values are platform specific type Isolation string // IsDefault indicates the default isolation technology of a container. On Linux this // is the native driver. On Windows, this is a Windows Server Container. 
func (i Isolation) IsDefault() bool { return strings.ToLower(string(i)) == "default" || string(i) == "" } // IsHyperV indicates the use of a Hyper-V partition for isolation func (i Isolation) IsHyperV() bool { return strings.ToLower(string(i)) == "hyperv" } // IsProcess indicates the use of process isolation func (i Isolation) IsProcess() bool { return strings.ToLower(string(i)) == "process" } const ( // IsolationEmpty is unspecified (same behavior as default) IsolationEmpty = Isolation("") // IsolationDefault is the default isolation mode on current daemon IsolationDefault = Isolation("default") // IsolationProcess is process isolation mode IsolationProcess = Isolation("process") // IsolationHyperV is HyperV isolation mode IsolationHyperV = Isolation("hyperv") ) // IpcMode represents the container ipc stack. type IpcMode string // IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared. func (n IpcMode) IsPrivate() bool { return n == "private" } // IsHost indicates whether the container shares the host's ipc namespace. func (n IpcMode) IsHost() bool { return n == "host" } // IsShareable indicates whether the container's ipc namespace can be shared with another container. func (n IpcMode) IsShareable() bool { return n == "shareable" } // IsContainer indicates whether the container uses another container's ipc namespace. func (n IpcMode) IsContainer() bool { parts := strings.SplitN(string(n), ":", 2) return len(parts) > 1 && parts[0] == "container" } // IsNone indicates whether container IpcMode is set to "none". func (n IpcMode) IsNone() bool { return n == "none" } // IsEmpty indicates whether container IpcMode is empty func (n IpcMode) IsEmpty() bool { return n == "" } // Valid indicates whether the ipc mode is valid. 
func (n IpcMode) Valid() bool { return n.IsEmpty() || n.IsNone() || n.IsPrivate() || n.IsHost() || n.IsShareable() || n.IsContainer() } // Container returns the name of the container ipc stack is going to be used. func (n IpcMode) Container() string { parts := strings.SplitN(string(n), ":", 2) if len(parts) > 1 && parts[0] == "container" { return parts[1] } return "" } // NetworkMode represents the container network stack. type NetworkMode string // IsNone indicates whether container isn't using a network stack. func (n NetworkMode) IsNone() bool { return n == "none" } // IsDefault indicates whether container uses the default network stack. func (n NetworkMode) IsDefault() bool { return n == "default" } // IsPrivate indicates whether container uses its private network stack. func (n NetworkMode) IsPrivate() bool { return !(n.IsHost() || n.IsContainer()) } // IsContainer indicates whether container uses a container network stack. func (n NetworkMode) IsContainer() bool { parts := strings.SplitN(string(n), ":", 2) return len(parts) > 1 && parts[0] == "container" } // ConnectedContainer is the id of the container which network this container is connected to. func (n NetworkMode) ConnectedContainer() string { parts := strings.SplitN(string(n), ":", 2) if len(parts) > 1 { return parts[1] } return "" } //UserDefined indicates user-created network func (n NetworkMode) UserDefined() string { if n.IsUserDefined() { return string(n) } return "" } // UsernsMode represents userns mode in the container. type UsernsMode string // IsHost indicates whether the container uses the host's userns. func (n UsernsMode) IsHost() bool { return n == "host" } // IsPrivate indicates whether the container uses the a private userns. func (n UsernsMode) IsPrivate() bool { return !(n.IsHost()) } // Valid indicates whether the userns is valid. 
func (n UsernsMode) Valid() bool { parts := strings.Split(string(n), ":") switch mode := parts[0]; mode { case "", "host": default: return false } return true } // CgroupSpec represents the cgroup to use for the container. type CgroupSpec string // IsContainer indicates whether the container is using another container cgroup func (c CgroupSpec) IsContainer() bool { parts := strings.SplitN(string(c), ":", 2) return len(parts) > 1 && parts[0] == "container" } // Valid indicates whether the cgroup spec is valid. func (c CgroupSpec) Valid() bool { return c.IsContainer() || c == "" } // Container returns the name of the container whose cgroup will be used. func (c CgroupSpec) Container() string { parts := strings.SplitN(string(c), ":", 2) if len(parts) > 1 { return parts[1] } return "" } // UTSMode represents the UTS namespace of the container. type UTSMode string // IsPrivate indicates whether the container uses its private UTS namespace. func (n UTSMode) IsPrivate() bool { return !(n.IsHost()) } // IsHost indicates whether the container uses the host's UTS namespace. func (n UTSMode) IsHost() bool { return n == "host" } // Valid indicates whether the UTS namespace is valid. func (n UTSMode) Valid() bool { parts := strings.Split(string(n), ":") switch mode := parts[0]; mode { case "", "host": default: return false } return true } // PidMode represents the pid namespace of the container. type PidMode string // IsPrivate indicates whether the container uses its own new pid namespace. func (n PidMode) IsPrivate() bool { return !(n.IsHost() || n.IsContainer()) } // IsHost indicates whether the container uses the host's pid namespace. func (n PidMode) IsHost() bool { return n == "host" } // IsContainer indicates whether the container uses a container's pid namespace. func (n PidMode) IsContainer() bool { parts := strings.SplitN(string(n), ":", 2) return len(parts) > 1 && parts[0] == "container" } // Valid indicates whether the pid namespace is valid. 
func (n PidMode) Valid() bool { parts := strings.Split(string(n), ":") switch mode := parts[0]; mode { case "", "host": case "container": if len(parts) != 2 || parts[1] == "" { return false } default: return false } return true } // Container returns the name of the container whose pid namespace is going to be used. func (n PidMode) Container() string { parts := strings.SplitN(string(n), ":", 2) if len(parts) > 1 { return parts[1] } return "" } // DeviceRequest represents a request for devices from a device driver. // Used by GPU device drivers. type DeviceRequest struct { Driver string // Name of device driver Count int // Number of devices to request (-1 = All) DeviceIDs []string // List of device IDs as recognizable by the device driver Capabilities [][]string // An OR list of AND lists of device capabilities (e.g. "gpu") Options map[string]string // Options to pass onto the device driver } // DeviceMapping represents the device mapping between the host and the container. type DeviceMapping struct { PathOnHost string PathInContainer string CgroupPermissions string } // RestartPolicy represents the restart policies of the container. type RestartPolicy struct { Name string MaximumRetryCount int } // IsNone indicates whether the container has the "no" restart policy. // This means the container will not automatically restart when exiting. func (rp *RestartPolicy) IsNone() bool { return rp.Name == "no" || rp.Name == "" } // IsAlways indicates whether the container has the "always" restart policy. // This means the container will automatically restart regardless of the exit status. func (rp *RestartPolicy) IsAlways() bool { return rp.Name == "always" } // IsOnFailure indicates whether the container has the "on-failure" restart policy. // This means the container will automatically restart of exiting with a non-zero exit status. 
func (rp *RestartPolicy) IsOnFailure() bool { return rp.Name == "on-failure" } // IsUnlessStopped indicates whether the container has the // "unless-stopped" restart policy. This means the container will // automatically restart unless user has put it to stopped state. func (rp *RestartPolicy) IsUnlessStopped() bool { return rp.Name == "unless-stopped" } // IsSame compares two RestartPolicy to see if they are the same func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount } // LogMode is a type to define the available modes for logging // These modes affect how logs are handled when log messages start piling up. type LogMode string // Available logging modes const ( LogModeUnset = "" LogModeBlocking LogMode = "blocking" LogModeNonBlock LogMode = "non-blocking" ) // LogConfig represents the logging configuration of the container. type LogConfig struct { Type string Config map[string]string } // Resources contains container's resources (cgroups config, ulimits...) type Resources struct { // Applicable to all platforms CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers) Memory int64 // Memory limit (in bytes) NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10<sup>-9</sup> CPUs. // Applicable to UNIX platforms CgroupParent string // Parent cgroup. BlkioWeight uint16 // Block IO weight (relative weight vs. 
other containers) BlkioWeightDevice []*blkiodev.WeightDevice BlkioDeviceReadBps []*blkiodev.ThrottleDevice BlkioDeviceWriteBps []*blkiodev.ThrottleDevice BlkioDeviceReadIOps []*blkiodev.ThrottleDevice BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime CpusetCpus string // CpusetCpus 0-2, 0,1 CpusetMems string // CpusetMems 0-2, 0,1 Devices []DeviceMapping // List of devices to map inside the container DeviceCgroupRules []string // List of rule to be added to the device cgroup DeviceRequests []DeviceRequest // List of device requests for device drivers KernelMemory int64 // Kernel memory limit (in bytes) KernelMemoryTCP int64 // Hard limit for kernel TCP buffer memory (in bytes) MemoryReservation int64 // Memory soft limit (in bytes) MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap MemorySwappiness *int64 // Tuning container memory swappiness behaviour OomKillDisable *bool // Whether to disable OOM Killer or not PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change. Ulimits []*units.Ulimit // List of ulimits to be set in the container // Applicable to Windows CPUCount int64 `json:"CpuCount"` // CPU count CPUPercent int64 `json:"CpuPercent"` // CPU percent IOMaximumIOps uint64 // Maximum IOps for the container system drive IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive } // UpdateConfig holds the mutable attributes of a Container. // Those attributes can be updated at runtime. 
type UpdateConfig struct { // Contains container's resources (cgroups, ulimits) Resources RestartPolicy RestartPolicy } // HostConfig the non-portable Config structure of a container. // Here, "non-portable" means "dependent of the host we are running on". // Portable information *should* appear in Config. type HostConfig struct { // Applicable to all platforms Binds []string // List of volume bindings for this container ContainerIDFile string // File (path) where the containerId is written LogConfig LogConfig // Configuration of the logs for this container NetworkMode NetworkMode // Network mode to use for the container PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host RestartPolicy RestartPolicy // Restart policy to be used for the container AutoRemove bool // Automatically remove container when it exits VolumeDriver string // Name of the volume driver used to mount volumes VolumesFrom []string // List of volumes to take from other container // Applicable to UNIX platforms CapAdd strslice.StrSlice // List of kernel capabilities to add to the container CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container Capabilities []string `json:"Capabilities"` // List of kernel capabilities to be available for container (this overrides the default set) CgroupnsMode CgroupnsMode // Cgroup namespace mode to use for the container DNS []string `json:"Dns"` // List of DNS server to lookup DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for ExtraHosts []string // List of extra hosts GroupAdd []string // List of additional groups that the container process will run as IpcMode IpcMode // IPC namespace to use for the container Cgroup CgroupSpec // Cgroup to use for the container Links []string // List of links (in the name:alias form) OomScoreAdj int // Container preference for OOM-killing PidMode PidMode // PID namespace 
to use for the container Privileged bool // Is the container in privileged mode PublishAllPorts bool // Should docker publish all exposed port for the container ReadonlyRootfs bool // Is the container root filesystem in read-only SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container. Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container UTSMode UTSMode // UTS namespace to use for the container UsernsMode UsernsMode // The user namespace to use for the container ShmSize int64 // Total shm memory usage Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container Runtime string `json:",omitempty"` // Runtime to use with this container // Applicable to Windows ConsoleSize [2]uint // Initial console size (height,width) Isolation Isolation // Isolation technology of the container (e.g. default, hyperv) // Contains container's resources (cgroups, ulimits) Resources // Mounts specs used by the container Mounts []mount.Mount `json:",omitempty"` // MaskedPaths is the list of paths to be masked inside the container (this overrides the default set of paths) MaskedPaths []string // ReadonlyPaths is the list of paths to be set as read-only inside the container (this overrides the default set of paths) ReadonlyPaths []string // Run a custom init inside the container, if null, use the daemon's configured settings Init *bool `json:",omitempty"` }
9,527
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/container/container_update.go
package container // import "github.com/docker/docker/api/types/container"

// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------

// ContainerUpdateOKBody OK response to ContainerUpdate operation
// swagger:model ContainerUpdateOKBody
type ContainerUpdateOKBody struct {

	// warnings
	// Required: true
	Warnings []string `json:"Warnings"`
}
9,528
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/container/container_changes.go
package container // import "github.com/docker/docker/api/types/container"

// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------

// ContainerChangeResponseItem change item in response to ContainerChanges operation
// swagger:model ContainerChangeResponseItem
type ContainerChangeResponseItem struct {

	// Kind of change
	// Required: true
	Kind uint8 `json:"Kind"`

	// Path to file that has changed
	// Required: true
	Path string `json:"Path"`
}
9,529
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/container/waitcondition.go
package container // import "github.com/docker/docker/api/types/container"

// WaitCondition is a type used to specify a container state for which
// to wait.
type WaitCondition string

// Possible WaitCondition Values.
//
// WaitConditionNotRunning (default) is used to wait for any of the non-running
// states: "created", "exited", "dead", "removing", or "removed".
//
// WaitConditionNextExit is used to wait for the next time the state changes
// to a non-running state. If the state is currently "created" or "exited",
// this would cause Wait() to block until either the container runs and exits
// or is removed.
//
// WaitConditionRemoved is used to wait for the container to be removed.
const (
	WaitConditionNotRunning WaitCondition = "not-running"
	WaitConditionNextExit   WaitCondition = "next-exit"
	WaitConditionRemoved    WaitCondition = "removed"
)
9,530
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go
// +build !windows package container // import "github.com/docker/docker/api/types/container" // IsValid indicates if an isolation technology is valid func (i Isolation) IsValid() bool { return i.IsDefault() } // NetworkName returns the name of the network stack. func (n NetworkMode) NetworkName() string { if n.IsBridge() { return "bridge" } else if n.IsHost() { return "host" } else if n.IsContainer() { return "container" } else if n.IsNone() { return "none" } else if n.IsDefault() { return "default" } else if n.IsUserDefined() { return n.UserDefined() } return "" } // IsBridge indicates whether container uses the bridge network stack func (n NetworkMode) IsBridge() bool { return n == "bridge" } // IsHost indicates whether container uses the host network stack. func (n NetworkMode) IsHost() bool { return n == "host" } // IsUserDefined indicates user-created network func (n NetworkMode) IsUserDefined() bool { return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() }
9,531
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/container/container_wait.go
package container // import "github.com/docker/docker/api/types/container"

// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------

// ContainerWaitOKBodyError container waiting error, if any
// swagger:model ContainerWaitOKBodyError
type ContainerWaitOKBodyError struct {

	// Details of an error
	Message string `json:"Message,omitempty"`
}

// ContainerWaitOKBody OK response to ContainerWait operation
// swagger:model ContainerWaitOKBody
type ContainerWaitOKBody struct {

	// error
	// Required: true
	Error *ContainerWaitOKBodyError `json:"Error"`

	// Exit code of the container
	// Required: true
	StatusCode int64 `json:"StatusCode"`
}
9,532
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go
package container // import "github.com/docker/docker/api/types/container" // IsBridge indicates whether container uses the bridge network stack // in windows it is given the name NAT func (n NetworkMode) IsBridge() bool { return n == "nat" } // IsHost indicates whether container uses the host network stack. // returns false as this is not supported by windows func (n NetworkMode) IsHost() bool { return false } // IsUserDefined indicates user-created network func (n NetworkMode) IsUserDefined() bool { return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer() } // IsValid indicates if an isolation technology is valid func (i Isolation) IsValid() bool { return i.IsDefault() || i.IsHyperV() || i.IsProcess() } // NetworkName returns the name of the network stack. func (n NetworkMode) NetworkName() string { if n.IsDefault() { return "default" } else if n.IsBridge() { return "nat" } else if n.IsNone() { return "none" } else if n.IsContainer() { return "container" } else if n.IsUserDefined() { return n.UserDefined() } return "" }
9,533
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/network/network.go
package network // import "github.com/docker/docker/api/types/network" import ( "github.com/docker/docker/api/types/filters" "github.com/docker/docker/errdefs" ) // Address represents an IP address type Address struct { Addr string PrefixLen int } // IPAM represents IP Address Management type IPAM struct { Driver string Options map[string]string //Per network IPAM driver options Config []IPAMConfig } // IPAMConfig represents IPAM configurations type IPAMConfig struct { Subnet string `json:",omitempty"` IPRange string `json:",omitempty"` Gateway string `json:",omitempty"` AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` } // EndpointIPAMConfig represents IPAM configurations for the endpoint type EndpointIPAMConfig struct { IPv4Address string `json:",omitempty"` IPv6Address string `json:",omitempty"` LinkLocalIPs []string `json:",omitempty"` } // Copy makes a copy of the endpoint ipam config func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig { cfgCopy := *cfg cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs)) cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...) 
return &cfgCopy } // PeerInfo represents one peer of an overlay network type PeerInfo struct { Name string IP string } // EndpointSettings stores the network endpoint details type EndpointSettings struct { // Configurations IPAMConfig *EndpointIPAMConfig Links []string Aliases []string // Operational data NetworkID string EndpointID string Gateway string IPAddress string IPPrefixLen int IPv6Gateway string GlobalIPv6Address string GlobalIPv6PrefixLen int MacAddress string DriverOpts map[string]string } // Task carries the information about one backend task type Task struct { Name string EndpointID string EndpointIP string Info map[string]string } // ServiceInfo represents service parameters with the list of service's tasks type ServiceInfo struct { VIP string Ports []string LocalLBIndex int Tasks []Task } // Copy makes a deep copy of `EndpointSettings` func (es *EndpointSettings) Copy() *EndpointSettings { epCopy := *es if es.IPAMConfig != nil { epCopy.IPAMConfig = es.IPAMConfig.Copy() } if es.Links != nil { links := make([]string, 0, len(es.Links)) epCopy.Links = append(links, es.Links...) } if es.Aliases != nil { aliases := make([]string, 0, len(es.Aliases)) epCopy.Aliases = append(aliases, es.Aliases...) } return &epCopy } // NetworkingConfig represents the container's networking configuration for each of its interfaces // Carries the networking configs specified in the `docker run` and `docker network connect` commands type NetworkingConfig struct { EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network } // ConfigReference specifies the source which provides a network's configuration type ConfigReference struct { Network string } var acceptedFilters = map[string]bool{ "dangling": true, "driver": true, "id": true, "label": true, "name": true, "scope": true, "type": true, } // ValidateFilters validates the list of filter args with the available filters. 
func ValidateFilters(filter filters.Args) error { return errdefs.InvalidParameter(filter.Validate(acceptedFilters)) }
9,534
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/swarm/network.go
package swarm // import "github.com/docker/docker/api/types/swarm" import ( "github.com/docker/docker/api/types/network" ) // Endpoint represents an endpoint. type Endpoint struct { Spec EndpointSpec `json:",omitempty"` Ports []PortConfig `json:",omitempty"` VirtualIPs []EndpointVirtualIP `json:",omitempty"` } // EndpointSpec represents the spec of an endpoint. type EndpointSpec struct { Mode ResolutionMode `json:",omitempty"` Ports []PortConfig `json:",omitempty"` } // ResolutionMode represents a resolution mode. type ResolutionMode string const ( // ResolutionModeVIP VIP ResolutionModeVIP ResolutionMode = "vip" // ResolutionModeDNSRR DNSRR ResolutionModeDNSRR ResolutionMode = "dnsrr" ) // PortConfig represents the config of a port. type PortConfig struct { Name string `json:",omitempty"` Protocol PortConfigProtocol `json:",omitempty"` // TargetPort is the port inside the container TargetPort uint32 `json:",omitempty"` // PublishedPort is the port on the swarm hosts PublishedPort uint32 `json:",omitempty"` // PublishMode is the mode in which port is published PublishMode PortConfigPublishMode `json:",omitempty"` } // PortConfigPublishMode represents the mode in which the port is to // be published. type PortConfigPublishMode string const ( // PortConfigPublishModeIngress is used for ports published // for ingress load balancing using routing mesh. PortConfigPublishModeIngress PortConfigPublishMode = "ingress" // PortConfigPublishModeHost is used for ports published // for direct host level access on the host where the task is running. PortConfigPublishModeHost PortConfigPublishMode = "host" ) // PortConfigProtocol represents the protocol of a port. type PortConfigProtocol string const ( // TODO(stevvooe): These should be used generally, not just for PortConfig. 
// PortConfigProtocolTCP TCP PortConfigProtocolTCP PortConfigProtocol = "tcp" // PortConfigProtocolUDP UDP PortConfigProtocolUDP PortConfigProtocol = "udp" // PortConfigProtocolSCTP SCTP PortConfigProtocolSCTP PortConfigProtocol = "sctp" ) // EndpointVirtualIP represents the virtual ip of a port. type EndpointVirtualIP struct { NetworkID string `json:",omitempty"` Addr string `json:",omitempty"` } // Network represents a network. type Network struct { ID string Meta Spec NetworkSpec `json:",omitempty"` DriverState Driver `json:",omitempty"` IPAMOptions *IPAMOptions `json:",omitempty"` } // NetworkSpec represents the spec of a network. type NetworkSpec struct { Annotations DriverConfiguration *Driver `json:",omitempty"` IPv6Enabled bool `json:",omitempty"` Internal bool `json:",omitempty"` Attachable bool `json:",omitempty"` Ingress bool `json:",omitempty"` IPAMOptions *IPAMOptions `json:",omitempty"` ConfigFrom *network.ConfigReference `json:",omitempty"` Scope string `json:",omitempty"` } // NetworkAttachmentConfig represents the configuration of a network attachment. type NetworkAttachmentConfig struct { Target string `json:",omitempty"` Aliases []string `json:",omitempty"` DriverOpts map[string]string `json:",omitempty"` } // NetworkAttachment represents a network attachment. type NetworkAttachment struct { Network Network `json:",omitempty"` Addresses []string `json:",omitempty"` } // IPAMOptions represents ipam options. type IPAMOptions struct { Driver Driver `json:",omitempty"` Configs []IPAMConfig `json:",omitempty"` } // IPAMConfig represents ipam configuration. type IPAMConfig struct { Subnet string `json:",omitempty"` Range string `json:",omitempty"` Gateway string `json:",omitempty"` }
9,535
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/swarm/common.go
package swarm // import "github.com/docker/docker/api/types/swarm" import "time" // Version represents the internal object version. type Version struct { Index uint64 `json:",omitempty"` } // Meta is a base object inherited by most of the other once. type Meta struct { Version Version `json:",omitempty"` CreatedAt time.Time `json:",omitempty"` UpdatedAt time.Time `json:",omitempty"` } // Annotations represents how to describe an object. type Annotations struct { Name string `json:",omitempty"` Labels map[string]string `json:"Labels"` } // Driver represents a driver (network, logging, secrets backend). type Driver struct { Name string `json:",omitempty"` Options map[string]string `json:",omitempty"` } // TLSInfo represents the TLS information about what CA certificate is trusted, // and who the issuer for a TLS certificate is type TLSInfo struct { // TrustRoot is the trusted CA root certificate in PEM format TrustRoot string `json:",omitempty"` // CertIssuer is the raw subject bytes of the issuer CertIssuerSubject []byte `json:",omitempty"` // CertIssuerPublicKey is the raw public key bytes of the issuer CertIssuerPublicKey []byte `json:",omitempty"` }
9,536
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/swarm/config.go
package swarm // import "github.com/docker/docker/api/types/swarm" import "os" // Config represents a config. type Config struct { ID string Meta Spec ConfigSpec } // ConfigSpec represents a config specification from a config in swarm type ConfigSpec struct { Annotations Data []byte `json:",omitempty"` // Templating controls whether and how to evaluate the config payload as // a template. If it is not set, no templating is used. Templating *Driver `json:",omitempty"` } // ConfigReferenceFileTarget is a file target in a config reference type ConfigReferenceFileTarget struct { Name string UID string GID string Mode os.FileMode } // ConfigReferenceRuntimeTarget is a target for a config specifying that it // isn't mounted into the container but instead has some other purpose. type ConfigReferenceRuntimeTarget struct{} // ConfigReference is a reference to a config in swarm type ConfigReference struct { File *ConfigReferenceFileTarget `json:",omitempty"` Runtime *ConfigReferenceRuntimeTarget `json:",omitempty"` ConfigID string ConfigName string }
9,537
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/swarm/task.go
package swarm // import "github.com/docker/docker/api/types/swarm"

import (
	"time"

	"github.com/docker/docker/api/types/swarm/runtime"
)

// TaskState represents the state of a task.
type TaskState string

const (
	// TaskStateNew NEW
	TaskStateNew TaskState = "new"
	// TaskStateAllocated ALLOCATED
	TaskStateAllocated TaskState = "allocated"
	// TaskStatePending PENDING
	TaskStatePending TaskState = "pending"
	// TaskStateAssigned ASSIGNED
	TaskStateAssigned TaskState = "assigned"
	// TaskStateAccepted ACCEPTED
	TaskStateAccepted TaskState = "accepted"
	// TaskStatePreparing PREPARING
	TaskStatePreparing TaskState = "preparing"
	// TaskStateReady READY
	TaskStateReady TaskState = "ready"
	// TaskStateStarting STARTING
	TaskStateStarting TaskState = "starting"
	// TaskStateRunning RUNNING
	TaskStateRunning TaskState = "running"
	// TaskStateComplete COMPLETE
	TaskStateComplete TaskState = "complete"
	// TaskStateShutdown SHUTDOWN
	TaskStateShutdown TaskState = "shutdown"
	// TaskStateFailed FAILED
	TaskStateFailed TaskState = "failed"
	// TaskStateRejected REJECTED
	TaskStateRejected TaskState = "rejected"
	// TaskStateRemove REMOVE
	TaskStateRemove TaskState = "remove"
	// TaskStateOrphaned ORPHANED
	TaskStateOrphaned TaskState = "orphaned"
)

// Task represents a task.
type Task struct {
	ID string
	// Meta embeds the object version and created/updated timestamps.
	Meta
	Annotations

	Spec                TaskSpec            `json:",omitempty"`
	ServiceID           string              `json:",omitempty"`
	Slot                int                 `json:",omitempty"`
	NodeID              string              `json:",omitempty"`
	Status              TaskStatus          `json:",omitempty"`
	DesiredState        TaskState           `json:",omitempty"`
	NetworksAttachments []NetworkAttachment `json:",omitempty"`
	GenericResources    []GenericResource   `json:",omitempty"`
}

// TaskSpec represents the spec of a task.
type TaskSpec struct {
	// ContainerSpec, NetworkAttachmentSpec, and PluginSpec are mutually exclusive.
	// PluginSpec is only used when the `Runtime` field is set to `plugin`
	// NetworkAttachmentSpec is used if the `Runtime` field is set to
	// `attachment`.
	ContainerSpec         *ContainerSpec         `json:",omitempty"`
	PluginSpec            *runtime.PluginSpec    `json:",omitempty"`
	NetworkAttachmentSpec *NetworkAttachmentSpec `json:",omitempty"`

	Resources     *ResourceRequirements     `json:",omitempty"`
	RestartPolicy *RestartPolicy            `json:",omitempty"`
	Placement     *Placement                `json:",omitempty"`
	Networks      []NetworkAttachmentConfig `json:",omitempty"`

	// LogDriver specifies the LogDriver to use for tasks created from this
	// spec. If not present, the one on cluster default on swarm.Spec will be
	// used, finally falling back to the engine default if not specified.
	LogDriver *Driver `json:",omitempty"`

	// ForceUpdate is a counter that triggers an update even if no relevant
	// parameters have been changed.
	ForceUpdate uint64

	Runtime RuntimeType `json:",omitempty"`
}

// Resources represents resources (CPU/Memory).
type Resources struct {
	NanoCPUs         int64             `json:",omitempty"`
	MemoryBytes      int64             `json:",omitempty"`
	GenericResources []GenericResource `json:",omitempty"`
}

// GenericResource represents a "user defined" resource which can
// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1)
type GenericResource struct {
	NamedResourceSpec    *NamedGenericResource    `json:",omitempty"`
	DiscreteResourceSpec *DiscreteGenericResource `json:",omitempty"`
}

// NamedGenericResource represents a "user defined" resource which is defined
// as a string.
// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...)
// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...)
type NamedGenericResource struct {
	Kind  string `json:",omitempty"`
	Value string `json:",omitempty"`
}

// DiscreteGenericResource represents a "user defined" resource which is defined
// as an integer
// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...)
// Value is used to count the resource (SSD=5, HDD=3, ...)
type DiscreteGenericResource struct {
	Kind  string `json:",omitempty"`
	Value int64  `json:",omitempty"`
}

// ResourceRequirements represents resources requirements.
type ResourceRequirements struct {
	Limits       *Resources `json:",omitempty"`
	Reservations *Resources `json:",omitempty"`
}

// Placement represents orchestration parameters.
type Placement struct {
	Constraints []string              `json:",omitempty"`
	Preferences []PlacementPreference `json:",omitempty"`
	MaxReplicas uint64                `json:",omitempty"`

	// Platforms stores all the platforms that the image can run on.
	// This field is used in the platform filter for scheduling. If empty,
	// then the platform filter is off, meaning there are no scheduling restrictions.
	Platforms []Platform `json:",omitempty"`
}

// PlacementPreference provides a way to make the scheduler aware of factors
// such as topology.
type PlacementPreference struct {
	Spread *SpreadOver
}

// SpreadOver is a scheduling preference that instructs the scheduler to spread
// tasks evenly over groups of nodes identified by labels.
type SpreadOver struct {
	// label descriptor, such as engine.labels.az
	SpreadDescriptor string
}

// RestartPolicy represents the restart policy.
type RestartPolicy struct {
	Condition   RestartPolicyCondition `json:",omitempty"`
	Delay       *time.Duration         `json:",omitempty"`
	MaxAttempts *uint64                `json:",omitempty"`
	Window      *time.Duration         `json:",omitempty"`
}

// RestartPolicyCondition represents when to restart.
type RestartPolicyCondition string

const (
	// RestartPolicyConditionNone NONE
	RestartPolicyConditionNone RestartPolicyCondition = "none"
	// RestartPolicyConditionOnFailure ON_FAILURE
	RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure"
	// RestartPolicyConditionAny ANY
	RestartPolicyConditionAny RestartPolicyCondition = "any"
)

// TaskStatus represents the status of a task.
type TaskStatus struct {
	Timestamp       time.Time        `json:",omitempty"`
	State           TaskState        `json:",omitempty"`
	Message         string           `json:",omitempty"`
	Err             string           `json:",omitempty"`
	ContainerStatus *ContainerStatus `json:",omitempty"`
	PortStatus      PortStatus       `json:",omitempty"`
}

// ContainerStatus represents the status of a container.
type ContainerStatus struct {
	ContainerID string
	PID         int
	ExitCode    int
}

// PortStatus represents the port status of a task's host ports whose
// service has published host ports
type PortStatus struct {
	Ports []PortConfig `json:",omitempty"`
}
9,538
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/swarm/service.go
package swarm // import "github.com/docker/docker/api/types/swarm"

import "time"

// Service represents a service.
type Service struct {
	ID string
	// Meta embeds the object version and created/updated timestamps.
	Meta
	Spec         ServiceSpec   `json:",omitempty"`
	PreviousSpec *ServiceSpec  `json:",omitempty"`
	Endpoint     Endpoint      `json:",omitempty"`
	UpdateStatus *UpdateStatus `json:",omitempty"`
}

// ServiceSpec represents the spec of a service.
type ServiceSpec struct {
	Annotations

	// TaskTemplate defines how the service should construct new tasks when
	// orchestrating this service.
	TaskTemplate   TaskSpec      `json:",omitempty"`
	Mode           ServiceMode   `json:",omitempty"`
	UpdateConfig   *UpdateConfig `json:",omitempty"`
	RollbackConfig *UpdateConfig `json:",omitempty"`

	// Networks field in ServiceSpec is deprecated. The
	// same field in TaskSpec should be used instead.
	// This field will be removed in a future release.
	Networks     []NetworkAttachmentConfig `json:",omitempty"`
	EndpointSpec *EndpointSpec             `json:",omitempty"`
}

// ServiceMode represents the mode of a service.
type ServiceMode struct {
	Replicated *ReplicatedService `json:",omitempty"`
	Global     *GlobalService     `json:",omitempty"`
}

// UpdateState is the state of a service update.
type UpdateState string

const (
	// UpdateStateUpdating is the updating state.
	UpdateStateUpdating UpdateState = "updating"
	// UpdateStatePaused is the paused state.
	UpdateStatePaused UpdateState = "paused"
	// UpdateStateCompleted is the completed state.
	UpdateStateCompleted UpdateState = "completed"
	// UpdateStateRollbackStarted is the state with a rollback in progress.
	UpdateStateRollbackStarted UpdateState = "rollback_started"
	// UpdateStateRollbackPaused is the state with a rollback in progress.
	UpdateStateRollbackPaused UpdateState = "rollback_paused"
	// UpdateStateRollbackCompleted is the state with a rollback in progress.
	UpdateStateRollbackCompleted UpdateState = "rollback_completed"
)

// UpdateStatus reports the status of a service update.
type UpdateStatus struct {
	State       UpdateState `json:",omitempty"`
	StartedAt   *time.Time  `json:",omitempty"`
	CompletedAt *time.Time  `json:",omitempty"`
	Message     string      `json:",omitempty"`
}

// ReplicatedService is a kind of ServiceMode.
type ReplicatedService struct {
	Replicas *uint64 `json:",omitempty"`
}

// GlobalService is a kind of ServiceMode.
type GlobalService struct{}

const (
	// UpdateFailureActionPause PAUSE
	UpdateFailureActionPause = "pause"
	// UpdateFailureActionContinue CONTINUE
	UpdateFailureActionContinue = "continue"
	// UpdateFailureActionRollback ROLLBACK
	UpdateFailureActionRollback = "rollback"

	// UpdateOrderStopFirst STOP_FIRST
	UpdateOrderStopFirst = "stop-first"
	// UpdateOrderStartFirst START_FIRST
	UpdateOrderStartFirst = "start-first"
)

// UpdateConfig represents the update configuration.
type UpdateConfig struct {
	// Maximum number of tasks to be updated in one iteration.
	// 0 means unlimited parallelism.
	Parallelism uint64

	// Amount of time between updates.
	Delay time.Duration `json:",omitempty"`

	// FailureAction is the action to take when an update fails.
	FailureAction string `json:",omitempty"`

	// Monitor indicates how long to monitor a task for failure after it is
	// created. If the task fails by ending up in one of the states
	// REJECTED, COMPLETED, or FAILED, within Monitor from its creation,
	// this counts as a failure. If it fails after Monitor, it does not
	// count as a failure. If Monitor is unspecified, a default value will
	// be used.
	Monitor time.Duration `json:",omitempty"`

	// MaxFailureRatio is the fraction of tasks that may fail during
	// an update before the failure action is invoked. Any task created by
	// the current update which ends up in one of the states REJECTED,
	// COMPLETED or FAILED within Monitor from its creation counts as a
	// failure. The number of failures is divided by the number of tasks
	// being updated, and if this fraction is greater than
	// MaxFailureRatio, the failure action is invoked.
	//
	// If the failure action is CONTINUE, there is no effect.
	// If the failure action is PAUSE, no more tasks will be updated until
	// another update is started.
	MaxFailureRatio float32

	// Order indicates the order of operations when rolling out an updated
	// task. Either the old task is shut down before the new task is
	// started, or the new task is started before the old task is shut down.
	Order string
}
9,539
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/swarm/secret.go
package swarm // import "github.com/docker/docker/api/types/swarm" import "os" // Secret represents a secret. type Secret struct { ID string Meta Spec SecretSpec } // SecretSpec represents a secret specification from a secret in swarm type SecretSpec struct { Annotations Data []byte `json:",omitempty"` Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store // Templating controls whether and how to evaluate the secret payload as // a template. If it is not set, no templating is used. Templating *Driver `json:",omitempty"` } // SecretReferenceFileTarget is a file target in a secret reference type SecretReferenceFileTarget struct { Name string UID string GID string Mode os.FileMode } // SecretReference is a reference to a secret in swarm type SecretReference struct { File *SecretReferenceFileTarget SecretID string SecretName string }
9,540
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/swarm/runtime.go
package swarm // import "github.com/docker/docker/api/types/swarm" // RuntimeType is the type of runtime used for the TaskSpec type RuntimeType string // RuntimeURL is the proto type url type RuntimeURL string const ( // RuntimeContainer is the container based runtime RuntimeContainer RuntimeType = "container" // RuntimePlugin is the plugin based runtime RuntimePlugin RuntimeType = "plugin" // RuntimeNetworkAttachment is the network attachment runtime RuntimeNetworkAttachment RuntimeType = "attachment" // RuntimeURLContainer is the proto url for the container type RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer" // RuntimeURLPlugin is the proto url for the plugin type RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin" ) // NetworkAttachmentSpec represents the runtime spec type for network // attachment tasks type NetworkAttachmentSpec struct { ContainerID string }
9,541
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/swarm/node.go
package swarm // import "github.com/docker/docker/api/types/swarm"

// Node represents a node.
type Node struct {
	ID string
	// Meta embeds the object version and created/updated timestamps.
	Meta
	// Spec defines the desired state of the node as specified by the user.
	// The system will honor this and will *never* modify it.
	Spec NodeSpec `json:",omitempty"`
	// Description encapsulates the properties of the Node as reported by the
	// agent.
	Description NodeDescription `json:",omitempty"`
	// Status provides the current status of the node, as seen by the manager.
	Status NodeStatus `json:",omitempty"`
	// ManagerStatus provides the current status of the node's manager
	// component, if the node is a manager.
	ManagerStatus *ManagerStatus `json:",omitempty"`
}

// NodeSpec represents the spec of a node.
type NodeSpec struct {
	Annotations
	Role         NodeRole         `json:",omitempty"`
	Availability NodeAvailability `json:",omitempty"`
}

// NodeRole represents the role of a node.
type NodeRole string

const (
	// NodeRoleWorker WORKER
	NodeRoleWorker NodeRole = "worker"
	// NodeRoleManager MANAGER
	NodeRoleManager NodeRole = "manager"
)

// NodeAvailability represents the availability of a node.
type NodeAvailability string

const (
	// NodeAvailabilityActive ACTIVE
	NodeAvailabilityActive NodeAvailability = "active"
	// NodeAvailabilityPause PAUSE
	NodeAvailabilityPause NodeAvailability = "pause"
	// NodeAvailabilityDrain DRAIN
	NodeAvailabilityDrain NodeAvailability = "drain"
)

// NodeDescription represents the description of a node.
type NodeDescription struct {
	Hostname  string            `json:",omitempty"`
	Platform  Platform          `json:",omitempty"`
	Resources Resources         `json:",omitempty"`
	Engine    EngineDescription `json:",omitempty"`
	TLSInfo   TLSInfo           `json:",omitempty"`
}

// Platform represents the platform (Arch/OS).
type Platform struct {
	Architecture string `json:",omitempty"`
	OS           string `json:",omitempty"`
}

// EngineDescription represents the description of an engine.
type EngineDescription struct {
	EngineVersion string              `json:",omitempty"`
	Labels        map[string]string   `json:",omitempty"`
	Plugins       []PluginDescription `json:",omitempty"`
}

// PluginDescription represents the description of an engine plugin.
type PluginDescription struct {
	Type string `json:",omitempty"`
	Name string `json:",omitempty"`
}

// NodeStatus represents the status of a node.
type NodeStatus struct {
	State   NodeState `json:",omitempty"`
	Message string    `json:",omitempty"`
	Addr    string    `json:",omitempty"`
}

// Reachability represents the reachability of a node.
type Reachability string

const (
	// ReachabilityUnknown UNKNOWN
	ReachabilityUnknown Reachability = "unknown"
	// ReachabilityUnreachable UNREACHABLE
	ReachabilityUnreachable Reachability = "unreachable"
	// ReachabilityReachable REACHABLE
	ReachabilityReachable Reachability = "reachable"
)

// ManagerStatus represents the status of a manager.
type ManagerStatus struct {
	Leader       bool         `json:",omitempty"`
	Reachability Reachability `json:",omitempty"`
	Addr         string       `json:",omitempty"`
}

// NodeState represents the state of a node.
type NodeState string

const (
	// NodeStateUnknown UNKNOWN
	NodeStateUnknown NodeState = "unknown"
	// NodeStateDown DOWN
	NodeStateDown NodeState = "down"
	// NodeStateReady READY
	NodeStateReady NodeState = "ready"
	// NodeStateDisconnected DISCONNECTED
	NodeStateDisconnected NodeState = "disconnected"
)
9,542
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/swarm/swarm.go
package swarm // import "github.com/docker/docker/api/types/swarm"

import (
	"time"
)

// ClusterInfo represents info about the cluster for outputting in "info"
// it contains the same information as "Swarm", but without the JoinTokens
type ClusterInfo struct {
	ID string
	// Meta embeds the object version and created/updated timestamps.
	Meta
	Spec                   Spec
	TLSInfo                TLSInfo
	RootRotationInProgress bool
	DefaultAddrPool        []string
	SubnetSize             uint32
	DataPathPort           uint32
}

// Swarm represents a swarm.
type Swarm struct {
	ClusterInfo
	JoinTokens JoinTokens
}

// JoinTokens contains the tokens workers and managers need to join the swarm.
type JoinTokens struct {
	// Worker is the join token workers may use to join the swarm.
	Worker string
	// Manager is the join token managers may use to join the swarm.
	Manager string
}

// Spec represents the spec of a swarm.
type Spec struct {
	Annotations

	Orchestration    OrchestrationConfig `json:",omitempty"`
	Raft             RaftConfig          `json:",omitempty"`
	Dispatcher       DispatcherConfig    `json:",omitempty"`
	CAConfig         CAConfig            `json:",omitempty"`
	TaskDefaults     TaskDefaults        `json:",omitempty"`
	EncryptionConfig EncryptionConfig    `json:",omitempty"`
}

// OrchestrationConfig represents orchestration configuration.
type OrchestrationConfig struct {
	// TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or
	// node. If negative, never remove completed or failed tasks.
	TaskHistoryRetentionLimit *int64 `json:",omitempty"`
}

// TaskDefaults parameterizes cluster-level task creation with default values.
type TaskDefaults struct {
	// LogDriver selects the log driver to use for tasks created in the
	// orchestrator if unspecified by a service.
	//
	// Updating this value will only have an effect on new tasks. Old tasks
	// will continue use their previously configured log driver until
	// recreated.
	LogDriver *Driver `json:",omitempty"`
}

// EncryptionConfig controls at-rest encryption of data and keys.
type EncryptionConfig struct {
	// AutoLockManagers specifies whether or not managers TLS keys and raft data
	// should be encrypted at rest in such a way that they must be unlocked
	// before the manager node starts up again.
	AutoLockManagers bool
}

// RaftConfig represents raft configuration.
type RaftConfig struct {
	// SnapshotInterval is the number of log entries between snapshots.
	SnapshotInterval uint64 `json:",omitempty"`

	// KeepOldSnapshots is the number of snapshots to keep beyond the
	// current snapshot.
	KeepOldSnapshots *uint64 `json:",omitempty"`

	// LogEntriesForSlowFollowers is the number of log entries to keep
	// around to sync up slow followers after a snapshot is created.
	LogEntriesForSlowFollowers uint64 `json:",omitempty"`

	// ElectionTick is the number of ticks that a follower will wait for a message
	// from the leader before becoming a candidate and starting an election.
	// ElectionTick must be greater than HeartbeatTick.
	//
	// A tick currently defaults to one second, so these translate directly to
	// seconds currently, but this is NOT guaranteed.
	ElectionTick int

	// HeartbeatTick is the number of ticks between heartbeats. Every
	// HeartbeatTick ticks, the leader will send a heartbeat to the
	// followers.
	//
	// A tick currently defaults to one second, so these translate directly to
	// seconds currently, but this is NOT guaranteed.
	HeartbeatTick int
}

// DispatcherConfig represents dispatcher configuration.
type DispatcherConfig struct {
	// HeartbeatPeriod defines how often agent should send heartbeats to
	// dispatcher.
	HeartbeatPeriod time.Duration `json:",omitempty"`
}

// CAConfig represents CA configuration.
type CAConfig struct {
	// NodeCertExpiry is the duration certificates should be issued for
	NodeCertExpiry time.Duration `json:",omitempty"`

	// ExternalCAs is a list of CAs to which a manager node will make
	// certificate signing requests for node certificates.
	ExternalCAs []*ExternalCA `json:",omitempty"`

	// SigningCACert and SigningCAKey specify the desired signing root CA and
	// root CA key for the swarm.  When inspecting the cluster, the key will
	// be redacted.
	SigningCACert string `json:",omitempty"`
	SigningCAKey  string `json:",omitempty"`

	// If this value changes, and there is no specified signing cert and key,
	// then the swarm is forced to generate a new root certificate and key.
	ForceRotate uint64 `json:",omitempty"`
}

// ExternalCAProtocol represents type of external CA.
type ExternalCAProtocol string

// ExternalCAProtocolCFSSL CFSSL
const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl"

// ExternalCA defines external CA to be used by the cluster.
type ExternalCA struct {
	// Protocol is the protocol used by this external CA.
	Protocol ExternalCAProtocol

	// URL is the URL where the external CA can be reached.
	URL string

	// Options is a set of additional key/value pairs whose interpretation
	// depends on the specified CA type.
	Options map[string]string `json:",omitempty"`

	// CACert specifies which root CA is used by this external CA.  This certificate must
	// be in PEM format.
	CACert string
}

// InitRequest is the request used to init a swarm.
type InitRequest struct {
	ListenAddr       string
	AdvertiseAddr    string
	DataPathAddr     string
	DataPathPort     uint32
	ForceNewCluster  bool
	Spec             Spec
	AutoLockManagers bool
	Availability     NodeAvailability
	DefaultAddrPool  []string
	SubnetSize       uint32
}

// JoinRequest is the request used to join a swarm.
type JoinRequest struct {
	ListenAddr    string
	AdvertiseAddr string
	DataPathAddr  string
	RemoteAddrs   []string
	JoinToken     string // accept by secret
	Availability  NodeAvailability
}

// UnlockRequest is the request used to unlock a swarm.
type UnlockRequest struct {
	// UnlockKey is the unlock key in ASCII-armored format.
	UnlockKey string
}

// LocalNodeState represents the state of the local node.
type LocalNodeState string

const (
	// LocalNodeStateInactive INACTIVE
	LocalNodeStateInactive LocalNodeState = "inactive"
	// LocalNodeStatePending PENDING
	LocalNodeStatePending LocalNodeState = "pending"
	// LocalNodeStateActive ACTIVE
	LocalNodeStateActive LocalNodeState = "active"
	// LocalNodeStateError ERROR
	LocalNodeStateError LocalNodeState = "error"
	// LocalNodeStateLocked LOCKED
	LocalNodeStateLocked LocalNodeState = "locked"
)

// Info represents generic information about swarm.
type Info struct {
	NodeID   string
	NodeAddr string

	LocalNodeState   LocalNodeState
	ControlAvailable bool
	Error            string

	RemoteManagers []Peer
	Nodes          int `json:",omitempty"`
	Managers       int `json:",omitempty"`

	Cluster *ClusterInfo `json:",omitempty"`

	Warnings []string `json:",omitempty"`
}

// Peer represents a peer.
type Peer struct {
	NodeID string
	Addr   string
}

// UpdateFlags contains flags for SwarmUpdate.
type UpdateFlags struct {
	RotateWorkerToken      bool
	RotateManagerToken     bool
	RotateManagerUnlockKey bool
}
9,543
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/swarm/container.go
package swarm // import "github.com/docker/docker/api/types/swarm"

import (
	"time"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/mount"
)

// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf)
// Detailed documentation is available in:
// http://man7.org/linux/man-pages/man5/resolv.conf.5.html
// `nameserver`, `search`, `options` have been supported.
// TODO: `domain` is not supported yet.
type DNSConfig struct {
	// Nameservers specifies the IP addresses of the name servers
	Nameservers []string `json:",omitempty"`
	// Search specifies the search list for host-name lookup
	Search []string `json:",omitempty"`
	// Options allows certain internal resolver variables to be modified
	Options []string `json:",omitempty"`
}

// SELinuxContext contains the SELinux labels of the container.
type SELinuxContext struct {
	Disable bool

	User  string
	Role  string
	Type  string
	Level string
}

// CredentialSpec for managed service account (Windows only)
type CredentialSpec struct {
	Config   string
	File     string
	Registry string
}

// Privileges defines the security options for the container.
type Privileges struct {
	CredentialSpec *CredentialSpec
	SELinuxContext *SELinuxContext
}

// ContainerSpec represents the spec of a container.
type ContainerSpec struct {
	Image           string                  `json:",omitempty"`
	Labels          map[string]string       `json:",omitempty"`
	Command         []string                `json:",omitempty"`
	Args            []string                `json:",omitempty"`
	Hostname        string                  `json:",omitempty"`
	Env             []string                `json:",omitempty"`
	Dir             string                  `json:",omitempty"`
	User            string                  `json:",omitempty"`
	Groups          []string                `json:",omitempty"`
	Privileges      *Privileges             `json:",omitempty"`
	Init            *bool                   `json:",omitempty"`
	StopSignal      string                  `json:",omitempty"`
	TTY             bool                    `json:",omitempty"`
	OpenStdin       bool                    `json:",omitempty"`
	ReadOnly        bool                    `json:",omitempty"`
	Mounts          []mount.Mount           `json:",omitempty"`
	StopGracePeriod *time.Duration          `json:",omitempty"`
	Healthcheck     *container.HealthConfig `json:",omitempty"`
	// The format of extra hosts on swarmkit is specified in:
	// http://man7.org/linux/man-pages/man5/hosts.5.html
	//    IP_address canonical_hostname [aliases...]
	Hosts        []string            `json:",omitempty"`
	DNSConfig    *DNSConfig          `json:",omitempty"`
	Secrets      []*SecretReference  `json:",omitempty"`
	Configs      []*ConfigReference  `json:",omitempty"`
	Isolation    container.Isolation `json:",omitempty"`
	Sysctls      map[string]string   `json:",omitempty"`
	Capabilities []string            `json:",omitempty"`
}
9,544
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/swarm
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto
syntax = "proto3";

option go_package = "github.com/docker/docker/api/types/swarm/runtime;runtime";

// PluginSpec defines the base payload which clients can specify for creating
// a service with the plugin runtime.
// NOTE: field numbers are part of the wire format and must not be changed.
message PluginSpec {
	string name = 1;
	string remote = 2;
	repeated PluginPrivilege privileges = 3;
	bool disabled = 4;
	repeated string env = 5;
}

// PluginPrivilege describes a permission the user has to accept
// upon installing a plugin.
message PluginPrivilege {
	string name = 1;
	string description = 2;
	repeated string value = 3;
}
9,545
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/swarm
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go
//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto

// Package runtime holds the plugin runtime types used in swarm task specs;
// plugin.pb.go is regenerated from plugin.proto by the directive above.
package runtime // import "github.com/docker/docker/api/types/swarm/runtime"
9,546
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/swarm
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go
// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: plugin.proto /* Package runtime is a generated protocol buffer package. It is generated from these files: plugin.proto It has these top-level messages: PluginSpec PluginPrivilege */ package runtime import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package // PluginSpec defines the base payload which clients can specify for creating // a service with the plugin runtime. type PluginSpec struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"` Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` } func (m *PluginSpec) Reset() { *m = PluginSpec{} } func (m *PluginSpec) String() string { return proto.CompactTextString(m) } func (*PluginSpec) ProtoMessage() {} func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} } func (m *PluginSpec) GetName() string { if m != nil { return m.Name } return "" } func (m *PluginSpec) GetRemote() string { if m != nil { return m.Remote } return "" } func (m *PluginSpec) GetPrivileges() []*PluginPrivilege { if m != nil { return m.Privileges } return nil } func (m *PluginSpec) GetDisabled() bool { if m != nil { return m.Disabled } return false } func (m 
*PluginSpec) GetEnv() []string { if m != nil { return m.Env } return nil } // PluginPrivilege describes a permission the user has to accept // upon installing a plugin. type PluginPrivilege struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"` } func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } func (*PluginPrivilege) ProtoMessage() {} func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} } func (m *PluginPrivilege) GetName() string { if m != nil { return m.Name } return "" } func (m *PluginPrivilege) GetDescription() string { if m != nil { return m.Description } return "" } func (m *PluginPrivilege) GetValue() []string { if m != nil { return m.Value } return nil } func init() { proto.RegisterType((*PluginSpec)(nil), "PluginSpec") proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege") } func (m *PluginSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Name) > 0 { dAtA[i] = 0xa i++ i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) i += copy(dAtA[i:], m.Name) } if len(m.Remote) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) i += copy(dAtA[i:], m.Remote) } if len(m.Privileges) > 0 { for _, msg := range m.Privileges { dAtA[i] = 0x1a i++ i = encodeVarintPlugin(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } if m.Disabled { dAtA[i] = 0x20 i++ if m.Disabled { dAtA[i] = 1 } else { dAtA[i] = 0 } i++ } if len(m.Env) > 0 { for _, s := 
range m.Env { dAtA[i] = 0x2a i++ l = len(s) for l >= 1<<7 { dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } dAtA[i] = uint8(l) i++ i += copy(dAtA[i:], s) } } return i, nil } func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Name) > 0 { dAtA[i] = 0xa i++ i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) i += copy(dAtA[i:], m.Name) } if len(m.Description) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description))) i += copy(dAtA[i:], m.Description) } if len(m.Value) > 0 { for _, s := range m.Value { dAtA[i] = 0x1a i++ l = len(s) for l >= 1<<7 { dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } dAtA[i] = uint8(l) i++ i += copy(dAtA[i:], s) } } return i, nil } func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) return offset + 1 } func (m *PluginSpec) Size() (n int) { var l int _ = l l = len(m.Name) if l > 0 { n += 1 + l + sovPlugin(uint64(l)) } l = len(m.Remote) if l > 0 { n += 1 + l + sovPlugin(uint64(l)) } if len(m.Privileges) > 0 { for _, e := range m.Privileges { l = e.Size() n += 1 + l + sovPlugin(uint64(l)) } } if m.Disabled { n += 2 } if len(m.Env) > 0 { for _, s := range m.Env { l = len(s) n += 1 + l + sovPlugin(uint64(l)) } } return n } func (m *PluginPrivilege) Size() (n int) { var l int _ = l l = len(m.Name) if l > 0 { n += 1 + l + sovPlugin(uint64(l)) } l = len(m.Description) if l > 0 { n += 1 + l + sovPlugin(uint64(l)) } if len(m.Value) > 0 { for _, s := range m.Value { l = len(s) n += 1 + l + sovPlugin(uint64(l)) } } return n } func sovPlugin(x uint64) (n int) { for { n++ x >>= 7 if x == 0 { break } } return n } func sozPlugin(x uint64) (n int) { return 
sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func (m *PluginSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlugin } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlugin } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthPlugin } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlugin } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthPlugin } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Remote = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 
ErrIntOverflowPlugin } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthPlugin } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Privileges = append(m.Privileges, &PluginPrivilege{}) if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlugin } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } m.Disabled = bool(v != 0) case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlugin } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthPlugin } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipPlugin(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthPlugin } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlugin } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { 
break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlugin } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthPlugin } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlugin } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthPlugin } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Description = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlugin } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthPlugin } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Value = append(m.Value, 
string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipPlugin(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthPlugin } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func skipPlugin(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowPlugin } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } wireType := int(wire & 0x7) switch wireType { case 0: for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowPlugin } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } iNdEx++ if dAtA[iNdEx-1] < 0x80 { break } } return iNdEx, nil case 1: iNdEx += 8 return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowPlugin } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { break } } iNdEx += length if length < 0 { return 0, ErrInvalidLengthPlugin } return iNdEx, nil case 3: for { var innerWire uint64 var start int = iNdEx for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowPlugin } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } innerWireType := int(innerWire & 0x7) if innerWireType == 4 { break } next, err := skipPlugin(dAtA[start:]) if err != nil { return 0, err } iNdEx = start + next } return iNdEx, nil case 4: return iNdEx, nil case 5: iNdEx += 4 return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } } panic("unreachable") } var ( ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") 
ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") ) func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) } var fileDescriptorPlugin = []byte{ // 256 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x4d, 0x4b, 0xc3, 0x30, 0x18, 0xc7, 0x89, 0xdd, 0xc6, 0xfa, 0x4c, 0x70, 0x04, 0x91, 0xe2, 0xa1, 0x94, 0x9d, 0x7a, 0x6a, 0x45, 0x2f, 0x82, 0x37, 0x0f, 0x9e, 0x47, 0xbc, 0x09, 0x1e, 0xd2, 0xf6, 0xa1, 0x06, 0x9b, 0x17, 0x92, 0xb4, 0xe2, 0x37, 0xf1, 0x23, 0x79, 0xf4, 0x23, 0x48, 0x3f, 0x89, 0x98, 0x75, 0x32, 0x64, 0xa7, 0xff, 0x4b, 0xc2, 0x9f, 0x1f, 0x0f, 0x9c, 0x9a, 0xae, 0x6f, 0x85, 0x2a, 0x8c, 0xd5, 0x5e, 0x6f, 0x3e, 0x08, 0xc0, 0x36, 0x14, 0x8f, 0x06, 0x6b, 0x4a, 0x61, 0xa6, 0xb8, 0xc4, 0x84, 0x64, 0x24, 0x8f, 0x59, 0xf0, 0xf4, 0x02, 0x16, 0x16, 0xa5, 0xf6, 0x98, 0x9c, 0x84, 0x76, 0x4a, 0xf4, 0x0a, 0xc0, 0x58, 0x31, 0x88, 0x0e, 0x5b, 0x74, 0x49, 0x94, 0x45, 0xf9, 0xea, 0x7a, 0x5d, 0xec, 0xc6, 0xb6, 0xfb, 0x07, 0x76, 0xf0, 0x87, 0x5e, 0xc2, 0xb2, 0x11, 0x8e, 0x57, 0x1d, 0x36, 0xc9, 0x2c, 0x23, 0xf9, 0x92, 0xfd, 0x65, 0xba, 0x86, 0x08, 0xd5, 0x90, 0xcc, 0xb3, 0x28, 0x8f, 0xd9, 0xaf, 0xdd, 0x3c, 0xc3, 0xd9, 0xbf, 0xb1, 0xa3, 0x78, 0x19, 0xac, 0x1a, 0x74, 0xb5, 0x15, 0xc6, 0x0b, 0xad, 0x26, 0xc6, 0xc3, 0x8a, 0x9e, 0xc3, 0x7c, 0xe0, 0x5d, 0x8f, 0x81, 0x31, 0x66, 0xbb, 0x70, 0xff, 0xf0, 0x39, 0xa6, 0xe4, 0x6b, 0x4c, 0xc9, 0xf7, 0x98, 0x92, 0xa7, 0xdb, 0x56, 0xf8, 0x97, 0xbe, 0x2a, 0x6a, 0x2d, 0xcb, 0x46, 0xd7, 0xaf, 0x68, 0xf7, 0xc2, 0x8d, 0x28, 0xfd, 0xbb, 0x41, 0x57, 0xba, 0x37, 0x6e, 0x65, 0x69, 0x7b, 0xe5, 0x85, 0xc4, 0xbb, 0x49, 0xab, 0x45, 0x38, 0xe4, 0xcd, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x99, 0xa8, 0xd9, 0x9b, 0x58, 0x01, 0x00, 0x00, }
9,547
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go
package blkiodev // import "github.com/docker/docker/api/types/blkiodev" import "fmt" // WeightDevice is a structure that holds device:weight pair type WeightDevice struct { Path string Weight uint16 } func (w *WeightDevice) String() string { return fmt.Sprintf("%s:%d", w.Path, w.Weight) } // ThrottleDevice is a structure that holds device:rate_per_second pair type ThrottleDevice struct { Path string Rate uint64 } func (t *ThrottleDevice) String() string { return fmt.Sprintf("%s:%d", t.Path, t.Rate) }
9,548
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker/api/types/mount/mount.go
package mount // import "github.com/docker/docker/api/types/mount" import ( "os" ) // Type represents the type of a mount. type Type string // Type constants const ( // TypeBind is the type for mounting host dir TypeBind Type = "bind" // TypeVolume is the type for remote storage volumes TypeVolume Type = "volume" // TypeTmpfs is the type for mounting tmpfs TypeTmpfs Type = "tmpfs" // TypeNamedPipe is the type for mounting Windows named pipes TypeNamedPipe Type = "npipe" ) // Mount represents a mount (volume). type Mount struct { Type Type `json:",omitempty"` // Source specifies the name of the mount. Depending on mount type, this // may be a volume name or a host path, or even ignored. // Source is not supported for tmpfs (must be an empty value) Source string `json:",omitempty"` Target string `json:",omitempty"` ReadOnly bool `json:",omitempty"` Consistency Consistency `json:",omitempty"` BindOptions *BindOptions `json:",omitempty"` VolumeOptions *VolumeOptions `json:",omitempty"` TmpfsOptions *TmpfsOptions `json:",omitempty"` } // Propagation represents the propagation of a mount. type Propagation string const ( // PropagationRPrivate RPRIVATE PropagationRPrivate Propagation = "rprivate" // PropagationPrivate PRIVATE PropagationPrivate Propagation = "private" // PropagationRShared RSHARED PropagationRShared Propagation = "rshared" // PropagationShared SHARED PropagationShared Propagation = "shared" // PropagationRSlave RSLAVE PropagationRSlave Propagation = "rslave" // PropagationSlave SLAVE PropagationSlave Propagation = "slave" ) // Propagations is the list of all valid mount propagations var Propagations = []Propagation{ PropagationRPrivate, PropagationPrivate, PropagationRShared, PropagationShared, PropagationRSlave, PropagationSlave, } // Consistency represents the consistency requirements of a mount. 
type Consistency string const ( // ConsistencyFull guarantees bind mount-like consistency ConsistencyFull Consistency = "consistent" // ConsistencyCached mounts can cache read data and FS structure ConsistencyCached Consistency = "cached" // ConsistencyDelegated mounts can cache read and written data and structure ConsistencyDelegated Consistency = "delegated" // ConsistencyDefault provides "consistent" behavior unless overridden ConsistencyDefault Consistency = "default" ) // BindOptions defines options specific to mounts of type "bind". type BindOptions struct { Propagation Propagation `json:",omitempty"` NonRecursive bool `json:",omitempty"` } // VolumeOptions represents the options for a mount of type volume. type VolumeOptions struct { NoCopy bool `json:",omitempty"` Labels map[string]string `json:",omitempty"` DriverConfig *Driver `json:",omitempty"` } // Driver represents a volume driver. type Driver struct { Name string `json:",omitempty"` Options map[string]string `json:",omitempty"` } // TmpfsOptions defines options specific to mounts of type "tmpfs". type TmpfsOptions struct { // Size sets the size of the tmpfs, in bytes. // // This will be converted to an operating system specific value // depending on the host. For example, on linux, it will be converted to // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with // docker, uses a straight byte value. // // Percentages are not supported. SizeBytes int64 `json:",omitempty"` // Mode of the tmpfs upon creation Mode os.FileMode `json:",omitempty"` // TODO(stevvooe): There are several more tmpfs flags, specified in the // daemon, that are accepted. Only the most basic are added for now. 
// // From docker/docker/pkg/mount/flags.go: // // var validFlags = map[string]bool{ // "": true, // "size": true, X // "mode": true, X // "uid": true, // "gid": true, // "nr_inodes": true, // "nr_blocks": true, // "mpol": true, // } // // Some of these may be straightforward to add, but others, such as // uid/gid have implications in a clustered system. }
9,549
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/cli/NOTICE
Docker Copyright 2012-2017 Docker, Inc. This product includes software developed at Docker, Inc. (https://www.docker.com). This product contains software (https://github.com/creack/pty) developed by Keith Rarick, licensed under the MIT License. The following is courtesy of our legal counsel: Use and transfer of Docker may be subject to certain restrictions by the United States and other governments. It is your responsibility to ensure that your use and/or transfer does not violate applicable laws. For more information, please see https://www.bis.doc.gov See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
9,550
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/cli/AUTHORS
# This file lists all individuals having contributed content to the repository. # For how it is generated, see `scripts/docs/generate-authors.sh`. Aanand Prasad <[email protected]> Aaron L. Xu <[email protected]> Aaron Lehmann <[email protected]> Aaron.L.Xu <[email protected]> Abdur Rehman <[email protected]> Abhinandan Prativadi <[email protected]> Abin Shahab <[email protected]> Abreto FU <[email protected]> Ace Tang <[email protected]> Addam Hardy <[email protected]> Adolfo Ochagavía <[email protected]> Adrian Plata <[email protected]> Adrien Duermael <[email protected]> Adrien Folie <[email protected]> Ahmet Alp Balkan <[email protected]> Aidan Feldman <[email protected]> Aidan Hobson Sayers <[email protected]> AJ Bowen <[email protected]> Akhil Mohan <[email protected]> Akihiro Suda <[email protected]> Akim Demaille <[email protected]> Alan Thompson <[email protected]> Albert Callarisa <[email protected]> Albin Kerouanton <[email protected]> Aleksa Sarai <[email protected]> Aleksander Piotrowski <[email protected]> Alessandro Boch <[email protected]> Alex Mavrogiannis <[email protected]> Alex Mayer <[email protected]> Alexander Boyd <[email protected]> Alexander Larsson <[email protected]> Alexander Morozov <[email protected]> Alexander Ryabov <[email protected]> Alexandre González <[email protected]> Alfred Landrum <[email protected]> Alicia Lauerman <[email protected]> Allen Sun <[email protected]> Alvin Deng <[email protected]> Amen Belayneh <[email protected]> Amir Goldstein <[email protected]> Amit Krishnan <[email protected]> Amit Shukla <[email protected]> Amy Lindburg <[email protected]> Anca Iordache <[email protected]> Anda Xu <[email protected]> Andrea Luzzardi <[email protected]> Andreas Köhler <[email protected]> Andrew France <[email protected]> Andrew Hsu <[email protected]> Andrew Macpherson <[email protected]> Andrew McDonnell <[email protected]> Andrew Po <[email protected]> Andrey Petrov <[email protected]> Andrii Berehuliak <[email 
protected]> André Martins <[email protected]> Andy Goldstein <[email protected]> Andy Rothfusz <[email protected]> Anil Madhavapeddy <[email protected]> Ankush Agarwal <[email protected]> Anne Henmi <[email protected]> Anton Polonskiy <[email protected]> Antonio Murdaca <[email protected]> Antonis Kalipetis <[email protected]> Anusha Ragunathan <[email protected]> Ao Li <[email protected]> Arash Deshmeh <[email protected]> Arko Dasgupta <[email protected]> Arnaud Porterie <[email protected]> Arthur Peka <[email protected]> Ashwini Oruganti <[email protected]> Azat Khuyiyakhmetov <[email protected]> Bardia Keyoumarsi <[email protected]> Barnaby Gray <[email protected]> Bastiaan Bakker <[email protected]> BastianHofmann <[email protected]> Ben Bonnefoy <[email protected]> Ben Creasy <[email protected]> Ben Firshman <[email protected]> Benjamin Boudreau <[email protected]> Benoit Sigoure <[email protected]> Bhumika Bayani <[email protected]> Bill Wang <[email protected]> Bin Liu <[email protected]> Bingshen Wang <[email protected]> Boaz Shuster <[email protected]> Bogdan Anton <[email protected]> Boris Pruessmann <[email protected]> Bradley Cicenas <[email protected]> Brandon Mitchell <[email protected]> Brandon Philips <[email protected]> Brent Salisbury <[email protected]> Bret Fisher <[email protected]> Brian (bex) Exelbierd <[email protected]> Brian Goff <[email protected]> Brian Wieder <[email protected]> Bryan Bess <[email protected]> Bryan Boreham <[email protected]> Bryan Murphy <[email protected]> bryfry <[email protected]> Cameron Spear <[email protected]> Cao Weiwei <[email protected]> Carlo Mion <[email protected]> Carlos Alexandro Becker <[email protected]> Carlos de Paula <[email protected]> Ce Gao <[email protected]> Cedric Davies <[email protected]> Cezar Sa Espinola <[email protected]> Chad Faragher <[email protected]> Chao Wang <[email protected]> Charles Chan <[email protected]> Charles Law <[email protected]> Charles Smith <[email protected]> 
Charlie Drage <[email protected]> ChaYoung You <[email protected]> Chen Chuanliang <[email protected]> Chen Hanxiao <[email protected]> Chen Mingjie <[email protected]> Chen Qiu <[email protected]> Chris Gavin <[email protected]> Chris Gibson <[email protected]> Chris McKinnel <[email protected]> Chris Snow <[email protected]> Chris Weyl <[email protected]> Christian Persson <[email protected]> Christian Stefanescu <[email protected]> Christophe Robin <[email protected]> Christophe Vidal <[email protected]> Christopher Biscardi <[email protected]> Christopher Crone <[email protected]> Christopher Jones <[email protected]> Christy Norman <[email protected]> Chun Chen <[email protected]> Clinton Kitson <[email protected]> Coenraad Loubser <[email protected]> Colin Hebert <[email protected]> Collin Guarino <[email protected]> Colm Hally <[email protected]> Comical Derskeal <[email protected]> Corey Farrell <[email protected]> Corey Quon <[email protected]> Craig Wilhite <[email protected]> Cristian Staretu <[email protected]> Daehyeok Mun <[email protected]> Dafydd Crosby <[email protected]> Daisuke Ito <[email protected]> dalanlan <[email protected]> Damien Nadé <[email protected]> Dan Cotora <[email protected]> Daniel Artine <[email protected]> Daniel Cassidy <[email protected]> Daniel Dao <[email protected]> Daniel Farrell <[email protected]> Daniel Gasienica <[email protected]> Daniel Goosen <[email protected]> Daniel Helfand <[email protected]> Daniel Hiltgen <[email protected]> Daniel J Walsh <[email protected]> Daniel Nephin <[email protected]> Daniel Norberg <[email protected]> Daniel Watkins <[email protected]> Daniel Zhang <[email protected]> Daniil Nikolenko <[email protected]> Danny Berger <[email protected]> Darren Shepherd <[email protected]> Darren Stahl <[email protected]> Dattatraya Kumbhar <[email protected]> Dave Goodchild <[email protected]> Dave Henderson <[email protected]> Dave Tucker <[email protected]> David Beitey <[email protected]> David 
Calavera <[email protected]> David Cramer <[email protected]> David Dooling <[email protected]> David Gageot <[email protected]> David Lechner <[email protected]> David Scott <[email protected]> David Sheets <[email protected]> David Williamson <[email protected]> David Xia <[email protected]> David Young <[email protected]> Deng Guangxing <[email protected]> Denis Defreyne <[email protected]> Denis Gladkikh <[email protected]> Denis Ollier <[email protected]> Dennis Docter <[email protected]> Derek McGowan <[email protected]> Deshi Xiao <[email protected]> Dharmit Shah <[email protected]> Dhawal Yogesh Bhanushali <[email protected]> Dieter Reuter <[email protected]> Dima Stopel <[email protected]> Dimitry Andric <[email protected]> Ding Fei <[email protected]> Diogo Monica <[email protected]> Djordje Lukic <[email protected]> Dmitry Gusev <[email protected]> Dmitry Smirnov <[email protected]> Dmitry V. Krivenok <[email protected]> Dominik Braun <[email protected]> Don Kjer <[email protected]> Dong Chen <[email protected]> Doug Davis <[email protected]> Drew Erny <[email protected]> Ed Costello <[email protected]> Elango Sivanandam <[email protected]> Eli Uriegas <[email protected]> Eli Uriegas <[email protected]> Elias Faxö <[email protected]> Elliot Luo <[email protected]> Eric Curtin <[email protected]> Eric G. Noriega <[email protected]> Eric Rosenberg <[email protected]> Eric Sage <[email protected]> Eric-Olivier Lamey <[email protected]> Erica Windisch <[email protected]> Erik Hollensbe <[email protected]> Erik St. Martin <[email protected]> Essam A. 
Hassan <[email protected]> Ethan Haynes <[email protected]> Euan Kemp <[email protected]> Eugene Yakubovich <[email protected]> Evan Allrich <[email protected]> Evan Hazlett <[email protected]> Evan Krall <[email protected]> Evelyn Xu <[email protected]> Everett Toews <[email protected]> Fabio Falci <[email protected]> Fabrizio Soppelsa <[email protected]> Felix Hupfeld <[email protected]> Felix Rabe <[email protected]> Filip Jareš <[email protected]> Flavio Crisciani <[email protected]> Florian Klein <[email protected]> Forest Johnson <[email protected]> Foysal Iqbal <[email protected]> François Scala <[email protected]> Fred Lifton <[email protected]> Frederic Hemberger <[email protected]> Frederick F. Kautz IV <[email protected]> Frederik Nordahl Jul Sabroe <[email protected]> Frieder Bluemle <[email protected]> Gabriel Nicolas Avellaneda <[email protected]> Gaetan de Villele <[email protected]> Gang Qiao <[email protected]> Gary Schaetz <[email protected]> Genki Takiuchi <[email protected]> George MacRorie <[email protected]> George Xie <[email protected]> Gianluca Borello <[email protected]> Gildas Cuisinier <[email protected]> Goksu Toprak <[email protected]> Gou Rao <[email protected]> Grant Reaber <[email protected]> Greg Pflaum <[email protected]> Guilhem Lettron <[email protected]> Guillaume J. 
Charmes <[email protected]> Guillaume Le Floch <[email protected]> gwx296173 <[email protected]> Günther Jungbluth <[email protected]> Hakan Özler <[email protected]> Hao Zhang <[email protected]> Harald Albers <[email protected]> Harold Cooper <[email protected]> Harry Zhang <[email protected]> He Simei <[email protected]> Hector S <[email protected]> Helen Xie <[email protected]> Henning Sprang <[email protected]> Henry N <[email protected]> Hernan Garcia <[email protected]> Hongbin Lu <[email protected]> Hu Keping <[email protected]> Huayi Zhang <[email protected]> Hugo Gabriel Eyherabide <[email protected]> huqun <[email protected]> Huu Nguyen <[email protected]> Hyzhou Zhy <[email protected]> Ian Campbell <[email protected]> Ian Philpot <[email protected]> Ignacio Capurro <[email protected]> Ilya Dmitrichenko <[email protected]> Ilya Khlopotov <[email protected]> Ilya Sotkov <[email protected]> Ioan Eugen Stan <[email protected]> Isabel Jimenez <[email protected]> Ivan Grcic <[email protected]> Ivan Markin <[email protected]> Jacob Atzen <[email protected]> Jacob Tomlinson <[email protected]> Jaivish Kothari <[email protected]> Jake Lambert <[email protected]> Jake Sanders <[email protected]> James Nesbitt <[email protected]> James Turnbull <[email protected]> Jamie Hannaford <[email protected]> Jan Koprowski <[email protected]> Jan Pazdziora <[email protected]> Jan-Jaap Driessen <[email protected]> Jana Radhakrishnan <[email protected]> Jared Hocutt <[email protected]> Jasmine Hegman <[email protected]> Jason Heiss <[email protected]> Jason Plum <[email protected]> Jay Kamat <[email protected]> Jean Rouge <[email protected]> Jean-Christophe Sirot <[email protected]> Jean-Pierre Huynh <[email protected]> Jeff Lindsay <[email protected]> Jeff Nickoloff <[email protected]> Jeff Silberman <[email protected]> Jeremy Chambers <[email protected]> Jeremy Unruh <[email protected]> Jeremy Yallop <[email protected]> Jeroen Franse <[email protected]> Jesse Adametz 
<[email protected]> Jessica Frazelle <[email protected]> Jezeniel Zapanta <[email protected]> Jian Zhang <[email protected]> Jie Luo <[email protected]> Jilles Oldenbeuving <[email protected]> Jim Galasyn <[email protected]> Jimmy Leger <[email protected]> Jimmy Song <[email protected]> jimmyxian <[email protected]> Jintao Zhang <[email protected]> Joao Fernandes <[email protected]> Joe Abbey <[email protected]> Joe Doliner <[email protected]> Joe Gordon <[email protected]> Joel Handwell <[email protected]> Joey Geiger <[email protected]> Joffrey F <[email protected]> Johan Euphrosine <[email protected]> Johannes 'fish' Ziemke <[email protected]> John Feminella <[email protected]> John Harris <[email protected]> John Howard <[email protected]> John Laswell <[email protected]> John Maguire <[email protected]> John Mulhausen <[email protected]> John Starks <[email protected]> John Stephens <[email protected]> John Tims <[email protected]> John V. Martinez <[email protected]> John Willis <[email protected]> Jon Johnson <[email protected]> Jonatas Baldin <[email protected]> Jonathan Boulle <[email protected]> Jonathan Lee <[email protected]> Jonathan Lomas <[email protected]> Jonathan McCrohan <[email protected]> Jonh Wendell <[email protected]> Jordan Jennings <[email protected]> Jose J. 
Escobar <[email protected]> Joseph Kern <[email protected]> Josh Bodah <[email protected]> Josh Chorlton <[email protected]> Josh Hawn <[email protected]> Josh Horwitz <[email protected]> Josh Soref <[email protected]> Julien Barbier <[email protected]> Julien Kassar <[email protected]> Julien Maitrehenry <[email protected]> Justas Brazauskas <[email protected]> Justin Cormack <[email protected]> Justin Simonelis <[email protected]> Justyn Temme <[email protected]> Jyrki Puttonen <[email protected]> Jérémie Drouet <[email protected]> Jérôme Petazzoni <[email protected]> Jörg Thalheim <[email protected]> Kai Blin <[email protected]> Kai Qiang Wu (Kennan) <[email protected]> Kara Alexandra <[email protected]> Kareem Khazem <[email protected]> Karthik Nayak <[email protected]> Kat Samperi <[email protected]> Kathryn Spiers <[email protected]> Katie McLaughlin <[email protected]> Ke Xu <[email protected]> Kei Ohmura <[email protected]> Keith Hudgins <[email protected]> Ken Cochrane <[email protected]> Ken ICHIKAWA <[email protected]> Kenfe-Mickaël Laventure <[email protected]> Kevin Burke <[email protected]> Kevin Feyrer <[email protected]> Kevin Kern <[email protected]> Kevin Kirsche <[email protected]> Kevin Meredith <[email protected]> Kevin Richardson <[email protected]> Kevin Woblick <[email protected]> khaled souf <[email protected]> Kim Eik <[email protected]> Kir Kolyshkin <[email protected]> Kotaro Yoshimatsu <[email protected]> Krasi Georgiev <[email protected]> Kris-Mikael Krister <[email protected]> Kun Zhang <[email protected]> Kunal Kushwaha <[email protected]> Lachlan Cooper <[email protected]> Lai Jiangshan <[email protected]> Lars Kellogg-Stedman <[email protected]> Laura Frank <[email protected]> Laurent Erignoux <[email protected]> Lee Gaines <[email protected]> Lei Jitang <[email protected]> Lennie <[email protected]> Leo Gallucci <[email protected]> Lewis Daly <[email protected]> Li Yi <[email protected]> Li Yi <[email protected]> Liang-Chi Hsieh 
<[email protected]> Lifubang <[email protected]> Lihua Tang <[email protected]> Lily Guo <[email protected]> Lin Lu <[email protected]> Linus Heckemann <[email protected]> Liping Xue <[email protected]> Liron Levin <[email protected]> liwenqi <[email protected]> lixiaobing10051267 <[email protected]> Lloyd Dewolf <[email protected]> Lorenzo Fontana <[email protected]> Louis Opter <[email protected]> Luca Favatella <[email protected]> Luca Marturana <[email protected]> Lucas Chan <[email protected]> Luka Hartwig <[email protected]> Lukas Heeren <[email protected]> Lukasz Zajaczkowski <[email protected]> Lydell Manganti <[email protected]> Lénaïc Huard <[email protected]> Ma Shimiao <[email protected]> Mabin <[email protected]> Maciej Kalisz <[email protected]> Madhav Puri <[email protected]> Madhu Venugopal <[email protected]> Madhur Batra <[email protected]> Malte Janduda <[email protected]> Manjunath A Kumatagi <[email protected]> Mansi Nahar <[email protected]> mapk0y <[email protected]> Marc Bihlmaier <[email protected]> Marco Mariani <[email protected]> Marco Vedovati <[email protected]> Marcus Martins <[email protected]> Marianna Tessel <[email protected]> Marius Ileana <[email protected]> Marius Sturm <[email protected]> Mark Oates <[email protected]> Marsh Macy <[email protected]> Martin Mosegaard Amdisen <[email protected]> Mary Anthony <[email protected]> Mason Fish <[email protected]> Mason Malone <[email protected]> Mateusz Major <[email protected]> Mathieu Champlon <[email protected]> Matt Gucci <[email protected]> Matt Robenolt <[email protected]> Matteo Orefice <[email protected]> Matthew Heon <[email protected]> Matthieu Hauglustaine <[email protected]> Mauro Porras P <[email protected]> Max Shytikov <[email protected]> Maxime Petazzoni <[email protected]> Mei ChunTao <[email protected]> Micah Zoltu <[email protected]> Michael A. 
Smith <[email protected]> Michael Bridgen <[email protected]> Michael Crosby <[email protected]> Michael Friis <[email protected]> Michael Irwin <[email protected]> Michael Käufl <[email protected]> Michael Prokop <[email protected]> Michael Scharf <[email protected]> Michael Spetsiotis <[email protected]> Michael Steinert <[email protected]> Michael West <[email protected]> Michal Minář <[email protected]> Michał Czeraszkiewicz <[email protected]> Miguel Angel Alvarez Cabrerizo <[email protected]> Mihai Borobocea <[email protected]> Mihuleacc Sergiu <[email protected]> Mike Brown <[email protected]> Mike Casas <[email protected]> Mike Danese <[email protected]> Mike Dillon <[email protected]> Mike Goelzer <[email protected]> Mike MacCana <[email protected]> mikelinjie <[email protected]> Mikhail Vasin <[email protected]> Milind Chawre <[email protected]> Mindaugas Rukas <[email protected]> Miroslav Gula <[email protected]> Misty Stanley-Jones <[email protected]> Mohammad Banikazemi <[email protected]> Mohammed Aaqib Ansari <[email protected]> Mohini Anne Dsouza <[email protected]> Moorthy RS <[email protected]> Morgan Bauer <[email protected]> Morten Hekkvang <[email protected]> Moysés Borges <[email protected]> Mrunal Patel <[email protected]> muicoder <[email protected]> Muthukumar R <[email protected]> Máximo Cuadros <[email protected]> Mårten Cassel <[email protected]> Nace Oroz <[email protected]> Nahum Shalman <[email protected]> Nalin Dahyabhai <[email protected]> Nao YONASHIRO <[email protected]> Nassim 'Nass' Eddequiouaq <[email protected]> Natalie Parker <[email protected]> Nate Brennand <[email protected]> Nathan Hsieh <[email protected]> Nathan LeClaire <[email protected]> Nathan McCauley <[email protected]> Neil Peterson <[email protected]> Nick Adcock <[email protected]> Nico Stapelbroek <[email protected]> Nicola Kabar <[email protected]> Nicolas Borboën <[email protected]> Nicolas De Loof <[email protected]> Nikhil Chawla <[email protected]> 
Nikolas Garofil <[email protected]> Nikolay Milovanov <[email protected]> Nir Soffer <[email protected]> Nishant Totla <[email protected]> NIWA Hideyuki <[email protected]> Noah Treuhaft <[email protected]> O.S. Tezer <[email protected]> Odin Ugedal <[email protected]> ohmystack <[email protected]> Olle Jonsson <[email protected]> Olli Janatuinen <[email protected]> Oscar Wieman <[email protected]> Otto Kekäläinen <[email protected]> Ovidio Mallo <[email protected]> Pascal Borreli <[email protected]> Patrick Böänziger <[email protected]> Patrick Hemmer <[email protected]> Patrick Lang <[email protected]> Paul <[email protected]> Paul Kehrer <[email protected]> Paul Lietar <[email protected]> Paul Mulders <[email protected]> Paul Weaver <[email protected]> Pavel Pospisil <[email protected]> Paweł Szczekutowicz <[email protected]> Peeyush Gupta <[email protected]> Per Lundberg <[email protected]> Peter Edge <[email protected]> Peter Hsu <[email protected]> Peter Jaffe <[email protected]> Peter Kehl <[email protected]> Peter Nagy <[email protected]> Peter Salvatore <[email protected]> Peter Waller <[email protected]> Phil Estes <[email protected]> Philip Alexander Etling <[email protected]> Philipp Gillé <[email protected]> Philipp Schmied <[email protected]> pidster <[email protected]> pixelistik <[email protected]> Pratik Karki <[email protected]> Prayag Verma <[email protected]> Preston Cowley <[email protected]> Pure White <[email protected]> Qiang Huang <[email protected]> Qinglan Peng <[email protected]> qudongfang <[email protected]> Raghavendra K T <[email protected]> Rahul Zoldyck <[email protected]> Ravi Shekhar Jethani <[email protected]> Ray Tsang <[email protected]> Reficul <[email protected]> Remy Suen <[email protected]> Renaud Gaubert <[email protected]> Ricardo N Feliciano <[email protected]> Rich Moyse <[email protected]> Richard Mathie <[email protected]> Richard Scothern <[email protected]> Rick Wieman <[email protected]> Ritesh H Shukla <[email 
protected]> Riyaz Faizullabhoy <[email protected]> Rob Gulewich <[email protected]> Robert Wallis <[email protected]> Robin Naundorf <[email protected]> Robin Speekenbrink <[email protected]> Rodolfo Ortiz <[email protected]> Rogelio Canedo <[email protected]> Rohan Verma <[email protected]> Roland Kammerer <[email protected]> Roman Dudin <[email protected]> Rory Hunter <[email protected]> Ross Boucher <[email protected]> Rubens Figueiredo <[email protected]> Rui Cao <[email protected]> Ryan Belgrave <[email protected]> Ryan Detzel <[email protected]> Ryan Stelly <[email protected]> Ryan Wilson-Perkin <[email protected]> Ryan Zhang <[email protected]> Sainath Grandhi <[email protected]> Sakeven Jiang <[email protected]> Sally O'Malley <[email protected]> Sam Neirinck <[email protected]> Samarth Shah <[email protected]> Sambuddha Basu <[email protected]> Sami Tabet <[email protected]> Samuel Cochran <[email protected]> Samuel Karp <[email protected]> Santhosh Manohar <[email protected]> Sargun Dhillon <[email protected]> Saswat Bhattacharya <[email protected]> Scott Brenner <[email protected]> Scott Collier <[email protected]> Sean Christopherson <[email protected]> Sean Rodman <[email protected]> Sebastiaan van Stijn <[email protected]> Sergey Tryuber <[email protected]> Serhat Gülçiçek <[email protected]> Sevki Hasirci <[email protected]> Shaun Kaasten <[email protected]> Sheng Yang <[email protected]> Shijiang Wei <[email protected]> Shishir Mahajan <[email protected]> Shoubhik Bose <[email protected]> Shukui Yang <[email protected]> Sian Lerk Lau <[email protected]> Sidhartha Mani <[email protected]> sidharthamani <[email protected]> Silvin Lubecki <[email protected]> Simei He <[email protected]> Simon Ferquel <[email protected]> Simon Heimberg <[email protected]> Sindhu S <[email protected]> Slava Semushin <[email protected]> Solomon Hykes <[email protected]> Song Gao <[email protected]> Spencer Brown <[email protected]> squeegels <[email protected]> Srini 
Brahmaroutu <[email protected]> Stefan S. <[email protected]> Stefan Scherer <[email protected]> Stefan Weil <[email protected]> Stephane Jeandeaux <[email protected]> Stephen Day <[email protected]> Stephen Rust <[email protected]> Steve Durrheimer <[email protected]> Steve Richards <[email protected]> Steven Burgess <[email protected]> Subhajit Ghosh <[email protected]> Sun Jianbo <[email protected]> Sune Keller <[email protected]> Sungwon Han <[email protected]> Sunny Gogoi <[email protected]> Sven Dowideit <[email protected]> Sylvain Baubeau <[email protected]> Sébastien HOUZÉ <[email protected]> T K Sourabh <[email protected]> TAGOMORI Satoshi <[email protected]> taiji-tech <[email protected]> Taylor Jones <[email protected]> Tejaswini Duggaraju <[email protected]> Tengfei Wang <[email protected]> Teppei Fukuda <[email protected]> Thatcher Peskens <[email protected]> Thibault Coupin <[email protected]> Thomas Gazagnaire <[email protected]> Thomas Krzero <[email protected]> Thomas Leonard <[email protected]> Thomas Léveil <[email protected]> Thomas Riccardi <[email protected]> Thomas Swift <[email protected]> Tianon Gravi <[email protected]> Tianyi Wang <[email protected]> Tibor Vass <[email protected]> Tim Dettrick <[email protected]> Tim Hockin <[email protected]> Tim Sampson <[email protected]> Tim Smith <[email protected]> Tim Waugh <[email protected]> Tim Wraight <[email protected]> timfeirg <[email protected]> Timothy Hobbs <[email protected]> Tobias Bradtke <[email protected]> Tobias Gesellchen <[email protected]> Todd Whiteman <[email protected]> Tom Denham <[email protected]> Tom Fotherby <[email protected]> Tom Klingenberg <[email protected]> Tom Milligan <[email protected]> Tom X. 
Tobin <[email protected]> Tomas Tomecek <[email protected]> Tomasz Kopczynski <[email protected]> Tomáš Hrčka <[email protected]> Tony Abboud <[email protected]> Tõnis Tiigi <[email protected]> Trapier Marshall <[email protected]> Travis Cline <[email protected]> Tristan Carel <[email protected]> Tycho Andersen <[email protected]> Tycho Andersen <[email protected]> uhayate <[email protected]> Ulrich Bareth <[email protected]> Ulysses Souza <[email protected]> Umesh Yadav <[email protected]> Valentin Lorentz <[email protected]> Venkateswara Reddy Bukkasamudram <[email protected]> Veres Lajos <[email protected]> Victor Vieux <[email protected]> Victoria Bialas <[email protected]> Viktor Stanchev <[email protected]> Vimal Raghubir <[email protected]> Vincent Batts <[email protected]> Vincent Bernat <[email protected]> Vincent Demeester <[email protected]> Vincent Woo <[email protected]> Vishnu Kannan <[email protected]> Vivek Goyal <[email protected]> Wang Jie <[email protected]> Wang Lei <[email protected]> Wang Long <[email protected]> Wang Ping <[email protected]> Wang Xing <[email protected]> Wang Yuexiao <[email protected]> Wang Yumu <[email protected]> Wataru Ishida <[email protected]> Wayne Song <[email protected]> Wen Cheng Ma <[email protected]> Wenzhi Liang <[email protected]> Wes Morgan <[email protected]> Wewang Xiaorenfine <[email protected]> William Henry <[email protected]> Xianglin Gao <[email protected]> Xiaodong Liu <[email protected]> Xiaodong Zhang <[email protected]> Xiaoxi He <[email protected]> Xinbo Weng <[email protected]> Xuecong Liao <[email protected]> Yan Feng <[email protected]> Yanqiang Miao <[email protected]> Yassine Tijani <[email protected]> Yi EungJun <[email protected]> Ying Li <[email protected]> Yong Tang <[email protected]> Yosef Fertel <[email protected]> Yu Peng <[email protected]> Yuan Sun <[email protected]> Yue Zhang <[email protected]> Yunxiang Huang <[email protected]> Zachary Romero <[email protected]> Zander Mackie 
<[email protected]> zebrilee <[email protected]> Zhang Kun <[email protected]> Zhang Wei <[email protected]> Zhang Wentao <[email protected]> ZhangHang <[email protected]> zhenghenghuo <[email protected]> Zhou Hao <[email protected]> Zhoulin Xie <[email protected]> Zhu Guihua <[email protected]> Álex González <[email protected]> Álvaro Lázaro <[email protected]> Átila Camurça Alves <[email protected]> 徐俊杰 <[email protected]>
9,551
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/cli/LICENSE
Apache License Version 2.0, January 2004 https://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS Copyright 2013-2017 Docker, Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
9,552
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/cli/cli
kubeflow_public_repos/fate-operator/vendor/github.com/docker/cli/cli/config/config.go
package config

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
	"sync"

	"github.com/docker/cli/cli/config/configfile"
	"github.com/docker/cli/cli/config/credentials"
	"github.com/docker/cli/cli/config/types"
	"github.com/docker/docker/pkg/homedir"
	"github.com/pkg/errors"
)

const (
	// ConfigFileName is the name of config file
	ConfigFileName = "config.json"
	// configFileDir is the directory under the user's home that holds the
	// config file when DOCKER_CONFIG is not set.
	configFileDir = ".docker"
	// oldConfigfile is the legacy config file name, looked up directly in
	// the user's home directory as a last-resort fallback in Load.
	oldConfigfile = ".dockercfg"
	// contextsDir is the subdirectory of the config dir that stores docker contexts.
	contextsDir = "contexts"
)

var (
	// initConfigDir guards the one-time lazy resolution of configDir in Dir.
	initConfigDir sync.Once
	// configDir is the resolved configuration directory. It may also be set
	// explicitly via SetDir, which bypasses the lazy resolution.
	configDir string
)

// setConfigDir resolves the config directory, preferring (in order): a value
// already set (e.g. via SetDir), the DOCKER_CONFIG environment variable, and
// finally <home>/.docker.
func setConfigDir() {
	if configDir != "" {
		return
	}
	configDir = os.Getenv("DOCKER_CONFIG")
	if configDir == "" {
		configDir = filepath.Join(homedir.Get(), configFileDir)
	}
}

// Dir returns the directory the configuration file is stored in.
// Resolution happens lazily, exactly once per process.
func Dir() string {
	initConfigDir.Do(setConfigDir)
	return configDir
}

// ContextStoreDir returns the directory the docker contexts are stored in
func ContextStoreDir() string {
	return filepath.Join(Dir(), contextsDir)
}

// SetDir sets the directory the configuration file is stored in.
// NOTE(review): this writes configDir directly; call it before the first use
// of Dir, since setConfigDir only respects a value that is already non-empty.
func SetDir(dir string) {
	configDir = filepath.Clean(dir)
}

// Path returns the path to a file relative to the config dir. It rejects any
// result (e.g. produced by ".." elements) that would escape the config
// directory.
func Path(p ...string) (string, error) {
	path := filepath.Join(append([]string{Dir()}, p...)...)
	if !strings.HasPrefix(path, Dir()+string(filepath.Separator)) {
		return "", errors.Errorf("path %q is outside of root config directory %q", path, Dir())
	}
	return path, nil
}

// LegacyLoadFromReader is a convenience function that creates a ConfigFile object from
// a non-nested reader
func LegacyLoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) {
	configFile := configfile.ConfigFile{
		AuthConfigs: make(map[string]types.AuthConfig),
	}
	err := configFile.LegacyLoadFromReader(configData)
	return &configFile, err
}

// LoadFromReader is a convenience function that creates a ConfigFile object from
// a reader
func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) {
	configFile := configfile.ConfigFile{
		AuthConfigs: make(map[string]types.AuthConfig),
	}
	err := configFile.LoadFromReader(configData)
	return &configFile, err
}

// Load reads the configuration files in the given directory, and sets up
// the auth config information and returns values.
// It first tries <configDir>/config.json; if that file does not exist it
// falls back to the legacy ~/.dockercfg format. A missing file is not an
// error — an empty ConfigFile bound to the new filename is returned.
// FIXME: use the internal golang config parser
func Load(configDir string) (*configfile.ConfigFile, error) {
	if configDir == "" {
		configDir = Dir()
	}
	filename := filepath.Join(configDir, ConfigFileName)
	configFile := configfile.New(filename)

	// Try happy path first - latest config file
	if file, err := os.Open(filename); err == nil {
		defer file.Close()
		err = configFile.LoadFromReader(file)
		if err != nil {
			// Wrap with the filename so the caller's message names the file.
			err = errors.Wrap(err, filename)
		}
		return configFile, err
	} else if !os.IsNotExist(err) {
		// if file is there but we can't stat it for any reason other
		// than it doesn't exist then stop
		return configFile, errors.Wrap(err, filename)
	}

	// Can't find latest config file so check for the old one
	home, err := os.UserHomeDir()
	if err != nil {
		return configFile, errors.Wrap(err, oldConfigfile)
	}
	filename = filepath.Join(home, oldConfigfile)
	if file, err := os.Open(filename); err == nil {
		defer file.Close()
		if err := configFile.LegacyLoadFromReader(file); err != nil {
			return configFile, errors.Wrap(err, filename)
		}
	}
	// Neither file existed (or the legacy one loaded cleanly): success.
	return configFile, nil
}

// LoadDefaultConfigFile attempts to load the default config file and returns
// an initialized ConfigFile struct if none is found.
// Load errors are not fatal: they are reported as a warning on stderr and an
// otherwise-usable ConfigFile is still returned.
func LoadDefaultConfigFile(stderr io.Writer) *configfile.ConfigFile {
	configFile, err := Load(Dir())
	if err != nil {
		fmt.Fprintf(stderr, "WARNING: Error loading config file: %v\n", err)
	}
	if !configFile.ContainsAuth() {
		// No credentials in the file itself: pick a platform default
		// credential helper (may keep an explicitly configured one).
		configFile.CredentialsStore = credentials.DetectDefaultStore(configFile.CredentialsStore)
	}
	return configFile
}
9,553
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/cli/cli/config
kubeflow_public_repos/fate-operator/vendor/github.com/docker/cli/cli/config/credentials/native_store.go
package credentials

import (
	"github.com/docker/cli/cli/config/types"
	"github.com/docker/docker-credential-helpers/client"
	"github.com/docker/docker-credential-helpers/credentials"
)

const (
	// remoteCredentialsPrefix is prepended to a helper suffix to form the
	// executable name of an external credential helper (e.g.
	// "docker-credential-" + "osxkeychain").
	remoteCredentialsPrefix = "docker-credential-"
	// tokenUsername is a sentinel username that marks the stored secret as
	// an identity token rather than a password.
	tokenUsername = "<token>"
)

// nativeStore implements a credentials store
// using native keychain to keep credentials secure.
// It piggybacks into a file store to keep users' emails.
type nativeStore struct {
	programFunc client.ProgramFunc // invokes the external helper binary
	fileStore   Store              // plain-text store, used only for email metadata
}

// NewNativeStore creates a new native store that
// uses a remote helper program to manage credentials.
func NewNativeStore(file store, helperSuffix string) Store {
	name := remoteCredentialsPrefix + helperSuffix
	return &nativeStore{
		programFunc: client.NewShellProgramFunc(name),
		fileStore:   NewFileStore(file),
	}
}

// Erase removes the given credentials from the native store.
// The helper is asked first; only on its success is the file-store entry
// (email metadata) removed as well.
func (c *nativeStore) Erase(serverAddress string) error {
	if err := client.Erase(c.programFunc, serverAddress); err != nil {
		return err
	}
	// Fallback to plain text store to remove email
	return c.fileStore.Erase(serverAddress)
}

// Get retrieves credentials for a specific server from the native store.
// The file store supplies any non-secret metadata; the secret fields are
// then overwritten from the helper's answer.
func (c *nativeStore) Get(serverAddress string) (types.AuthConfig, error) {
	// load user email if it exist or an empty auth config.
	auth, _ := c.fileStore.Get(serverAddress)

	creds, err := c.getCredentialsFromStore(serverAddress)
	if err != nil {
		return auth, err
	}
	auth.Username = creds.Username
	auth.IdentityToken = creds.IdentityToken
	auth.Password = creds.Password

	return auth, nil
}

// GetAll retrieves all the credentials from the native store.
// Each registry listed by the helper costs one extra helper invocation to
// fetch its secret.
func (c *nativeStore) GetAll() (map[string]types.AuthConfig, error) {
	auths, err := c.listCredentialsInStore()
	if err != nil {
		return nil, err
	}

	// Emails are only stored in the file store.
	// This call can be safely eliminated when emails are removed.
	fileConfigs, _ := c.fileStore.GetAll()

	authConfigs := make(map[string]types.AuthConfig)
	for registry := range auths {
		creds, err := c.getCredentialsFromStore(registry)
		if err != nil {
			return nil, err
		}
		ac := fileConfigs[registry] // might contain Email
		ac.Username = creds.Username
		ac.Password = creds.Password
		ac.IdentityToken = creds.IdentityToken
		authConfigs[registry] = ac
	}

	return authConfigs, nil
}

// Store saves the given credentials in the file store.
// Secrets go to the native helper; the file store then receives a copy with
// all secret fields blanked, so only non-secret metadata hits disk.
func (c *nativeStore) Store(authConfig types.AuthConfig) error {
	if err := c.storeCredentialsInStore(authConfig); err != nil {
		return err
	}
	// authConfig is a value copy; clearing these fields does not affect the caller.
	authConfig.Username = ""
	authConfig.Password = ""
	authConfig.IdentityToken = ""

	// Fallback to old credential in plain text to save only the email
	return c.fileStore.Store(authConfig)
}

// storeCredentialsInStore executes the command to store the credentials in the native store.
// An identity token takes precedence over a password: it is stored as the
// secret under the tokenUsername sentinel.
func (c *nativeStore) storeCredentialsInStore(config types.AuthConfig) error {
	creds := &credentials.Credentials{
		ServerURL: config.ServerAddress,
		Username:  config.Username,
		Secret:    config.Password,
	}
	if config.IdentityToken != "" {
		creds.Username = tokenUsername
		creds.Secret = config.IdentityToken
	}
	return client.Store(c.programFunc, creds)
}

// getCredentialsFromStore executes the command to get the credentials from the native store.
// A "not found" answer from the helper is mapped to an empty AuthConfig and
// a nil error, so callers can prompt for fresh credentials.
func (c *nativeStore) getCredentialsFromStore(serverAddress string) (types.AuthConfig, error) {
	var ret types.AuthConfig
	creds, err := client.Get(c.programFunc, serverAddress)
	if err != nil {
		if credentials.IsErrCredentialsNotFound(err) {
			// do not return an error if the credentials are not
			// in the keychain. Let docker ask for new credentials.
			return ret, nil
		}
		return ret, err
	}
	// The sentinel username distinguishes token-based from password-based entries.
	if creds.Username == tokenUsername {
		ret.IdentityToken = creds.Secret
	} else {
		ret.Password = creds.Secret
		ret.Username = creds.Username
	}
	ret.ServerAddress = serverAddress
	return ret, nil
}

// listCredentialsInStore returns a listing of stored credentials as a map of
// URL -> username.
func (c *nativeStore) listCredentialsInStore() (map[string]string, error) {
	return client.List(c.programFunc)
}
9,554
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/cli/cli/config
kubeflow_public_repos/fate-operator/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go
// +build !windows,!darwin,!linux package credentials func defaultCredentialsStore() string { return "" }
9,555
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/cli/cli/config
kubeflow_public_repos/fate-operator/vendor/github.com/docker/cli/cli/config/credentials/file_store.go
package credentials

import (
	"strings"

	"github.com/docker/cli/cli/config/types"
)

// store is the minimal interface the file-backed credential store needs
// from the underlying config file (satisfied by configfile.ConfigFile).
type store interface {
	Save() error
	GetAuthConfigs() map[string]types.AuthConfig
	GetFilename() string
}

// fileStore implements a credentials store using
// the docker configuration file to keep the credentials in plain text.
type fileStore struct {
	file store
}

// NewFileStore creates a new file credentials store.
func NewFileStore(file store) Store {
	return &fileStore{file: file}
}

// Erase removes the given credentials from the file store and saves the file.
func (c *fileStore) Erase(serverAddress string) error {
	delete(c.file.GetAuthConfigs(), serverAddress)
	return c.file.Save()
}

// Get retrieves credentials for a specific server from the file store.
// A missing entry is not an error: an empty AuthConfig is returned instead.
func (c *fileStore) Get(serverAddress string) (types.AuthConfig, error) {
	authConfig, ok := c.file.GetAuthConfigs()[serverAddress]
	if !ok {
		// Maybe they have a legacy config file, we will iterate the keys converting
		// them to the new format and testing
		for r, ac := range c.file.GetAuthConfigs() {
			if serverAddress == ConvertToHostname(r) {
				return ac, nil
			}
		}

		authConfig = types.AuthConfig{}
	}
	return authConfig, nil
}

// GetAll returns every credential in the file store, keyed by registry.
// Note: this returns the live map, not a copy.
func (c *fileStore) GetAll() (map[string]types.AuthConfig, error) {
	return c.file.GetAuthConfigs(), nil
}

// Store saves the given credentials in the file store and saves the file.
func (c *fileStore) Store(authConfig types.AuthConfig) error {
	c.file.GetAuthConfigs()[authConfig.ServerAddress] = authConfig
	return c.file.Save()
}

// GetFilename returns the path of the backing configuration file.
func (c *fileStore) GetFilename() string {
	return c.file.GetFilename()
}

// IsFileStore reports that this store keeps credentials in plain text.
func (c *fileStore) IsFileStore() bool {
	return true
}

// ConvertToHostname converts a registry url which has http|https prepended
// to just an hostname.
// Copied from github.com/docker/docker/registry.ConvertToHostname to reduce dependencies.
func ConvertToHostname(url string) string {
	// Drop a leading http:// or https:// scheme, if present; any other
	// scheme is left untouched.
	stripped := url
	for _, scheme := range []string{"http://", "https://"} {
		if strings.HasPrefix(url, scheme) {
			stripped = url[len(scheme):]
			break
		}
	}
	// Keep only the host portion, discarding any path component.
	return strings.SplitN(stripped, "/", 2)[0]
}
9,556
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/cli/cli/config
kubeflow_public_repos/fate-operator/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go
package credentials

// defaultCredentialsStore returns the name of the platform's default
// credential helper: "wincred" (Windows Credential Manager) on Windows.
func defaultCredentialsStore() string {
	const windowsHelper = "wincred"
	return windowsHelper
}
9,557
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/cli/cli/config
kubeflow_public_repos/fate-operator/vendor/github.com/docker/cli/cli/config/credentials/credentials.go
package credentials

import (
	"github.com/docker/cli/cli/config/types"
)

// Store is the interface that any credentials store must implement.
// Implementations include the plain-text fileStore and the helper-backed
// nativeStore in this package.
type Store interface {
	// Erase removes credentials from the store for a given server.
	Erase(serverAddress string) error
	// Get retrieves credentials from the store for a given server.
	Get(serverAddress string) (types.AuthConfig, error)
	// GetAll retrieves all the credentials from the store.
	GetAll() (map[string]types.AuthConfig, error)
	// Store saves credentials in the store.
	Store(authConfig types.AuthConfig) error
}
9,558
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/cli/cli/config
kubeflow_public_repos/fate-operator/vendor/github.com/docker/cli/cli/config/credentials/default_store.go
package credentials import ( exec "golang.org/x/sys/execabs" ) // DetectDefaultStore return the default credentials store for the platform if // the store executable is available. func DetectDefaultStore(store string) string { platformDefault := defaultCredentialsStore() // user defined or no default for platform if store != "" || platformDefault == "" { return store } if _, err := exec.LookPath(remoteCredentialsPrefix + platformDefault); err == nil { return platformDefault } return "" }
9,559
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/cli/cli/config
kubeflow_public_repos/fate-operator/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go
package credentials

// defaultCredentialsStore returns the name of the platform's default
// credential helper: "osxkeychain" (macOS Keychain) on Darwin.
func defaultCredentialsStore() string {
	const darwinHelper = "osxkeychain"
	return darwinHelper
}
9,560
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/cli/cli/config
kubeflow_public_repos/fate-operator/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go
package credentials

import (
	"os/exec"
)

// defaultCredentialsStore returns the name of the preferred credential
// helper on Linux: "pass" when the pass binary is available on PATH,
// otherwise "secretservice" (the freedesktop Secret Service API).
func defaultCredentialsStore() string {
	const (
		passHelper          = "pass"
		secretServiceHelper = "secretservice"
	)
	if _, lookErr := exec.LookPath(passHelper); lookErr != nil {
		return secretServiceHelper
	}
	return passHelper
}
9,561
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/cli/cli/config
kubeflow_public_repos/fate-operator/vendor/github.com/docker/cli/cli/config/configfile/file.go
package configfile

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"

	"github.com/docker/cli/cli/config/credentials"
	"github.com/docker/cli/cli/config/types"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

const (
	// This constant is only used for really old config files when the
	// URL wasn't saved as part of the config file and it was just
	// assumed to be this value.
	defaultIndexServer = "https://index.docker.io/v1/"
)

// ConfigFile ~/.docker/config.json file info
type ConfigFile struct {
	AuthConfigs          map[string]types.AuthConfig  `json:"auths"`
	HTTPHeaders          map[string]string            `json:"HttpHeaders,omitempty"`
	PsFormat             string                       `json:"psFormat,omitempty"`
	ImagesFormat         string                       `json:"imagesFormat,omitempty"`
	NetworksFormat       string                       `json:"networksFormat,omitempty"`
	PluginsFormat        string                       `json:"pluginsFormat,omitempty"`
	VolumesFormat        string                       `json:"volumesFormat,omitempty"`
	StatsFormat          string                       `json:"statsFormat,omitempty"`
	DetachKeys           string                       `json:"detachKeys,omitempty"`
	CredentialsStore     string                       `json:"credsStore,omitempty"`
	CredentialHelpers    map[string]string            `json:"credHelpers,omitempty"`
	Filename             string                       `json:"-"` // Note: for internal use only
	ServiceInspectFormat string                       `json:"serviceInspectFormat,omitempty"`
	ServicesFormat       string                       `json:"servicesFormat,omitempty"`
	TasksFormat          string                       `json:"tasksFormat,omitempty"`
	SecretFormat         string                       `json:"secretFormat,omitempty"`
	ConfigFormat         string                       `json:"configFormat,omitempty"`
	NodesFormat          string                       `json:"nodesFormat,omitempty"`
	PruneFilters         []string                     `json:"pruneFilters,omitempty"`
	Proxies              map[string]ProxyConfig       `json:"proxies,omitempty"`
	Experimental         string                       `json:"experimental,omitempty"`
	StackOrchestrator    string                       `json:"stackOrchestrator,omitempty"`
	Kubernetes           *KubernetesConfig            `json:"kubernetes,omitempty"`
	CurrentContext       string                       `json:"currentContext,omitempty"`
	CLIPluginsExtraDirs  []string                     `json:"cliPluginsExtraDirs,omitempty"`
	Plugins              map[string]map[string]string `json:"plugins,omitempty"`
	Aliases              map[string]string            `json:"aliases,omitempty"`
}

// ProxyConfig contains proxy configuration settings
type ProxyConfig struct {
	HTTPProxy  string `json:"httpProxy,omitempty"`
	HTTPSProxy string `json:"httpsProxy,omitempty"`
	NoProxy    string `json:"noProxy,omitempty"`
	FTPProxy   string `json:"ftpProxy,omitempty"`
}

// KubernetesConfig contains Kubernetes orchestrator settings
type KubernetesConfig struct {
	AllNamespaces string `json:"allNamespaces,omitempty"`
}

// New initializes an empty configuration file for the given filename 'fn'
func New(fn string) *ConfigFile {
	return &ConfigFile{
		AuthConfigs: make(map[string]types.AuthConfig),
		HTTPHeaders: make(map[string]string),
		Filename:    fn,
		Plugins:     make(map[string]map[string]string),
		Aliases:     make(map[string]string),
	}
}

// LegacyLoadFromReader reads the non-nested configuration data given and sets up the
// auth config information with given directory and populates the receiver object.
// It first tries the old JSON format (a bare auths map); failing that it falls
// back to the even older "user = base64" line format.
func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error {
	b, err := ioutil.ReadAll(configData)
	if err != nil {
		return err
	}

	if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil {
		arr := strings.Split(string(b), "\n")
		if len(arr) < 2 {
			return errors.Errorf("The Auth config file is empty")
		}
		authConfig := types.AuthConfig{}
		origAuth := strings.Split(arr[0], " = ")
		if len(origAuth) != 2 {
			return errors.Errorf("Invalid Auth config file")
		}
		authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1])
		if err != nil {
			return err
		}
		// No server URL in this ancient format; assume the default index.
		authConfig.ServerAddress = defaultIndexServer
		configFile.AuthConfigs[defaultIndexServer] = authConfig
	} else {
		for k, authConfig := range configFile.AuthConfigs {
			authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth)
			if err != nil {
				return err
			}
			authConfig.Auth = ""
			authConfig.ServerAddress = k
			configFile.AuthConfigs[k] = authConfig
		}
	}
	return nil
}

// LoadFromReader reads the configuration data given and sets up the auth config
// information with given directory and populates the receiver object.
// Base64 "auth" entries are decoded into Username/Password in memory.
func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error {
	// An empty reader (io.EOF) is treated as an empty config, not an error.
	if err := json.NewDecoder(configData).Decode(&configFile); err != nil && !errors.Is(err, io.EOF) {
		return err
	}
	var err error
	for addr, ac := range configFile.AuthConfigs {
		if ac.Auth != "" {
			ac.Username, ac.Password, err = decodeAuth(ac.Auth)
			if err != nil {
				return err
			}
		}
		ac.Auth = ""
		ac.ServerAddress = addr
		configFile.AuthConfigs[addr] = ac
	}
	return checkKubernetesConfiguration(configFile.Kubernetes)
}

// ContainsAuth returns whether there is authentication configured
// in this file or not.
func (configFile *ConfigFile) ContainsAuth() bool {
	return configFile.CredentialsStore != "" ||
		len(configFile.CredentialHelpers) > 0 ||
		len(configFile.AuthConfigs) > 0
}

// GetAuthConfigs returns the mapping of repo to auth configuration
func (configFile *ConfigFile) GetAuthConfigs() map[string]types.AuthConfig {
	return configFile.AuthConfigs
}

// SaveToWriter encodes and writes out all the authorization information to
// the given writer. Credentials are re-encoded into the base64 "auth" field
// and the plain Username/Password fields are blanked in the written output;
// the in-memory AuthConfigs map is restored afterwards via defer.
func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error {
	// Encode sensitive data into a new/temp struct
	tmpAuthConfigs := make(map[string]types.AuthConfig, len(configFile.AuthConfigs))
	for k, authConfig := range configFile.AuthConfigs {
		authCopy := authConfig
		// encode and save the authstring, while blanking out the original fields
		authCopy.Auth = encodeAuth(&authCopy)
		authCopy.Username = ""
		authCopy.Password = ""
		authCopy.ServerAddress = ""
		tmpAuthConfigs[k] = authCopy
	}

	saveAuthConfigs := configFile.AuthConfigs
	configFile.AuthConfigs = tmpAuthConfigs
	defer func() { configFile.AuthConfigs = saveAuthConfigs }()

	// User-Agent header is automatically set, and should not be stored in the configuration
	for v := range configFile.HTTPHeaders {
		if strings.EqualFold(v, "User-Agent") {
			delete(configFile.HTTPHeaders, v)
		}
	}

	data, err := json.MarshalIndent(configFile, "", "\t")
	if err != nil {
		return err
	}
	_, err = writer.Write(data)
	return err
}

// Save encodes and writes out all the authorization information.
// The file is written atomically: content goes to a temp file in the same
// directory, which is then renamed over the target (following symlinks and
// preserving the original file's permissions where possible).
func (configFile *ConfigFile) Save() (retErr error) {
	if configFile.Filename == "" {
		return errors.Errorf("Can't save config with empty filename")
	}

	dir := filepath.Dir(configFile.Filename)
	if err := os.MkdirAll(dir, 0700); err != nil {
		return err
	}
	temp, err := ioutil.TempFile(dir, filepath.Base(configFile.Filename))
	if err != nil {
		return err
	}
	defer func() {
		// Remove the temp file on any failure path; named return retErr
		// lets the deferred closure observe the outcome.
		temp.Close()
		if retErr != nil {
			if err := os.Remove(temp.Name()); err != nil {
				logrus.WithError(err).WithField("file", temp.Name()).Debug("Error cleaning up temp file")
			}
		}
	}()

	err = configFile.SaveToWriter(temp)
	if err != nil {
		return err
	}

	if err := temp.Close(); err != nil {
		return errors.Wrap(err, "error closing temp file")
	}

	// Handle situation where the configfile is a symlink
	cfgFile := configFile.Filename
	if f, err := os.Readlink(cfgFile); err == nil {
		cfgFile = f
	}

	// Try copying the current config file (if any) ownership and permissions
	copyFilePermissions(cfgFile, temp.Name())
	return os.Rename(temp.Name(), cfgFile)
}

// ParseProxyConfig computes proxy configuration by retrieving the config for the provided host and
// then checking this against any environment variables provided to the container.
// Values already present in runOpts take precedence; both upper- and
// lower-case variants of each variable are populated.
func (configFile *ConfigFile) ParseProxyConfig(host string, runOpts map[string]*string) map[string]*string {
	var cfgKey string

	if _, ok := configFile.Proxies[host]; !ok {
		cfgKey = "default"
	} else {
		cfgKey = host
	}

	config := configFile.Proxies[cfgKey]
	permitted := map[string]*string{
		"HTTP_PROXY":  &config.HTTPProxy,
		"HTTPS_PROXY": &config.HTTPSProxy,
		"NO_PROXY":    &config.NoProxy,
		"FTP_PROXY":   &config.FTPProxy,
	}
	m := runOpts
	if m == nil {
		m = make(map[string]*string)
	}
	for k := range permitted {
		if *permitted[k] == "" {
			continue
		}
		if _, ok := m[k]; !ok {
			m[k] = permitted[k]
		}
		if _, ok := m[strings.ToLower(k)]; !ok {
			m[strings.ToLower(k)] = permitted[k]
		}
	}
	return m
}

// encodeAuth creates a base64 encoded string containing authorization information
func encodeAuth(authConfig *types.AuthConfig) string {
	if authConfig.Username == "" && authConfig.Password == "" {
		return ""
	}

	authStr := authConfig.Username + ":" + authConfig.Password
	msg := []byte(authStr)
	encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg)))
	base64.StdEncoding.Encode(encoded, msg)
	return string(encoded)
}

// decodeAuth decodes a base64 encoded string and returns username and password
func decodeAuth(authStr string) (string, string, error) {
	if authStr == "" {
		return "", "", nil
	}

	decLen := base64.StdEncoding.DecodedLen(len(authStr))
	decoded := make([]byte, decLen)
	authByte := []byte(authStr)
	n, err := base64.StdEncoding.Decode(decoded, authByte)
	if err != nil {
		return "", "", err
	}
	if n > decLen {
		return "", "", errors.Errorf("Something went wrong decoding auth config")
	}
	arr := strings.SplitN(string(decoded), ":", 2)
	if len(arr) != 2 {
		return "", "", errors.Errorf("Invalid auth configuration file")
	}
	// Trim trailing NULs left by fixed-size base64 decode buffers.
	password := strings.Trim(arr[1], "\x00")
	return arr[0], password, nil
}

// GetCredentialsStore returns a new credentials store from the settings in the
// configuration file
func (configFile *ConfigFile) GetCredentialsStore(registryHostname string) credentials.Store {
	if helper := getConfiguredCredentialStore(configFile, registryHostname); helper != "" {
		return newNativeStore(configFile, helper)
	}
	return credentials.NewFileStore(configFile)
}

// var for unit testing.
var newNativeStore = func(configFile *ConfigFile, helperSuffix string) credentials.Store {
	return credentials.NewNativeStore(configFile, helperSuffix)
}

// GetAuthConfig retrieves authentication for a repository hostname from
// whichever credential store is configured for it.
func (configFile *ConfigFile) GetAuthConfig(registryHostname string) (types.AuthConfig, error) {
	store := configFile.GetCredentialsStore(registryHostname)
	return store.Get(registryHostname)
}

// getConfiguredCredentialStore returns the credential helper configured for the
// given registry, the default credsStore, or the empty string if neither are
// configured.
func getConfiguredCredentialStore(c *ConfigFile, registryHostname string) string {
	if registryHostname == "" || c.CredentialHelpers == nil {
		return c.CredentialsStore
	}
	helper, exists := c.CredentialHelpers[registryHostname]
	if !exists {
		return c.CredentialsStore
	}
	return helper
}

// GetAllCredentials returns all of the credentials stored in all of the
// configured credential stores.
func (configFile *ConfigFile) GetAllCredentials() (map[string]types.AuthConfig, error) {
	auths := make(map[string]types.AuthConfig)

	// Start with everything held by the default store.
	defaultStore := configFile.GetCredentialsStore("")
	fromDefault, err := defaultStore.GetAll()
	if err != nil {
		return nil, err
	}
	for registry, ac := range fromDefault {
		auths[registry] = ac
	}

	// Auth configs from a registry-specific helper should override those from the default store.
	for registryHostname := range configFile.CredentialHelpers {
		ac, err := configFile.GetAuthConfig(registryHostname)
		if err != nil {
			return nil, err
		}
		auths[registryHostname] = ac
	}

	return auths, nil
}

// GetFilename returns the file name that this config file is based on.
func (configFile *ConfigFile) GetFilename() string {
	return configFile.Filename
}

// PluginConfig retrieves the requested option for the given plugin.
func (configFile *ConfigFile) PluginConfig(pluginname, option string) (string, bool) { if configFile.Plugins == nil { return "", false } pluginConfig, ok := configFile.Plugins[pluginname] if !ok { return "", false } value, ok := pluginConfig[option] return value, ok } // SetPluginConfig sets the option to the given value for the given // plugin. Passing a value of "" will remove the option. If removing // the final config item for a given plugin then also cleans up the // overall plugin entry. func (configFile *ConfigFile) SetPluginConfig(pluginname, option, value string) { if configFile.Plugins == nil { configFile.Plugins = make(map[string]map[string]string) } pluginConfig, ok := configFile.Plugins[pluginname] if !ok { pluginConfig = make(map[string]string) configFile.Plugins[pluginname] = pluginConfig } if value != "" { pluginConfig[option] = value } else { delete(pluginConfig, option) } if len(pluginConfig) == 0 { delete(configFile.Plugins, pluginname) } } func checkKubernetesConfiguration(kubeConfig *KubernetesConfig) error { if kubeConfig == nil { return nil } switch kubeConfig.AllNamespaces { case "": case "enabled": case "disabled": default: return fmt.Errorf("invalid 'kubernetes.allNamespaces' value, should be 'enabled' or 'disabled': %s", kubeConfig.AllNamespaces) } return nil }
9,562
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/cli/cli/config
kubeflow_public_repos/fate-operator/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go
// +build !windows package configfile import ( "os" "syscall" ) // copyFilePermissions copies file ownership and permissions from "src" to "dst", // ignoring any error during the process. func copyFilePermissions(src, dst string) { var ( mode os.FileMode = 0600 uid, gid int ) fi, err := os.Stat(src) if err != nil { return } if fi.Mode().IsRegular() { mode = fi.Mode() } if err := os.Chmod(dst, mode); err != nil { return } uid = int(fi.Sys().(*syscall.Stat_t).Uid) gid = int(fi.Sys().(*syscall.Stat_t).Gid) if uid > 0 && gid > 0 { _ = os.Chown(dst, uid, gid) } }
9,563
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/cli/cli/config
kubeflow_public_repos/fate-operator/vendor/github.com/docker/cli/cli/config/configfile/file_windows.go
package configfile

// copyFilePermissions copies file ownership and permissions from "src" to
// "dst". It is a no-op on Windows so that ConfigFile.Save still works; the
// Unix implementation lives in file_unix.go.
func copyFilePermissions(src, dst string) {
	// TODO implement for Windows
}
9,564
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/cli/cli/config
kubeflow_public_repos/fate-operator/vendor/github.com/docker/cli/cli/config/types/authconfig.go
package types // AuthConfig contains authorization information for connecting to a Registry type AuthConfig struct { Username string `json:"username,omitempty"` Password string `json:"password,omitempty"` Auth string `json:"auth,omitempty"` // Email is an optional value associated with the username. // This field is deprecated and will be removed in a later // version of docker. Email string `json:"email,omitempty"` ServerAddress string `json:"serveraddress,omitempty"` // IdentityToken is used to authenticate the user and get // an access token for the registry. IdentityToken string `json:"identitytoken,omitempty"` // RegistryToken is a bearer token to be sent to a registry RegistryToken string `json:"registrytoken,omitempty"` }
9,565
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-connections/LICENSE
Apache License Version 2.0, January 2004 https://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS Copyright 2015 Docker, Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
9,566
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-connections
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-connections/tlsconfig/config.go
// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
//
// As a reminder from https://golang.org/pkg/crypto/tls/#Config:
//	A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified.
//	A Config may be reused; the tls package will also not modify it.
package tlsconfig

import (
	"crypto/tls"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"io/ioutil"
	"os"

	"github.com/pkg/errors"
)

// Options represents the information needed to create client and server TLS configurations.
type Options struct {
	CAFile string

	// If either CertFile or KeyFile is empty, Client() will not load them
	// preventing the client from authenticating to the server.
	// However, Server() requires them and will error out if they are empty.
	CertFile string
	KeyFile  string

	// client-only option
	InsecureSkipVerify bool
	// server-only option
	ClientAuth tls.ClientAuthType
	// If ExclusiveRootPools is set, then if a CA file is provided, the root pool used for TLS
	// creds will include exclusively the roots in that CA file. If no CA file is provided,
	// the system pool will be used.
	ExclusiveRootPools bool
	// MinVersion is the minimum acceptable TLS version (a tls.VersionXXX
	// constant); zero means "use the package default".
	MinVersion uint16
	// If Passphrase is set, it will be used to decrypt a TLS private key
	// if the key is encrypted
	Passphrase string
}

// Extra (server-side) accepted CBC cipher suites - will phase out in the future
var acceptedCBCCiphers = []uint16{
	tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
	tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
	tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
}

// DefaultServerAcceptedCiphers should be used by code which already has a crypto/tls
// options struct but wants to use a commonly accepted set of TLS cipher suites, with
// known weak algorithms removed.
var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...)
// allTLSVersions lists all the TLS versions and is used by the code that validates
// a uint16 value as a TLS version.
var allTLSVersions = map[uint16]struct{}{
	tls.VersionSSL30: {},
	tls.VersionTLS10: {},
	tls.VersionTLS11: {},
	tls.VersionTLS12: {},
}

// ServerDefault returns a secure-enough TLS configuration for the server TLS configuration.
// Optional functional arguments may further mutate the returned config.
func ServerDefault(ops ...func(*tls.Config)) *tls.Config {
	tlsconfig := &tls.Config{
		// Avoid fallback by default to SSL protocols < TLS1.2
		MinVersion:               tls.VersionTLS12,
		PreferServerCipherSuites: true,
		CipherSuites:             DefaultServerAcceptedCiphers,
	}

	for _, op := range ops {
		op(tlsconfig)
	}

	return tlsconfig
}

// ClientDefault returns a secure-enough TLS configuration for the client TLS configuration.
// Optional functional arguments may further mutate the returned config.
func ClientDefault(ops ...func(*tls.Config)) *tls.Config {
	tlsconfig := &tls.Config{
		// Prefer TLS1.2 as the client minimum
		MinVersion:   tls.VersionTLS12,
		CipherSuites: clientCipherSuites,
	}

	for _, op := range ops {
		op(tlsconfig)
	}

	return tlsconfig
}

// certPool returns an X.509 certificate pool from `caFile`, the certificate file.
// With exclusivePool the pool contains only that file's roots; otherwise the
// file's roots are appended to the system pool.
func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) {
	// If we should verify the server, we need to load a trusted ca
	var (
		certPool *x509.CertPool
		err      error
	)
	if exclusivePool {
		certPool = x509.NewCertPool()
	} else {
		certPool, err = SystemCertPool()
		if err != nil {
			return nil, fmt.Errorf("failed to read system certificates: %v", err)
		}
	}
	pem, err := ioutil.ReadFile(caFile)
	if err != nil {
		return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err)
	}
	if !certPool.AppendCertsFromPEM(pem) {
		return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile)
	}
	return certPool, nil
}

// isValidMinVersion checks that the input value is a valid tls minimum version
func isValidMinVersion(version uint16) bool {
	_, ok := allTLSVersions[version]
	return ok
}

// adjustMinVersion sets the MinVersion on `config`, the input configuration.
// It assumes the current MinVersion on the `config` is the lowest allowed;
// a requested version below it is rejected rather than applied.
func adjustMinVersion(options Options, config *tls.Config) error {
	if options.MinVersion > 0 {
		if !isValidMinVersion(options.MinVersion) {
			return fmt.Errorf("Invalid minimum TLS version: %x", options.MinVersion)
		}
		if options.MinVersion < config.MinVersion {
			return fmt.Errorf("Requested minimum TLS version is too low. Should be at-least: %x", config.MinVersion)
		}
		config.MinVersion = options.MinVersion
	}

	return nil
}

// IsErrEncryptedKey returns true if the 'err' is an error of incorrect
// password when trying to decrypt a TLS private key
func IsErrEncryptedKey(err error) bool {
	return errors.Cause(err) == x509.IncorrectPasswordError
}

// getPrivateKey returns the private key in 'keyBytes', in PEM-encoded format.
// If the private key is encrypted, 'passphrase' is used to decrypt the
// private key.
// NOTE(review): x509.IsEncryptedPEMBlock/DecryptPEMBlock are deprecated
// since Go 1.16 because legacy PEM encryption is insecure by design; kept
// here as-is for compatibility with existing encrypted keys.
func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) {
	// this section makes some small changes to code from notary/tuf/utils/x509.go
	pemBlock, _ := pem.Decode(keyBytes)
	if pemBlock == nil {
		return nil, fmt.Errorf("no valid private key found")
	}

	var err error
	if x509.IsEncryptedPEMBlock(pemBlock) {
		keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase))
		if err != nil {
			return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it")
		}
		keyBytes = pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: keyBytes})
	}
	return keyBytes, nil
}

// getCert returns a Certificate from the CertFile and KeyFile in 'options',
// if the key is encrypted, the Passphrase in 'options' will be used to
// decrypt it.
func getCert(options Options) ([]tls.Certificate, error) { if options.CertFile == "" && options.KeyFile == "" { return nil, nil } errMessage := "Could not load X509 key pair" cert, err := ioutil.ReadFile(options.CertFile) if err != nil { return nil, errors.Wrap(err, errMessage) } prKeyBytes, err := ioutil.ReadFile(options.KeyFile) if err != nil { return nil, errors.Wrap(err, errMessage) } prKeyBytes, err = getPrivateKey(prKeyBytes, options.Passphrase) if err != nil { return nil, errors.Wrap(err, errMessage) } tlsCert, err := tls.X509KeyPair(cert, prKeyBytes) if err != nil { return nil, errors.Wrap(err, errMessage) } return []tls.Certificate{tlsCert}, nil } // Client returns a TLS configuration meant to be used by a client. func Client(options Options) (*tls.Config, error) { tlsConfig := ClientDefault() tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify if !options.InsecureSkipVerify && options.CAFile != "" { CAs, err := certPool(options.CAFile, options.ExclusiveRootPools) if err != nil { return nil, err } tlsConfig.RootCAs = CAs } tlsCerts, err := getCert(options) if err != nil { return nil, err } tlsConfig.Certificates = tlsCerts if err := adjustMinVersion(options, tlsConfig); err != nil { return nil, err } return tlsConfig, nil } // Server returns a TLS configuration meant to be used by a server. func Server(options Options) (*tls.Config, error) { tlsConfig := ServerDefault() tlsConfig.ClientAuth = options.ClientAuth tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) if err != nil { if os.IsNotExist(err) { return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) } return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. 
Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err) } tlsConfig.Certificates = []tls.Certificate{tlsCert} if options.ClientAuth >= tls.VerifyClientCertIfGiven && options.CAFile != "" { CAs, err := certPool(options.CAFile, options.ExclusiveRootPools) if err != nil { return nil, err } tlsConfig.ClientCAs = CAs } if err := adjustMinVersion(options, tlsConfig); err != nil { return nil, err } return tlsConfig, nil }
9,567
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-connections
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go
// +build go1.7 package tlsconfig import ( "crypto/x509" "runtime" ) // SystemCertPool returns a copy of the system cert pool, // returns an error if failed to load or empty pool on windows. func SystemCertPool() (*x509.CertPool, error) { certpool, err := x509.SystemCertPool() if err != nil && runtime.GOOS == "windows" { return x509.NewCertPool(), nil } return certpool, err }
9,568
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-connections
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
// +build !go1.7

package tlsconfig

import (
	"crypto/x509"
)

// SystemCertPool returns a new empty cert pool;
// accessing the system cert pool is only supported from go 1.7 onward.
func SystemCertPool() (*x509.CertPool, error) {
	return x509.NewCertPool(), nil
}
9,569
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-connections
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go
// +build go1.5

// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
//
package tlsconfig

import (
	"crypto/tls"
)

// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set)
// NOTE(review): the AES-256-GCM suites appear to be the reason for the go1.5
// build tag (the legacy variant below omits them) — confirm against Go release notes.
var clientCipherSuites = []uint16{
	tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
	tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
	tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
	tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
}
9,570
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-connections
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go
// +build !go1.5

// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
//
package tlsconfig

import (
	"crypto/tls"
)

// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set)
// Legacy (pre-go1.5) variant: only the AES-128-GCM suites are listed here.
var clientCipherSuites = []uint16{
	tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
	tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
}
9,571
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-connections
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-connections/nat/parse.go
package nat import ( "fmt" "strconv" "strings" ) // PartParser parses and validates the specified string (data) using the specified template // e.g. ip:public:private -> 192.168.0.1:80:8000 // DEPRECATED: do not use, this function may be removed in a future version func PartParser(template, data string) (map[string]string, error) { // ip:public:private var ( templateParts = strings.Split(template, ":") parts = strings.Split(data, ":") out = make(map[string]string, len(templateParts)) ) if len(parts) != len(templateParts) { return nil, fmt.Errorf("Invalid format to parse. %s should match template %s", data, template) } for i, t := range templateParts { value := "" if len(parts) > i { value = parts[i] } out[t] = value } return out, nil } // ParsePortRange parses and validates the specified string as a port-range (8000-9000) func ParsePortRange(ports string) (uint64, uint64, error) { if ports == "" { return 0, 0, fmt.Errorf("Empty string specified for ports.") } if !strings.Contains(ports, "-") { start, err := strconv.ParseUint(ports, 10, 16) end := start return start, end, err } parts := strings.Split(ports, "-") start, err := strconv.ParseUint(parts[0], 10, 16) if err != nil { return 0, 0, err } end, err := strconv.ParseUint(parts[1], 10, 16) if err != nil { return 0, 0, err } if end < start { return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) } return start, end, nil }
9,572
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-connections
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-connections/nat/sort.go
package nat

import (
	"sort"
	"strings"
)

// portSorter adapts a []Port plus a caller-supplied comparison to sort.Interface.
type portSorter struct {
	ports []Port
	by    func(i, j Port) bool
}

func (s *portSorter) Len() int {
	return len(s.ports)
}

func (s *portSorter) Swap(i, j int) {
	s.ports[i], s.ports[j] = s.ports[j], s.ports[i]
}

func (s *portSorter) Less(i, j int) bool {
	ip := s.ports[i]
	jp := s.ports[j]

	return s.by(ip, jp)
}

// Sort sorts a list of ports using the provided predicate
// This function should compare `i` and `j`, returning true if `i` is
// considered to be less than `j`
func Sort(ports []Port, predicate func(i, j Port) bool) {
	s := &portSorter{ports, predicate}
	sort.Sort(s)
}

// portMapEntry pairs a Port with one of its PortBindings so both can be
// sorted together.
type portMapEntry struct {
	port    Port
	binding PortBinding
}

type portMapSorter []portMapEntry

func (s portMapSorter) Len() int      { return len(s) }
func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

// sort the port so that the order is:
// 1. port with larger specified bindings
// 2. larger port
// 3. port with tcp protocol
func (s portMapSorter) Less(i, j int) bool {
	pi, pj := s[i].port, s[j].port
	hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort)
	return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp")
}

// SortPortMap sorts the list of ports and their respected mapping, in place.
// Ports with an explicit HostPort will be placed first.
func SortPortMap(ports []Port, bindings PortMap) {
	s := portMapSorter{}
	for _, p := range ports {
		if binding, ok := bindings[p]; ok {
			for _, b := range binding {
				s = append(s, portMapEntry{port: p, binding: b})
			}
			// Reset so the bindings can be re-appended below in sorted order.
			bindings[p] = []PortBinding{}
		} else {
			s = append(s, portMapEntry{port: p})
		}
	}

	sort.Sort(s)
	var (
		i  int
		pm = make(map[Port]struct{})
	)
	// reorder ports
	for _, entry := range s {
		if _, ok := pm[entry.port]; !ok {
			ports[i] = entry.port
			pm[entry.port] = struct{}{}
			i++
		}
		// reorder bindings for this port
		if _, ok := bindings[entry.port]; ok {
			bindings[entry.port] = append(bindings[entry.port], entry.binding)
		}
	}
}

// toInt converts a port-range string to its numeric start value,
// defaulting to 0 when the string does not parse.
func toInt(s string) uint64 {
	i, _, err := ParsePortRange(s)
	if err != nil {
		i = 0
	}
	return i
}
9,573
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-connections
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-connections/nat/nat.go
// Package nat is a convenience package for manipulation of strings describing network ports.
package nat

import (
	"fmt"
	"net"
	"strconv"
	"strings"
)

const (
	// portSpecTemplate is the expected format for port specifications
	portSpecTemplate = "ip:hostPort:containerPort"
)

// PortBinding represents a binding between a Host IP address and a Host Port
type PortBinding struct {
	// HostIP is the host IP Address
	HostIP string `json:"HostIp"`
	// HostPort is the host port number
	HostPort string
}

// PortMap is a collection of PortBinding indexed by Port
type PortMap map[Port][]PortBinding

// PortSet is a collection of structs indexed by Port
type PortSet map[Port]struct{}

// Port is a string containing port number and protocol in the format "80/tcp"
type Port string

// NewPort creates a new instance of a Port given a protocol and port number or port range
func NewPort(proto, port string) (Port, error) {
	// Check for parsing issues on "port" now so we can avoid having
	// to check it later on.

	portStartInt, portEndInt, err := ParsePortRangeToInt(port)
	if err != nil {
		return "", err
	}

	if portStartInt == portEndInt {
		return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil
	}
	return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil
}

// ParsePort parses the port number string and returns an int.
// An empty string yields 0 with no error.
func ParsePort(rawPort string) (int, error) {
	if len(rawPort) == 0 {
		return 0, nil
	}
	port, err := strconv.ParseUint(rawPort, 10, 16)
	if err != nil {
		return 0, err
	}
	return int(port), nil
}

// ParsePortRangeToInt parses the port range string and returns start/end ints.
// An empty string yields (0, 0) with no error.
func ParsePortRangeToInt(rawPort string) (int, int, error) {
	if len(rawPort) == 0 {
		return 0, 0, nil
	}
	start, end, err := ParsePortRange(rawPort)
	if err != nil {
		return 0, 0, err
	}
	return int(start), int(end), nil
}

// Proto returns the protocol of a Port
func (p Port) Proto() string {
	proto, _ := SplitProtoPort(string(p))
	return proto
}

// Port returns the port number of a Port
func (p Port) Port() string {
	_, port := SplitProtoPort(string(p))
	return port
}

// Int returns the port number of a Port as an int
func (p Port) Int() int {
	portStr := p.Port()
	// We don't need to check for an error because we're going to
	// assume that any error would have been found, and reported, in NewPort()
	port, _ := ParsePort(portStr)
	return port
}

// Range returns the start/end port numbers of a Port range as ints
func (p Port) Range() (int, int, error) {
	return ParsePortRangeToInt(p.Port())
}

// SplitProtoPort splits a port in the format of proto/port.
// Note the return order is (proto, port) even though the input is port/proto.
// A missing or empty protocol defaults to "tcp".
func SplitProtoPort(rawPort string) (string, string) {
	parts := strings.Split(rawPort, "/")
	l := len(parts)
	if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 {
		return "", ""
	}
	if l == 1 {
		// No "/" present: default the protocol to tcp.
		return "tcp", rawPort
	}
	if len(parts[1]) == 0 {
		// Trailing slash ("80/"): default the protocol to tcp.
		return "tcp", parts[0]
	}
	return parts[1], parts[0]
}

// validateProto reports whether proto is one of the supported transport protocols.
func validateProto(proto string) bool {
	for _, availableProto := range []string{"tcp", "udp", "sctp"} {
		if availableProto == proto {
			return true
		}
	}
	return false
}

// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses
// these in to the internal types
func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) {
	var (
		exposedPorts = make(map[Port]struct{}, len(ports))
		bindings     = make(map[Port][]PortBinding)
	)
	for _, rawPort := range ports {
		portMappings, err := ParsePortSpec(rawPort)
		if err != nil {
			return nil, nil, err
		}

		for _, portMapping := range portMappings {
			port := portMapping.Port
			if _, exists := exposedPorts[port]; !exists {
				exposedPorts[port] = struct{}{}
			}
			bslice, exists := bindings[port]
			if !exists {
				bslice = []PortBinding{}
			}
			bindings[port] = append(bslice, portMapping.Binding)
		}
	}
	return exposedPorts, bindings, nil
}

// PortMapping is a data object mapping a Port to a PortBinding
type PortMapping struct {
	Port    Port
	Binding PortBinding
}

// splitParts splits a raw port spec on ":" into (ip, hostPort, containerPort).
// With more than three parts, the leading parts are rejoined and treated as an
// IPv6 address.
func splitParts(rawport string) (string, string, string) {
	parts := strings.Split(rawport, ":")
	n := len(parts)
	containerport := parts[n-1]

	switch n {
	case 1:
		return "", "", containerport
	case 2:
		return "", parts[0], containerport
	case 3:
		return parts[0], parts[1], containerport
	default:
		return strings.Join(parts[:n-2], ":"), parts[n-2], containerport
	}
}

// ParsePortSpec parses a port specification string into a slice of PortMappings
func ParsePortSpec(rawPort string) ([]PortMapping, error) {
	var proto string
	rawIP, hostPort, containerPort := splitParts(rawPort)
	proto, containerPort = SplitProtoPort(containerPort)

	// Strip [] from IPV6 addresses
	ip, _, err := net.SplitHostPort(rawIP + ":")
	if err != nil {
		return nil, fmt.Errorf("Invalid ip address %v: %s", rawIP, err)
	}
	if ip != "" && net.ParseIP(ip) == nil {
		return nil, fmt.Errorf("Invalid ip address: %s", ip)
	}
	if containerPort == "" {
		return nil, fmt.Errorf("No port specified: %s<empty>", rawPort)
	}

	startPort, endPort, err := ParsePortRange(containerPort)
	if err != nil {
		return nil, fmt.Errorf("Invalid containerPort: %s", containerPort)
	}

	var startHostPort, endHostPort uint64 = 0, 0
	if len(hostPort) > 0 {
		startHostPort, endHostPort, err = ParsePortRange(hostPort)
		if err != nil {
			return nil, fmt.Errorf("Invalid hostPort: %s", hostPort)
		}
	}

	if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) {
		// Allow host port range iff containerPort is not a range.
		// In this case, use the host port range as the dynamic
		// host port range to allocate into.
		if endPort != startPort {
			return nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort)
		}
	}

	if !validateProto(strings.ToLower(proto)) {
		return nil, fmt.Errorf("Invalid proto: %s", proto)
	}

	ports := []PortMapping{}
	// Expand the container-port range; hostPort is reused as the loop's
	// per-iteration host port string (deliberately shadowing the parsed input).
	for i := uint64(0); i <= (endPort - startPort); i++ {
		containerPort = strconv.FormatUint(startPort+i, 10)
		if len(hostPort) > 0 {
			hostPort = strconv.FormatUint(startHostPort+i, 10)
		}
		// Set hostPort to a range only if there is a single container port
		// and a dynamic host port.
		if startPort == endPort && startHostPort != endHostPort {
			hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10))
		}
		port, err := NewPort(strings.ToLower(proto), containerPort)
		if err != nil {
			return nil, err
		}

		binding := PortBinding{
			HostIP:   ip,
			HostPort: hostPort,
		}
		ports = append(ports, PortMapping{Port: port, Binding: binding})
	}
	return ports, nil
}
9,574
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-connections
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-connections/sockets/inmem_socket.go
package sockets

import (
	"errors"
	"net"
	"sync"
)

var errClosed = errors.New("use of closed network connection")

// InmemSocket implements net.Listener using in-memory only connections.
type InmemSocket struct {
	chConn  chan net.Conn // buffered queue of server-side pipe ends awaiting Accept
	chClose chan struct{} // closed exactly once to signal shutdown
	addr    string
	mu      sync.Mutex // guards the close-once logic in Close
}

// dummyAddr is used to satisfy net.Addr for the in-mem socket
// it is just stored as a string and returns the string for all calls
type dummyAddr string

// NewInmemSocket creates an in-memory only net.Listener
// The addr argument can be any string, but is used to satisfy the `Addr()` part
// of the net.Listener interface
func NewInmemSocket(addr string, bufSize int) *InmemSocket {
	return &InmemSocket{
		chConn:  make(chan net.Conn, bufSize),
		chClose: make(chan struct{}),
		addr:    addr,
	}
}

// Addr returns the socket's addr string to satisfy net.Listener
func (s *InmemSocket) Addr() net.Addr {
	return dummyAddr(s.addr)
}

// Accept implements the Accept method in the Listener interface; it waits for the next call and returns a generic Conn.
func (s *InmemSocket) Accept() (net.Conn, error) {
	select {
	case conn := <-s.chConn:
		return conn, nil
	case <-s.chClose:
		return nil, errClosed
	}
}

// Close closes the listener. It will be unavailable for use once closed.
// Safe to call multiple times: the select prevents a double close panic.
func (s *InmemSocket) Close() error {
	s.mu.Lock()
	defer s.mu.Unlock()
	select {
	case <-s.chClose:
	default:
		close(s.chClose)
	}
	return nil
}

// Dial is used to establish a connection with the in-mem server
func (s *InmemSocket) Dial(network, addr string) (net.Conn, error) {
	srvConn, clientConn := net.Pipe()
	// Hand the server end to Accept; fail if the listener is already closed.
	select {
	case s.chConn <- srvConn:
	case <-s.chClose:
		return nil, errClosed
	}

	return clientConn, nil
}

// Network returns the addr string, satisfies net.Addr
func (a dummyAddr) Network() string {
	return string(a)
}

// String returns the string form
func (a dummyAddr) String() string {
	return string(a)
}
9,575
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-connections
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-connections/sockets/sockets.go
// Package sockets provides helper functions to create and configure Unix or TCP sockets.
package sockets

import (
	"errors"
	"net"
	"net/http"
	"time"
)

// Why 32? See https://github.com/docker/docker/pull/8035.
const defaultTimeout = 32 * time.Second

// ErrProtocolNotAvailable is returned when a given transport protocol is not provided by the operating system.
var ErrProtocolNotAvailable = errors.New("protocol not available")

// ConfigureTransport configures the specified Transport according to the
// specified proto and addr.
// If the proto is unix (using a unix socket to communicate) or npipe the
// compression is disabled.
// NOTE(review): tr.Dial is deprecated in modern net/http in favor of
// DialContext — kept here for compatibility with older Go versions.
func ConfigureTransport(tr *http.Transport, proto, addr string) error {
	switch proto {
	case "unix":
		return configureUnixTransport(tr, proto, addr)
	case "npipe":
		return configureNpipeTransport(tr, proto, addr)
	default:
		tr.Proxy = http.ProxyFromEnvironment
		dialer, err := DialerFromEnvironment(&net.Dialer{
			Timeout: defaultTimeout,
		})
		if err != nil {
			return err
		}
		tr.Dial = dialer.Dial
	}
	return nil
}
9,576
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-connections
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-connections/sockets/sockets_unix.go
// +build !windows

package sockets

import (
	"fmt"
	"net"
	"net/http"
	"syscall"
	"time"
)

// maxUnixSocketPathSize is the kernel limit on a unix socket path length,
// derived from the size of sockaddr_un.sun_path.
const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path)

// configureUnixTransport points the Transport's dialer at a unix socket
// and disables compression (pointless for local IPC).
func configureUnixTransport(tr *http.Transport, proto, addr string) error {
	if len(addr) > maxUnixSocketPathSize {
		return fmt.Errorf("Unix socket path %q is too long", addr)
	}
	// No need for compression in local communications.
	tr.DisableCompression = true
	tr.Dial = func(_, _ string) (net.Conn, error) {
		return net.DialTimeout(proto, addr, defaultTimeout)
	}
	return nil
}

// configureNpipeTransport always fails on non-Windows platforms:
// named pipes are a Windows-only transport.
func configureNpipeTransport(tr *http.Transport, proto, addr string) error {
	return ErrProtocolNotAvailable
}

// DialPipe connects to a Windows named pipe.
// This is not supported on other OSes.
func DialPipe(_ string, _ time.Duration) (net.Conn, error) {
	return nil, syscall.EAFNOSUPPORT
}
9,577
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-connections
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-connections/sockets/proxy.go
package sockets

import (
	"net"
	"net/url"
	"os"
	"strings"

	"golang.org/x/net/proxy"
)

// GetProxyEnv allows access to the uppercase and the lowercase forms of
// proxy-related variables.  See the Go specification for details on these
// variables. https://golang.org/pkg/net/http/
func GetProxyEnv(key string) string {
	proxyValue := os.Getenv(strings.ToUpper(key))
	if proxyValue == "" {
		return os.Getenv(strings.ToLower(key))
	}
	return proxyValue
}

// DialerFromEnvironment takes in a "direct" *net.Dialer and returns a
// proxy.Dialer which will route the connections through the proxy using the
// given dialer.
// On any parse/setup failure the direct dialer is returned alongside the
// error, so callers that ignore the error still get a working dialer.
func DialerFromEnvironment(direct *net.Dialer) (proxy.Dialer, error) {
	allProxy := GetProxyEnv("all_proxy")
	if len(allProxy) == 0 {
		return direct, nil
	}

	proxyURL, err := url.Parse(allProxy)
	if err != nil {
		return direct, err
	}

	proxyFromURL, err := proxy.FromURL(proxyURL, direct)
	if err != nil {
		return direct, err
	}

	noProxy := GetProxyEnv("no_proxy")
	if len(noProxy) == 0 {
		return proxyFromURL, nil
	}

	// Route no_proxy-listed hosts directly, everything else via the proxy.
	perHost := proxy.NewPerHost(proxyFromURL, direct)
	perHost.AddFromString(noProxy)

	return perHost, nil
}
9,578
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-connections
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-connections/sockets/unix_socket.go
// +build !windows

package sockets

import (
	"net"
	"os"
	"syscall"
)

// NewUnixSocket creates a unix socket with the specified path and group.
// Any stale socket file at path is unlinked first; the socket is created
// with mode 0660 and chowned to (root, gid).
func NewUnixSocket(path string, gid int) (net.Listener, error) {
	if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) {
		return nil, err
	}
	// NOTE(review): umask is process-wide state, so this create-with-restrictive-
	// umask window is not safe against concurrent file creation in other
	// goroutines — confirm callers invoke this during single-threaded setup.
	mask := syscall.Umask(0777)
	defer syscall.Umask(mask)

	l, err := net.Listen("unix", path)
	if err != nil {
		return nil, err
	}
	if err := os.Chown(path, 0, gid); err != nil {
		l.Close()
		return nil, err
	}
	if err := os.Chmod(path, 0660); err != nil {
		l.Close()
		return nil, err
	}
	return l, nil
}
9,579
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-connections
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-connections/sockets/sockets_windows.go
package sockets

import (
	"net"
	"net/http"
	"time"

	"github.com/Microsoft/go-winio"
)

// configureUnixTransport always fails on Windows:
// unix sockets are not a supported transport here.
func configureUnixTransport(tr *http.Transport, proto, addr string) error {
	return ErrProtocolNotAvailable
}

// configureNpipeTransport points the Transport's dialer at a named pipe
// and disables compression (pointless for local IPC).
func configureNpipeTransport(tr *http.Transport, proto, addr string) error {
	// No need for compression in local communications.
	tr.DisableCompression = true
	tr.Dial = func(_, _ string) (net.Conn, error) {
		return DialPipe(addr, defaultTimeout)
	}
	return nil
}

// DialPipe connects to a Windows named pipe.
func DialPipe(addr string, timeout time.Duration) (net.Conn, error) {
	return winio.DialPipe(addr, &timeout)
}
9,580
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-connections
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-connections/sockets/tcp_socket.go
// Package sockets provides helper functions to create and configure Unix or TCP sockets. package sockets import ( "crypto/tls" "net" ) // NewTCPSocket creates a TCP socket listener with the specified address and // the specified tls configuration. If TLSConfig is set, will encapsulate the // TCP listener inside a TLS one. func NewTCPSocket(addr string, tlsConfig *tls.Config) (net.Listener, error) { l, err := net.Listen("tcp", addr) if err != nil { return nil, err } if tlsConfig != nil { tlsConfig.NextProtos = []string{"http/1.1"} l = tls.NewListener(l, tlsConfig) } return l, nil }
9,581
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-units/MAINTAINERS
# go-units maintainers file # # This file describes who runs the docker/go-units project and how. # This is a living document - if you see something out of date or missing, speak up! # # It is structured to be consumable by both humans and programs. # To extract its contents programmatically, use any TOML-compliant parser. # # This file is compiled into the MAINTAINERS file in docker/opensource. # [Org] [Org."Core maintainers"] people = [ "akihirosuda", "dnephin", "thajeztah", "vdemeester", ] [people] # A reference list of all people associated with the project. # All other sections should refer to people by their canonical key # in the people section. # ADD YOURSELF HERE IN ALPHABETICAL ORDER [people.akihirosuda] Name = "Akihiro Suda" Email = "[email protected]" GitHub = "AkihiroSuda" [people.dnephin] Name = "Daniel Nephin" Email = "[email protected]" GitHub = "dnephin" [people.thajeztah] Name = "Sebastiaan van Stijn" Email = "[email protected]" GitHub = "thaJeztah" [people.vdemeester] Name = "Vincent Demeester" Email = "[email protected]" GitHub = "vdemeester"
9,582
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-units/duration.go
// Package units provides helper function to parse and print size and time units // in human-readable format. package units import ( "fmt" "time" ) // HumanDuration returns a human-readable approximation of a duration // (eg. "About a minute", "4 hours ago", etc.). func HumanDuration(d time.Duration) string { if seconds := int(d.Seconds()); seconds < 1 { return "Less than a second" } else if seconds == 1 { return "1 second" } else if seconds < 60 { return fmt.Sprintf("%d seconds", seconds) } else if minutes := int(d.Minutes()); minutes == 1 { return "About a minute" } else if minutes < 60 { return fmt.Sprintf("%d minutes", minutes) } else if hours := int(d.Hours() + 0.5); hours == 1 { return "About an hour" } else if hours < 48 { return fmt.Sprintf("%d hours", hours) } else if hours < 24*7*2 { return fmt.Sprintf("%d days", hours/24) } else if hours < 24*30*2 { return fmt.Sprintf("%d weeks", hours/24/7) } else if hours < 24*365*2 { return fmt.Sprintf("%d months", hours/24/30) } return fmt.Sprintf("%d years", int(d.Hours())/24/365) }
9,583
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-units/ulimit.go
package units

import (
	"fmt"
	"strconv"
	"strings"
)

// Ulimit is a human friendly version of Rlimit.
type Ulimit struct {
	Name string
	Hard int64
	Soft int64
}

// Rlimit specifies the resource limits, such as max open files.
type Rlimit struct {
	Type int    `json:"type,omitempty"`
	Hard uint64 `json:"hard,omitempty"`
	Soft uint64 `json:"soft,omitempty"`
}

const (
	// magic numbers for making the syscall
	// some of these are defined in the syscall package, but not all.
	// Also since Windows client doesn't get access to the syscall package, need to
	// define these here
	rlimitAs         = 9
	rlimitCore       = 4
	rlimitCPU        = 0
	rlimitData       = 2
	rlimitFsize      = 1
	rlimitLocks      = 10
	rlimitMemlock    = 8
	rlimitMsgqueue   = 12
	rlimitNice       = 13
	rlimitNofile     = 7
	rlimitNproc      = 6
	rlimitRss        = 5
	rlimitRtprio     = 14
	rlimitRttime     = 15
	rlimitSigpending = 11
	rlimitStack      = 3
)

// ulimitNameMapping maps user-facing ulimit names to their rlimit type numbers.
var ulimitNameMapping = map[string]int{
	//"as":         rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container.
	"core":       rlimitCore,
	"cpu":        rlimitCPU,
	"data":       rlimitData,
	"fsize":      rlimitFsize,
	"locks":      rlimitLocks,
	"memlock":    rlimitMemlock,
	"msgqueue":   rlimitMsgqueue,
	"nice":       rlimitNice,
	"nofile":     rlimitNofile,
	"nproc":      rlimitNproc,
	"rss":        rlimitRss,
	"rtprio":     rlimitRtprio,
	"rttime":     rlimitRttime,
	"sigpending": rlimitSigpending,
	"stack":      rlimitStack,
}

// ParseUlimit parses and returns a Ulimit from the specified string.
// The expected format is "name=soft[:hard]"; when hard is omitted it
// defaults to the soft value. -1 means unlimited.
func ParseUlimit(val string) (*Ulimit, error) {
	parts := strings.SplitN(val, "=", 2)
	if len(parts) != 2 {
		return nil, fmt.Errorf("invalid ulimit argument: %s", val)
	}

	if _, exists := ulimitNameMapping[parts[0]]; !exists {
		return nil, fmt.Errorf("invalid ulimit type: %s", parts[0])
	}

	var (
		soft int64
		hard = &soft // default to soft in case no hard was set
		temp int64
		err  error
	)
	// Parse hard first (case 2) so the fallthrough can parse soft for both cases.
	switch limitVals := strings.Split(parts[1], ":"); len(limitVals) {
	case 2:
		temp, err = strconv.ParseInt(limitVals[1], 10, 64)
		if err != nil {
			return nil, err
		}
		hard = &temp
		fallthrough
	case 1:
		soft, err = strconv.ParseInt(limitVals[0], 10, 64)
		if err != nil {
			return nil, err
		}
	default:
		return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1])
	}

	// Validate soft <= hard, treating -1 as "unlimited" (largest possible).
	if *hard != -1 {
		if soft == -1 {
			return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: soft: -1 (unlimited), hard: %d", *hard)
		}
		if soft > *hard {
			return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard)
		}
	}

	return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil
}

// GetRlimit returns the RLimit corresponding to Ulimit.
func (u *Ulimit) GetRlimit() (*Rlimit, error) {
	t, exists := ulimitNameMapping[u.Name]
	if !exists {
		return nil, fmt.Errorf("invalid ulimit name %s", u.Name)
	}

	return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil
}

// String returns the ulimit in its canonical "name=soft:hard" form.
func (u *Ulimit) String() string {
	return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard)
}
9,584
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-units/README.md
[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) # Introduction go-units is a library to transform human friendly measurements into machine friendly values. ## Usage See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. ## Copyright and license Copyright © 2015 Docker, Inc. go-units is licensed under the Apache License, Version 2.0. See [LICENSE](LICENSE) for the full text of the license.
9,585
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-units/size.go
// See: http://en.wikipedia.org/wiki/Binary_prefix
const (
	// Decimal
	KB = 1000
	MB = 1000 * KB
	GB = 1000 * MB
	TB = 1000 * GB
	PB = 1000 * TB

	// Binary
	KiB = 1024
	MiB = 1024 * KiB
	GiB = 1024 * MiB
	TiB = 1024 * GiB
	PiB = 1024 * TiB
)

type unitMap map[string]int64

var (
	decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
	binaryMap  = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB}
	sizeRegex  = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[iI]?[bB]?$`)
)

var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}

// getSizeAndUnit repeatedly divides size by base until it drops below base
// (or the label list is exhausted) and returns the scaled value together
// with the matching unit label.
func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) {
	idx := 0
	for size >= base && idx < len(_map)-1 {
		size /= base
		idx++
	}
	return size, _map[idx]
}

// CustomSize returns a human-readable approximation of a size
// using custom format.
func CustomSize(format string, size float64, base float64, _map []string) string {
	scaled, unit := getSizeAndUnit(size, base, _map)
	return fmt.Sprintf(format, scaled, unit)
}

// HumanSizeWithPrecision allows the size to be in any precision,
// instead of 4 digit precision used in units.HumanSize.
func HumanSizeWithPrecision(size float64, precision int) string {
	scaled, unit := getSizeAndUnit(size, 1000.0, decimapAbbrs)
	return fmt.Sprintf("%.*g%s", precision, scaled, unit)
}

// HumanSize returns a human-readable approximation of a size
// capped at 4 valid numbers (eg. "2.746 MB", "796 KB").
func HumanSize(size float64) string {
	return HumanSizeWithPrecision(size, 4)
}

// BytesSize returns a human-readable size in bytes, kibibytes,
// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB").
func BytesSize(size float64) string {
	return CustomSize("%.4g%s", size, 1024.0, binaryAbbrs)
}

// FromHumanSize returns an integer from a human-readable specification of a
// size using SI standard (eg. "44kB", "17MB").
func FromHumanSize(size string) (int64, error) {
	return parseSize(size, decimalMap)
}

// RAMInBytes parses a human-readable string representing an amount of RAM
// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and
// returns the number of bytes, or -1 if the string is unparseable.
// Units are case-insensitive, and the 'b' suffix is optional.
func RAMInBytes(size string) (int64, error) {
	return parseSize(size, binaryMap)
}

// parseSize converts a human-readable size string into the number of bytes
// it represents, using uMap to resolve the unit prefix multiplier.
func parseSize(sizeStr string, uMap unitMap) (int64, error) {
	groups := sizeRegex.FindStringSubmatch(sizeStr)
	if len(groups) != 4 {
		return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
	}

	value, err := strconv.ParseFloat(groups[1], 64)
	if err != nil {
		return -1, err
	}

	// A missing unit prefix (plain bytes) simply finds no map entry and
	// leaves the value unscaled.
	if multiplier, ok := uMap[strings.ToLower(groups[3])]; ok {
		value *= float64(multiplier)
	}
	return int64(value), nil
}
9,586
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-units/circle.yml
dependencies: post: # install golint - go get golang.org/x/lint/golint test: pre: # run analysis before tests - go vet ./... - test -z "$(golint ./... | tee /dev/stderr)" - test -z "$(gofmt -s -l . | tee /dev/stderr)"
9,587
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-units/LICENSE
Apache License Version 2.0, January 2004 https://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS Copyright 2015 Docker, Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
9,588
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/go-units/CONTRIBUTING.md
# Contributing to go-units Want to hack on go-units? Awesome! Here are instructions to get you started. go-units is a part of the [Docker](https://www.docker.com) project, and follows the same rules and principles. If you're already familiar with the way Docker does things, you'll feel right at home. Otherwise, go read Docker's [contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), [issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), [review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and [branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). ### Sign your work The sign-off is a simple line at the end of the explanation for the patch. Your signature certifies that you wrote the patch or otherwise have the right to pass it on as an open-source patch. The rules are pretty simple: if you can certify the below (from [developercertificate.org](http://developercertificate.org/)): ``` Developer Certificate of Origin Version 1.1 Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 660 York Street, Suite 102, San Francisco, CA 94110 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. 
Developer's Certificate of Origin 1.1 By making a contribution to this project, I certify that: (a) The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or (b) The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right under that license to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or (c) The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it. (d) I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved. ``` Then you just add a line to every git commit message: Signed-off-by: Joe Smith <[email protected]> Use your real name (sorry, no pseudonyms or anonymous contributions.) If you set your `user.name` and `user.email` git configs, you can sign your commit automatically with `git commit -s`.
9,589
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/spdystream/handlers.go
package spdystream import ( "io" "net/http" ) // MirrorStreamHandler mirrors all streams. func MirrorStreamHandler(stream *Stream) { replyErr := stream.SendReply(http.Header{}, false) if replyErr != nil { return } go func() { io.Copy(stream, stream) stream.Close() }() go func() { for { header, receiveErr := stream.ReceiveHeader() if receiveErr != nil { return } sendErr := stream.SendHeader(header, false) if sendErr != nil { return } } }() } // NoopStreamHandler does nothing when stream connects, most // likely used with RejectAuthHandler which will not allow any // streams to make it to the stream handler. func NoOpStreamHandler(stream *Stream) { stream.SendReply(http.Header{}, false) }
9,590
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/spdystream/MAINTAINERS
# Spdystream maintainers file # # This file describes who runs the docker/spdystream project and how. # This is a living document - if you see something out of date or missing, speak up! # # It is structured to be consumable by both humans and programs. # To extract its contents programmatically, use any TOML-compliant parser. # # This file is compiled into the MAINTAINERS file in docker/opensource. # [Org] [Org."Core maintainers"] people = [ "dmcgowan", ] [people] # A reference list of all people associated with the project. # All other sections should refer to people by their canonical key # in the people section. # ADD YOURSELF HERE IN ALPHABETICAL ORDER [people.dmcgowan] Name = "Derek McGowan" Email = "[email protected]" GitHub = "dmcgowan"
9,591
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/spdystream/README.md
# SpdyStream A multiplexed stream library using spdy ## Usage Client example (connecting to mirroring server without auth) ```go package main import ( "fmt" "github.com/docker/spdystream" "net" "net/http" ) func main() { conn, err := net.Dial("tcp", "localhost:8080") if err != nil { panic(err) } spdyConn, err := spdystream.NewConnection(conn, false) if err != nil { panic(err) } go spdyConn.Serve(spdystream.NoOpStreamHandler) stream, err := spdyConn.CreateStream(http.Header{}, nil, false) if err != nil { panic(err) } stream.Wait() fmt.Fprint(stream, "Writing to stream") buf := make([]byte, 25) stream.Read(buf) fmt.Println(string(buf)) stream.Close() } ``` Server example (mirroring server without auth) ```go package main import ( "github.com/docker/spdystream" "net" ) func main() { listener, err := net.Listen("tcp", "localhost:8080") if err != nil { panic(err) } for { conn, err := listener.Accept() if err != nil { panic(err) } spdyConn, err := spdystream.NewConnection(conn, true) if err != nil { panic(err) } go spdyConn.Serve(spdystream.MirrorStreamHandler) } } ``` ## Copyright and license Copyright © 2014-2015 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/.
9,592
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/spdystream/connection.go
package spdystream import ( "errors" "fmt" "io" "net" "net/http" "sync" "time" "github.com/docker/spdystream/spdy" ) var ( ErrInvalidStreamId = errors.New("Invalid stream id") ErrTimeout = errors.New("Timeout occured") ErrReset = errors.New("Stream reset") ErrWriteClosedStream = errors.New("Write on closed stream") ) const ( FRAME_WORKERS = 5 QUEUE_SIZE = 50 ) type StreamHandler func(stream *Stream) type AuthHandler func(header http.Header, slot uint8, parent uint32) bool type idleAwareFramer struct { f *spdy.Framer conn *Connection writeLock sync.Mutex resetChan chan struct{} setTimeoutLock sync.Mutex setTimeoutChan chan time.Duration timeout time.Duration } func newIdleAwareFramer(framer *spdy.Framer) *idleAwareFramer { iaf := &idleAwareFramer{ f: framer, resetChan: make(chan struct{}, 2), // setTimeoutChan needs to be buffered to avoid deadlocks when calling setIdleTimeout at about // the same time the connection is being closed setTimeoutChan: make(chan time.Duration, 1), } return iaf } func (i *idleAwareFramer) monitor() { var ( timer *time.Timer expired <-chan time.Time resetChan = i.resetChan setTimeoutChan = i.setTimeoutChan ) Loop: for { select { case timeout := <-i.setTimeoutChan: i.timeout = timeout if timeout == 0 { if timer != nil { timer.Stop() } } else { if timer == nil { timer = time.NewTimer(timeout) expired = timer.C } else { timer.Reset(timeout) } } case <-resetChan: if timer != nil && i.timeout > 0 { timer.Reset(i.timeout) } case <-expired: i.conn.streamCond.L.Lock() streams := i.conn.streams i.conn.streams = make(map[spdy.StreamId]*Stream) i.conn.streamCond.Broadcast() i.conn.streamCond.L.Unlock() go func() { for _, stream := range streams { stream.resetStream() } i.conn.Close() }() case <-i.conn.closeChan: if timer != nil { timer.Stop() } // Start a goroutine to drain resetChan. 
This is needed because we've seen // some unit tests with large numbers of goroutines get into a situation // where resetChan fills up, at least 1 call to Write() is still trying to // send to resetChan, the connection gets closed, and this case statement // attempts to grab the write lock that Write() already has, causing a // deadlock. // // See https://github.com/docker/spdystream/issues/49 for more details. go func() { for _ = range resetChan { } }() go func() { for _ = range setTimeoutChan { } }() i.writeLock.Lock() close(resetChan) i.resetChan = nil i.writeLock.Unlock() i.setTimeoutLock.Lock() close(i.setTimeoutChan) i.setTimeoutChan = nil i.setTimeoutLock.Unlock() break Loop } } // Drain resetChan for _ = range resetChan { } } func (i *idleAwareFramer) WriteFrame(frame spdy.Frame) error { i.writeLock.Lock() defer i.writeLock.Unlock() if i.resetChan == nil { return io.EOF } err := i.f.WriteFrame(frame) if err != nil { return err } i.resetChan <- struct{}{} return nil } func (i *idleAwareFramer) ReadFrame() (spdy.Frame, error) { frame, err := i.f.ReadFrame() if err != nil { return nil, err } // resetChan should never be closed since it is only closed // when the connection has closed its closeChan. 
This closure // only occurs after all Reads have finished // TODO (dmcgowan): refactor relationship into connection i.resetChan <- struct{}{} return frame, nil } func (i *idleAwareFramer) setIdleTimeout(timeout time.Duration) { i.setTimeoutLock.Lock() defer i.setTimeoutLock.Unlock() if i.setTimeoutChan == nil { return } i.setTimeoutChan <- timeout } type Connection struct { conn net.Conn framer *idleAwareFramer closeChan chan bool goneAway bool lastStreamChan chan<- *Stream goAwayTimeout time.Duration closeTimeout time.Duration streamLock *sync.RWMutex streamCond *sync.Cond streams map[spdy.StreamId]*Stream nextIdLock sync.Mutex receiveIdLock sync.Mutex nextStreamId spdy.StreamId receivedStreamId spdy.StreamId pingIdLock sync.Mutex pingId uint32 pingChans map[uint32]chan error shutdownLock sync.Mutex shutdownChan chan error hasShutdown bool // for testing https://github.com/docker/spdystream/pull/56 dataFrameHandler func(*spdy.DataFrame) error } // NewConnection creates a new spdy connection from an existing // network connection. 
func NewConnection(conn net.Conn, server bool) (*Connection, error) { framer, framerErr := spdy.NewFramer(conn, conn) if framerErr != nil { return nil, framerErr } idleAwareFramer := newIdleAwareFramer(framer) var sid spdy.StreamId var rid spdy.StreamId var pid uint32 if server { sid = 2 rid = 1 pid = 2 } else { sid = 1 rid = 2 pid = 1 } streamLock := new(sync.RWMutex) streamCond := sync.NewCond(streamLock) session := &Connection{ conn: conn, framer: idleAwareFramer, closeChan: make(chan bool), goAwayTimeout: time.Duration(0), closeTimeout: time.Duration(0), streamLock: streamLock, streamCond: streamCond, streams: make(map[spdy.StreamId]*Stream), nextStreamId: sid, receivedStreamId: rid, pingId: pid, pingChans: make(map[uint32]chan error), shutdownChan: make(chan error), } session.dataFrameHandler = session.handleDataFrame idleAwareFramer.conn = session go idleAwareFramer.monitor() return session, nil } // Ping sends a ping frame across the connection and // returns the response time func (s *Connection) Ping() (time.Duration, error) { pid := s.pingId s.pingIdLock.Lock() if s.pingId > 0x7ffffffe { s.pingId = s.pingId - 0x7ffffffe } else { s.pingId = s.pingId + 2 } s.pingIdLock.Unlock() pingChan := make(chan error) s.pingChans[pid] = pingChan defer delete(s.pingChans, pid) frame := &spdy.PingFrame{Id: pid} startTime := time.Now() writeErr := s.framer.WriteFrame(frame) if writeErr != nil { return time.Duration(0), writeErr } select { case <-s.closeChan: return time.Duration(0), errors.New("connection closed") case err, ok := <-pingChan: if ok && err != nil { return time.Duration(0), err } break } return time.Now().Sub(startTime), nil } // Serve handles frames sent from the server, including reply frames // which are needed to fully initiate connections. Both clients and servers // should call Serve in a separate goroutine before creating streams. 
func (s *Connection) Serve(newHandler StreamHandler) { // use a WaitGroup to wait for all frames to be drained after receiving // go-away. var wg sync.WaitGroup // Parition queues to ensure stream frames are handled // by the same worker, ensuring order is maintained frameQueues := make([]*PriorityFrameQueue, FRAME_WORKERS) for i := 0; i < FRAME_WORKERS; i++ { frameQueues[i] = NewPriorityFrameQueue(QUEUE_SIZE) // Ensure frame queue is drained when connection is closed go func(frameQueue *PriorityFrameQueue) { <-s.closeChan frameQueue.Drain() }(frameQueues[i]) wg.Add(1) go func(frameQueue *PriorityFrameQueue) { // let the WaitGroup know this worker is done defer wg.Done() s.frameHandler(frameQueue, newHandler) }(frameQueues[i]) } var ( partitionRoundRobin int goAwayFrame *spdy.GoAwayFrame ) Loop: for { readFrame, err := s.framer.ReadFrame() if err != nil { if err != io.EOF { fmt.Errorf("frame read error: %s", err) } else { debugMessage("(%p) EOF received", s) } break } var priority uint8 var partition int switch frame := readFrame.(type) { case *spdy.SynStreamFrame: if s.checkStreamFrame(frame) { priority = frame.Priority partition = int(frame.StreamId % FRAME_WORKERS) debugMessage("(%p) Add stream frame: %d ", s, frame.StreamId) s.addStreamFrame(frame) } else { debugMessage("(%p) Rejected stream frame: %d ", s, frame.StreamId) continue } case *spdy.SynReplyFrame: priority = s.getStreamPriority(frame.StreamId) partition = int(frame.StreamId % FRAME_WORKERS) case *spdy.DataFrame: priority = s.getStreamPriority(frame.StreamId) partition = int(frame.StreamId % FRAME_WORKERS) case *spdy.RstStreamFrame: priority = s.getStreamPriority(frame.StreamId) partition = int(frame.StreamId % FRAME_WORKERS) case *spdy.HeadersFrame: priority = s.getStreamPriority(frame.StreamId) partition = int(frame.StreamId % FRAME_WORKERS) case *spdy.PingFrame: priority = 0 partition = partitionRoundRobin partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS case *spdy.GoAwayFrame: // 
hold on to the go away frame and exit the loop goAwayFrame = frame break Loop default: priority = 7 partition = partitionRoundRobin partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS } frameQueues[partition].Push(readFrame, priority) } close(s.closeChan) // wait for all frame handler workers to indicate they've drained their queues // before handling the go away frame wg.Wait() if goAwayFrame != nil { s.handleGoAwayFrame(goAwayFrame) } // now it's safe to close remote channels and empty s.streams s.streamCond.L.Lock() // notify streams that they're now closed, which will // unblock any stream Read() calls for _, stream := range s.streams { stream.closeRemoteChannels() } s.streams = make(map[spdy.StreamId]*Stream) s.streamCond.Broadcast() s.streamCond.L.Unlock() } func (s *Connection) frameHandler(frameQueue *PriorityFrameQueue, newHandler StreamHandler) { for { popFrame := frameQueue.Pop() if popFrame == nil { return } var frameErr error switch frame := popFrame.(type) { case *spdy.SynStreamFrame: frameErr = s.handleStreamFrame(frame, newHandler) case *spdy.SynReplyFrame: frameErr = s.handleReplyFrame(frame) case *spdy.DataFrame: frameErr = s.dataFrameHandler(frame) case *spdy.RstStreamFrame: frameErr = s.handleResetFrame(frame) case *spdy.HeadersFrame: frameErr = s.handleHeaderFrame(frame) case *spdy.PingFrame: frameErr = s.handlePingFrame(frame) case *spdy.GoAwayFrame: frameErr = s.handleGoAwayFrame(frame) default: frameErr = fmt.Errorf("unhandled frame type: %T", frame) } if frameErr != nil { fmt.Errorf("frame handling error: %s", frameErr) } } } func (s *Connection) getStreamPriority(streamId spdy.StreamId) uint8 { stream, streamOk := s.getStream(streamId) if !streamOk { return 7 } return stream.priority } func (s *Connection) addStreamFrame(frame *spdy.SynStreamFrame) { var parent *Stream if frame.AssociatedToStreamId != spdy.StreamId(0) { parent, _ = s.getStream(frame.AssociatedToStreamId) } stream := &Stream{ streamId: frame.StreamId, parent: 
parent,
	conn:       s,
	startChan:  make(chan error),
	headers:    frame.Headers,
	finished:   (frame.CFHeader.Flags & spdy.ControlFlagUnidirectional) != 0x00,
	replyCond:  sync.NewCond(new(sync.Mutex)),
	dataChan:   make(chan []byte),
	headerChan: make(chan http.Header),
	closeChan:  make(chan bool),
	}
	if frame.CFHeader.Flags&spdy.ControlFlagFin != 0x00 {
		stream.closeRemoteChannels()
	}

	s.addStream(stream)
}

// checkStreamFrame checks to see if a stream frame is allowed.
// If the stream is invalid, then a reset frame with protocol error
// will be returned.
func (s *Connection) checkStreamFrame(frame *spdy.SynStreamFrame) bool {
	s.receiveIdLock.Lock()
	defer s.receiveIdLock.Unlock()
	if s.goneAway {
		return false
	}
	validationErr := s.validateStreamId(frame.StreamId)
	if validationErr != nil {
		go func() {
			resetErr := s.sendResetFrame(spdy.ProtocolError, frame.StreamId)
			if resetErr != nil {
				// Log the failure; the previous fmt.Errorf here built an
				// error and silently discarded it (go vet: unusedresult).
				debugMessage("reset error: %s", resetErr)
			}
		}()
		return false
	}
	return true
}

// handleStreamFrame dispatches a validated SYN_STREAM frame to the
// registered stream handler.
func (s *Connection) handleStreamFrame(frame *spdy.SynStreamFrame, newHandler StreamHandler) error {
	stream, ok := s.getStream(frame.StreamId)
	if !ok {
		return fmt.Errorf("Missing stream: %d", frame.StreamId)
	}

	newHandler(stream)

	return nil
}

// handleReplyFrame marks the local stream as replied and unblocks any
// goroutine waiting on the stream's startChan.
func (s *Connection) handleReplyFrame(frame *spdy.SynReplyFrame) error {
	debugMessage("(%p) Reply frame received for %d", s, frame.StreamId)
	stream, streamOk := s.getStream(frame.StreamId)
	if !streamOk {
		debugMessage("Reply frame gone away for %d", frame.StreamId)
		// Stream has already gone away
		return nil
	}
	if stream.replied {
		// Stream has already received reply
		return nil
	}
	stream.replied = true

	// TODO Check for error
	if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 {
		s.remoteStreamFinish(stream)
	}

	close(stream.startChan)

	return nil
}

// handleResetFrame tears down the stream named by a RST_STREAM frame,
// delivering ErrReset to a waiter that never got a reply.
func (s *Connection) handleResetFrame(frame *spdy.RstStreamFrame) error {
	stream, streamOk := s.getStream(frame.StreamId)
	if !streamOk {
		// Stream has already been removed
		return nil
	}
	s.removeStream(stream)
	stream.closeRemoteChannels()

	if !stream.replied {
		stream.replied = true
		stream.startChan <- ErrReset
		close(stream.startChan)
	}

	stream.finishLock.Lock()
	stream.finished = true
	stream.finishLock.Unlock()

	return nil
}

// handleHeaderFrame forwards trailing headers to the stream's header
// channel, finishing the remote side if the FIN flag is set.
func (s *Connection) handleHeaderFrame(frame *spdy.HeadersFrame) error {
	stream, streamOk := s.getStream(frame.StreamId)
	if !streamOk {
		// Stream has already gone away
		return nil
	}
	if !stream.replied {
		// No reply received...Protocol error?
		return nil
	}

	// TODO limit headers while not blocking (use buffered chan or goroutine?)
	select {
	case <-stream.closeChan:
		return nil
	case stream.headerChan <- frame.Headers:
	}

	if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 {
		s.remoteStreamFinish(stream)
	}

	return nil
}

// handleDataFrame delivers a data frame's payload to the stream's data
// channel, dropping it if the stream has shut down.
func (s *Connection) handleDataFrame(frame *spdy.DataFrame) error {
	debugMessage("(%p) Data frame received for %d", s, frame.StreamId)
	stream, streamOk := s.getStream(frame.StreamId)
	if !streamOk {
		debugMessage("(%p) Data frame gone away for %d", s, frame.StreamId)
		// Stream has already gone away
		return nil
	}
	if !stream.replied {
		debugMessage("(%p) Data frame not replied %d", s, frame.StreamId)
		// No reply received...Protocol error?
		return nil
	}

	debugMessage("(%p) (%d) Data frame handling", stream, stream.streamId)
	if len(frame.Data) > 0 {
		stream.dataLock.RLock()
		select {
		case <-stream.closeChan:
			debugMessage("(%p) (%d) Data frame not sent (stream shut down)", stream, stream.streamId)
		case stream.dataChan <- frame.Data:
			debugMessage("(%p) (%d) Data frame sent", stream, stream.streamId)
		}
		stream.dataLock.RUnlock()
	}
	if (frame.Flags & spdy.DataFlagFin) != 0x00 {
		s.remoteStreamFinish(stream)
	}
	return nil
}

// handlePingFrame echoes remote pings back to the peer and completes
// locally-initiated pings by closing their wait channel.
func (s *Connection) handlePingFrame(frame *spdy.PingFrame) error {
	// Pings initiated by the peer have the opposite id parity; echo them.
	if s.pingId&0x01 != frame.Id&0x01 {
		return s.framer.WriteFrame(frame)
	}
	pingChan, pingOk := s.pingChans[frame.Id]
	if pingOk {
		close(pingChan)
	}
	return nil
}

// handleGoAwayFrame records that the peer is going away, notifies any
// registered close listener, and starts an asynchronous shutdown.
func (s *Connection) handleGoAwayFrame(frame *spdy.GoAwayFrame) error {
	debugMessage("(%p) Go away received", s)
	s.receiveIdLock.Lock()
	if s.goneAway {
		s.receiveIdLock.Unlock()
		return nil
	}
	s.goneAway = true
	s.receiveIdLock.Unlock()

	if s.lastStreamChan != nil {
		stream, _ := s.getStream(frame.LastGoodStreamId)
		go func() {
			s.lastStreamChan <- stream
		}()
	}

	// Do not block frame handler waiting for closure
	go s.shutdown(s.goAwayTimeout)

	return nil
}

// remoteStreamFinish closes the remote-facing channels of a stream and,
// if the local side already finished, removes it from the connection.
func (s *Connection) remoteStreamFinish(stream *Stream) {
	stream.closeRemoteChannels()

	stream.finishLock.Lock()
	if stream.finished {
		// Stream is fully closed, cleanup
		s.removeStream(stream)
	}
	stream.finishLock.Unlock()
}

// CreateStream creates a new spdy stream using the parameters for
// creating the stream frame. The stream frame will be sent upon
// calling this function, however this function does not wait for
// the reply frame. If waiting for the reply is desired, use
// the stream Wait or WaitTimeout function on the stream returned
// by this function.
func (s *Connection) CreateStream(headers http.Header, parent *Stream, fin bool) (*Stream, error) {
	// MUST synchronize stream creation (all the way to writing the frame)
	// as stream IDs **MUST** increase monotonically.
	s.nextIdLock.Lock()
	defer s.nextIdLock.Unlock()

	streamId := s.getNextStreamId()
	if streamId == 0 {
		return nil, fmt.Errorf("Unable to get new stream id")
	}

	stream := &Stream{
		streamId:   streamId,
		parent:     parent,
		conn:       s,
		startChan:  make(chan error),
		headers:    headers,
		dataChan:   make(chan []byte),
		headerChan: make(chan http.Header),
		closeChan:  make(chan bool),
	}

	debugMessage("(%p) (%p) Create stream", s, stream)

	s.addStream(stream)

	return stream, s.sendStream(stream, fin)
}

// shutdown waits (up to closeTimeout) for all streams to finish, closes
// the underlying network connection, and publishes any close error on
// shutdownChan. It runs at most once per connection.
func (s *Connection) shutdown(closeTimeout time.Duration) {
	// TODO Ensure this isn't called multiple times
	s.shutdownLock.Lock()
	if s.hasShutdown {
		s.shutdownLock.Unlock()
		return
	}
	s.hasShutdown = true
	s.shutdownLock.Unlock()

	var timeout <-chan time.Time
	if closeTimeout > time.Duration(0) {
		timeout = time.After(closeTimeout)
	}
	streamsClosed := make(chan bool)

	go func() {
		s.streamCond.L.Lock()
		for len(s.streams) > 0 {
			debugMessage("Streams opened: %d, %#v", len(s.streams), s.streams)
			s.streamCond.Wait()
		}
		s.streamCond.L.Unlock()
		close(streamsClosed)
	}()

	var err error
	select {
	case <-streamsClosed:
		// No active streams, close should be safe
		err = s.conn.Close()
	case <-timeout:
		// Force ungraceful close
		err = s.conn.Close()
		// Wait for cleanup to clear active streams
		<-streamsClosed
	}

	if err != nil {
		duration := 10 * time.Minute
		time.AfterFunc(duration, func() {
			select {
			case err, ok := <-s.shutdownChan:
				if ok {
					// Log the unconsumed error; fmt.Errorf previously
					// discarded it here (go vet: unusedresult).
					debugMessage("Unhandled close error after %s: %s", duration, err)
				}
			default:
			}
		})
		s.shutdownChan <- err
	}
	close(s.shutdownChan)
}

// Close closes the spdy connection by sending a GoAway frame and
// initiating shutdown.
func (s *Connection) Close() error {
	s.receiveIdLock.Lock()
	if s.goneAway {
		s.receiveIdLock.Unlock()
		return nil
	}
	s.goneAway = true
	s.receiveIdLock.Unlock()

	var lastStreamId spdy.StreamId
	if s.receivedStreamId > 2 {
		lastStreamId = s.receivedStreamId - 2
	}

	goAwayFrame := &spdy.GoAwayFrame{
		LastGoodStreamId: lastStreamId,
		Status:           spdy.GoAwayOK,
	}

	err := s.framer.WriteFrame(goAwayFrame)
	if err != nil {
		return err
	}

	go s.shutdown(s.closeTimeout)

	return nil
}

// CloseWait closes the connection and waits for shutdown
// to finish. Note the underlying network Connection
// is not closed until the end of shutdown.
func (s *Connection) CloseWait() error {
	closeErr := s.Close()
	if closeErr != nil {
		return closeErr
	}
	shutdownErr, ok := <-s.shutdownChan
	if ok {
		return shutdownErr
	}
	return nil
}

// Wait waits for the connection to finish shutdown or for
// the wait timeout duration to expire. This needs to be
// called either after Close has been called or the GOAWAYFRAME
// has been received. If the wait timeout is 0, this function
// will block until shutdown finishes. If wait is never called
// and a shutdown error occurs, that error will be logged as an
// unhandled error.
func (s *Connection) Wait(waitTimeout time.Duration) error {
	var timeout <-chan time.Time
	if waitTimeout > time.Duration(0) {
		timeout = time.After(waitTimeout)
	}

	select {
	case err, ok := <-s.shutdownChan:
		if ok {
			return err
		}
	case <-timeout:
		return ErrTimeout
	}
	return nil
}

// NotifyClose registers a channel to be called when the remote
// peer indicates connection closure. The last stream to be
// received by the remote will be sent on the channel. The notify
// timeout will determine the duration between go away received
// and the connection being closed.
func (s *Connection) NotifyClose(c chan<- *Stream, timeout time.Duration) {
	s.goAwayTimeout = timeout
	s.lastStreamChan = c
}

// SetCloseTimeout sets the amount of time close will wait for
// streams to finish before terminating the underlying network
// connection. Setting the timeout to 0 will cause close to
// wait forever, which is the default.
func (s *Connection) SetCloseTimeout(timeout time.Duration) {
	s.closeTimeout = timeout
}

// SetIdleTimeout sets the amount of time the connection may sit idle before
// it is forcefully terminated.
func (s *Connection) SetIdleTimeout(timeout time.Duration) {
	s.framer.setIdleTimeout(timeout)
}

// sendHeaders writes a HEADERS frame for the stream, setting the FIN
// control flag when fin is true.
func (s *Connection) sendHeaders(headers http.Header, stream *Stream, fin bool) error {
	var flags spdy.ControlFlags
	if fin {
		flags = spdy.ControlFlagFin
	}

	headerFrame := &spdy.HeadersFrame{
		StreamId: stream.streamId,
		Headers:  headers,
		CFHeader: spdy.ControlFrameHeader{Flags: flags},
	}

	return s.framer.WriteFrame(headerFrame)
}

// sendReply writes a SYN_REPLY frame for the stream, setting the FIN
// control flag when fin is true.
func (s *Connection) sendReply(headers http.Header, stream *Stream, fin bool) error {
	var flags spdy.ControlFlags
	if fin {
		flags = spdy.ControlFlagFin
	}

	replyFrame := &spdy.SynReplyFrame{
		StreamId: stream.streamId,
		Headers:  headers,
		CFHeader: spdy.ControlFrameHeader{Flags: flags},
	}

	return s.framer.WriteFrame(replyFrame)
}

// sendResetFrame writes a RST_STREAM frame with the given status for
// the given stream id.
func (s *Connection) sendResetFrame(status spdy.RstStreamStatus, streamId spdy.StreamId) error {
	resetFrame := &spdy.RstStreamFrame{
		StreamId: streamId,
		Status:   status,
	}

	return s.framer.WriteFrame(resetFrame)
}

// sendReset writes a RST_STREAM frame with the given status for the
// given stream.
func (s *Connection) sendReset(status spdy.RstStreamStatus, stream *Stream) error {
	return s.sendResetFrame(status, stream.streamId)
}

// sendStream writes the SYN_STREAM frame that announces the stream to
// the peer, marking the stream finished when fin is true.
func (s *Connection) sendStream(stream *Stream, fin bool) error {
	var flags spdy.ControlFlags
	if fin {
		flags = spdy.ControlFlagFin
		stream.finished = true
	}

	var parentId spdy.StreamId
	if stream.parent != nil {
		parentId = stream.parent.streamId
	}

	streamFrame := &spdy.SynStreamFrame{
		StreamId:             spdy.StreamId(stream.streamId),
		AssociatedToStreamId: spdy.StreamId(parentId),
		Headers:              stream.headers,
		CFHeader:             spdy.ControlFrameHeader{Flags: flags},
	}

	return s.framer.WriteFrame(streamFrame)
}

// getNextStreamId returns the next sequential id.
// Every call should produce a unique value or an error.
// Callers must hold nextIdLock.
func (s *Connection) getNextStreamId() spdy.StreamId {
	sid := s.nextStreamId
	if sid > 0x7fffffff {
		// Stream id space (31 bits) exhausted.
		return 0
	}
	s.nextStreamId = s.nextStreamId + 2
	return sid
}

// PeekNextStreamId returns the next sequential id and keeps the next id untouched.
func (s *Connection) PeekNextStreamId() spdy.StreamId {
	sid := s.nextStreamId
	return sid
}

// validateStreamId checks that a remotely-initiated stream id is within
// the 31-bit id space and monotonically increasing; on success it
// advances receivedStreamId. Callers must hold receiveIdLock.
func (s *Connection) validateStreamId(rid spdy.StreamId) error {
	if rid > 0x7fffffff || rid < s.receivedStreamId {
		return ErrInvalidStreamId
	}
	s.receivedStreamId = rid + 2
	return nil
}

// addStream registers a stream with the connection and wakes any
// FindStream or shutdown waiters.
func (s *Connection) addStream(stream *Stream) {
	s.streamCond.L.Lock()
	s.streams[stream.streamId] = stream
	debugMessage("(%p) (%p) Stream added, broadcasting: %d", s, stream, stream.streamId)
	s.streamCond.Broadcast()
	s.streamCond.L.Unlock()
}

// removeStream unregisters a stream from the connection and wakes any
// FindStream or shutdown waiters.
func (s *Connection) removeStream(stream *Stream) {
	s.streamCond.L.Lock()
	delete(s.streams, stream.streamId)
	debugMessage("(%p) (%p) Stream removed, broadcasting: %d", s, stream, stream.streamId)
	s.streamCond.Broadcast()
	s.streamCond.L.Unlock()
}

// getStream looks up a stream by id without waiting.
func (s *Connection) getStream(streamId spdy.StreamId) (stream *Stream, ok bool) {
	s.streamLock.RLock()
	stream, ok = s.streams[streamId]
	s.streamLock.RUnlock()
	return
}

// FindStream looks up the given stream id and either waits for the
// stream to be found or returns nil if the stream id is no longer
// valid.
func (s *Connection) FindStream(streamId uint32) *Stream {
	var stream *Stream
	var ok bool
	s.streamCond.L.Lock()
	stream, ok = s.streams[spdy.StreamId(streamId)]
	debugMessage("(%p) Found stream %d? %t", s, spdy.StreamId(streamId), ok)
	for !ok && streamId >= uint32(s.receivedStreamId) {
		s.streamCond.Wait()
		stream, ok = s.streams[spdy.StreamId(streamId)]
	}
	s.streamCond.L.Unlock()
	return stream
}

// CloseChan returns a channel that is closed when the connection's
// frame-handling loop terminates.
func (s *Connection) CloseChan() <-chan bool {
	return s.closeChan
}
9,593
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/spdystream/LICENSE.docs
Attribution-ShareAlike 4.0 International ======================================================================= Creative Commons Corporation ("Creative Commons") is not a law firm and does not provide legal services or legal advice. Distribution of Creative Commons public licenses does not create a lawyer-client or other relationship. Creative Commons makes its licenses and related information available on an "as-is" basis. Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible. Using Creative Commons Public Licenses Creative Commons public licenses provide a standard set of terms and conditions that creators and other rights holders may use to share original works of authorship and other material subject to copyright and certain other rights specified in the public license below. The following considerations are for informational purposes only, are not exhaustive, and do not form part of our licenses. Considerations for licensors: Our public licenses are intended for use by those authorized to give the public permission to use material in ways otherwise restricted by copyright and certain other rights. Our licenses are irrevocable. Licensors should read and understand the terms and conditions of the license they choose before applying it. Licensors should also secure all rights necessary before applying our licenses so that the public can reuse the material as expected. Licensors should clearly mark any material not subject to the license. This includes other CC- licensed material, or material used under an exception or limitation to copyright. 
More considerations for licensors: wiki.creativecommons.org/Considerations_for_licensors Considerations for the public: By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. If the licensor's permission is not necessary for any reason--for example, because of any applicable exception or limitation to copyright--then that use is not regulated by the license. Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. A licensor may make special requests, such as asking that all changes be marked or described. Although not required by our licenses, you are encouraged to respect those requests where reasonable. More_considerations for the public: wiki.creativecommons.org/Considerations_for_licensees ======================================================================= Creative Commons Attribution-ShareAlike 4.0 International Public License By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution-ShareAlike 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions. Section 1 -- Definitions. a. 
Adapted Material means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image. b. Adapter's License means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License. c. BY-SA Compatible License means a license listed at creativecommons.org/compatiblelicenses, approved by Creative Commons as essentially the equivalent of this Public License. d. Copyright and Similar Rights means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights. e. Effective Technological Measures means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements. f. Exceptions and Limitations means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material. g. License Elements means the license attributes listed in the name of a Creative Commons Public License. The License Elements of this Public License are Attribution and ShareAlike. h. 
Licensed Material means the artistic or literary work, database, or other material to which the Licensor applied this Public License. i. Licensed Rights means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license. j. Licensor means the individual(s) or entity(ies) granting rights under this Public License. k. Share means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them. l. Sui Generis Database Rights means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world. m. You means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning. Section 2 -- Scope. a. License grant. 1. Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to: a. reproduce and Share the Licensed Material, in whole or in part; and b. produce, reproduce, and Share Adapted Material. 2. Exceptions and Limitations. For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions. 3. Term. 
The term of this Public License is specified in Section 6(a). 4. Media and formats; technical modifications allowed. The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a) (4) never produces Adapted Material. 5. Downstream recipients. a. Offer from the Licensor -- Licensed Material. Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License. b. Additional offer from the Licensor -- Adapted Material. Every recipient of Adapted Material from You automatically receives an offer from the Licensor to exercise the Licensed Rights in the Adapted Material under the conditions of the Adapter's License You apply. c. No downstream restrictions. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material. 6. No endorsement. Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i). b. Other rights. 1. 
Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise. 2. Patent and trademark rights are not licensed under this Public License. 3. To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties. Section 3 -- License Conditions. Your exercise of the Licensed Rights is expressly made subject to the following conditions. a. Attribution. 1. If You Share the Licensed Material (including in modified form), You must: a. retain the following if it is supplied by the Licensor with the Licensed Material: i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated); ii. a copyright notice; iii. a notice that refers to this Public License; iv. a notice that refers to the disclaimer of warranties; v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable; b. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and c. indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License. 2. You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. 
For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information. 3. If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable. b. ShareAlike. In addition to the conditions in Section 3(a), if You Share Adapted Material You produce, the following conditions also apply. 1. The Adapter's License You apply must be a Creative Commons license with the same License Elements, this version or later, or a BY-SA Compatible License. 2. You must include the text of, or the URI or hyperlink to, the Adapter's License You apply. You may satisfy this condition in any reasonable manner based on the medium, means, and context in which You Share Adapted Material. 3. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, Adapted Material that restrict exercise of the rights granted under the Adapter's License You apply. Section 4 -- Sui Generis Database Rights. Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material: a. for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database; b. if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material, including for purposes of Section 3(b); and c. You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database. For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights. 
Section 5 -- Disclaimer of Warranties and Limitation of Liability. a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. c. The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability. Section 6 -- Term and Termination. a. This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically. b. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates: 1. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or 2. 
upon express reinstatement by the Licensor. For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License. c. For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License. d. Sections 1, 5, 6, 7, and 8 survive termination of this Public License. Section 7 -- Other Terms and Conditions. a. The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed. b. Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License. Section 8 -- Interpretation. a. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License. b. To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions. c. No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor. d. Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority. ======================================================================= Creative Commons is not a party to its public licenses. 
Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the "Licensor." Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at creativecommons.org/policies, Creative Commons does not authorize the use of the trademark "Creative Commons" or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses. Creative Commons may be contacted at creativecommons.org.
9,594
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/spdystream/utils.go
package spdystream

import (
	"log"
	"os"
)

// DEBUG holds the value of the DEBUG environment variable, captured once
// at package initialization. Debug logging is enabled whenever it is
// non-empty.
var (
	DEBUG = os.Getenv("DEBUG")
)

// debugMessage writes a formatted message to the standard logger, but only
// when debug logging is enabled via the DEBUG environment variable.
func debugMessage(fmt string, args ...interface{}) {
	if DEBUG == "" {
		return
	}
	log.Printf(fmt, args...)
}
9,595
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/spdystream/LICENSE
Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS Copyright 2014-2015 Docker, Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
9,596
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/spdystream/CONTRIBUTING.md
# Contributing to SpdyStream Want to hack on spdystream? Awesome! Here are instructions to get you started. SpdyStream is a part of the [Docker](https://docker.io) project, and follows the same rules and principles. If you're already familiar with the way Docker does things, you'll feel right at home. Otherwise, go read [Docker's contribution guidelines](https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md). Happy hacking!
9,597
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/spdystream/stream.go
package spdystream

import (
	"errors"
	"fmt"
	"io"
	"net"
	"net/http"
	"sync"
	"time"

	"github.com/docker/spdystream/spdy"
)

var (
	// ErrUnreadPartialData is returned by ReadData when a prior Read call
	// consumed only part of a data frame, leaving bytes buffered in unread.
	ErrUnreadPartialData = errors.New("unread partial data")
)

// Stream is a single logical SPDY stream multiplexed over a Connection.
// It also satisfies net.Conn via the addr/deadline methods defined later
// in this file.
type Stream struct {
	streamId  spdy.StreamId // identifier placed in every frame sent for this stream
	parent    *Stream       // stream this one was created from, if any (see Parent)
	conn      *Connection   // owning connection; all frames are written through it
	startChan chan error    // delivers the stream-creation result (read by WaitTimeout)

	dataLock sync.RWMutex // NOTE(review): not referenced in this chunk — confirm usage elsewhere in the package
	dataChan chan []byte  // incoming data-frame payloads (consumed by Read/ReadData)
	unread   []byte       // remainder of a partially consumed data frame

	priority   uint8            // stream priority, 0 (highest) through 7 (see SetPriority)
	headers    http.Header      // headers the stream was created with (see Headers)
	headerChan chan http.Header // incoming header frames (consumed by ReceiveHeader)
	finishLock sync.Mutex       // guards finished
	finished   bool             // set once a FIN or reset has been sent from this side
	replyCond  *sync.Cond       // signals replied becoming true; nil for locally initiated streams (see SendReply)
	replied    bool             // whether a reply has been sent for this stream
	closeLock  sync.Mutex       // serializes closing of closeChan
	closeChan  chan bool        // closed when the stream is fully closed
}

// WriteData writes data to stream, sending a dataframe per call.
// When fin is true the frame carries the FIN flag and the stream is marked
// finished; a second finishing write returns ErrWriteClosedStream.
// Blocks until the stream has been replied to (see waitWriteReply).
func (s *Stream) WriteData(data []byte, fin bool) error {
	s.waitWriteReply()
	var flags spdy.DataFlags

	if fin {
		flags = spdy.DataFlagFin
		s.finishLock.Lock()
		if s.finished {
			s.finishLock.Unlock()
			return ErrWriteClosedStream
		}
		s.finished = true
		s.finishLock.Unlock()
	}

	dataFrame := &spdy.DataFrame{
		StreamId: s.streamId,
		Flags:    flags,
		Data:     data,
	}

	debugMessage("(%p) (%d) Writing data frame", s, s.streamId)
	return s.conn.framer.WriteFrame(dataFrame)
}

// Write writes bytes to a stream, calling write data for each call.
// Implements io.Writer; n is len(data) on success, 0 on error.
func (s *Stream) Write(data []byte) (n int, err error) {
	err = s.WriteData(data, false)
	if err == nil {
		n = len(data)
	}
	return
}

// Read reads bytes from a stream, a single read will never get more
// than what is sent on a single data frame, but a multiple calls to
// read may get data from the same data frame. Returns io.EOF once the
// stream is closed or the data channel has been shut.
func (s *Stream) Read(p []byte) (n int, err error) {
	if s.unread == nil {
		select {
		case <-s.closeChan:
			return 0, io.EOF
		case read, ok := <-s.dataChan:
			if !ok {
				return 0, io.EOF
			}
			s.unread = read
		}
	}
	// Copy as much as fits into p; stash any leftover for the next Read.
	n = copy(p, s.unread)
	if n < len(s.unread) {
		s.unread = s.unread[n:]
	} else {
		s.unread = nil
	}
	return
}

// ReadData reads an entire data frame and returns the byte array
// from the data frame. If there is unread data from the result
// of a Read call, this function will return an ErrUnreadPartialData.
func (s *Stream) ReadData() ([]byte, error) {
	debugMessage("(%p) Reading data from %d", s, s.streamId)
	if s.unread != nil {
		return nil, ErrUnreadPartialData
	}
	select {
	case <-s.closeChan:
		return nil, io.EOF
	case read, ok := <-s.dataChan:
		if !ok {
			return nil, io.EOF
		}
		return read, nil
	}
}

// waitWriteReply blocks until replied is set, using replyCond.
// A nil replyCond (locally initiated stream — see SendReply) makes this a
// no-op, so writes on such streams never block here.
func (s *Stream) waitWriteReply() {
	if s.replyCond != nil {
		s.replyCond.L.Lock()
		for !s.replied {
			s.replyCond.Wait()
		}
		s.replyCond.L.Unlock()
	}
}

// Wait waits for the stream to receive a reply.
func (s *Stream) Wait() error {
	return s.WaitTimeout(time.Duration(0))
}

// WaitTimeout waits for the stream to receive a reply or for timeout.
// When the timeout is reached, ErrTimeout will be returned.
// A timeout of 0 waits indefinitely (the nil timeoutChan case never fires).
func (s *Stream) WaitTimeout(timeout time.Duration) error {
	var timeoutChan <-chan time.Time
	if timeout > time.Duration(0) {
		timeoutChan = time.After(timeout)
	}

	select {
	case err := <-s.startChan:
		if err != nil {
			return err
		}
		break
	case <-timeoutChan:
		return ErrTimeout
	}
	return nil
}

// Close closes the stream by sending an empty data frame with the
// finish flag set, indicating this side is finished with the stream.
func (s *Stream) Close() error {
	select {
	case <-s.closeChan:
		// Stream is now fully closed
		s.conn.removeStream(s)
	default:
		break
	}
	return s.WriteData([]byte{}, true)
}

// Reset sends a reset frame, putting the stream into the fully closed state.
func (s *Stream) Reset() error {
	s.conn.removeStream(s)
	return s.resetStream()
}

// resetStream closes the remote channels and, if the stream was not already
// finished, marks it finished and writes a RST_STREAM frame with status
// Cancel.
func (s *Stream) resetStream() error {
	// Always call closeRemoteChannels, even if s.finished is already true.
	// This makes it so that stream.Close() followed by stream.Reset() allows
	// stream.Read() to unblock.
	s.closeRemoteChannels()

	s.finishLock.Lock()
	if s.finished {
		s.finishLock.Unlock()
		return nil
	}
	s.finished = true
	s.finishLock.Unlock()

	resetFrame := &spdy.RstStreamFrame{
		StreamId: s.streamId,
		Status:   spdy.Cancel,
	}
	return s.conn.framer.WriteFrame(resetFrame)
}

// CreateSubStream creates a stream using the current as the parent
func (s *Stream) CreateSubStream(headers http.Header, fin bool) (*Stream, error) {
	return s.conn.CreateStream(headers, s, fin)
}

// SetPriority sets the stream priority, does not affect the
// remote priority of this stream after Open has been called.
// Valid values are 0 through 7, 0 being the highest priority
// and 7 the lowest.
func (s *Stream) SetPriority(priority uint8) {
	s.priority = priority
}

// SendHeader sends a header frame across the stream
func (s *Stream) SendHeader(headers http.Header, fin bool) error {
	return s.conn.sendHeaders(headers, s, fin)
}

// SendReply sends a reply on a stream, only valid to be called once
// when handling a new stream. Subsequent calls return nil without
// sending anything; callers on a locally initiated stream (nil replyCond)
// get an error.
func (s *Stream) SendReply(headers http.Header, fin bool) error {
	if s.replyCond == nil {
		return errors.New("cannot reply on initiated stream")
	}
	s.replyCond.L.Lock()
	defer s.replyCond.L.Unlock()
	if s.replied {
		return nil
	}

	err := s.conn.sendReply(headers, s, fin)
	if err != nil {
		return err
	}

	// Wake any writers blocked in waitWriteReply.
	s.replied = true
	s.replyCond.Broadcast()
	return nil
}

// Refuse sends a reset frame with the status refuse, only
// valid to be called once when handling a new stream. This
// may be used to indicate that a stream is not allowed
// when http status codes are not being used.
func (s *Stream) Refuse() error {
	if s.replied {
		return nil
	}
	s.replied = true
	return s.conn.sendReset(spdy.RefusedStream, s)
}

// Cancel sends a reset frame with the status canceled. This
// can be used at any time by the creator of the Stream to
// indicate the stream is no longer needed.
func (s *Stream) Cancel() error {
	return s.conn.sendReset(spdy.Cancel, s)
}

// ReceiveHeader blocks until the remote side sends a header frame on this
// stream, returning it. An error is returned if the stream is closed (or
// the header channel is shut) before a header arrives.
func (s *Stream) ReceiveHeader() (http.Header, error) {
	select {
	case hdr, ok := <-s.headerChan:
		if ok {
			return hdr, nil
		}
		return nil, fmt.Errorf("header chan closed")
	case <-s.closeChan:
	}
	return nil, fmt.Errorf("stream closed")
}

// Parent reports the stream this one was created from, or nil.
func (s *Stream) Parent() *Stream {
	return s.parent
}

// Headers reports the headers the stream was created with.
func (s *Stream) Headers() http.Header {
	return s.headers
}

// String renders the stream as "stream:<id>" for logging and debugging.
func (s *Stream) String() string {
	return fmt.Sprintf("stream:%d", s.streamId)
}

// Identifier exposes the stream id as a plain 32 bit value.
func (s *Stream) Identifier() uint32 {
	return uint32(s.streamId)
}

// IsFinished reports whether this side has already sent a FIN or reset.
func (s *Stream) IsFinished() bool {
	return s.finished
}

// The methods below delegate to the underlying connection so that Stream
// satisfies net.Conn.

// LocalAddr reports the local address of the underlying connection.
func (s *Stream) LocalAddr() net.Addr {
	return s.conn.conn.LocalAddr()
}

// RemoteAddr reports the remote address of the underlying connection.
func (s *Stream) RemoteAddr() net.Addr {
	return s.conn.conn.RemoteAddr()
}

// SetDeadline sets the deadline on the underlying connection.
// TODO set per stream values instead of connection-wide.
func (s *Stream) SetDeadline(deadline time.Time) error {
	return s.conn.conn.SetDeadline(deadline)
}

// SetReadDeadline sets the read deadline on the underlying connection.
func (s *Stream) SetReadDeadline(deadline time.Time) error {
	return s.conn.conn.SetReadDeadline(deadline)
}

// SetWriteDeadline sets the write deadline on the underlying connection.
func (s *Stream) SetWriteDeadline(deadline time.Time) error {
	return s.conn.conn.SetWriteDeadline(deadline)
}

// closeRemoteChannels closes closeChan exactly once, unblocking any
// pending Read/ReadData/ReceiveHeader calls. Safe to call repeatedly.
func (s *Stream) closeRemoteChannels() {
	s.closeLock.Lock()
	defer s.closeLock.Unlock()
	select {
	case <-s.closeChan:
		// already closed
	default:
		close(s.closeChan)
	}
}
9,598
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/spdystream/priority.go
package spdystream

import (
	"container/heap"
	"sync"

	"github.com/docker/spdystream/spdy"
)

// prioritizedFrame pairs a frame with its priority and a monotonically
// increasing insertion id, used to keep FIFO order among frames of equal
// priority.
type prioritizedFrame struct {
	frame    spdy.Frame
	priority uint8
	insertId uint64
}

// frameQueue implements heap.Interface, ordering frames by priority
// (lower value first) and, within a priority level, by insertion order.
type frameQueue []*prioritizedFrame

func (fq frameQueue) Len() int {
	return len(fq)
}

func (fq frameQueue) Less(i, j int) bool {
	if fq[i].priority != fq[j].priority {
		return fq[i].priority < fq[j].priority
	}
	return fq[i].insertId < fq[j].insertId
}

func (fq frameQueue) Swap(i, j int) {
	fq[i], fq[j] = fq[j], fq[i]
}

func (fq *frameQueue) Push(x interface{}) {
	*fq = append(*fq, x.(*prioritizedFrame))
}

func (fq *frameQueue) Pop() interface{} {
	old := *fq
	last := len(old) - 1
	item := old[last]
	*fq = old[:last]
	return item
}

// PriorityFrameQueue is a bounded, blocking priority queue of frames.
// Push blocks while the queue is full; Pop blocks while it is empty,
// until Drain is called.
type PriorityFrameQueue struct {
	queue        *frameQueue
	c            *sync.Cond
	size         int
	nextInsertId uint64
	drain        bool
}

// NewPriorityFrameQueue creates a queue that holds at most size frames.
func NewPriorityFrameQueue(size int) *PriorityFrameQueue {
	q := make(frameQueue, 0, size)
	heap.Init(&q)
	return &PriorityFrameQueue{
		queue: &q,
		size:  size,
		c:     sync.NewCond(&sync.Mutex{}),
	}
}

// Push enqueues frame at the given priority, blocking while the queue is
// at capacity.
func (q *PriorityFrameQueue) Push(frame spdy.Frame, priority uint8) {
	q.c.L.Lock()
	defer q.c.L.Unlock()
	for q.queue.Len() >= q.size {
		q.c.Wait()
	}
	heap.Push(q.queue, &prioritizedFrame{
		frame:    frame,
		priority: priority,
		insertId: q.nextInsertId,
	})
	q.nextInsertId++
	q.c.Signal()
}

// Pop dequeues the highest-priority frame, blocking while the queue is
// empty. Once Drain has been called, Pop returns nil on an empty queue.
func (q *PriorityFrameQueue) Pop() spdy.Frame {
	q.c.L.Lock()
	defer q.c.L.Unlock()
	for q.queue.Len() == 0 {
		if q.drain {
			return nil
		}
		q.c.Wait()
	}
	popped := heap.Pop(q.queue).(*prioritizedFrame)
	q.c.Signal()
	return popped.frame
}

// Drain marks the queue as draining and wakes all waiters, so blocked
// Pop calls can observe the empty-and-draining state and return nil.
func (q *PriorityFrameQueue) Drain() {
	q.c.L.Lock()
	defer q.c.L.Unlock()
	q.drain = true
	q.c.Broadcast()
}
9,599