index
int64
0
0
repo_id
stringlengths
21
232
file_path
stringlengths
34
259
content
stringlengths
1
14.1M
__index_level_0__
int64
0
10k
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/spdystream
kubeflow_public_repos/fate-operator/vendor/github.com/docker/spdystream/spdy/read.go
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package spdy

import (
	"compress/zlib"
	"encoding/binary"
	"io"
	"net/http"
	"strings"
)

// read delegates SYN_STREAM parsing to the Framer, which owns the shared
// header-decompressor state.
func (frame *SynStreamFrame) read(h ControlFrameHeader, f *Framer) error {
	return f.readSynStreamFrame(h, frame)
}

// read delegates SYN_REPLY parsing to the Framer.
func (frame *SynReplyFrame) read(h ControlFrameHeader, f *Framer) error {
	return f.readSynReplyFrame(h, frame)
}

// read parses the body of a RST_STREAM frame: a stream id followed by a
// status code. A zero status or a zero stream id is a protocol violation.
func (frame *RstStreamFrame) read(h ControlFrameHeader, f *Framer) error {
	frame.CFHeader = h
	if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
		return err
	}
	if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil {
		return err
	}
	if frame.Status == 0 {
		return &Error{InvalidControlFrame, frame.StreamId}
	}
	if frame.StreamId == 0 {
		return &Error{ZeroStreamId, 0}
	}
	return nil
}

// read parses a SETTINGS frame: an entry count followed by that many
// flag/id/value triples. The flag occupies the top byte of the id word.
func (frame *SettingsFrame) read(h ControlFrameHeader, f *Framer) error {
	frame.CFHeader = h
	var numSettings uint32
	if err := binary.Read(f.r, binary.BigEndian, &numSettings); err != nil {
		return err
	}
	frame.FlagIdValues = make([]SettingsFlagIdValue, numSettings)
	for i := uint32(0); i < numSettings; i++ {
		if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Id); err != nil {
			return err
		}
		// Split the 32-bit word into an 8-bit flag and a 24-bit id.
		frame.FlagIdValues[i].Flag = SettingsFlag((frame.FlagIdValues[i].Id & 0xff000000) >> 24)
		frame.FlagIdValues[i].Id &= 0xffffff
		if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Value); err != nil {
			return err
		}
	}
	return nil
}

// read parses a PING frame. A zero id is disallowed and PING carries no
// flags.
func (frame *PingFrame) read(h ControlFrameHeader, f *Framer) error {
	frame.CFHeader = h
	if err := binary.Read(f.r, binary.BigEndian, &frame.Id); err != nil {
		return err
	}
	if frame.Id == 0 {
		return &Error{ZeroStreamId, 0}
	}
	if frame.CFHeader.Flags != 0 {
		return &Error{InvalidControlFrame, StreamId(frame.Id)}
	}
	return nil
}

// read parses a GOAWAY frame: last-good stream id plus status. The frame
// must carry no flags and its payload length must be exactly 8 bytes.
func (frame *GoAwayFrame) read(h ControlFrameHeader, f *Framer) error {
	frame.CFHeader = h
	if err := binary.Read(f.r, binary.BigEndian, &frame.LastGoodStreamId); err != nil {
		return err
	}
	if frame.CFHeader.Flags != 0 {
		return &Error{InvalidControlFrame, frame.LastGoodStreamId}
	}
	if frame.CFHeader.length != 8 {
		return &Error{InvalidControlFrame, frame.LastGoodStreamId}
	}
	if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil {
		return err
	}
	return nil
}

// read delegates HEADERS parsing to the Framer.
func (frame *HeadersFrame) read(h ControlFrameHeader, f *Framer) error {
	return f.readHeadersFrame(h, frame)
}

// read parses a WINDOW_UPDATE frame: stream id plus window-size delta. No
// flags are allowed and the payload length must be exactly 8 bytes.
func (frame *WindowUpdateFrame) read(h ControlFrameHeader, f *Framer) error {
	frame.CFHeader = h
	if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
		return err
	}
	if frame.CFHeader.Flags != 0 {
		return &Error{InvalidControlFrame, frame.StreamId}
	}
	if frame.CFHeader.length != 8 {
		return &Error{InvalidControlFrame, frame.StreamId}
	}
	if err := binary.Read(f.r, binary.BigEndian, &frame.DeltaWindowSize); err != nil {
		return err
	}
	return nil
}

// newControlFrame allocates the empty frame struct matching frameType, or
// returns an InvalidControlFrame error for an unknown type.
func newControlFrame(frameType ControlFrameType) (controlFrame, error) {
	ctor, ok := cframeCtor[frameType]
	if !ok {
		return nil, &Error{Err: InvalidControlFrame}
	}
	return ctor(), nil
}

// cframeCtor maps each known control frame type to a constructor for its
// unpacked in-memory representation.
var cframeCtor = map[ControlFrameType]func() controlFrame{
	TypeSynStream:    func() controlFrame { return new(SynStreamFrame) },
	TypeSynReply:     func() controlFrame { return new(SynReplyFrame) },
	TypeRstStream:    func() controlFrame { return new(RstStreamFrame) },
	TypeSettings:     func() controlFrame { return new(SettingsFrame) },
	TypePing:         func() controlFrame { return new(PingFrame) },
	TypeGoAway:       func() controlFrame { return new(GoAwayFrame) },
	TypeHeaders:      func() controlFrame { return new(HeadersFrame) },
	TypeWindowUpdate: func() controlFrame { return new(WindowUpdateFrame) },
}

// uncorkHeaderDecompressor (re)arms the shared zlib header decompressor to
// read exactly payloadSize bytes from the underlying reader. The zlib
// stream (and its SPDY dictionary state) persists across frames, so after
// the first frame only the LimitedReader's remaining-byte budget is reset.
func (f *Framer) uncorkHeaderDecompressor(payloadSize int64) error {
	if f.headerDecompressor != nil {
		f.headerReader.N = payloadSize
		return nil
	}
	f.headerReader = io.LimitedReader{R: f.r, N: payloadSize}
	decompressor, err := zlib.NewReaderDict(&f.headerReader, []byte(headerDictionary))
	if err != nil {
		return err
	}
	f.headerDecompressor = decompressor
	return nil
}

// ReadFrame reads SPDY encoded data and returns a decompressed Frame.
func (f *Framer) ReadFrame() (Frame, error) {
	var firstWord uint32
	if err := binary.Read(f.r, binary.BigEndian, &firstWord); err != nil {
		return nil, err
	}
	// The high bit of the first word distinguishes control frames (1)
	// from data frames (0).
	if firstWord&0x80000000 != 0 {
		frameType := ControlFrameType(firstWord & 0xffff)
		version := uint16(firstWord >> 16 & 0x7fff)
		return f.parseControlFrame(version, frameType)
	}
	return f.parseDataFrame(StreamId(firstWord & 0x7fffffff))
}

// parseControlFrame reads the flags/length word, dispatches to the typed
// frame parser, and returns the decoded control frame.
// NOTE(review): the version is stored in the header but is not validated
// against Version here — confirm callers check it if that matters.
func (f *Framer) parseControlFrame(version uint16, frameType ControlFrameType) (Frame, error) {
	var length uint32
	if err := binary.Read(f.r, binary.BigEndian, &length); err != nil {
		return nil, err
	}
	// Flags live in the top byte; the payload length in the low 24 bits.
	flags := ControlFlags((length & 0xff000000) >> 24)
	length &= 0xffffff
	header := ControlFrameHeader{version, frameType, flags, length}
	cframe, err := newControlFrame(frameType)
	if err != nil {
		return nil, err
	}
	if err = cframe.read(header, f); err != nil {
		return nil, err
	}
	return cframe, nil
}

// parseHeaderValueBlock decodes a SPDY name/value header block from r.
// Names are forced to lowercase and NUL-separated values are split into
// individual header values. Soft protocol violations (uppercase or
// duplicate names) are remembered in e and returned alongside the headers
// parsed so far; hard I/O errors abort immediately.
func parseHeaderValueBlock(r io.Reader, streamId StreamId) (http.Header, error) {
	var numHeaders uint32
	if err := binary.Read(r, binary.BigEndian, &numHeaders); err != nil {
		return nil, err
	}
	var e error
	h := make(http.Header, int(numHeaders))
	for i := 0; i < int(numHeaders); i++ {
		var length uint32
		if err := binary.Read(r, binary.BigEndian, &length); err != nil {
			return nil, err
		}
		nameBytes := make([]byte, length)
		if _, err := io.ReadFull(r, nameBytes); err != nil {
			return nil, err
		}
		name := string(nameBytes)
		if name != strings.ToLower(name) {
			e = &Error{UnlowercasedHeaderName, streamId}
			name = strings.ToLower(name)
		}
		if h[name] != nil {
			e = &Error{DuplicateHeaders, streamId}
		}
		if err := binary.Read(r, binary.BigEndian, &length); err != nil {
			return nil, err
		}
		value := make([]byte, length)
		if _, err := io.ReadFull(r, value); err != nil {
			return nil, err
		}
		valueList := strings.Split(string(value), headerValueSeparator)
		for _, v := range valueList {
			h.Add(name, v)
		}
	}
	if e != nil {
		return h, e
	}
	return h, nil
}

// readSynStreamFrame parses the body of a SYN_STREAM frame, decompressing
// its header block unless compression is disabled, and rejects frames
// carrying request-invalid headers or a zero stream id.
func (f *Framer) readSynStreamFrame(h ControlFrameHeader, frame *SynStreamFrame) error {
	frame.CFHeader = h
	var err error
	if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
		return err
	}
	if err = binary.Read(f.r, binary.BigEndian, &frame.AssociatedToStreamId); err != nil {
		return err
	}
	if err = binary.Read(f.r, binary.BigEndian, &frame.Priority); err != nil {
		return err
	}
	// Priority occupies the top 3 bits of its byte.
	frame.Priority >>= 5
	if err = binary.Read(f.r, binary.BigEndian, &frame.Slot); err != nil {
		return err
	}
	reader := f.r
	if !f.headerCompressionDisabled {
		// 10 bytes of fixed fields precede the compressed header block.
		err := f.uncorkHeaderDecompressor(int64(h.length - 10))
		if err != nil {
			return err
		}
		reader = f.headerDecompressor
	}
	frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
	// The compressed payload must be consumed exactly: leftover budget or a
	// premature EOF both indicate a wrong compressed payload size.
	if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) {
		err = &Error{WrongCompressedPayloadSize, 0}
	}
	if err != nil {
		return err
	}
	for h := range frame.Headers {
		if invalidReqHeaders[h] {
			return &Error{InvalidHeaderPresent, frame.StreamId}
		}
	}
	if frame.StreamId == 0 {
		return &Error{ZeroStreamId, 0}
	}
	return nil
}

// readSynReplyFrame parses the body of a SYN_REPLY frame, decompressing
// its header block unless compression is disabled, and rejects frames
// carrying response-invalid headers or a zero stream id.
func (f *Framer) readSynReplyFrame(h ControlFrameHeader, frame *SynReplyFrame) error {
	frame.CFHeader = h
	var err error
	if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
		return err
	}
	reader := f.r
	if !f.headerCompressionDisabled {
		// 4 bytes of stream id precede the compressed header block.
		err := f.uncorkHeaderDecompressor(int64(h.length - 4))
		if err != nil {
			return err
		}
		reader = f.headerDecompressor
	}
	frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
	if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) {
		err = &Error{WrongCompressedPayloadSize, 0}
	}
	if err != nil {
		return err
	}
	for h := range frame.Headers {
		if invalidRespHeaders[h] {
			return &Error{InvalidHeaderPresent, frame.StreamId}
		}
	}
	if frame.StreamId == 0 {
		return &Error{ZeroStreamId, 0}
	}
	return nil
}

// readHeadersFrame parses the body of a HEADERS frame. The invalid-header
// set is chosen by stream id parity (even ids are validated against the
// request set, odd against the response set).
func (f *Framer) readHeadersFrame(h ControlFrameHeader, frame *HeadersFrame) error {
	frame.CFHeader = h
	var err error
	if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
		return err
	}
	reader := f.r
	if !f.headerCompressionDisabled {
		err := f.uncorkHeaderDecompressor(int64(h.length - 4))
		if err != nil {
			return err
		}
		reader = f.headerDecompressor
	}
	frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
	if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) {
		err = &Error{WrongCompressedPayloadSize, 0}
	}
	if err != nil {
		return err
	}
	var invalidHeaders map[string]bool
	if frame.StreamId%2 == 0 {
		invalidHeaders = invalidReqHeaders
	} else {
		invalidHeaders = invalidRespHeaders
	}
	for h := range frame.Headers {
		if invalidHeaders[h] {
			return &Error{InvalidHeaderPresent, frame.StreamId}
		}
	}
	if frame.StreamId == 0 {
		return &Error{ZeroStreamId, 0}
	}
	return nil
}

// parseDataFrame reads the flags/length word and the payload of a data
// frame. A zero stream id is rejected (after the payload is consumed, so
// the stream stays in sync).
func (f *Framer) parseDataFrame(streamId StreamId) (*DataFrame, error) {
	var length uint32
	if err := binary.Read(f.r, binary.BigEndian, &length); err != nil {
		return nil, err
	}
	var frame DataFrame
	frame.StreamId = streamId
	frame.Flags = DataFlags(length >> 24)
	length &= 0xffffff
	frame.Data = make([]byte, length)
	if _, err := io.ReadFull(f.r, frame.Data); err != nil {
		return nil, err
	}
	if frame.StreamId == 0 {
		return nil, &Error{ZeroStreamId, 0}
	}
	return &frame, nil
}
9,600
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/spdystream
kubeflow_public_repos/fate-operator/vendor/github.com/docker/spdystream/spdy/types.go
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package spdy implements the SPDY protocol (currently SPDY/3), described in
// http://www.chromium.org/spdy/spdy-protocol/spdy-protocol-draft3.
package spdy

import (
	"bytes"
	"compress/zlib"
	"io"
	"net/http"
)

// Version is the protocol version number that this package implements.
const Version = 3

// ControlFrameType stores the type field in a control frame header.
type ControlFrameType uint16

// Control frame types defined by SPDY/3. Note 0x0005 (CREDENTIAL) is not
// implemented; see the TODO near DataFrame.
const (
	TypeSynStream    ControlFrameType = 0x0001
	TypeSynReply                      = 0x0002
	TypeRstStream                     = 0x0003
	TypeSettings                      = 0x0004
	TypePing                          = 0x0006
	TypeGoAway                        = 0x0007
	TypeHeaders                       = 0x0008
	TypeWindowUpdate                  = 0x0009
)

// ControlFlags are the flags that can be set on a control frame.
type ControlFlags uint8

const (
	ControlFlagFin                   ControlFlags = 0x01
	ControlFlagUnidirectional                     = 0x02
	ControlFlagSettingsClearSettings              = 0x01
)

// DataFlags are the flags that can be set on a data frame.
type DataFlags uint8

const (
	DataFlagFin DataFlags = 0x01
)

// MaxDataLength is the maximum number of bytes that can be stored in one frame.
const MaxDataLength = 1<<24 - 1

// headerValueSeparator separates multiple header values.
const headerValueSeparator = "\x00"

// Frame is a single SPDY frame in its unpacked in-memory representation. Use
// Framer to read and write it.
type Frame interface {
	write(f *Framer) error
}

// ControlFrameHeader contains all the fields in a control frame header,
// in its unpacked in-memory representation.
type ControlFrameHeader struct {
	// Note, high bit is the "Control" bit.
	version   uint16 // spdy version number
	frameType ControlFrameType
	Flags     ControlFlags
	length    uint32 // length of data field
}

// controlFrame is implemented by every control frame type; read parses the
// frame body after the common header has been consumed.
type controlFrame interface {
	Frame
	read(h ControlFrameHeader, f *Framer) error
}

// StreamId represents a 31-bit value identifying the stream.
type StreamId uint32

// SynStreamFrame is the unpacked, in-memory representation of a SYN_STREAM
// frame.
type SynStreamFrame struct {
	CFHeader             ControlFrameHeader
	StreamId             StreamId
	AssociatedToStreamId StreamId // stream id for a stream which this stream is associated to
	Priority             uint8    // priority of this frame (3-bit)
	Slot                 uint8    // index in the server's credential vector of the client certificate
	Headers              http.Header
}

// SynReplyFrame is the unpacked, in-memory representation of a SYN_REPLY frame.
type SynReplyFrame struct {
	CFHeader ControlFrameHeader
	StreamId StreamId
	Headers  http.Header
}

// RstStreamStatus represents the status that led to a RST_STREAM.
type RstStreamStatus uint32

const (
	ProtocolError RstStreamStatus = iota + 1
	InvalidStream
	RefusedStream
	UnsupportedVersion
	Cancel
	InternalError
	FlowControlError
	StreamInUse
	StreamAlreadyClosed
	InvalidCredentials
	FrameTooLarge
)

// RstStreamFrame is the unpacked, in-memory representation of a RST_STREAM
// frame.
type RstStreamFrame struct {
	CFHeader ControlFrameHeader
	StreamId StreamId
	Status   RstStreamStatus
}

// SettingsFlag represents a flag in a SETTINGS frame.
type SettingsFlag uint8

const (
	FlagSettingsPersistValue SettingsFlag = 0x1
	FlagSettingsPersisted                 = 0x2
)

// SettingsId represents the id of an id/value pair in a SETTINGS frame.
type SettingsId uint32

const (
	SettingsUploadBandwidth SettingsId = iota + 1
	SettingsDownloadBandwidth
	SettingsRoundTripTime
	SettingsMaxConcurrentStreams
	SettingsCurrentCwnd
	SettingsDownloadRetransRate
	SettingsInitialWindowSize
	SettingsClientCretificateVectorSize
)

// SettingsFlagIdValue is the unpacked, in-memory representation of the
// combined flag/id/value for a setting in a SETTINGS frame.
type SettingsFlagIdValue struct {
	Flag  SettingsFlag
	Id    SettingsId
	Value uint32
}

// SettingsFrame is the unpacked, in-memory representation of a SPDY
// SETTINGS frame.
type SettingsFrame struct {
	CFHeader     ControlFrameHeader
	FlagIdValues []SettingsFlagIdValue
}

// PingFrame is the unpacked, in-memory representation of a PING frame.
type PingFrame struct {
	CFHeader ControlFrameHeader
	Id       uint32 // unique id for this ping, from server is even, from client is odd.
}

// GoAwayStatus represents the status in a GoAwayFrame.
type GoAwayStatus uint32

const (
	GoAwayOK GoAwayStatus = iota
	GoAwayProtocolError
	GoAwayInternalError
)

// GoAwayFrame is the unpacked, in-memory representation of a GOAWAY frame.
type GoAwayFrame struct {
	CFHeader         ControlFrameHeader
	LastGoodStreamId StreamId // last stream id which was accepted by sender
	Status           GoAwayStatus
}

// HeadersFrame is the unpacked, in-memory representation of a HEADERS frame.
type HeadersFrame struct {
	CFHeader ControlFrameHeader
	StreamId StreamId
	Headers  http.Header
}

// WindowUpdateFrame is the unpacked, in-memory representation of a
// WINDOW_UPDATE frame.
type WindowUpdateFrame struct {
	CFHeader        ControlFrameHeader
	StreamId        StreamId
	DeltaWindowSize uint32 // additional number of bytes to existing window size
}

// TODO: Implement credential frame and related methods.

// DataFrame is the unpacked, in-memory representation of a DATA frame.
type DataFrame struct {
	// Note, high bit is the "Control" bit. Should be 0 for data frames.
	StreamId StreamId
	Flags    DataFlags
	Data     []byte // payload data of this frame
}

// ErrorCode is a SPDY specific error.
type ErrorCode string

const (
	UnlowercasedHeaderName     ErrorCode = "header was not lowercased"
	DuplicateHeaders                     = "multiple headers with same name"
	WrongCompressedPayloadSize           = "compressed payload size was incorrect"
	UnknownFrameType                     = "unknown frame type"
	InvalidControlFrame                  = "invalid control frame"
	InvalidDataFrame                     = "invalid data frame"
	InvalidHeaderPresent                 = "frame contained invalid header"
	ZeroStreamId                         = "stream id zero is disallowed"
)

// Error contains both the type of error and additional values. StreamId is 0
// if Error is not associated with a stream.
type Error struct {
	Err      ErrorCode
	StreamId StreamId
}

// Error implements the error interface, returning the error code text.
func (e *Error) Error() string {
	return string(e.Err)
}

// invalidReqHeaders lists connection-level headers that must not appear in
// SPDY request header blocks.
var invalidReqHeaders = map[string]bool{
	"Connection":        true,
	"Host":              true,
	"Keep-Alive":        true,
	"Proxy-Connection":  true,
	"Transfer-Encoding": true,
}

// invalidRespHeaders lists connection-level headers that must not appear in
// SPDY response header blocks.
var invalidRespHeaders = map[string]bool{
	"Connection":        true,
	"Keep-Alive":        true,
	"Proxy-Connection":  true,
	"Transfer-Encoding": true,
}

// Framer handles serializing/deserializing SPDY frames, including compressing/
// decompressing payloads.
type Framer struct {
	headerCompressionDisabled bool
	w                         io.Writer
	headerBuf                 *bytes.Buffer
	headerCompressor          *zlib.Writer
	r                         io.Reader
	headerReader              io.LimitedReader
	headerDecompressor        io.ReadCloser
}

// NewFramer allocates a new Framer for a given SPDY connection, represented by
// a io.Writer and io.Reader. Note that Framer will read and write individual fields
// from/to the Reader and Writer, so the caller should pass in an appropriately
// buffered implementation to optimize performance.
func NewFramer(w io.Writer, r io.Reader) (*Framer, error) {
	compressBuf := new(bytes.Buffer)
	compressor, err := zlib.NewWriterLevelDict(compressBuf, zlib.BestCompression, []byte(headerDictionary))
	if err != nil {
		return nil, err
	}
	framer := &Framer{
		w:                w,
		headerBuf:        compressBuf,
		headerCompressor: compressor,
		r:                r,
	}
	return framer, nil
}
9,601
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/spdystream
kubeflow_public_repos/fate-operator/vendor/github.com/docker/spdystream/spdy/write.go
// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package spdy import ( "encoding/binary" "io" "net/http" "strings" ) func (frame *SynStreamFrame) write(f *Framer) error { return f.writeSynStreamFrame(frame) } func (frame *SynReplyFrame) write(f *Framer) error { return f.writeSynReplyFrame(frame) } func (frame *RstStreamFrame) write(f *Framer) (err error) { if frame.StreamId == 0 { return &Error{ZeroStreamId, 0} } frame.CFHeader.version = Version frame.CFHeader.frameType = TypeRstStream frame.CFHeader.Flags = 0 frame.CFHeader.length = 8 // Serialize frame to Writer. if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { return } if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { return } if frame.Status == 0 { return &Error{InvalidControlFrame, frame.StreamId} } if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil { return } return } func (frame *SettingsFrame) write(f *Framer) (err error) { frame.CFHeader.version = Version frame.CFHeader.frameType = TypeSettings frame.CFHeader.length = uint32(len(frame.FlagIdValues)*8 + 4) // Serialize frame to Writer. if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { return } if err = binary.Write(f.w, binary.BigEndian, uint32(len(frame.FlagIdValues))); err != nil { return } for _, flagIdValue := range frame.FlagIdValues { flagId := uint32(flagIdValue.Flag)<<24 | uint32(flagIdValue.Id) if err = binary.Write(f.w, binary.BigEndian, flagId); err != nil { return } if err = binary.Write(f.w, binary.BigEndian, flagIdValue.Value); err != nil { return } } return } func (frame *PingFrame) write(f *Framer) (err error) { if frame.Id == 0 { return &Error{ZeroStreamId, 0} } frame.CFHeader.version = Version frame.CFHeader.frameType = TypePing frame.CFHeader.Flags = 0 frame.CFHeader.length = 4 // Serialize frame to Writer. 
if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { return } if err = binary.Write(f.w, binary.BigEndian, frame.Id); err != nil { return } return } func (frame *GoAwayFrame) write(f *Framer) (err error) { frame.CFHeader.version = Version frame.CFHeader.frameType = TypeGoAway frame.CFHeader.Flags = 0 frame.CFHeader.length = 8 // Serialize frame to Writer. if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { return } if err = binary.Write(f.w, binary.BigEndian, frame.LastGoodStreamId); err != nil { return } if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil { return } return nil } func (frame *HeadersFrame) write(f *Framer) error { return f.writeHeadersFrame(frame) } func (frame *WindowUpdateFrame) write(f *Framer) (err error) { frame.CFHeader.version = Version frame.CFHeader.frameType = TypeWindowUpdate frame.CFHeader.Flags = 0 frame.CFHeader.length = 8 // Serialize frame to Writer. if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { return } if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { return } if err = binary.Write(f.w, binary.BigEndian, frame.DeltaWindowSize); err != nil { return } return nil } func (frame *DataFrame) write(f *Framer) error { return f.writeDataFrame(frame) } // WriteFrame writes a frame. 
func (f *Framer) WriteFrame(frame Frame) error { return frame.write(f) } func writeControlFrameHeader(w io.Writer, h ControlFrameHeader) error { if err := binary.Write(w, binary.BigEndian, 0x8000|h.version); err != nil { return err } if err := binary.Write(w, binary.BigEndian, h.frameType); err != nil { return err } flagsAndLength := uint32(h.Flags)<<24 | h.length if err := binary.Write(w, binary.BigEndian, flagsAndLength); err != nil { return err } return nil } func writeHeaderValueBlock(w io.Writer, h http.Header) (n int, err error) { n = 0 if err = binary.Write(w, binary.BigEndian, uint32(len(h))); err != nil { return } n += 2 for name, values := range h { if err = binary.Write(w, binary.BigEndian, uint32(len(name))); err != nil { return } n += 2 name = strings.ToLower(name) if _, err = io.WriteString(w, name); err != nil { return } n += len(name) v := strings.Join(values, headerValueSeparator) if err = binary.Write(w, binary.BigEndian, uint32(len(v))); err != nil { return } n += 2 if _, err = io.WriteString(w, v); err != nil { return } n += len(v) } return } func (f *Framer) writeSynStreamFrame(frame *SynStreamFrame) (err error) { if frame.StreamId == 0 { return &Error{ZeroStreamId, 0} } // Marshal the headers. var writer io.Writer = f.headerBuf if !f.headerCompressionDisabled { writer = f.headerCompressor } if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil { return } if !f.headerCompressionDisabled { f.headerCompressor.Flush() } // Set ControlFrameHeader. frame.CFHeader.version = Version frame.CFHeader.frameType = TypeSynStream frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 10) // Serialize frame to Writer. 
if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { return err } if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { return err } if err = binary.Write(f.w, binary.BigEndian, frame.AssociatedToStreamId); err != nil { return err } if err = binary.Write(f.w, binary.BigEndian, frame.Priority<<5); err != nil { return err } if err = binary.Write(f.w, binary.BigEndian, frame.Slot); err != nil { return err } if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil { return err } f.headerBuf.Reset() return nil } func (f *Framer) writeSynReplyFrame(frame *SynReplyFrame) (err error) { if frame.StreamId == 0 { return &Error{ZeroStreamId, 0} } // Marshal the headers. var writer io.Writer = f.headerBuf if !f.headerCompressionDisabled { writer = f.headerCompressor } if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil { return } if !f.headerCompressionDisabled { f.headerCompressor.Flush() } // Set ControlFrameHeader. frame.CFHeader.version = Version frame.CFHeader.frameType = TypeSynReply frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4) // Serialize frame to Writer. if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { return } if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { return } if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil { return } f.headerBuf.Reset() return } func (f *Framer) writeHeadersFrame(frame *HeadersFrame) (err error) { if frame.StreamId == 0 { return &Error{ZeroStreamId, 0} } // Marshal the headers. var writer io.Writer = f.headerBuf if !f.headerCompressionDisabled { writer = f.headerCompressor } if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil { return } if !f.headerCompressionDisabled { f.headerCompressor.Flush() } // Set ControlFrameHeader. frame.CFHeader.version = Version frame.CFHeader.frameType = TypeHeaders frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4) // Serialize frame to Writer. 
if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { return } if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { return } if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil { return } f.headerBuf.Reset() return } func (f *Framer) writeDataFrame(frame *DataFrame) (err error) { if frame.StreamId == 0 { return &Error{ZeroStreamId, 0} } if frame.StreamId&0x80000000 != 0 || len(frame.Data) > MaxDataLength { return &Error{InvalidDataFrame, frame.StreamId} } // Serialize frame to Writer. if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { return } flagsAndLength := uint32(frame.Flags)<<24 | uint32(len(frame.Data)) if err = binary.Write(f.w, binary.BigEndian, flagsAndLength); err != nil { return } if _, err = f.w.Write(frame.Data); err != nil { return } return nil }
9,602
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/spdystream
kubeflow_public_repos/fate-operator/vendor/github.com/docker/spdystream/spdy/dictionary.go
// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package spdy // headerDictionary is the dictionary sent to the zlib compressor/decompressor. var headerDictionary = []byte{ 0x00, 0x00, 0x00, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x00, 0x00, 0x00, 0x04, 0x68, 0x65, 0x61, 0x64, 0x00, 0x00, 0x00, 0x04, 0x70, 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x03, 0x70, 0x75, 0x74, 0x00, 0x00, 0x00, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x00, 0x00, 0x00, 0x05, 0x74, 0x72, 0x61, 0x63, 0x65, 0x00, 0x00, 0x00, 0x06, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x00, 0x00, 0x00, 0x0e, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x00, 0x00, 0x00, 0x0f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x0f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x00, 0x00, 0x00, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x00, 0x00, 0x00, 0x03, 0x61, 0x67, 0x65, 0x00, 0x00, 0x00, 0x05, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x00, 0x00, 0x00, 0x0d, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, 0x0d, 0x63, 0x61, 0x63, 0x68, 0x65, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x00, 0x00, 0x00, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x62, 0x61, 0x73, 0x65, 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x00, 0x00, 0x00, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 
0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x6d, 0x64, 0x35, 0x00, 0x00, 0x00, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x00, 0x00, 0x00, 0x04, 0x64, 0x61, 0x74, 0x65, 0x00, 0x00, 0x00, 0x04, 0x65, 0x74, 0x61, 0x67, 0x00, 0x00, 0x00, 0x06, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x00, 0x00, 0x00, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x00, 0x00, 0x00, 0x04, 0x66, 0x72, 0x6f, 0x6d, 0x00, 0x00, 0x00, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x08, 0x69, 0x66, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, 0x00, 0x00, 0x11, 0x69, 0x66, 0x2d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x2d, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x00, 0x00, 0x00, 0x0d, 0x69, 0x66, 0x2d, 0x6e, 0x6f, 0x6e, 0x65, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, 0x00, 0x00, 0x08, 0x69, 0x66, 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, 0x00, 0x13, 0x69, 0x66, 0x2d, 0x75, 0x6e, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x2d, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x00, 0x00, 0x00, 0x0d, 0x6c, 0x61, 0x73, 0x74, 0x2d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x00, 0x00, 0x00, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, 0x0c, 0x6d, 0x61, 0x78, 0x2d, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x73, 0x00, 0x00, 0x00, 0x06, 0x70, 0x72, 0x61, 0x67, 0x6d, 0x61, 0x00, 0x00, 0x00, 0x12, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, 0x00, 0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2d, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, 0x00, 0x07, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x72, 0x00, 0x00, 0x00, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x2d, 0x61, 0x66, 0x74, 0x65, 0x72, 0x00, 0x00, 0x00, 0x06, 
0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x00, 0x00, 0x00, 0x02, 0x74, 0x65, 0x00, 0x00, 0x00, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x00, 0x00, 0x00, 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2d, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x07, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x00, 0x00, 0x00, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x2d, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x00, 0x00, 0x00, 0x04, 0x76, 0x61, 0x72, 0x79, 0x00, 0x00, 0x00, 0x03, 0x76, 0x69, 0x61, 0x00, 0x00, 0x00, 0x07, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, 0x77, 0x77, 0x77, 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, 0x00, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x00, 0x00, 0x00, 0x03, 0x67, 0x65, 0x74, 0x00, 0x00, 0x00, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x00, 0x00, 0x00, 0x06, 0x32, 0x30, 0x30, 0x20, 0x4f, 0x4b, 0x00, 0x00, 0x00, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, 0x08, 0x48, 0x54, 0x54, 0x50, 0x2f, 0x31, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x03, 0x75, 0x72, 0x6c, 0x00, 0x00, 0x00, 0x06, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x00, 0x00, 0x00, 0x0a, 0x73, 0x65, 0x74, 0x2d, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x00, 0x00, 0x00, 0x0a, 0x6b, 0x65, 0x65, 0x70, 0x2d, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x00, 0x00, 0x00, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x31, 0x30, 0x30, 0x31, 0x30, 0x31, 0x32, 0x30, 0x31, 0x32, 0x30, 0x32, 0x32, 0x30, 0x35, 0x32, 0x30, 0x36, 0x33, 0x30, 0x30, 0x33, 0x30, 0x32, 0x33, 0x30, 0x33, 0x33, 0x30, 0x34, 0x33, 0x30, 0x35, 0x33, 0x30, 0x36, 0x33, 0x30, 0x37, 0x34, 0x30, 0x32, 0x34, 0x30, 0x35, 0x34, 0x30, 0x36, 0x34, 0x30, 0x37, 0x34, 0x30, 0x38, 0x34, 0x30, 0x39, 0x34, 0x31, 0x30, 0x34, 0x31, 0x31, 0x34, 0x31, 0x32, 0x34, 0x31, 0x33, 0x34, 0x31, 0x34, 0x34, 0x31, 0x35, 0x34, 0x31, 0x36, 0x34, 0x31, 0x37, 0x35, 0x30, 0x32, 0x35, 0x30, 0x34, 0x35, 0x30, 0x35, 0x32, 0x30, 0x33, 0x20, 0x4e, 0x6f, 0x6e, 0x2d, 0x41, 0x75, 0x74, 0x68, 0x6f, 
0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, 0x20, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x30, 0x34, 0x20, 0x4e, 0x6f, 0x20, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x33, 0x30, 0x31, 0x20, 0x4d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x50, 0x65, 0x72, 0x6d, 0x61, 0x6e, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x34, 0x30, 0x30, 0x20, 0x42, 0x61, 0x64, 0x20, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x34, 0x30, 0x31, 0x20, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x34, 0x30, 0x33, 0x20, 0x46, 0x6f, 0x72, 0x62, 0x69, 0x64, 0x64, 0x65, 0x6e, 0x34, 0x30, 0x34, 0x20, 0x4e, 0x6f, 0x74, 0x20, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x35, 0x30, 0x30, 0x20, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x20, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x20, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x35, 0x30, 0x31, 0x20, 0x4e, 0x6f, 0x74, 0x20, 0x49, 0x6d, 0x70, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x35, 0x30, 0x33, 0x20, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x20, 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x4a, 0x61, 0x6e, 0x20, 0x46, 0x65, 0x62, 0x20, 0x4d, 0x61, 0x72, 0x20, 0x41, 0x70, 0x72, 0x20, 0x4d, 0x61, 0x79, 0x20, 0x4a, 0x75, 0x6e, 0x20, 0x4a, 0x75, 0x6c, 0x20, 0x41, 0x75, 0x67, 0x20, 0x53, 0x65, 0x70, 0x74, 0x20, 0x4f, 0x63, 0x74, 0x20, 0x4e, 0x6f, 0x76, 0x20, 0x44, 0x65, 0x63, 0x20, 0x30, 0x30, 0x3a, 0x30, 0x30, 0x3a, 0x30, 0x30, 0x20, 0x4d, 0x6f, 0x6e, 0x2c, 0x20, 0x54, 0x75, 0x65, 0x2c, 0x20, 0x57, 0x65, 0x64, 0x2c, 0x20, 0x54, 0x68, 0x75, 0x2c, 0x20, 0x46, 0x72, 0x69, 0x2c, 0x20, 0x53, 0x61, 0x74, 0x2c, 0x20, 0x53, 0x75, 0x6e, 0x2c, 0x20, 0x47, 0x4d, 0x54, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x65, 0x64, 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x68, 0x74, 0x6d, 0x6c, 0x2c, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x70, 0x6e, 0x67, 0x2c, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x6a, 0x70, 0x67, 0x2c, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x67, 0x69, 0x66, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x2f, 0x78, 0x6d, 0x6c, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78, 0x68, 0x74, 0x6d, 0x6c, 0x2b, 0x78, 0x6d, 0x6c, 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x6a, 0x61, 0x76, 0x61, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x2c, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x6d, 0x61, 0x78, 0x2d, 0x61, 0x67, 0x65, 0x3d, 0x67, 0x7a, 0x69, 0x70, 0x2c, 0x64, 0x65, 0x66, 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x73, 0x64, 0x63, 0x68, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x3d, 0x75, 0x74, 0x66, 0x2d, 0x38, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x3d, 0x69, 0x73, 0x6f, 0x2d, 0x38, 0x38, 0x35, 0x39, 0x2d, 0x31, 0x2c, 0x75, 0x74, 0x66, 0x2d, 0x2c, 0x2a, 0x2c, 0x65, 0x6e, 0x71, 0x3d, 0x30, 0x2e, }
9,603
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker-credential-helpers/LICENSE
Copyright (c) 2016 David Calavera Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
9,604
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker-credential-helpers
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go
package credentials // Helper is the interface a credentials store helper must implement. type Helper interface { // Add appends credentials to the store. Add(*Credentials) error // Delete removes credentials from the store. Delete(serverURL string) error // Get retrieves credentials from the store. // It returns username and secret as strings. Get(serverURL string) (string, string, error) // List returns the stored serverURLs and their associated usernames. List() (map[string]string, error) }
9,605
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker-credential-helpers
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker-credential-helpers/credentials/error.go
package credentials

const (
	// ErrCredentialsNotFound standardizes the not found error, so every helper returns
	// the same message and docker can handle it properly.
	errCredentialsNotFoundMessage = "credentials not found in native keychain"

	// ErrCredentialsMissingServerURL and ErrCredentialsMissingUsername standardize
	// invalid credentials or credentials management operations
	errCredentialsMissingServerURLMessage = "no credentials server URL"
	errCredentialsMissingUsernameMessage  = "no credentials username"
)

// errCredentialsNotFound represents an error
// raised when credentials are not in the store.
type errCredentialsNotFound struct{}

// Error returns the standard error message
// for when the credentials are not in the store.
func (errCredentialsNotFound) Error() string {
	return errCredentialsNotFoundMessage
}

// NewErrCredentialsNotFound creates a new error
// for when the credentials are not in the store.
func NewErrCredentialsNotFound() error {
	return errCredentialsNotFound{}
}

// IsErrCredentialsNotFound returns true if the error
// was caused by not having a set of credentials in a store.
func IsErrCredentialsNotFound(err error) bool {
	_, ok := err.(errCredentialsNotFound)
	return ok
}

// IsErrCredentialsNotFoundMessage returns true if the error
// was caused by not having a set of credentials in a store.
//
// This function helps to check messages returned by an
// external program via its standard output. The message-based predicates
// exist because helpers run as separate processes: only the error TEXT
// crosses the process boundary (see client.Get et al.), never the typed
// error values.
func IsErrCredentialsNotFoundMessage(err string) bool {
	return err == errCredentialsNotFoundMessage
}

// errCredentialsMissingServerURL represents an error raised
// when the credentials object has no server URL or when no
// server URL is provided to a credentials operation requiring
// one.
type errCredentialsMissingServerURL struct{}

// Error returns the standard message for a missing server URL.
func (errCredentialsMissingServerURL) Error() string {
	return errCredentialsMissingServerURLMessage
}

// errCredentialsMissingUsername represents an error raised
// when the credentials object has no username or when no
// username is provided to a credentials operation requiring
// one.
type errCredentialsMissingUsername struct{}

// Error returns the standard message for a missing username.
func (errCredentialsMissingUsername) Error() string {
	return errCredentialsMissingUsernameMessage
}

// NewErrCredentialsMissingServerURL creates a new error for
// errCredentialsMissingServerURL.
func NewErrCredentialsMissingServerURL() error {
	return errCredentialsMissingServerURL{}
}

// NewErrCredentialsMissingUsername creates a new error for
// errCredentialsMissingUsername.
func NewErrCredentialsMissingUsername() error {
	return errCredentialsMissingUsername{}
}

// IsCredentialsMissingServerURL returns true if the error
// was an errCredentialsMissingServerURL.
func IsCredentialsMissingServerURL(err error) bool {
	_, ok := err.(errCredentialsMissingServerURL)
	return ok
}

// IsCredentialsMissingServerURLMessage checks for an
// errCredentialsMissingServerURL in the error message.
func IsCredentialsMissingServerURLMessage(err string) bool {
	return err == errCredentialsMissingServerURLMessage
}

// IsCredentialsMissingUsername returns true if the error
// was an errCredentialsMissingUsername.
func IsCredentialsMissingUsername(err error) bool {
	_, ok := err.(errCredentialsMissingUsername)
	return ok
}

// IsCredentialsMissingUsernameMessage checks for an
// errCredentialsMissingUsername in the error message.
func IsCredentialsMissingUsernameMessage(err string) bool {
	return err == errCredentialsMissingUsernameMessage
}
9,606
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker-credential-helpers
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go
package credentials

import (
	"bufio"
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"strings"
)

// Credentials holds the information shared between docker and the credentials store.
type Credentials struct {
	ServerURL string
	Username  string
	Secret    string
}

// isValid checks the integrity of Credentials object such that no credentials lack
// a server URL or a username.
// It returns whether the credentials are valid and the error if it isn't.
// error values can be errCredentialsMissingServerURL or errCredentialsMissingUsername
func (c *Credentials) isValid() (bool, error) {
	if len(c.ServerURL) == 0 {
		return false, NewErrCredentialsMissingServerURL()
	}

	if len(c.Username) == 0 {
		return false, NewErrCredentialsMissingUsername()
	}

	return true, nil
}

// CredsLabel holds the way Docker credentials should be labeled as such in credentials stores that allow labelling.
// That label allows to filter out non-Docker credentials too at lookup/search in macOS keychain,
// Windows credentials manager and Linux libsecret. Default value is "Docker Credentials"
var CredsLabel = "Docker Credentials"

// SetCredsLabel is a simple setter for CredsLabel
func SetCredsLabel(label string) {
	CredsLabel = label
}

// Serve initializes the credentials helper and parses the action argument.
// This function is designed to be called from a command line interface.
// It uses os.Args[1] as the key for the action.
// It uses os.Stdin as input and os.Stdout as output.
// This function terminates the program with os.Exit(1) if there is an error.
func Serve(helper Helper) {
	var err error
	if len(os.Args) != 2 {
		err = fmt.Errorf("Usage: %s <store|get|erase|list|version>", os.Args[0])
	}

	if err == nil {
		err = HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout)
	}

	if err != nil {
		// Errors are reported on stdout (not stderr): the docker client
		// reads this stream and matches the standardized messages.
		fmt.Fprintf(os.Stdout, "%v\n", err)
		os.Exit(1)
	}
}

// HandleCommand uses a helper and a key to run a credential action.
func HandleCommand(helper Helper, key string, in io.Reader, out io.Writer) error {
	switch key {
	case "store":
		return Store(helper, in)
	case "get":
		return Get(helper, in, out)
	case "erase":
		return Erase(helper, in)
	case "list":
		return List(helper, out)
	case "version":
		return PrintVersion(out)
	}
	return fmt.Errorf("Unknown credential action `%s`", key)
}

// readInput drains reader line by line into a single buffer.
// A bare io.EOF from the scanner is not treated as an error because the
// protocol payload is short and may be unterminated.
// It factors out the input-reading boilerplate previously duplicated by
// Store, Get and Erase.
func readInput(reader io.Reader) (*bytes.Buffer, error) {
	scanner := bufio.NewScanner(reader)

	buffer := new(bytes.Buffer)
	for scanner.Scan() {
		buffer.Write(scanner.Bytes())
	}

	if err := scanner.Err(); err != nil && err != io.EOF {
		return nil, err
	}
	return buffer, nil
}

// readServerURL reads a server URL payload from reader, trims surrounding
// whitespace, and rejects an empty result with
// errCredentialsMissingServerURL.
func readServerURL(reader io.Reader) (string, error) {
	buffer, err := readInput(reader)
	if err != nil {
		return "", err
	}

	serverURL := strings.TrimSpace(buffer.String())
	if len(serverURL) == 0 {
		return "", NewErrCredentialsMissingServerURL()
	}
	return serverURL, nil
}

// Store uses a helper and an input reader to save credentials.
// The reader must contain the JSON serialization of a Credentials struct.
func Store(helper Helper, reader io.Reader) error {
	buffer, err := readInput(reader)
	if err != nil {
		return err
	}

	var creds Credentials
	if err := json.NewDecoder(buffer).Decode(&creds); err != nil {
		return err
	}

	if ok, err := creds.isValid(); !ok {
		return err
	}

	return helper.Add(&creds)
}

// Get retrieves the credentials for a given server url.
// The reader must contain the server URL to search.
// The writer is used to write the JSON serialization of the credentials.
func Get(helper Helper, reader io.Reader, writer io.Writer) error {
	serverURL, err := readServerURL(reader)
	if err != nil {
		return err
	}

	username, secret, err := helper.Get(serverURL)
	if err != nil {
		return err
	}

	resp := Credentials{
		ServerURL: serverURL,
		Username:  username,
		Secret:    secret,
	}

	buffer := new(bytes.Buffer)
	if err := json.NewEncoder(buffer).Encode(resp); err != nil {
		return err
	}

	fmt.Fprint(writer, buffer.String())
	return nil
}

// Erase removes credentials from the store.
// The reader must contain the server URL to remove.
func Erase(helper Helper, reader io.Reader) error {
	serverURL, err := readServerURL(reader)
	if err != nil {
		return err
	}

	return helper.Delete(serverURL)
}

// List returns all the serverURLs of keys in
// the OS store as a list of strings
func List(helper Helper, writer io.Writer) error {
	accts, err := helper.List()
	if err != nil {
		return err
	}
	return json.NewEncoder(writer).Encode(accts)
}

// PrintVersion outputs the current version.
func PrintVersion(writer io.Writer) error {
	fmt.Fprintln(writer, Version)
	return nil
}
9,607
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker-credential-helpers
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker-credential-helpers/credentials/version.go
package credentials // Version holds a string describing the current version const Version = "0.6.3"
9,608
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker-credential-helpers
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker-credential-helpers/client/client.go
package client

import (
	"bytes"
	"encoding/json"
	"fmt"
	"strings"

	"github.com/docker/docker-credential-helpers/credentials"
)

// isValidCredsMessage checks if 'msg' contains invalid credentials error message.
// It returns whether the logs are free of invalid credentials errors and the error if it isn't.
// error values can be errCredentialsMissingServerURL or errCredentialsMissingUsername.
// The helper runs as a separate process, so its typed errors arrive here only
// as text on stdout; these message predicates recover the typed errors.
func isValidCredsMessage(msg string) error {
	if credentials.IsCredentialsMissingServerURLMessage(msg) {
		return credentials.NewErrCredentialsMissingServerURL()
	}
	if credentials.IsCredentialsMissingUsernameMessage(msg) {
		return credentials.NewErrCredentialsMissingUsername()
	}
	return nil
}

// Store uses an external program to save credentials.
// The Credentials struct is sent to the helper as JSON on stdin.
func Store(program ProgramFunc, creds *credentials.Credentials) error {
	cmd := program("store")

	buffer := new(bytes.Buffer)
	if err := json.NewEncoder(buffer).Encode(creds); err != nil {
		return err
	}
	cmd.Input(buffer)

	out, err := cmd.Output()
	if err != nil {
		// On failure the helper's stdout holds the error text; upgrade
		// known messages to typed errors before wrapping.
		t := strings.TrimSpace(string(out))
		if isValidErr := isValidCredsMessage(t); isValidErr != nil {
			err = isValidErr
		}
		return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, t)
	}

	return nil
}

// Get executes an external program to get the credentials from a native store.
// The server URL goes to the helper on stdin; the response is JSON-decoded
// into a Credentials whose ServerURL is pre-filled with the query.
func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error) {
	cmd := program("get")
	cmd.Input(strings.NewReader(serverURL))

	out, err := cmd.Output()
	if err != nil {
		t := strings.TrimSpace(string(out))

		// "not found" is a distinct, non-wrapped sentinel so callers can
		// detect it with credentials.IsErrCredentialsNotFound.
		if credentials.IsErrCredentialsNotFoundMessage(t) {
			return nil, credentials.NewErrCredentialsNotFound()
		}

		if isValidErr := isValidCredsMessage(t); isValidErr != nil {
			err = isValidErr
		}

		return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, t)
	}

	resp := &credentials.Credentials{
		ServerURL: serverURL,
	}

	if err := json.NewDecoder(bytes.NewReader(out)).Decode(resp); err != nil {
		return nil, err
	}

	return resp, nil
}

// Erase executes a program to remove the server credentials from the native store.
func Erase(program ProgramFunc, serverURL string) error {
	cmd := program("erase")
	cmd.Input(strings.NewReader(serverURL))
	out, err := cmd.Output()
	if err != nil {
		t := strings.TrimSpace(string(out))

		if isValidErr := isValidCredsMessage(t); isValidErr != nil {
			err = isValidErr
		}

		return fmt.Errorf("error erasing credentials - err: %v, out: `%s`", err, t)
	}

	return nil
}

// List executes a program to list server credentials in the native store.
// A placeholder payload is sent because the helper protocol always reads
// stdin, even though `list` takes no input.
func List(program ProgramFunc) (map[string]string, error) {
	cmd := program("list")
	cmd.Input(strings.NewReader("unused"))
	out, err := cmd.Output()
	if err != nil {
		t := strings.TrimSpace(string(out))

		if isValidErr := isValidCredsMessage(t); isValidErr != nil {
			err = isValidErr
		}

		return nil, fmt.Errorf("error listing credentials - err: %v, out: `%s`", err, t)
	}

	var resp map[string]string
	if err = json.NewDecoder(bytes.NewReader(out)).Decode(&resp); err != nil {
		return nil, err
	}

	return resp, nil
}
9,609
0
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker-credential-helpers
kubeflow_public_repos/fate-operator/vendor/github.com/docker/docker-credential-helpers/client/command.go
package client

import (
	"fmt"
	"io"
	"os"
	"os/exec"
)

// Program is an interface to execute external programs.
type Program interface {
	Output() ([]byte, error)
	Input(in io.Reader)
}

// ProgramFunc is a type of function that initializes programs based on arguments.
type ProgramFunc func(args ...string) Program

// NewShellProgramFunc creates programs that are executed in a Shell.
func NewShellProgramFunc(name string) ProgramFunc {
	return NewShellProgramFuncWithEnv(name, nil)
}

// NewShellProgramFuncWithEnv creates programs that are executed in a Shell with environment variables
func NewShellProgramFuncWithEnv(name string, env *map[string]string) ProgramFunc {
	return func(args ...string) Program {
		return &Shell{cmd: createProgramCmdRedirectErr(name, args, env)}
	}
}

// createProgramCmdRedirectErr builds the exec.Cmd for a helper invocation.
// The child inherits the parent environment plus any extra variables, and
// its stderr is forwarded to ours so helper diagnostics remain visible.
func createProgramCmdRedirectErr(commandName string, args []string, env *map[string]string) *exec.Cmd {
	environ := os.Environ()
	if env != nil {
		for name, value := range *env {
			environ = append(environ, fmt.Sprintf("%s=%s", name, value))
		}
	}

	c := exec.Command(commandName, args...)
	c.Env = environ
	c.Stderr = os.Stderr
	return c
}

// Shell invokes shell commands to talk with a remote credentials helper.
type Shell struct {
	cmd *exec.Cmd
}

// Output returns responses from the remote credentials helper.
func (s *Shell) Output() ([]byte, error) {
	return s.cmd.Output()
}

// Input sets the input to send to a remote credentials helper.
func (s *Shell) Input(in io.Reader) {
	s.cmd.Stdin = in
}
9,610
0
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
// +build !appengine,!js,windows

package logrus

import (
	"io"
	"os"

	"golang.org/x/sys/windows"
)

// checkIfTerminal reports whether w is backed by a Windows console.
// NOTE: despite its name this is not a pure check — on success it also
// turns on ENABLE_VIRTUAL_TERMINAL_PROCESSING for that console (so ANSI
// escape sequences are interpreted; presumably for colored output —
// supported only on consoles that accept this mode).
// Any non-*os.File writer, or a file without a console mode, yields false.
func checkIfTerminal(w io.Writer) bool {
	switch v := w.(type) {
	case *os.File:
		handle := windows.Handle(v.Fd())
		var mode uint32
		if err := windows.GetConsoleMode(handle, &mode); err != nil {
			// Not a console (e.g. a pipe or regular file).
			return false
		}
		mode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
		if err := windows.SetConsoleMode(handle, mode); err != nil {
			return false
		}
		return true
	}
	return false
}
9,611
0
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen/logrus/buffer_pool.go
package logrus

import (
	"bytes"
	"sync"
)

// bufferPool is the currently-active pool; swap it with SetBufferPool.
var bufferPool BufferPool

// BufferPool abstracts a reusable supply of byte buffers used while
// formatting log entries.
type BufferPool interface {
	Put(*bytes.Buffer)
	Get() *bytes.Buffer
}

// defaultPool adapts a sync.Pool to the BufferPool interface.
type defaultPool struct {
	pool *sync.Pool
}

// Put hands buf back to the underlying sync.Pool.
func (p *defaultPool) Put(buf *bytes.Buffer) {
	p.pool.Put(buf)
}

// Get fetches a buffer from the underlying sync.Pool.
func (p *defaultPool) Get() *bytes.Buffer {
	return p.pool.Get().(*bytes.Buffer)
}

// getBuffer obtains a buffer from the active pool.
func getBuffer() *bytes.Buffer {
	return bufferPool.Get()
}

// putBuffer resets buf and returns it to the active pool for reuse.
func putBuffer(buf *bytes.Buffer) {
	buf.Reset()
	bufferPool.Put(buf)
}

// SetBufferPool allows to replace the default logrus buffer pool
// to better meets the specific needs of an application.
func SetBufferPool(bp BufferPool) {
	bufferPool = bp
}

func init() {
	newBuffer := func() interface{} {
		return new(bytes.Buffer)
	}
	SetBufferPool(&defaultPool{pool: &sync.Pool{New: newBuffer}})
}
9,612
0
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen/logrus/terminal_check_js.go
// +build js

package logrus

// isTerminal always reports false when compiled for GOOS=js:
// there is no terminal to attach to in a JavaScript environment.
func isTerminal(fd int) bool {
	return false
}
9,613
0
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
// +build linux aix
// +build !js

package logrus

import "golang.org/x/sys/unix"

// ioctlReadTermios is the ioctl request used to read terminal attributes
// on these platforms (TCGETS).
const ioctlReadTermios = unix.TCGETS

// isTerminal reports whether fd refers to a terminal: the termios-read
// ioctl succeeds only on tty file descriptors.
func isTerminal(fd int) bool {
	_, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
	return err == nil
}
9,614
0
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen/logrus/go.mod
module github.com/sirupsen/logrus require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/stretchr/testify v1.2.2 golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 ) go 1.13
9,615
0
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen/logrus/entry.go
package logrus

import (
	"bytes"
	"context"
	"fmt"
	"os"
	"reflect"
	"runtime"
	"strings"
	"sync"
	"time"
)

var (
	// qualified package name, cached at first use
	logrusPackage string

	// Positions in the call stack when tracing to report the calling method
	minimumCallerDepth int

	// Used for caller information initialisation
	callerInitOnce sync.Once
)

const (
	// maximumCallerDepth bounds how far getCaller walks up the stack.
	maximumCallerDepth int = 25
	// knownLogrusFrames is the number of logrus-internal frames to skip
	// once the package name has been cached.
	knownLogrusFrames int = 4
)

func init() {
	// start at the bottom of the stack before the package-name cache is primed
	minimumCallerDepth = 1
}

// Defines the key when adding errors using WithError.
var ErrorKey = "error"

// An entry is the final or intermediate Logrus logging entry. It contains all
// the fields passed with WithField{,s}. It's finally logged when Trace, Debug,
// Info, Warn, Error, Fatal or Panic is called on it. These objects can be
// reused and passed around as much as you wish to avoid field duplication.
type Entry struct {
	Logger *Logger

	// Contains all the fields set by the user.
	Data Fields

	// Time at which the log entry was created
	Time time.Time

	// Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic
	// This field will be set on entry firing and the value will be equal to the one in Logger struct field.
	Level Level

	// Calling method, with package name
	Caller *runtime.Frame

	// Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic
	Message string

	// When formatter is called in entry.log(), a Buffer may be set to entry
	Buffer *bytes.Buffer

	// Contains the context set by the user. Useful for hook processing etc.
	Context context.Context

	// err may contain a field formatting error
	err string
}

// NewEntry returns a fresh Entry bound to logger with an empty field map.
func NewEntry(logger *Logger) *Entry {
	return &Entry{
		Logger: logger,
		// Default is three fields, plus one optional.  Give a little extra room.
		Data: make(Fields, 6),
	}
}

// Returns the bytes representation of this entry from the formatter.
func (entry *Entry) Bytes() ([]byte, error) {
	return entry.Logger.Formatter.Format(entry)
}

// Returns the string representation from the reader and ultimately the
// formatter.
func (entry *Entry) String() (string, error) {
	serialized, err := entry.Bytes()
	if err != nil {
		return "", err
	}
	str := string(serialized)
	return str, nil
}

// Add an error as single field (using the key defined in ErrorKey) to the Entry.
func (entry *Entry) WithError(err error) *Entry {
	return entry.WithField(ErrorKey, err)
}

// Add a context to the Entry.
// Returns a new Entry; the field map is copied so the original entry is
// never mutated.
func (entry *Entry) WithContext(ctx context.Context) *Entry {
	dataCopy := make(Fields, len(entry.Data))
	for k, v := range entry.Data {
		dataCopy[k] = v
	}
	return &Entry{Logger: entry.Logger, Data: dataCopy, Time: entry.Time, err: entry.err, Context: ctx}
}

// Add a single field to the Entry.
func (entry *Entry) WithField(key string, value interface{}) *Entry {
	return entry.WithFields(Fields{key: value})
}

// Add a map of fields to the Entry.
// Function values (and pointers to functions) are rejected and recorded in
// the entry's err string instead of being stored — presumably because they
// cannot be rendered by formatters. All other values are copied into a new
// Fields map; the receiver is left unmodified.
func (entry *Entry) WithFields(fields Fields) *Entry {
	data := make(Fields, len(entry.Data)+len(fields))
	for k, v := range entry.Data {
		data[k] = v
	}
	fieldErr := entry.err
	for k, v := range fields {
		isErrField := false
		if t := reflect.TypeOf(v); t != nil {
			switch t.Kind() {
			case reflect.Func:
				isErrField = true
			case reflect.Ptr:
				isErrField = t.Elem().Kind() == reflect.Func
			}
		}
		if isErrField {
			tmp := fmt.Sprintf("can not add field %q", k)
			if fieldErr != "" {
				fieldErr = entry.err + ", " + tmp
			} else {
				fieldErr = tmp
			}
		} else {
			data[k] = v
		}
	}
	return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context}
}

// Overrides the time of the Entry.
// WithTime overrides the time of the Entry, returning a new Entry with a
// copied field map.
func (entry *Entry) WithTime(t time.Time) *Entry {
	dataCopy := make(Fields, len(entry.Data))
	for k, v := range entry.Data {
		dataCopy[k] = v
	}
	return &Entry{Logger: entry.Logger, Data: dataCopy, Time: t, err: entry.err, Context: entry.Context}
}

// getPackageName reduces a fully qualified function name to the package name.
// There really ought to be a better way...
func getPackageName(f string) string {
	for {
		lastPeriod := strings.LastIndex(f, ".")
		lastSlash := strings.LastIndex(f, "/")
		if lastPeriod > lastSlash {
			f = f[:lastPeriod]
		} else {
			break
		}
	}

	return f
}

// getCaller retrieves the name of the first non-logrus calling function.
func getCaller() *runtime.Frame {
	// cache this package's fully-qualified name
	callerInitOnce.Do(func() {
		pcs := make([]uintptr, maximumCallerDepth)
		_ = runtime.Callers(0, pcs)

		// dynamic get the package name and the minimum caller depth
		for i := 0; i < maximumCallerDepth; i++ {
			funcName := runtime.FuncForPC(pcs[i]).Name()
			if strings.Contains(funcName, "getCaller") {
				logrusPackage = getPackageName(funcName)
				break
			}
		}

		minimumCallerDepth = knownLogrusFrames
	})

	// Restrict the lookback frames to avoid runaway lookups
	pcs := make([]uintptr, maximumCallerDepth)
	depth := runtime.Callers(minimumCallerDepth, pcs)
	frames := runtime.CallersFrames(pcs[:depth])

	for f, again := frames.Next(); again; f, again = frames.Next() {
		pkg := getPackageName(f.Function)

		// If the caller isn't part of this package, we're done
		if pkg != logrusPackage {
			return &f //nolint:scopelint
		}
	}

	// if we got here, we failed to find the caller's context
	return nil
}

// HasCaller reports whether caller information was captured for this entry.
func (entry Entry) HasCaller() (has bool) {
	return entry.Logger != nil &&
		entry.Logger.ReportCaller &&
		entry.Caller != nil
}

// log renders and writes the entry at the given level, firing hooks first.
// This function is not declared with a pointer value because otherwise
// race conditions will occur when using multiple goroutines
func (entry Entry) log(level Level, msg string) {
	var buffer *bytes.Buffer

	// Default to now, but allow users to override if they want.
	//
	// We don't have to worry about polluting future calls to Entry#log()
	// with this assignment because this function is declared with a
	// non-pointer receiver.
	if entry.Time.IsZero() {
		entry.Time = time.Now()
	}

	entry.Level = level
	entry.Message = msg
	// Caller lookup reads logger configuration, so it happens under the
	// logger's mutex.
	entry.Logger.mu.Lock()
	if entry.Logger.ReportCaller {
		entry.Caller = getCaller()
	}
	entry.Logger.mu.Unlock()

	entry.fireHooks()

	// Borrow a buffer from the pool for the formatter; the deferred func
	// returns it and clears the entry's reference even on panic.
	buffer = getBuffer()
	defer func() {
		entry.Buffer = nil
		putBuffer(buffer)
	}()
	buffer.Reset()
	entry.Buffer = buffer

	entry.write()

	entry.Buffer = nil

	// To avoid Entry#log() returning a value that only would make sense for
	// panic() to use in Entry#Panic(), we avoid the allocation by checking
	// directly here.
	if level <= PanicLevel {
		panic(&entry)
	}
}

// fireHooks runs the logger's hooks for this entry's level; hook failures are
// reported to stderr rather than propagated.
func (entry *Entry) fireHooks() {
	entry.Logger.mu.Lock()
	defer entry.Logger.mu.Unlock()
	err := entry.Logger.Hooks.Fire(entry.Level, entry)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
	}
}

// write formats the entry and writes it to the logger's output under the
// logger's mutex; formatting/write errors are reported to stderr.
func (entry *Entry) write() {
	entry.Logger.mu.Lock()
	defer entry.Logger.mu.Unlock()
	serialized, err := entry.Logger.Formatter.Format(entry)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
		return
	}
	if _, err = entry.Logger.Out.Write(serialized); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
	}
}

// Log logs at the given level if that level is enabled on the logger.
func (entry *Entry) Log(level Level, args ...interface{}) {
	if entry.Logger.IsLevelEnabled(level) {
		entry.log(level, fmt.Sprint(args...))
	}
}

func (entry *Entry) Trace(args ...interface{}) {
	entry.Log(TraceLevel, args...)
}

func (entry *Entry) Debug(args ...interface{}) {
	entry.Log(DebugLevel, args...)
}

func (entry *Entry) Print(args ...interface{}) {
	entry.Info(args...)
}

func (entry *Entry) Info(args ...interface{}) {
	entry.Log(InfoLevel, args...)
}

func (entry *Entry) Warn(args ...interface{}) {
	entry.Log(WarnLevel, args...)
}

func (entry *Entry) Warning(args ...interface{}) {
	entry.Warn(args...)
}

func (entry *Entry) Error(args ...interface{}) {
	entry.Log(ErrorLevel, args...)
}

// Fatal logs and then terminates the process via the logger's Exit(1).
func (entry *Entry) Fatal(args ...interface{}) {
	entry.Log(FatalLevel, args...)
	entry.Logger.Exit(1)
}

// Panic logs and then panics with the formatted message.
func (entry *Entry) Panic(args ...interface{}) {
	entry.Log(PanicLevel, args...)
	panic(fmt.Sprint(args...))
}

// Entry Printf family functions

func (entry *Entry) Logf(level Level, format string, args ...interface{}) {
	if entry.Logger.IsLevelEnabled(level) {
		entry.Log(level, fmt.Sprintf(format, args...))
	}
}

func (entry *Entry) Tracef(format string, args ...interface{}) {
	entry.Logf(TraceLevel, format, args...)
}

func (entry *Entry) Debugf(format string, args ...interface{}) {
	entry.Logf(DebugLevel, format, args...)
}

func (entry *Entry) Infof(format string, args ...interface{}) {
	entry.Logf(InfoLevel, format, args...)
}

func (entry *Entry) Printf(format string, args ...interface{}) {
	entry.Infof(format, args...)
}

func (entry *Entry) Warnf(format string, args ...interface{}) {
	entry.Logf(WarnLevel, format, args...)
}

func (entry *Entry) Warningf(format string, args ...interface{}) {
	entry.Warnf(format, args...)
}

func (entry *Entry) Errorf(format string, args ...interface{}) {
	entry.Logf(ErrorLevel, format, args...)
}

func (entry *Entry) Fatalf(format string, args ...interface{}) {
	entry.Logf(FatalLevel, format, args...)
	entry.Logger.Exit(1)
}

func (entry *Entry) Panicf(format string, args ...interface{}) {
	entry.Logf(PanicLevel, format, args...)
}

// Entry Println family functions

func (entry *Entry) Logln(level Level, args ...interface{}) {
	if entry.Logger.IsLevelEnabled(level) {
		entry.Log(level, entry.sprintlnn(args...))
	}
}

func (entry *Entry) Traceln(args ...interface{}) {
	entry.Logln(TraceLevel, args...)
}

func (entry *Entry) Debugln(args ...interface{}) {
	entry.Logln(DebugLevel, args...)
}

func (entry *Entry) Infoln(args ...interface{}) {
	entry.Logln(InfoLevel, args...)
}

func (entry *Entry) Println(args ...interface{}) {
	entry.Infoln(args...)
}

func (entry *Entry) Warnln(args ...interface{}) {
	entry.Logln(WarnLevel, args...)
}

func (entry *Entry) Warningln(args ...interface{}) {
	entry.Warnln(args...)
}

func (entry *Entry) Errorln(args ...interface{}) {
	entry.Logln(ErrorLevel, args...)
}

func (entry *Entry) Fatalln(args ...interface{}) {
	entry.Logln(FatalLevel, args...)
	entry.Logger.Exit(1)
}

func (entry *Entry) Panicln(args ...interface{}) {
	entry.Logln(PanicLevel, args...)
}

// sprintlnn => Sprint no newline. This is to get the behavior of how
// fmt.Sprintln where spaces are always added between operands, regardless of
// their type. Instead of vendoring the Sprintln implementation to spare a
// string allocation, we do the simplest thing.
func (entry *Entry) sprintlnn(args ...interface{}) string {
	msg := fmt.Sprintln(args...)
	return msg[:len(msg)-1]
}
9,616
0
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen/logrus/logrus.go
package logrus

import (
	"fmt"
	"log"
	"strings"
)

// Fields type, used to pass to `WithFields`.
type Fields map[string]interface{}

// Level type
type Level uint32

// Convert the Level to a string. E.g. PanicLevel becomes "panic".
func (level Level) String() string {
	if b, err := level.MarshalText(); err == nil {
		return string(b)
	} else {
		return "unknown"
	}
}

// ParseLevel takes a string level and returns the Logrus log level constant.
// Matching is case-insensitive; "warn" and "warning" are both accepted.
func ParseLevel(lvl string) (Level, error) {
	switch strings.ToLower(lvl) {
	case "panic":
		return PanicLevel, nil
	case "fatal":
		return FatalLevel, nil
	case "error":
		return ErrorLevel, nil
	case "warn", "warning":
		return WarnLevel, nil
	case "info":
		return InfoLevel, nil
	case "debug":
		return DebugLevel, nil
	case "trace":
		return TraceLevel, nil
	}

	var l Level
	return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
}

// UnmarshalText implements encoding.TextUnmarshaler.
func (level *Level) UnmarshalText(text []byte) error {
	l, err := ParseLevel(string(text))
	if err != nil {
		return err
	}

	*level = l

	return nil
}

// MarshalText implements encoding.TextMarshaler. Note that WarnLevel
// marshals as "warning", mirroring ParseLevel.
func (level Level) MarshalText() ([]byte, error) {
	switch level {
	case TraceLevel:
		return []byte("trace"), nil
	case DebugLevel:
		return []byte("debug"), nil
	case InfoLevel:
		return []byte("info"), nil
	case WarnLevel:
		return []byte("warning"), nil
	case ErrorLevel:
		return []byte("error"), nil
	case FatalLevel:
		return []byte("fatal"), nil
	case PanicLevel:
		return []byte("panic"), nil
	}

	return nil, fmt.Errorf("not a valid logrus level %d", level)
}

// A constant exposing all logging levels
var AllLevels = []Level{
	PanicLevel,
	FatalLevel,
	ErrorLevel,
	WarnLevel,
	InfoLevel,
	DebugLevel,
	TraceLevel,
}

// These are the different logging levels. You can set the logging level to log
// on your instance of logger, obtained with `logrus.New()`.
const (
	// PanicLevel level, highest level of severity. Logs and then calls panic with the
	// message passed to Debug, Info, ...
	PanicLevel Level = iota
	// FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the
	// logging level is set to Panic.
	FatalLevel
	// ErrorLevel level. Logs. Used for errors that should definitely be noted.
	// Commonly used for hooks to send errors to an error tracking service.
	ErrorLevel
	// WarnLevel level. Non-critical entries that deserve eyes.
	WarnLevel
	// InfoLevel level. General operational entries about what's going on inside the
	// application.
	InfoLevel
	// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
	DebugLevel
	// TraceLevel level. Designates finer-grained informational events than the Debug.
	TraceLevel
)

// Won't compile if StdLogger can't be realized by a log.Logger
var (
	_ StdLogger = &log.Logger{}
	_ StdLogger = &Entry{}
	_ StdLogger = &Logger{}
)

// StdLogger is what your logrus-enabled library should take, that way
// it'll accept a stdlib logger and a logrus logger. There's no standard
// interface, this is the closest we get, unfortunately.
type StdLogger interface {
	Print(...interface{})
	Printf(string, ...interface{})
	Println(...interface{})

	Fatal(...interface{})
	Fatalf(string, ...interface{})
	Fatalln(...interface{})

	Panic(...interface{})
	Panicf(string, ...interface{})
	Panicln(...interface{})
}

// The FieldLogger interface generalizes the Entry and Logger types
type FieldLogger interface {
	WithField(key string, value interface{}) *Entry
	WithFields(fields Fields) *Entry
	WithError(err error) *Entry

	Debugf(format string, args ...interface{})
	Infof(format string, args ...interface{})
	Printf(format string, args ...interface{})
	Warnf(format string, args ...interface{})
	Warningf(format string, args ...interface{})
	Errorf(format string, args ...interface{})
	Fatalf(format string, args ...interface{})
	Panicf(format string, args ...interface{})

	Debug(args ...interface{})
	Info(args ...interface{})
	Print(args ...interface{})
	Warn(args ...interface{})
	Warning(args ...interface{})
	Error(args ...interface{})
	Fatal(args ...interface{})
	Panic(args ...interface{})

	Debugln(args ...interface{})
	Infoln(args ...interface{})
	Println(args ...interface{})
	Warnln(args ...interface{})
	Warningln(args ...interface{})
	Errorln(args ...interface{})
	Fatalln(args ...interface{})
	Panicln(args ...interface{})

	// IsDebugEnabled() bool
	// IsInfoEnabled() bool
	// IsWarnEnabled() bool
	// IsErrorEnabled() bool
	// IsFatalEnabled() bool
	// IsPanicEnabled() bool
}

// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is
// here for consistency. Do not use. Use Logger or Entry instead.
type Ext1FieldLogger interface {
	FieldLogger
	Tracef(format string, args ...interface{})
	Trace(args ...interface{})
	Traceln(args ...interface{})
}
9,617
0
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go
// +build darwin dragonfly freebsd netbsd openbsd // +build !js package logrus import "golang.org/x/sys/unix" const ioctlReadTermios = unix.TIOCGETA func isTerminal(fd int) bool { _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) return err == nil }
9,618
0
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen/logrus/formatter.go
package logrus

import "time"

// Default key names for the default fields
const (
	defaultTimestampFormat = time.RFC3339
	FieldKeyMsg            = "msg"
	FieldKeyLevel          = "level"
	FieldKeyTime           = "time"
	FieldKeyLogrusError    = "logrus_error"
	FieldKeyFunc           = "func"
	FieldKeyFile           = "file"
)

// The Formatter interface is used to implement a custom Formatter. It takes an
// `Entry`. It exposes all the fields, including the default ones:
//
// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
// * `entry.Data["time"]`. The timestamp.
// * `entry.Data["level"]. The level the entry was logged at.
//
// Any additional fields added with `WithField` or `WithFields` are also in
// `entry.Data`. Format is expected to return an array of bytes which are then
// logged to `logger.Out`.
type Formatter interface {
	Format(*Entry) ([]byte, error)
}

// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when
// dumping it. If this code wasn't there doing:
//
//  logrus.WithField("level", 1).Info("hello")
//
// Would just silently drop the user provided level. Instead with this code
// it'll logged as:
//
//  {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
//
// It's not exported because it's still using Data in an opinionated way. It's to
// avoid code duplication between the two default formatters.
func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) {
	// Each default key is resolved through the user's FieldMap so custom key
	// names are honored; a clashing user field moves to "fields.<key>".
	timeKey := fieldMap.resolve(FieldKeyTime)
	if t, ok := data[timeKey]; ok {
		data["fields."+timeKey] = t
		delete(data, timeKey)
	}

	msgKey := fieldMap.resolve(FieldKeyMsg)
	if m, ok := data[msgKey]; ok {
		data["fields."+msgKey] = m
		delete(data, msgKey)
	}

	levelKey := fieldMap.resolve(FieldKeyLevel)
	if l, ok := data[levelKey]; ok {
		data["fields."+levelKey] = l
		delete(data, levelKey)
	}

	logrusErrKey := fieldMap.resolve(FieldKeyLogrusError)
	if l, ok := data[logrusErrKey]; ok {
		data["fields."+logrusErrKey] = l
		delete(data, logrusErrKey)
	}

	// If reportCaller is not set, 'func' will not conflict.
	if reportCaller {
		// NOTE(review): unlike the keys above, the original key is copied but
		// not deleted here; the formatter later overwrites it with caller info.
		funcKey := fieldMap.resolve(FieldKeyFunc)
		if l, ok := data[funcKey]; ok {
			data["fields."+funcKey] = l
		}
		fileKey := fieldMap.resolve(FieldKeyFile)
		if l, ok := data[fileKey]; ok {
			data["fields."+fileKey] = l
		}
	}
}
9,619
0
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen/logrus/text_formatter.go
package logrus

import (
	"bytes"
	"fmt"
	"os"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"
	"unicode/utf8"
)

// ANSI color codes used by the colored terminal output.
const (
	red    = 31
	yellow = 33
	blue   = 36
	gray   = 37
)

// baseTimestamp is the process start time; the short (non-full) timestamp
// format prints seconds elapsed since this instant.
var baseTimestamp time.Time

func init() {
	baseTimestamp = time.Now()
}

// TextFormatter formats logs into text
type TextFormatter struct {
	// Set to true to bypass checking for a TTY before outputting colors.
	ForceColors bool

	// Force disabling colors.
	DisableColors bool

	// Force quoting of all values
	ForceQuote bool

	// DisableQuote disables quoting for all values.
	// DisableQuote will have a lower priority than ForceQuote.
	// If both of them are set to true, quote will be forced on all values.
	DisableQuote bool

	// Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/
	EnvironmentOverrideColors bool

	// Disable timestamp logging. useful when output is redirected to logging
	// system that already adds timestamps.
	DisableTimestamp bool

	// Enable logging the full timestamp when a TTY is attached instead of just
	// the time passed since beginning of execution.
	FullTimestamp bool

	// TimestampFormat to use for display when a full timestamp is printed
	TimestampFormat string

	// The fields are sorted by default for a consistent output. For applications
	// that log extremely frequently and don't use the JSON formatter this may not
	// be desired.
	DisableSorting bool

	// The keys sorting function, when uninitialized it uses sort.Strings.
	SortingFunc func([]string)

	// Disables the truncation of the level text to 4 characters.
	DisableLevelTruncation bool

	// PadLevelText Adds padding the level text so that all the levels output at the same length
	// PadLevelText is a superset of the DisableLevelTruncation option
	PadLevelText bool

	// QuoteEmptyFields will wrap empty fields in quotes if true
	QuoteEmptyFields bool

	// Whether the logger's out is to a terminal
	isTerminal bool

	// FieldMap allows users to customize the names of keys for default fields.
	// As an example:
	// formatter := &TextFormatter{
	//     FieldMap: FieldMap{
	//         FieldKeyTime:  "@timestamp",
	//         FieldKeyLevel: "@level",
	//         FieldKeyMsg:   "@message"}}
	FieldMap FieldMap

	// CallerPrettyfier can be set by the user to modify the content
	// of the function and file keys in the data when ReportCaller is
	// activated. If any of the returned value is the empty string the
	// corresponding key will be removed from fields.
	CallerPrettyfier func(*runtime.Frame) (function string, file string)

	// terminalInitOnce guards the one-time TTY detection in init().
	terminalInitOnce sync.Once

	// The max length of the level text, generated dynamically on init
	levelTextMaxLength int
}

// init performs one-time setup: TTY detection on the logger's output and
// computing the longest level name (used by PadLevelText).
func (f *TextFormatter) init(entry *Entry) {
	if entry.Logger != nil {
		f.isTerminal = checkIfTerminal(entry.Logger.Out)
	}
	// Get the max length of the level text
	for _, level := range AllLevels {
		levelTextLength := utf8.RuneCount([]byte(level.String()))
		if levelTextLength > f.levelTextMaxLength {
			f.levelTextMaxLength = levelTextLength
		}
	}
}

// isColored decides whether output should be colorized, honoring ForceColors,
// TTY detection, the CLICOLOR/CLICOLOR_FORCE environment overrides, and
// finally DisableColors (which always wins).
func (f *TextFormatter) isColored() bool {
	isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows"))

	if f.EnvironmentOverrideColors {
		switch force, ok := os.LookupEnv("CLICOLOR_FORCE"); {
		case ok && force != "0":
			isColored = true
		case ok && force == "0", os.Getenv("CLICOLOR") == "0":
			isColored = false
		}
	}

	return isColored && !f.DisableColors
}

// Format renders a single log entry
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
	// Work on a copy of the field map so clash-prefixing does not mutate the
	// caller's entry.
	data := make(Fields)
	for k, v := range entry.Data {
		data[k] = v
	}
	prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
	keys := make([]string, 0, len(data))
	for k := range data {
		keys = append(keys, k)
	}

	var funcVal, fileVal string

	// fixedKeys holds the default keys (time/level/msg/...) in output order,
	// followed by the user field keys (for the non-colored path).
	fixedKeys := make([]string, 0, 4+len(data))
	if !f.DisableTimestamp {
		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime))
	}
	fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel))
	if entry.Message != "" {
		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg))
	}
	if entry.err != "" {
		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError))
	}
	if entry.HasCaller() {
		if f.CallerPrettyfier != nil {
			funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
		} else {
			funcVal = entry.Caller.Function
			fileVal = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
		}

		if funcVal != "" {
			fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFunc))
		}
		if fileVal != "" {
			fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFile))
		}
	}

	if !f.DisableSorting {
		if f.SortingFunc == nil {
			sort.Strings(keys)
			fixedKeys = append(fixedKeys, keys...)
		} else {
			// Colored output only prints user keys, so only those are sorted;
			// otherwise the custom sort is applied across all keys.
			if !f.isColored() {
				fixedKeys = append(fixedKeys, keys...)
				f.SortingFunc(fixedKeys)
			} else {
				f.SortingFunc(keys)
			}
		}
	} else {
		fixedKeys = append(fixedKeys, keys...)
	}

	var b *bytes.Buffer
	if entry.Buffer != nil {
		b = entry.Buffer
	} else {
		b = &bytes.Buffer{}
	}

	f.terminalInitOnce.Do(func() { f.init(entry) })

	timestampFormat := f.TimestampFormat
	if timestampFormat == "" {
		timestampFormat = defaultTimestampFormat
	}
	if f.isColored() {
		f.printColored(b, entry, keys, data, timestampFormat)
	} else {
		for _, key := range fixedKeys {
			var value interface{}
			switch {
			case key == f.FieldMap.resolve(FieldKeyTime):
				value = entry.Time.Format(timestampFormat)
			case key == f.FieldMap.resolve(FieldKeyLevel):
				value = entry.Level.String()
			case key == f.FieldMap.resolve(FieldKeyMsg):
				value = entry.Message
			case key == f.FieldMap.resolve(FieldKeyLogrusError):
				value = entry.err
			case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller():
				value = funcVal
			case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller():
				value = fileVal
			default:
				value = data[key]
			}
			f.appendKeyValue(b, key, value)
		}
	}

	b.WriteByte('\n')
	return b.Bytes(), nil
}

// printColored writes the ANSI-colored single-line form of the entry: colored
// level tag, optional timestamp/caller, padded message, then key=value pairs.
func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) {
	var levelColor int
	switch entry.Level {
	case DebugLevel, TraceLevel:
		levelColor = gray
	case WarnLevel:
		levelColor = yellow
	case ErrorLevel, FatalLevel, PanicLevel:
		levelColor = red
	default:
		levelColor = blue
	}

	levelText := strings.ToUpper(entry.Level.String())
	if !f.DisableLevelTruncation && !f.PadLevelText {
		levelText = levelText[0:4]
	}
	if f.PadLevelText {
		// Generates the format string used in the next line, for example "%-6s" or "%-7s".
		// Based on the max level text length.
		formatString := "%-" + strconv.Itoa(f.levelTextMaxLength) + "s"
		// Formats the level text by appending spaces up to the max length, for example:
		// - "INFO   "
		// - "WARNING"
		levelText = fmt.Sprintf(formatString, levelText)
	}

	// Remove a single newline if it already exists in the message to keep
	// the behavior of logrus text_formatter the same as the stdlib log package
	entry.Message = strings.TrimSuffix(entry.Message, "\n")

	caller := ""
	if entry.HasCaller() {
		funcVal := fmt.Sprintf("%s()", entry.Caller.Function)
		fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)

		if f.CallerPrettyfier != nil {
			funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
		}

		if fileVal == "" {
			caller = funcVal
		} else if funcVal == "" {
			caller = fileVal
		} else {
			caller = fileVal + " " + funcVal
		}
	}

	switch {
	case f.DisableTimestamp:
		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message)
	case !f.FullTimestamp:
		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message)
	default:
		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message)
	}
	for _, k := range keys {
		v := data[k]
		fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
		f.appendValue(b, v)
	}
}

// needsQuoting reports whether a value must be quoted in logfmt output:
// forced quoting, empty values (when configured), or any character outside
// the safe unquoted set.
func (f *TextFormatter) needsQuoting(text string) bool {
	if f.ForceQuote {
		return true
	}
	if f.QuoteEmptyFields && len(text) == 0 {
		return true
	}
	if f.DisableQuote {
		return false
	}
	for _, ch := range text {
		if !((ch >= 'a' && ch <= 'z') ||
			(ch >= 'A' && ch <= 'Z') ||
			(ch >= '0' && ch <= '9') ||
			ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') {
			return true
		}
	}
	return false
}

// appendKeyValue writes " key=value" (space-separated from prior content).
func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
	if b.Len() > 0 {
		b.WriteByte(' ')
	}
	b.WriteString(key)
	b.WriteByte('=')
	f.appendValue(b, value)
}

// appendValue writes the value, quoting it when needsQuoting says so.
func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
	stringVal, ok := value.(string)
	if !ok {
		stringVal = fmt.Sprint(value)
	}

	if !f.needsQuoting(stringVal) {
		b.WriteString(stringVal)
	} else {
		b.WriteString(fmt.Sprintf("%q", stringVal))
	}
}
9,620
0
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen/logrus/README.md
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus) Logrus is a structured logger for Go (golang), completely API compatible with the standard library logger. **Logrus is in maintenance-mode.** We will not be introducing new features. It's simply too hard to do in a way that won't break many people's projects, which is the last thing you want from your Logging library (again...). This does not mean Logrus is dead. Logrus will continue to be maintained for security, (backwards compatible) bug fixes, and performance (where we are limited by the interface). I believe Logrus' biggest contribution is to have played a part in today's widespread use of structured logging in Golang. There doesn't seem to be a reason to do a major, breaking iteration into Logrus V2, since the fantastic Go community has built those independently. Many fantastic alternatives have sprung up. Logrus would look like those, had it been re-designed with what we know about structured logging in Go today. Check out, for example, [Zerolog][zerolog], [Zap][zap], and [Apex][apex]. [zerolog]: https://github.com/rs/zerolog [zap]: https://github.com/uber-go/zap [apex]: https://github.com/apex/log **Seeing weird case-sensitive problems?** It's in the past been possible to import Logrus as both upper- and lower-case. Due to the Go package environment, this caused issues in the community and we needed a standard. Some environments experienced problems with the upper-case variant, so the lower-case was decided. Everything using `logrus` will need to use the lower-case: `github.com/sirupsen/logrus`. Any package that isn't, should be changed. 
To fix Glide, see [these comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437). For an in-depth explanation of the casing issue, see [this comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276). Nicely color-coded in development (when a TTY is attached, otherwise just plain text): ![Colored](http://i.imgur.com/PY7qMwd.png) With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash or Splunk: ```json {"animal":"walrus","level":"info","msg":"A group of walrus emerges from the ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} {"level":"warning","msg":"The group's number increased tremendously!", "number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} {"animal":"walrus","level":"info","msg":"A giant walrus appears!", "size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} {"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", "size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} {"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, "time":"2014-03-10 19:57:38.562543128 -0400 EDT"} ``` With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not attached, the output is compatible with the [logfmt](http://godoc.org/github.com/kr/logfmt) format: ```text time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" 
err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true ``` To ensure this behaviour even if a TTY is attached, set your formatter as follows: ```go log.SetFormatter(&log.TextFormatter{ DisableColors: true, FullTimestamp: true, }) ``` #### Logging Method Name If you wish to add the calling method as a field, instruct the logger via: ```go log.SetReportCaller(true) ``` This adds the caller as 'method' like so: ```json {"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by", "time":"2014-03-10 19:57:38.562543129 -0400 EDT"} ``` ```text time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin ``` Note that this does add measurable overhead - the cost will depend on the version of Go, but is between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your environment via benchmarks: ``` go test -bench=.*CallerTracing ``` #### Case-sensitivity The organization's name was changed to lower-case--and this will not be changed back. If you are getting import conflicts due to case sensitivity, please use the lower-case import: `github.com/sirupsen/logrus`. #### Example The simplest way to use Logrus is simply the package-level exported logger: ```go package main import ( log "github.com/sirupsen/logrus" ) func main() { log.WithFields(log.Fields{ "animal": "walrus", }).Info("A walrus appears") } ``` Note that it's completely api-compatible with the stdlib logger, so you can replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"` and you'll now have the flexibility of Logrus. You can customize it all you want: ```go package main import ( "os" log "github.com/sirupsen/logrus" ) func init() { // Log as JSON instead of the default ASCII formatter. 
log.SetFormatter(&log.JSONFormatter{}) // Output to stdout instead of the default stderr // Can be any io.Writer, see below for File example log.SetOutput(os.Stdout) // Only log the warning severity or above. log.SetLevel(log.WarnLevel) } func main() { log.WithFields(log.Fields{ "animal": "walrus", "size": 10, }).Info("A group of walrus emerges from the ocean") log.WithFields(log.Fields{ "omg": true, "number": 122, }).Warn("The group's number increased tremendously!") log.WithFields(log.Fields{ "omg": true, "number": 100, }).Fatal("The ice breaks!") // A common pattern is to re-use fields between logging statements by re-using // the logrus.Entry returned from WithFields() contextLogger := log.WithFields(log.Fields{ "common": "this is a common field", "other": "I also should be logged always", }) contextLogger.Info("I'll be logged with common and other field") contextLogger.Info("Me too") } ``` For more advanced usage such as logging to multiple locations from the same application, you can also create an instance of the `logrus` Logger: ```go package main import ( "os" "github.com/sirupsen/logrus" ) // Create a new instance of the logger. You can have any number of instances. var log = logrus.New() func main() { // The API for setting attributes is a little different than the package level // exported logger. See Godoc. log.Out = os.Stdout // You could set this to any `io.Writer` such as a file // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) // if err == nil { // log.Out = file // } else { // log.Info("Failed to log to file, using default stderr") // } log.WithFields(logrus.Fields{ "animal": "walrus", "size": 10, }).Info("A group of walrus emerges from the ocean") } ``` #### Fields Logrus encourages careful, structured logging through logging fields instead of long, unparseable error messages. 
For example, instead of: `log.Fatalf("Failed to send event %s to topic %s with key %d")`, you should log the much more discoverable: ```go log.WithFields(log.Fields{ "event": event, "topic": topic, "key": key, }).Fatal("Failed to send event") ``` We've found this API forces you to think about logging in a way that produces much more useful logging messages. We've been in countless situations where just a single added field to a log statement that was already there would've saved us hours. The `WithFields` call is optional. In general, with Logrus using any of the `printf`-family functions should be seen as a hint you should add a field, however, you can still use the `printf`-family functions with Logrus. #### Default Fields Often it's helpful to have fields _always_ attached to log statements in an application or parts of one. For example, you may want to always log the `request_id` and `user_ip` in the context of a request. Instead of writing `log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on every line, you can create a `logrus.Entry` to pass around instead: ```go requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip}) requestLogger.Info("something happened on that request") # will log request_id and user_ip requestLogger.Warn("something not great happened") ``` #### Hooks You can add hooks for logging levels. For example to send errors to an exception tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to multiple places simultaneously, e.g. syslog. Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in `init`: ```go import ( log "github.com/sirupsen/logrus" "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake" logrus_syslog "github.com/sirupsen/logrus/hooks/syslog" "log/syslog" ) func init() { // Use the Airbrake hook to report errors that have Error severity or above to // an exception tracker. 
You can create custom hooks, see the Hooks section. log.AddHook(airbrake.NewHook(123, "xyz", "production")) hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") if err != nil { log.Error("Unable to connect to local syslog daemon") } else { log.AddHook(hook) } } ``` Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). A list of currently known service hooks can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks) #### Level logging Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic. ```go log.Trace("Something very low level.") log.Debug("Useful debugging information.") log.Info("Something noteworthy happened!") log.Warn("You should probably take a look at this.") log.Error("Something failed but I'm not quitting.") // Calls os.Exit(1) after logging log.Fatal("Bye.") // Calls panic() after logging log.Panic("I'm bailing.") ``` You can set the logging level on a `Logger`, then it will only log entries with that severity or anything above it: ```go // Will log anything that is info or above (warn, error, fatal, panic). Default. log.SetLevel(log.InfoLevel) ``` It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose environment if your application has that. #### Entries Besides the fields added with `WithField` or `WithFields` some fields are automatically added to all logging events: 1. `time`. The timestamp when the entry was created. 2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after the `AddFields` call. E.g. `Failed to send event.` 3. `level`. The logging level. E.g. `info`. #### Environments Logrus has no notion of environment. If you wish for hooks and formatters to only be used in specific environments, you should handle that yourself. 
For example, if your application has a global variable `Environment`, which is a string representation of the environment you could do: ```go import ( log "github.com/sirupsen/logrus" ) init() { // do something here to set environment depending on an environment variable // or command-line flag if Environment == "production" { log.SetFormatter(&log.JSONFormatter{}) } else { // The TextFormatter is default, you don't actually have to do this. log.SetFormatter(&log.TextFormatter{}) } } ``` This configuration is how `logrus` was intended to be used, but JSON in production is mostly only useful if you do log aggregation with tools like Splunk or Logstash. #### Formatters The built-in logging formatters are: * `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise without colors. * *Note:* to force colored output when there is no TTY, set the `ForceColors` field to `true`. To force no colored output even if there is a TTY set the `DisableColors` field to `true`. For Windows, see [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable). * When colors are enabled, levels are truncated to 4 characters by default. To disable truncation set the `DisableLevelTruncation` field to `true`. * When outputting to a TTY, it's often helpful to visually scan down a column where all the levels are the same width. Setting the `PadLevelText` field to `true` enables this behavior, by adding padding to the level text. * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). * `logrus.JSONFormatter`. Logs fields as JSON. * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). Third party logging formatters: * [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine. * [`GELF`](https://github.com/fabienm/go-logrus-formatters). 
Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html). * [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. * [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. * [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the Power of Zalgo. * [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure. * [`powerful-logrus-formatter`](https://github.com/zput/zxcTool). get fileName, log's line number and the latest function's name when print log; Sava log to files. * [`caption-json-formatter`](https://github.com/nolleh/caption_json_formatter). logrus's message json formatter with human-readable caption added. You can define your formatter by implementing the `Formatter` interface, requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a `Fields` type (`map[string]interface{}`) with all your fields as well as the default ones (see Entries section above): ```go type MyJSONFormatter struct { } log.SetFormatter(new(MyJSONFormatter)) func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { // Note this doesn't include Time, Level and Message which are available on // the Entry. Consult `godoc` on information about those fields or read the // source of the official loggers. serialized, err := json.Marshal(entry.Data) if err != nil { return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) } return append(serialized, '\n'), nil } ``` #### Logger as an `io.Writer` Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. ```go w := logger.Writer() defer w.Close() srv := http.Server{ // create a stdlib log.Logger that writes to // logrus.Logger. 
ErrorLog: log.New(w, "", 0), } ``` Each line written to that writer will be printed the usual way, using formatters and hooks. The level for those entries is `info`. This means that we can override the standard library logger easily: ```go logger := logrus.New() logger.Formatter = &logrus.JSONFormatter{} // Use logrus for standard log output // Note that `log` here references stdlib's log // Not logrus imported under the name `log`. log.SetOutput(logger.Writer()) ``` #### Rotation Log rotation is not provided with Logrus. Log rotation should be done by an external program (like `logrotate(8)`) that can compress and delete old log entries. It should not be a feature of the application-level logger. #### Tools | Tool | Description | | ---- | ----------- | |[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will be generated with different configs in different environments.| |[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) | #### Testing Logrus has a built in facility for asserting the presence of log messages. 
This is implemented through the `test` hook and provides: * decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just adds the `test` hook * a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): ```go import( "github.com/sirupsen/logrus" "github.com/sirupsen/logrus/hooks/test" "github.com/stretchr/testify/assert" "testing" ) func TestSomething(t*testing.T){ logger, hook := test.NewNullLogger() logger.Error("Helloerror") assert.Equal(t, 1, len(hook.Entries)) assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level) assert.Equal(t, "Helloerror", hook.LastEntry().Message) hook.Reset() assert.Nil(t, hook.LastEntry()) } ``` #### Fatal handlers Logrus can register one or more functions that will be called when any `fatal` level message is logged. The registered handlers will be executed before logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted. ``` ... handler := func() { // gracefully shutdown something... } logrus.RegisterExitHandler(handler) ... ``` #### Thread safety By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs. If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking. Situation when locking is not needed includes: * You have no hooks registered, or hooks calling is already thread-safe. * Writing to logger.Out is already thread-safe, for example: 1) logger.Out is protected by locks. 2) logger.Out is an os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing) (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
9,621
0
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
// +build !appengine,!js,!windows,!nacl,!plan9 package logrus import ( "io" "os" ) func checkIfTerminal(w io.Writer) bool { switch v := w.(type) { case *os.File: return isTerminal(int(v.Fd())) default: return false } }
9,622
0
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen/logrus/logger.go
package logrus import ( "context" "io" "os" "sync" "sync/atomic" "time" ) // LogFunction For big messages, it can be more efficient to pass a function // and only call it if the log level is actually enables rather than // generating the log message and then checking if the level is enabled type LogFunction func()[]interface{} type Logger struct { // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a // file, or leave it default which is `os.Stderr`. You can also set this to // something more adventurous, such as logging to Kafka. Out io.Writer // Hooks for the logger instance. These allow firing events based on logging // levels and log entries. For example, to send errors to an error tracking // service, log to StatsD or dump the core on fatal errors. Hooks LevelHooks // All log entries pass through the formatter before logged to Out. The // included formatters are `TextFormatter` and `JSONFormatter` for which // TextFormatter is the default. In development (when a TTY is attached) it // logs with colors, but to a file it wouldn't. You can easily implement your // own that implements the `Formatter` interface, see the `README` or included // formatters for examples. Formatter Formatter // Flag for whether to log caller info (off by default) ReportCaller bool // The logging level the logger should log at. This is typically (and defaults // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be // logged. Level Level // Used to sync writing to the log. Locking is enabled by Default mu MutexWrap // Reusable empty entry entryPool sync.Pool // Function to exit the application, defaults to `os.Exit()` ExitFunc exitFunc } type exitFunc func(int) type MutexWrap struct { lock sync.Mutex disabled bool } func (mw *MutexWrap) Lock() { if !mw.disabled { mw.lock.Lock() } } func (mw *MutexWrap) Unlock() { if !mw.disabled { mw.lock.Unlock() } } func (mw *MutexWrap) Disable() { mw.disabled = true } // Creates a new logger. 
Configuration should be set by changing `Formatter`, // `Out` and `Hooks` directly on the default logger instance. You can also just // instantiate your own: // // var log = &logrus.Logger{ // Out: os.Stderr, // Formatter: new(logrus.TextFormatter), // Hooks: make(logrus.LevelHooks), // Level: logrus.DebugLevel, // } // // It's recommended to make this a global instance called `log`. func New() *Logger { return &Logger{ Out: os.Stderr, Formatter: new(TextFormatter), Hooks: make(LevelHooks), Level: InfoLevel, ExitFunc: os.Exit, ReportCaller: false, } } func (logger *Logger) newEntry() *Entry { entry, ok := logger.entryPool.Get().(*Entry) if ok { return entry } return NewEntry(logger) } func (logger *Logger) releaseEntry(entry *Entry) { entry.Data = map[string]interface{}{} logger.entryPool.Put(entry) } // WithField allocates a new entry and adds a field to it. // Debug, Print, Info, Warn, Error, Fatal or Panic must be then applied to // this new returned entry. // If you want multiple fields, use `WithFields`. func (logger *Logger) WithField(key string, value interface{}) *Entry { entry := logger.newEntry() defer logger.releaseEntry(entry) return entry.WithField(key, value) } // Adds a struct of fields to the log entry. All it does is call `WithField` for // each `Field`. func (logger *Logger) WithFields(fields Fields) *Entry { entry := logger.newEntry() defer logger.releaseEntry(entry) return entry.WithFields(fields) } // Add an error as single field to the log entry. All it does is call // `WithError` for the given `error`. func (logger *Logger) WithError(err error) *Entry { entry := logger.newEntry() defer logger.releaseEntry(entry) return entry.WithError(err) } // Add a context to the log entry. func (logger *Logger) WithContext(ctx context.Context) *Entry { entry := logger.newEntry() defer logger.releaseEntry(entry) return entry.WithContext(ctx) } // Overrides the time of the log entry. 
func (logger *Logger) WithTime(t time.Time) *Entry { entry := logger.newEntry() defer logger.releaseEntry(entry) return entry.WithTime(t) } func (logger *Logger) Logf(level Level, format string, args ...interface{}) { if logger.IsLevelEnabled(level) { entry := logger.newEntry() entry.Logf(level, format, args...) logger.releaseEntry(entry) } } func (logger *Logger) Tracef(format string, args ...interface{}) { logger.Logf(TraceLevel, format, args...) } func (logger *Logger) Debugf(format string, args ...interface{}) { logger.Logf(DebugLevel, format, args...) } func (logger *Logger) Infof(format string, args ...interface{}) { logger.Logf(InfoLevel, format, args...) } func (logger *Logger) Printf(format string, args ...interface{}) { entry := logger.newEntry() entry.Printf(format, args...) logger.releaseEntry(entry) } func (logger *Logger) Warnf(format string, args ...interface{}) { logger.Logf(WarnLevel, format, args...) } func (logger *Logger) Warningf(format string, args ...interface{}) { logger.Warnf(format, args...) } func (logger *Logger) Errorf(format string, args ...interface{}) { logger.Logf(ErrorLevel, format, args...) } func (logger *Logger) Fatalf(format string, args ...interface{}) { logger.Logf(FatalLevel, format, args...) logger.Exit(1) } func (logger *Logger) Panicf(format string, args ...interface{}) { logger.Logf(PanicLevel, format, args...) } func (logger *Logger) Log(level Level, args ...interface{}) { if logger.IsLevelEnabled(level) { entry := logger.newEntry() entry.Log(level, args...) logger.releaseEntry(entry) } } func (logger *Logger) LogFn(level Level, fn LogFunction) { if logger.IsLevelEnabled(level) { entry := logger.newEntry() entry.Log(level, fn()...) logger.releaseEntry(entry) } } func (logger *Logger) Trace(args ...interface{}) { logger.Log(TraceLevel, args...) } func (logger *Logger) Debug(args ...interface{}) { logger.Log(DebugLevel, args...) } func (logger *Logger) Info(args ...interface{}) { logger.Log(InfoLevel, args...) 
} func (logger *Logger) Print(args ...interface{}) { entry := logger.newEntry() entry.Print(args...) logger.releaseEntry(entry) } func (logger *Logger) Warn(args ...interface{}) { logger.Log(WarnLevel, args...) } func (logger *Logger) Warning(args ...interface{}) { logger.Warn(args...) } func (logger *Logger) Error(args ...interface{}) { logger.Log(ErrorLevel, args...) } func (logger *Logger) Fatal(args ...interface{}) { logger.Log(FatalLevel, args...) logger.Exit(1) } func (logger *Logger) Panic(args ...interface{}) { logger.Log(PanicLevel, args...) } func (logger *Logger) TraceFn(fn LogFunction) { logger.LogFn(TraceLevel, fn) } func (logger *Logger) DebugFn(fn LogFunction) { logger.LogFn(DebugLevel, fn) } func (logger *Logger) InfoFn(fn LogFunction) { logger.LogFn(InfoLevel, fn) } func (logger *Logger) PrintFn(fn LogFunction) { entry := logger.newEntry() entry.Print(fn()...) logger.releaseEntry(entry) } func (logger *Logger) WarnFn(fn LogFunction) { logger.LogFn(WarnLevel, fn) } func (logger *Logger) WarningFn(fn LogFunction) { logger.WarnFn(fn) } func (logger *Logger) ErrorFn(fn LogFunction) { logger.LogFn(ErrorLevel, fn) } func (logger *Logger) FatalFn(fn LogFunction) { logger.LogFn(FatalLevel, fn) logger.Exit(1) } func (logger *Logger) PanicFn(fn LogFunction) { logger.LogFn(PanicLevel, fn) } func (logger *Logger) Logln(level Level, args ...interface{}) { if logger.IsLevelEnabled(level) { entry := logger.newEntry() entry.Logln(level, args...) logger.releaseEntry(entry) } } func (logger *Logger) Traceln(args ...interface{}) { logger.Logln(TraceLevel, args...) } func (logger *Logger) Debugln(args ...interface{}) { logger.Logln(DebugLevel, args...) } func (logger *Logger) Infoln(args ...interface{}) { logger.Logln(InfoLevel, args...) } func (logger *Logger) Println(args ...interface{}) { entry := logger.newEntry() entry.Println(args...) logger.releaseEntry(entry) } func (logger *Logger) Warnln(args ...interface{}) { logger.Logln(WarnLevel, args...) 
} func (logger *Logger) Warningln(args ...interface{}) { logger.Warnln(args...) } func (logger *Logger) Errorln(args ...interface{}) { logger.Logln(ErrorLevel, args...) } func (logger *Logger) Fatalln(args ...interface{}) { logger.Logln(FatalLevel, args...) logger.Exit(1) } func (logger *Logger) Panicln(args ...interface{}) { logger.Logln(PanicLevel, args...) } func (logger *Logger) Exit(code int) { runHandlers() if logger.ExitFunc == nil { logger.ExitFunc = os.Exit } logger.ExitFunc(code) } //When file is opened with appending mode, it's safe to //write concurrently to a file (within 4k message on Linux). //In these cases user can choose to disable the lock. func (logger *Logger) SetNoLock() { logger.mu.Disable() } func (logger *Logger) level() Level { return Level(atomic.LoadUint32((*uint32)(&logger.Level))) } // SetLevel sets the logger level. func (logger *Logger) SetLevel(level Level) { atomic.StoreUint32((*uint32)(&logger.Level), uint32(level)) } // GetLevel returns the logger level. func (logger *Logger) GetLevel() Level { return logger.level() } // AddHook adds a hook to the logger hooks. func (logger *Logger) AddHook(hook Hook) { logger.mu.Lock() defer logger.mu.Unlock() logger.Hooks.Add(hook) } // IsLevelEnabled checks if the log level of the logger is greater than the level param func (logger *Logger) IsLevelEnabled(level Level) bool { return logger.level() >= level } // SetFormatter sets the logger formatter. func (logger *Logger) SetFormatter(formatter Formatter) { logger.mu.Lock() defer logger.mu.Unlock() logger.Formatter = formatter } // SetOutput sets the logger output. 
func (logger *Logger) SetOutput(output io.Writer) { logger.mu.Lock() defer logger.mu.Unlock() logger.Out = output } func (logger *Logger) SetReportCaller(reportCaller bool) { logger.mu.Lock() defer logger.mu.Unlock() logger.ReportCaller = reportCaller } // ReplaceHooks replaces the logger hooks and returns the old ones func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks { logger.mu.Lock() oldHooks := logger.Hooks logger.Hooks = hooks logger.mu.Unlock() return oldHooks }
9,623
0
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen/logrus/alt_exit.go
package logrus // The following code was sourced and modified from the // https://github.com/tebeka/atexit package governed by the following license: // // Copyright (c) 2012 Miki Tebeka <[email protected]>. // // Permission is hereby granted, free of charge, to any person obtaining a copy of // this software and associated documentation files (the "Software"), to deal in // the Software without restriction, including without limitation the rights to // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of // the Software, and to permit persons to whom the Software is furnished to do so, // subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS // FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR // COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER // IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN // CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import ( "fmt" "os" ) var handlers = []func(){} func runHandler(handler func()) { defer func() { if err := recover(); err != nil { fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err) } }() handler() } func runHandlers() { for _, handler := range handlers { runHandler(handler) } } // Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code) func Exit(code int) { runHandlers() os.Exit(code) } // RegisterExitHandler appends a Logrus Exit handler to the list of handlers, // call logrus.Exit to invoke all handlers. The handlers will also be invoked when // any Fatal log entry is made. 
// // This method is useful when a caller wishes to use logrus to log a fatal // message but also needs to gracefully shutdown. An example usecase could be // closing database connections, or sending a alert that the application is // closing. func RegisterExitHandler(handler func()) { handlers = append(handlers, handler) } // DeferExitHandler prepends a Logrus Exit handler to the list of handlers, // call logrus.Exit to invoke all handlers. The handlers will also be invoked when // any Fatal log entry is made. // // This method is useful when a caller wishes to use logrus to log a fatal // message but also needs to gracefully shutdown. An example usecase could be // closing database connections, or sending a alert that the application is // closing. func DeferExitHandler(handler func()) { handlers = append([]func(){handler}, handlers...) }
9,624
0
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen/logrus/CHANGELOG.md
# 1.6.0 Fixes: * end of line cleanup * revert the entry concurrency bug fix whic leads to deadlock under some circumstances * update dependency on go-windows-terminal-sequences to fix a crash with go 1.14 Features: * add an option to the `TextFormatter` to completely disable fields quoting # 1.5.0 Code quality: * add golangci linter run on travis Fixes: * add mutex for hooks concurrent access on `Entry` data * caller function field for go1.14 * fix build issue for gopherjs target Feature: * add an hooks/writer sub-package whose goal is to split output on different stream depending on the trace level * add a `DisableHTMLEscape` option in the `JSONFormatter` * add `ForceQuote` and `PadLevelText` options in the `TextFormatter` # 1.4.2 * Fixes build break for plan9, nacl, solaris # 1.4.1 This new release introduces: * Enhance TextFormatter to not print caller information when they are empty (#944) * Remove dependency on golang.org/x/crypto (#932, #943) Fixes: * Fix Entry.WithContext method to return a copy of the initial entry (#941) # 1.4.0 This new release introduces: * Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848). * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter` (#909, #911) * Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919). Fixes: * Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893). * Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903) * Fix infinite recursion on unknown `Level.String()` (#907) * Fix race condition in `getCaller` (#916). 
# 1.3.0 This new release introduces: * Log, Logf, Logln functions for Logger and Entry that take a Level Fixes: * Building prometheus node_exporter on AIX (#840) * Race condition in TextFormatter (#468) * Travis CI import path (#868) * Remove coloured output on Windows (#862) * Pointer to func as field in JSONFormatter (#870) * Properly marshal Levels (#873) # 1.2.0 This new release introduces: * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued * A new trace level named `Trace` whose level is below `Debug` * A configurable exit function to be called upon a Fatal trace * The `Level` object now implements `encoding.TextUnmarshaler` interface # 1.1.1 This is a bug fix release. * fix the build break on Solaris * don't drop a whole trace in JSONFormatter when a field param is a function pointer which can not be serialized # 1.1.0 This new release introduces: * several fixes: * a fix for a race condition on entry formatting * proper cleanup of previously used entries before putting them back in the pool * the extra new line at the end of message in text formatter has been removed * a new global public API to check if a level is activated: IsLevelEnabled * the following methods have been added to the Logger object * IsLevelEnabled * SetFormatter * SetOutput * ReplaceHooks * introduction of go module * an indent configuration for the json formatter * output colour support for windows * the field sort function is now configurable for text formatter * the CLICOLOR and CLICOLOR\_FORCE environment variable support in text formater # 1.0.6 This new release introduces: * a new api WithTime which allows to easily force the time of the log entry which is mostly useful for logger wrapper * a fix reverting the immutability of the entry given as parameter to the hooks a new configuration field of the json formatter in order to put all the fields in a nested dictionnary * a new SetOutput method in the 
Logger * a new configuration of the textformatter to configure the name of the default keys * a new configuration of the text formatter to disable the level truncation # 1.0.5 * Fix hooks race (#707) * Fix panic deadlock (#695) # 1.0.4 * Fix race when adding hooks (#612) * Fix terminal check in AppEngine (#635) # 1.0.3 * Replace example files with testable examples # 1.0.2 * bug: quote non-string values in text formatter (#583) * Make (*Logger) SetLevel a public method # 1.0.1 * bug: fix escaping in text formatter (#575) # 1.0.0 * Officially changed name to lower-case * bug: colors on Windows 10 (#541) * bug: fix race in accessing level (#512) # 0.11.5 * feature: add writer and writerlevel to entry (#372) # 0.11.4 * bug: fix undefined variable on solaris (#493) # 0.11.3 * formatter: configure quoting of empty values (#484) * formatter: configure quoting character (default is `"`) (#484) * bug: fix not importing io correctly in non-linux environments (#481) # 0.11.2 * bug: fix windows terminal detection (#476) # 0.11.1 * bug: fix tty detection with custom out (#471) # 0.11.0 * performance: Use bufferpool to allocate (#370) * terminal: terminal detection for app-engine (#343) * feature: exit handler (#375) # 0.10.0 * feature: Add a test hook (#180) * feature: `ParseLevel` is now case-insensitive (#326) * feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308) * performance: avoid re-allocations on `WithFields` (#335) # 0.9.0 * logrus/text_formatter: don't emit empty msg * logrus/hooks/airbrake: move out of main repository * logrus/hooks/sentry: move out of main repository * logrus/hooks/papertrail: move out of main repository * logrus/hooks/bugsnag: move out of main repository * logrus/core: run tests with `-race` * logrus/core: detect TTY based on `stderr` * logrus/core: support `WithError` on logger * logrus/core: Solaris support # 0.8.7 * logrus/core: fix possible race (#216) * logrus/doc: small typo fixes and doc improvements # 0.8.6 * 
hooks/raven: allow passing an initialized client # 0.8.5 * logrus/core: revert #208 # 0.8.4 * formatter/text: fix data race (#218) # 0.8.3 * logrus/core: fix entry log level (#208) * logrus/core: improve performance of text formatter by 40% * logrus/core: expose `LevelHooks` type * logrus/core: add support for DragonflyBSD and NetBSD * formatter/text: print structs more verbosely # 0.8.2 * logrus: fix more Fatal family functions # 0.8.1 * logrus: fix not exiting on `Fatalf` and `Fatalln` # 0.8.0 * logrus: defaults to stderr instead of stdout * hooks/sentry: add special field for `*http.Request` * formatter/text: ignore Windows for colors # 0.7.3 * formatter/\*: allow configuration of timestamp layout # 0.7.2 * formatter/text: Add configuration option for time format (#158)
9,625
0
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen/logrus/exported.go
package logrus import ( "context" "io" "time" ) var ( // std is the name of the standard logger in stdlib `log` std = New() ) func StandardLogger() *Logger { return std } // SetOutput sets the standard logger output. func SetOutput(out io.Writer) { std.SetOutput(out) } // SetFormatter sets the standard logger formatter. func SetFormatter(formatter Formatter) { std.SetFormatter(formatter) } // SetReportCaller sets whether the standard logger will include the calling // method as a field. func SetReportCaller(include bool) { std.SetReportCaller(include) } // SetLevel sets the standard logger level. func SetLevel(level Level) { std.SetLevel(level) } // GetLevel returns the standard logger level. func GetLevel() Level { return std.GetLevel() } // IsLevelEnabled checks if the log level of the standard logger is greater than the level param func IsLevelEnabled(level Level) bool { return std.IsLevelEnabled(level) } // AddHook adds a hook to the standard logger hooks. func AddHook(hook Hook) { std.AddHook(hook) } // WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. func WithError(err error) *Entry { return std.WithField(ErrorKey, err) } // WithContext creates an entry from the standard logger and adds a context to it. func WithContext(ctx context.Context) *Entry { return std.WithContext(ctx) } // WithField creates an entry from the standard logger and adds a field to // it. If you want multiple fields, use `WithFields`. // // Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal // or Panic on the Entry it returns. func WithField(key string, value interface{}) *Entry { return std.WithField(key, value) } // WithFields creates an entry from the standard logger and adds multiple // fields to it. This is simply a helper for `WithField`, invoking it // once for each field. // // Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal // or Panic on the Entry it returns. 
// WithFields creates an entry from the standard logger and adds multiple
// fields to it. This is simply a helper for `WithField`, invoked in a loop
// once for each field.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithFields(fields Fields) *Entry {
	return std.WithFields(fields)
}

// WithTime creates an entry from the standard logger and overrides the time of
// logs generated with it.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithTime(t time.Time) *Entry {
	return std.WithTime(t)
}

// Trace logs a message at level Trace on the standard logger.
func Trace(args ...interface{}) {
	std.Trace(args...)
}

// Debug logs a message at level Debug on the standard logger.
func Debug(args ...interface{}) {
	std.Debug(args...)
}

// Print logs a message at level Info on the standard logger.
func Print(args ...interface{}) {
	std.Print(args...)
}

// Info logs a message at level Info on the standard logger.
func Info(args ...interface{}) {
	std.Info(args...)
}

// Warn logs a message at level Warn on the standard logger.
func Warn(args ...interface{}) {
	std.Warn(args...)
}

// Warning logs a message at level Warn on the standard logger.
func Warning(args ...interface{}) {
	std.Warning(args...)
}

// Error logs a message at level Error on the standard logger.
func Error(args ...interface{}) {
	std.Error(args...)
}

// Panic logs a message at level Panic on the standard logger and then panics.
func Panic(args ...interface{}) {
	std.Panic(args...)
}

// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
func Fatal(args ...interface{}) {
	std.Fatal(args...)
}

// TraceFn logs a message from a func at level Trace on the standard logger.
func TraceFn(fn LogFunction) {
	std.TraceFn(fn)
}

// DebugFn logs a message from a func at level Debug on the standard logger.
func DebugFn(fn LogFunction) {
	std.DebugFn(fn)
}

// PrintFn logs a message from a func at level Info on the standard logger.
func PrintFn(fn LogFunction) {
	std.PrintFn(fn)
}

// InfoFn logs a message from a func at level Info on the standard logger.
func InfoFn(fn LogFunction) {
	std.InfoFn(fn)
}

// WarnFn logs a message from a func at level Warn on the standard logger.
func WarnFn(fn LogFunction) {
	std.WarnFn(fn)
}

// WarningFn logs a message from a func at level Warn on the standard logger.
func WarningFn(fn LogFunction) {
	std.WarningFn(fn)
}

// ErrorFn logs a message from a func at level Error on the standard logger.
func ErrorFn(fn LogFunction) {
	std.ErrorFn(fn)
}

// PanicFn logs a message from a func at level Panic on the standard logger.
func PanicFn(fn LogFunction) {
	std.PanicFn(fn)
}

// FatalFn logs a message from a func at level Fatal on the standard logger then the process will exit with status set to 1.
func FatalFn(fn LogFunction) {
	std.FatalFn(fn)
}

// Tracef logs a message at level Trace on the standard logger.
func Tracef(format string, args ...interface{}) {
	std.Tracef(format, args...)
}

// Debugf logs a message at level Debug on the standard logger.
func Debugf(format string, args ...interface{}) {
	std.Debugf(format, args...)
}

// Printf logs a message at level Info on the standard logger.
func Printf(format string, args ...interface{}) {
	std.Printf(format, args...)
}

// Infof logs a message at level Info on the standard logger.
func Infof(format string, args ...interface{}) {
	std.Infof(format, args...)
}

// Warnf logs a message at level Warn on the standard logger.
func Warnf(format string, args ...interface{}) {
	std.Warnf(format, args...)
}

// Warningf logs a message at level Warn on the standard logger.
func Warningf(format string, args ...interface{}) {
	std.Warningf(format, args...)
}

// Errorf logs a message at level Error on the standard logger.
func Errorf(format string, args ...interface{}) {
	std.Errorf(format, args...)
}

// Panicf logs a message at level Panic on the standard logger and then panics.
func Panicf(format string, args ...interface{}) {
	std.Panicf(format, args...)
}

// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
func Fatalf(format string, args ...interface{}) {
	std.Fatalf(format, args...)
}

// Traceln logs a message at level Trace on the standard logger.
func Traceln(args ...interface{}) {
	std.Traceln(args...)
}

// Debugln logs a message at level Debug on the standard logger.
func Debugln(args ...interface{}) {
	std.Debugln(args...)
}

// Println logs a message at level Info on the standard logger.
func Println(args ...interface{}) {
	std.Println(args...)
}

// Infoln logs a message at level Info on the standard logger.
func Infoln(args ...interface{}) {
	std.Infoln(args...)
}

// Warnln logs a message at level Warn on the standard logger.
func Warnln(args ...interface{}) {
	std.Warnln(args...)
}

// Warningln logs a message at level Warn on the standard logger.
func Warningln(args ...interface{}) {
	std.Warningln(args...)
}

// Errorln logs a message at level Error on the standard logger.
func Errorln(args ...interface{}) {
	std.Errorln(args...)
}

// Panicln logs a message at level Panic on the standard logger and then panics.
func Panicln(args ...interface{}) {
	std.Panicln(args...)
}

// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
func Fatalln(args ...interface{}) {
	std.Fatalln(args...)
}
9,626
0
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen/logrus/.golangci.yml
run:
  # do not run on test files yet
  tests: false

# all available settings of specific linters
linters-settings:
  errcheck:
    # report about not checking of errors in type assertions: `a := b.(MyStruct)`;
    # default is false: such cases aren't reported by default.
    check-type-assertions: false

    # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`;
    # default is false: such cases aren't reported by default.
    check-blank: false

  lll:
    line-length: 100
    tab-width: 4

  prealloc:
    simple: false
    range-loops: false
    for-loops: false

  whitespace:
    multi-if: false   # Enforces newlines (or comments) after every multi-line if statement
    multi-func: false # Enforces newlines (or comments) after every multi-line function signature

linters:
  enable:
    - megacheck
    - govet
  disable:
    - maligned
    - prealloc
  disable-all: false
  presets:
    - bugs
    - unused
  fast: false
9,627
0
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen/logrus/LICENSE
The MIT License (MIT) Copyright (c) 2014 Simon Eskildsen Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
9,628
0
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen/logrus/appveyor.yml
version: "{build}" platform: x64 clone_folder: c:\gopath\src\github.com\sirupsen\logrus environment: GOPATH: c:\gopath branches: only: - master install: - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - go version build_script: - go get -t - go test
9,629
0
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go
package logrus

import (
	"golang.org/x/sys/unix"
)

// isTerminal returns true if the given file descriptor is a terminal.
// On Solaris this probes the descriptor with the TCGETA ioctl: it succeeds
// only for terminal devices.
func isTerminal(fd int) bool {
	_, err := unix.IoctlGetTermio(fd, unix.TCGETA)
	return err == nil
}
9,630
0
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen/logrus/writer.go
package logrus

import (
	"bufio"
	"io"
	"runtime"
)

// Writer at INFO level. See WriterLevel for details.
func (logger *Logger) Writer() *io.PipeWriter {
	return logger.WriterLevel(InfoLevel)
}

// WriterLevel returns an io.Writer that can be used to write arbitrary text to
// the logger at the given log level. Each line written to the writer will be
// printed in the usual way using formatters and hooks. The writer is part of an
// io.Pipe and it is the callers responsibility to close the writer when done.
// This can be used to override the standard library logger easily.
func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
	return NewEntry(logger).WriterLevel(level)
}

// Writer returns a pipe writer that logs each written line at INFO level on
// this entry. See WriterLevel for details.
func (entry *Entry) Writer() *io.PipeWriter {
	return entry.WriterLevel(InfoLevel)
}

// WriterLevel returns a pipe writer whose lines are logged on this entry at
// the given level. A background goroutine drains the pipe; it exits when the
// writer is closed.
func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
	reader, writer := io.Pipe()

	var printFunc func(args ...interface{})

	// Pick the logging function matching the requested level; anything
	// unrecognized falls back to Print.
	switch level {
	case TraceLevel:
		printFunc = entry.Trace
	case DebugLevel:
		printFunc = entry.Debug
	case InfoLevel:
		printFunc = entry.Info
	case WarnLevel:
		printFunc = entry.Warn
	case ErrorLevel:
		printFunc = entry.Error
	case FatalLevel:
		printFunc = entry.Fatal
	case PanicLevel:
		printFunc = entry.Panic
	default:
		printFunc = entry.Print
	}

	// Drain the read side in the background, emitting one log entry per line.
	go entry.writerScanner(reader, printFunc)
	// Close the pipe (unblocking the goroutine above) even if the caller
	// drops the writer without calling Close.
	runtime.SetFinalizer(writer, writerFinalizer)

	return writer
}

// writerScanner reads the pipe line by line and forwards each line to
// printFunc, then closes the read side.
// NOTE(review): bufio.Scanner caps a single line at bufio.MaxScanTokenSize
// (64 KiB); a longer line stops the scan with an error — confirm that is
// acceptable for callers.
func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
	scanner := bufio.NewScanner(reader)
	for scanner.Scan() {
		printFunc(scanner.Text())
	}
	if err := scanner.Err(); err != nil {
		entry.Errorf("Error while reading from Writer: %s", err)
	}
	reader.Close()
}

// writerFinalizer closes the writer; registered via runtime.SetFinalizer in
// WriterLevel.
func writerFinalizer(writer *io.PipeWriter) {
	writer.Close()
}
9,631
0
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
// +build appengine

package logrus

import (
	"io"
)

// checkIfTerminal reports whether w is attached to a terminal. App Engine
// offers no terminal detection, so this build always answers true.
func checkIfTerminal(w io.Writer) bool {
	return true
}
9,632
0
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen/logrus/doc.go
/* Package logrus is a structured logger for Go, completely API compatible with the standard library logger. The simplest way to use Logrus is simply the package-level exported logger: package main import ( log "github.com/sirupsen/logrus" ) func main() { log.WithFields(log.Fields{ "animal": "walrus", "number": 1, "size": 10, }).Info("A walrus appears") } Output: time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 For a full guide visit https://github.com/sirupsen/logrus */ package logrus
9,633
0
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen/logrus/hooks.go
package logrus // A hook to be fired when logging on the logging levels returned from // `Levels()` on your implementation of the interface. Note that this is not // fired in a goroutine or a channel with workers, you should handle such // functionality yourself if your call is non-blocking and you don't wish for // the logging calls for levels returned from `Levels()` to block. type Hook interface { Levels() []Level Fire(*Entry) error } // Internal type for storing the hooks on a logger instance. type LevelHooks map[Level][]Hook // Add a hook to an instance of logger. This is called with // `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. func (hooks LevelHooks) Add(hook Hook) { for _, level := range hook.Levels() { hooks[level] = append(hooks[level], hook) } } // Fire all the hooks for the passed level. Used by `entry.log` to fire // appropriate hooks for a log entry. func (hooks LevelHooks) Fire(level Level, entry *Entry) error { for _, hook := range hooks[level] { if err := hook.Fire(entry); err != nil { return err } } return nil }
9,634
0
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go
// +build js nacl plan9

package logrus

import (
	"io"
)

// checkIfTerminal reports whether w is attached to a terminal. These
// platforms have no terminal support, so this build always answers false.
func checkIfTerminal(w io.Writer) bool {
	return false
}
9,635
0
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen/logrus/json_formatter.go
package logrus

import (
	"bytes"
	"encoding/json"
	"fmt"
	"runtime"
)

type fieldKey string

// FieldMap allows customization of the key names for default fields.
type FieldMap map[fieldKey]string

// resolve returns the user-configured replacement name for key, or the key
// itself when no mapping is present.
func (f FieldMap) resolve(key fieldKey) string {
	if k, ok := f[key]; ok {
		return k
	}

	return string(key)
}

// JSONFormatter formats logs into parsable json
type JSONFormatter struct {
	// TimestampFormat sets the format used for marshaling timestamps.
	TimestampFormat string

	// DisableTimestamp allows disabling automatic timestamps in output
	DisableTimestamp bool

	// DisableHTMLEscape allows disabling html escaping in output
	DisableHTMLEscape bool

	// DataKey allows users to put all the log entry parameters into a nested dictionary at a given key.
	DataKey string

	// FieldMap allows users to customize the names of keys for default fields.
	// As an example:
	// formatter := &JSONFormatter{
	//   	FieldMap: FieldMap{
	// 		 FieldKeyTime:  "@timestamp",
	// 		 FieldKeyLevel: "@level",
	// 		 FieldKeyMsg:   "@message",
	// 		 FieldKeyFunc:  "@caller",
	//    },
	// }
	FieldMap FieldMap

	// CallerPrettyfier can be set by the user to modify the content
	// of the function and file keys in the json data when ReportCaller is
	// activated. If any of the returned value is the empty string the
	// corresponding key will be removed from json fields.
	CallerPrettyfier func(*runtime.Frame) (function string, file string)

	// PrettyPrint will indent all json logs
	PrettyPrint bool
}

// Format renders a single log entry
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
	data := make(Fields, len(entry.Data)+4)
	for k, v := range entry.Data {
		switch v := v.(type) {
		case error:
			// Otherwise errors are ignored by `encoding/json`
			// https://github.com/sirupsen/logrus/issues/137
			data[k] = v.Error()
		default:
			data[k] = v
		}
	}

	// Optionally nest all user fields under DataKey so they cannot collide
	// with the default keys added below.
	if f.DataKey != "" {
		newData := make(Fields, 4)
		newData[f.DataKey] = data
		data = newData
	}

	prefixFieldClashes(data, f.FieldMap, entry.HasCaller())

	timestampFormat := f.TimestampFormat
	if timestampFormat == "" {
		timestampFormat = defaultTimestampFormat
	}

	if entry.err != "" {
		data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err
	}
	if !f.DisableTimestamp {
		data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
	}
	data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
	data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
	if entry.HasCaller() {
		funcVal := entry.Caller.Function
		fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
		if f.CallerPrettyfier != nil {
			funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
		}
		// Empty strings returned by CallerPrettyfier suppress the key.
		if funcVal != "" {
			data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal
		}
		if fileVal != "" {
			data[f.FieldMap.resolve(FieldKeyFile)] = fileVal
		}
	}

	// Reuse the entry's scratch buffer when available.
	var b *bytes.Buffer
	if entry.Buffer != nil {
		b = entry.Buffer
	} else {
		b = &bytes.Buffer{}
	}

	encoder := json.NewEncoder(b)
	encoder.SetEscapeHTML(!f.DisableHTMLEscape)
	if f.PrettyPrint {
		encoder.SetIndent("", "  ")
	}
	if err := encoder.Encode(data); err != nil {
		return nil, fmt.Errorf("failed to marshal fields to JSON, %v", err)
	}

	return b.Bytes(), nil
}
9,636
0
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen/logrus/go.sum
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
9,637
0
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen
kubeflow_public_repos/fate-operator/vendor/github.com/sirupsen/logrus/.travis.yml
language: go go_import_path: github.com/sirupsen/logrus git: depth: 1 env: - GO111MODULE=on go: [1.13.x, 1.14.x] os: [linux, osx] install: - ./travis/install.sh script: - ./travis/cross_build.sh - ./travis/lint.sh - export GOMAXPROCS=4 - export GORACE=halt_on_error=1 - go test -race -v ./... - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then go test -race -v -tags appengine ./... ; fi
9,638
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/LICENSE
The MIT License (MIT) Copyright (c) 2016 Sergey Kamardin Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
9,639
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/glob.go
package glob

import (
	"github.com/gobwas/glob/compiler"
	"github.com/gobwas/glob/syntax"
)

// Glob represents compiled glob pattern.
type Glob interface {
	Match(string) bool
}

// Compile creates Glob for given pattern and separator runes (if any are present after pattern).
// The pattern syntax is:
//
//    pattern:
//        { term }
//
//    term:
//        `*`         matches any sequence of non-separator characters
//        `**`        matches any sequence of characters
//        `?`         matches any single non-separator character
//        `[` [ `!` ] { character-range } `]`
//                    character class (must be non-empty)
//        `{` pattern-list `}`
//                    pattern alternatives
//        c           matches character c (c != `*`, `**`, `?`, `\`, `[`, `{`, `}`)
//        `\` c       matches character c
//
//    character-range:
//        c           matches character c (c != `\\`, `-`, `]`)
//        `\` c       matches character c
//        lo `-` hi   matches character c for lo <= c <= hi
//
//    pattern-list:
//        pattern { `,` pattern }
//                    comma-separated (without spaces) patterns
//
func Compile(pattern string, separators ...rune) (Glob, error) {
	ast, err := syntax.Parse(pattern)
	if err != nil {
		return nil, err
	}

	matcher, err := compiler.Compile(ast, separators)
	if err != nil {
		return nil, err
	}

	return matcher, nil
}

// MustCompile is the same as Compile, except that if Compile returns error, this will panic
func MustCompile(pattern string, separators ...rune) Glob {
	g, err := Compile(pattern, separators...)
	if err != nil {
		panic(err)
	}

	return g
}

// QuoteMeta returns a string that quotes all glob pattern meta characters
// inside the argument text; For example, QuoteMeta(`{foo*}`) returns `\{foo\*\}`.
func QuoteMeta(s string) string {
	b := make([]byte, 2*len(s))
	// a byte loop is correct because all meta characters are ASCII
	j := 0
	for i := 0; i < len(s); i++ {
		if syntax.Special(s[i]) {
			b[j] = '\\'
			j++
		}
		b[j] = s[i]
		j++
	}
	return string(b[0:j])
}
9,640
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/readme.md
# glob.[go](https://golang.org) [![GoDoc][godoc-image]][godoc-url] [![Build Status][travis-image]][travis-url] > Go Globbing Library. ## Install ```shell go get github.com/gobwas/glob ``` ## Example ```go package main import "github.com/gobwas/glob" func main() { var g glob.Glob // create simple glob g = glob.MustCompile("*.github.com") g.Match("api.github.com") // true // quote meta characters and then create simple glob g = glob.MustCompile(glob.QuoteMeta("*.github.com")) g.Match("*.github.com") // true // create new glob with set of delimiters as ["."] g = glob.MustCompile("api.*.com", '.') g.Match("api.github.com") // true g.Match("api.gi.hub.com") // false // create new glob with set of delimiters as ["."] // but now with super wildcard g = glob.MustCompile("api.**.com", '.') g.Match("api.github.com") // true g.Match("api.gi.hub.com") // true // create glob with single symbol wildcard g = glob.MustCompile("?at") g.Match("cat") // true g.Match("fat") // true g.Match("at") // false // create glob with single symbol wildcard and delimiters ['f'] g = glob.MustCompile("?at", 'f') g.Match("cat") // true g.Match("fat") // false g.Match("at") // false // create glob with character-list matchers g = glob.MustCompile("[abc]at") g.Match("cat") // true g.Match("bat") // true g.Match("fat") // false g.Match("at") // false // create glob with character-list matchers g = glob.MustCompile("[!abc]at") g.Match("cat") // false g.Match("bat") // false g.Match("fat") // true g.Match("at") // false // create glob with character-range matchers g = glob.MustCompile("[a-c]at") g.Match("cat") // true g.Match("bat") // true g.Match("fat") // false g.Match("at") // false // create glob with character-range matchers g = glob.MustCompile("[!a-c]at") g.Match("cat") // false g.Match("bat") // false g.Match("fat") // true g.Match("at") // false // create glob with pattern-alternatives list g = glob.MustCompile("{cat,bat,[fr]at}") g.Match("cat") // true g.Match("bat") // true g.Match("fat") // 
true g.Match("rat") // true g.Match("at") // false g.Match("zat") // false } ``` ## Performance This library is created for compile-once patterns. This means, that compilation could take time, but strings matching is done faster, than in case when always parsing template. If you will not use compiled `glob.Glob` object, and do `g := glob.MustCompile(pattern); g.Match(...)` every time, then your code will be much more slower. Run `go test -bench=.` from source root to see the benchmarks: Pattern | Fixture | Match | Speed (ns/op) --------|---------|-------|-------------- `[a-z][!a-x]*cat*[h][!b]*eyes*` | `my cat has very bright eyes` | `true` | 432 `[a-z][!a-x]*cat*[h][!b]*eyes*` | `my dog has very bright eyes` | `false` | 199 `https://*.google.*` | `https://account.google.com` | `true` | 96 `https://*.google.*` | `https://google.com` | `false` | 66 `{https://*.google.*,*yandex.*,*yahoo.*,*mail.ru}` | `http://yahoo.com` | `true` | 163 `{https://*.google.*,*yandex.*,*yahoo.*,*mail.ru}` | `http://google.com` | `false` | 197 `{https://*gobwas.com,http://exclude.gobwas.com}` | `https://safe.gobwas.com` | `true` | 22 `{https://*gobwas.com,http://exclude.gobwas.com}` | `http://safe.gobwas.com` | `false` | 24 `abc*` | `abcdef` | `true` | 8.15 `abc*` | `af` | `false` | 5.68 `*def` | `abcdef` | `true` | 8.84 `*def` | `af` | `false` | 5.74 `ab*ef` | `abcdef` | `true` | 15.2 `ab*ef` | `af` | `false` | 10.4 The same things with `regexp` package: Pattern | Fixture | Match | Speed (ns/op) --------|---------|-------|-------------- `^[a-z][^a-x].*cat.*[h][^b].*eyes.*$` | `my cat has very bright eyes` | `true` | 2553 `^[a-z][^a-x].*cat.*[h][^b].*eyes.*$` | `my dog has very bright eyes` | `false` | 1383 `^https:\/\/.*\.google\..*$` | `https://account.google.com` | `true` | 1205 `^https:\/\/.*\.google\..*$` | `https://google.com` | `false` | 767 `^(https:\/\/.*\.google\..*|.*yandex\..*|.*yahoo\..*|.*mail\.ru)$` | `http://yahoo.com` | `true` | 1435 
`^(https:\/\/.*\.google\..*|.*yandex\..*|.*yahoo\..*|.*mail\.ru)$` | `http://google.com` | `false` | 1674
`^(https:\/\/.*gobwas\.com|http://exclude.gobwas.com)$` | `https://safe.gobwas.com` | `true` | 1039
`^(https:\/\/.*gobwas\.com|http://exclude.gobwas.com)$` | `http://safe.gobwas.com` | `false` | 272
`^abc.*$` | `abcdef` | `true` | 237
`^abc.*$` | `af` | `false` | 100
`^.*def$` | `abcdef` | `true` | 464
`^.*def$` | `af` | `false` | 265
`^ab.*ef$` | `abcdef` | `true` | 375
`^ab.*ef$` | `af` | `false` | 145

[godoc-image]: https://godoc.org/github.com/gobwas/glob?status.svg
[godoc-url]: https://godoc.org/github.com/gobwas/glob
[travis-image]: https://travis-ci.org/gobwas/glob.svg?branch=master
[travis-url]: https://travis-ci.org/gobwas/glob

## Syntax

Syntax is inspired by [standard wildcards](http://tldp.org/LDP/GNU-Linux-Tools-Summary/html/x11655.htm),
except that `**` (the "super-asterisk") is not sensitive to separators.
9,641
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/.travis.yml
sudo: false language: go go: - 1.5.3 script: - go test -v ./...
9,642
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/bench.sh
#! /bin/bash

# bench <git-ref> <bench-regexp>
# Runs the benchmarks matching $2 at git ref $1 and caches the output in
# /tmp/<ref>-<bench>.bench so repeated comparisons are cheap.
bench() {
    filename="/tmp/$1-$2.bench"
    # Reuse a previously recorded run for this ref/benchmark pair.
    if test -e "${filename}"; then
        echo "Already exists ${filename}"
    else
        backup=`git rev-parse --abbrev-ref HEAD`
        git checkout $1
        echo -n "Creating ${filename}... "
        go test ./... -run=NONE -bench=$2 > "${filename}" -benchmem
        echo "OK"
        # Restore the branch the user started on.
        git checkout ${backup}
        sleep 5
    fi
}

to=$1
current=`git rev-parse --abbrev-ref HEAD`

bench ${to} $2
bench ${current} $2

# Compare the two cached runs; $3 passes extra flags to benchcmp.
benchcmp $3 "/tmp/${to}-$2.bench" "/tmp/${current}-$2.bench"
9,643
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/util
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/util/strings/strings.go
package strings

import (
	"strings"
	"unicode/utf8"
)

// IndexAnyRunes returns the byte index in s of the first rune from rs that is
// found, or -1 when none occurs. Note that rs is scanned in order: the result
// is the index of the first listed rune that appears anywhere in s, not the
// smallest index over all runes.
func IndexAnyRunes(s string, rs []rune) int {
	for _, r := range rs {
		if i := strings.IndexRune(s, r); i != -1 {
			return i
		}
	}

	return -1
}

// LastIndexAnyRunes returns the byte index of the LAST occurrence in s of the
// first rune from rs that occurs at all, or -1 when none occurs. Like
// IndexAnyRunes, rs is scanned in order.
func LastIndexAnyRunes(s string, rs []rune) int {
	for _, r := range rs {
		i := -1
		if 0 <= r && r < utf8.RuneSelf {
			// ASCII fast path: scan bytes from the end.
			i = strings.LastIndexByte(s, byte(r))
		} else {
			// Multi-byte rune: search for its UTF-8 encoding from the end.
			// BUG FIX: the previous loop called strings.IndexRune on the whole
			// string each iteration, so it returned the FIRST occurrence
			// instead of the last for non-ASCII runes.
			i = strings.LastIndex(s, string(r))
		}
		if i != -1 {
			return i
		}
	}
	return -1
}
9,644
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/util
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/util/runes/runes.go
package runes func Index(s, needle []rune) int { ls, ln := len(s), len(needle) switch { case ln == 0: return 0 case ln == 1: return IndexRune(s, needle[0]) case ln == ls: if Equal(s, needle) { return 0 } return -1 case ln > ls: return -1 } head: for i := 0; i < ls && ls-i >= ln; i++ { for y := 0; y < ln; y++ { if s[i+y] != needle[y] { continue head } } return i } return -1 } func LastIndex(s, needle []rune) int { ls, ln := len(s), len(needle) switch { case ln == 0: if ls == 0 { return 0 } return ls case ln == 1: return IndexLastRune(s, needle[0]) case ln == ls: if Equal(s, needle) { return 0 } return -1 case ln > ls: return -1 } head: for i := ls - 1; i >= 0 && i >= ln; i-- { for y := ln - 1; y >= 0; y-- { if s[i-(ln-y-1)] != needle[y] { continue head } } return i - ln + 1 } return -1 } // IndexAny returns the index of the first instance of any Unicode code point // from chars in s, or -1 if no Unicode code point from chars is present in s. func IndexAny(s, chars []rune) int { if len(chars) > 0 { for i, c := range s { for _, m := range chars { if c == m { return i } } } } return -1 } func Contains(s, needle []rune) bool { return Index(s, needle) >= 0 } func Max(s []rune) (max rune) { for _, r := range s { if r > max { max = r } } return } func Min(s []rune) rune { min := rune(-1) for _, r := range s { if min == -1 { min = r continue } if r < min { min = r } } return min } func IndexRune(s []rune, r rune) int { for i, c := range s { if c == r { return i } } return -1 } func IndexLastRune(s []rune, r rune) int { for i := len(s) - 1; i >= 0; i-- { if s[i] == r { return i } } return -1 } func Equal(a, b []rune) bool { if len(a) == len(b) { for i := 0; i < len(a); i++ { if a[i] != b[i] { return false } } return true } return false } // HasPrefix tests whether the string s begins with prefix. func HasPrefix(s, prefix []rune) bool { return len(s) >= len(prefix) && Equal(s[0:len(prefix)], prefix) } // HasSuffix tests whether the string s ends with suffix. 
func HasSuffix(s, suffix []rune) bool { return len(s) >= len(suffix) && Equal(s[len(s)-len(suffix):], suffix) }
9,645
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/syntax/syntax.go
package syntax

import (
	"github.com/gobwas/glob/syntax/ast"
	"github.com/gobwas/glob/syntax/lexer"
)

// Parse lexes and parses the glob pattern s into its AST.
func Parse(s string) (*ast.Node, error) {
	return ast.Parse(lexer.NewLexer(s))
}

// Special reports whether b is a glob meta character (one that QuoteMeta
// would escape); it delegates to the lexer's table.
func Special(b byte) bool {
	return lexer.Special(b)
}
9,646
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/syntax
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/syntax/ast/ast.go
package ast

import (
	"bytes"
	"fmt"
)

// Node is a single vertex of a parsed glob pattern tree. Value holds the
// Kind-specific payload (List, Range, Text or nil).
type Node struct {
	Parent   *Node
	Children []*Node
	Value    interface{}
	Kind     Kind
}

// NewNode builds a Node of kind k with value v and attaches any given
// children (setting their Parent pointers).
func NewNode(k Kind, v interface{}, ch ...*Node) *Node {
	n := &Node{
		Kind:  k,
		Value: v,
	}
	for _, c := range ch {
		Insert(n, c)
	}
	return n
}

// Equal reports whether a and b have the same kind, value and (recursively)
// the same children. Parent pointers are not compared.
func (a *Node) Equal(b *Node) bool {
	if a.Kind != b.Kind {
		return false
	}
	if a.Value != b.Value {
		return false
	}
	if len(a.Children) != len(b.Children) {
		return false
	}
	for i, c := range a.Children {
		if !c.Equal(b.Children[i]) {
			return false
		}
	}
	return true
}

// String renders the node (and its subtree) in a compact debug form, e.g.
// `Pattern [Text =abc, Any]`.
func (a *Node) String() string {
	var buf bytes.Buffer
	buf.WriteString(a.Kind.String())
	if a.Value != nil {
		buf.WriteString(" =")
		buf.WriteString(fmt.Sprintf("%v", a.Value))
	}
	if len(a.Children) > 0 {
		buf.WriteString(" [")
		for i, c := range a.Children {
			if i > 0 {
				buf.WriteString(", ")
			}
			buf.WriteString(c.String())
		}
		buf.WriteString("]")
	}
	return buf.String()
}

// Insert appends children to parent and fixes up their Parent pointers.
func Insert(parent *Node, children ...*Node) {
	parent.Children = append(parent.Children, children...)
	for _, ch := range children {
		ch.Parent = parent
	}
}

// List is the payload of a KindList node: a character class like `[abc]`
// (Not marks the negated form `[!abc]`).
type List struct {
	Not   bool
	Chars string
}

// Range is the payload of a KindRange node: a class like `[a-z]`
// (Not marks the negated form `[!a-z]`).
type Range struct {
	Not    bool
	Lo, Hi rune
}

// Text is the payload of a KindText node: a literal run of characters.
type Text struct {
	Text string
}

// Kind discriminates the node types produced by the parser.
type Kind int

const (
	KindNothing Kind = iota
	KindPattern
	KindList
	KindRange
	KindText
	KindAny
	KindSuper
	KindSingle
	KindAnyOf
)

// String returns a human-readable name for k; unknown kinds yield "".
func (k Kind) String() string {
	switch k {
	case KindNothing:
		return "Nothing"
	case KindPattern:
		return "Pattern"
	case KindList:
		return "List"
	case KindRange:
		return "Range"
	case KindText:
		return "Text"
	case KindAny:
		return "Any"
	case KindSuper:
		return "Super"
	case KindSingle:
		return "Single"
	case KindAnyOf:
		return "AnyOf"
	default:
		return ""
	}
}
9,647
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/syntax
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/syntax/ast/parser.go
package ast

import (
	"errors"
	"fmt"
	"github.com/gobwas/glob/syntax/lexer"
	"unicode/utf8"
)

// Lexer is the token source consumed by Parse.
type Lexer interface {
	Next() lexer.Token
}

// parseFn is one state of the recursive-descent state machine: it consumes
// tokens, mutates the tree, and returns the next state (nil when done).
type parseFn func(*Node, Lexer) (parseFn, *Node, error)

// Parse drives the state machine over the lexer's tokens and returns the
// root KindPattern node of the resulting AST.
func Parse(lexer Lexer) (*Node, error) {
	var parser parseFn

	root := NewNode(KindPattern, nil)

	var (
		tree *Node
		err  error
	)
	for parser, tree = parserMain, root; parser != nil; {
		parser, tree, err = parser(tree, lexer)
		if err != nil {
			return nil, err
		}
	}

	return root, nil
}

// parserMain handles the top level of a pattern: literals, wildcards, the
// opening of ranges (`[`) and alternative groups (`{`), separators (`,`)
// and group closers (`}`).
func parserMain(tree *Node, lex Lexer) (parseFn, *Node, error) {
	for {
		token := lex.Next()
		switch token.Type {
		case lexer.EOF:
			return nil, tree, nil

		case lexer.Error:
			return nil, tree, errors.New(token.Raw)

		case lexer.Text:
			Insert(tree, NewNode(KindText, Text{token.Raw}))
			return parserMain, tree, nil

		case lexer.Any:
			Insert(tree, NewNode(KindAny, nil))
			return parserMain, tree, nil

		case lexer.Super:
			Insert(tree, NewNode(KindSuper, nil))
			return parserMain, tree, nil

		case lexer.Single:
			Insert(tree, NewNode(KindSingle, nil))
			return parserMain, tree, nil

		case lexer.RangeOpen:
			return parserRange, tree, nil

		case lexer.TermsOpen:
			// `{` opens an AnyOf node whose first alternative is a fresh
			// Pattern; parsing continues inside that pattern.
			a := NewNode(KindAnyOf, nil)
			Insert(tree, a)

			p := NewNode(KindPattern, nil)
			Insert(a, p)

			return parserMain, p, nil

		case lexer.Separator:
			// `,` starts the next alternative under the enclosing AnyOf.
			p := NewNode(KindPattern, nil)
			Insert(tree.Parent, p)

			return parserMain, p, nil

		case lexer.TermsClose:
			// `}` pops out of the Pattern and its AnyOf parent.
			return parserMain, tree.Parent.Parent, nil

		default:
			return nil, tree, fmt.Errorf("unexpected token: %s", token)
		}
	}
	return nil, tree, fmt.Errorf("unknown error")
}

// parserRange consumes the interior of `[...]` and inserts either a
// KindRange (`[a-z]`) or a KindList (`[abc]`) node, honoring a leading `!`.
func parserRange(tree *Node, lex Lexer) (parseFn, *Node, error) {
	var (
		not   bool
		lo    rune
		hi    rune
		chars string
	)
	for {
		token := lex.Next()
		switch token.Type {
		case lexer.EOF:
			return nil, tree, errors.New("unexpected end")

		case lexer.Error:
			return nil, tree, errors.New(token.Raw)

		case lexer.Not:
			not = true

		case lexer.RangeLo:
			r, w := utf8.DecodeRuneInString(token.Raw)
			if len(token.Raw) > w {
				return nil, tree, fmt.Errorf("unexpected length of lo character")
			}
			lo = r

		case lexer.RangeBetween:
			//

		case lexer.RangeHi:
			r, w := utf8.DecodeRuneInString(token.Raw)
			if len(token.Raw) > w {
				// BUG FIX: this message previously said "lo character",
				// copy-pasted from the RangeLo case above.
				return nil, tree, fmt.Errorf("unexpected length of hi character")
			}

			hi = r

			if hi < lo {
				return nil, tree, fmt.Errorf("hi character '%s' should be greater than lo '%s'", string(hi), string(lo))
			}

		case lexer.Text:
			chars = token.Raw

		case lexer.RangeClose:
			// Exactly one of the two forms must have been seen.
			isRange := lo != 0 && hi != 0
			isChars := chars != ""

			if isChars == isRange {
				return nil, tree, fmt.Errorf("could not parse range")
			}

			if isRange {
				Insert(tree, NewNode(KindRange, Range{
					Lo:  lo,
					Hi:  hi,
					Not: not,
				}))
			} else {
				Insert(tree, NewNode(KindList, List{
					Chars: chars,
					Not:   not,
				}))
			}

			return parserMain, tree, nil
		}
	}
}
9,648
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/syntax
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/syntax/lexer/token.go
package lexer

import "fmt"

// TokenType identifies the lexical class of a Token.
type TokenType int

const (
	EOF TokenType = iota
	Error
	Text
	Char
	Any
	Super
	Single
	Not
	Separator
	RangeOpen
	RangeClose
	RangeLo
	RangeHi
	RangeBetween
	TermsOpen
	TermsClose
)

// String returns the lowercase name of the token type; values outside the
// declared range yield "undef".
func (tt TokenType) String() string {
	// The table is indexed by the iota order of the constants above.
	names := [...]string{
		"eof",
		"error",
		"text",
		"char",
		"any",
		"super",
		"single",
		"not",
		"separator",
		"range_open",
		"range_close",
		"range_lo",
		"range_hi",
		"range_between",
		"terms_open",
		"terms_close",
	}
	if tt < 0 || int(tt) >= len(names) {
		return "undef"
	}
	return names[tt]
}

// Token is a single lexeme: its type plus the raw text it was built from.
type Token struct {
	Type TokenType
	Raw  string
}

// String renders the token for debugging, e.g. `text<"abc">`.
func (t Token) String() string {
	return fmt.Sprintf("%v<%q>", t.Type, t.Raw)
}
9,649
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/syntax
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go
package lexer

import (
	"bytes"
	"fmt"
	"github.com/gobwas/glob/util/runes"
	"unicode/utf8"
)

// Glob syntax characters recognized by the lexer.
const (
	char_any           = '*'
	char_comma         = ','
	char_single        = '?'
	char_escape        = '\\'
	char_range_open    = '['
	char_range_close   = ']'
	char_terms_open    = '{'
	char_terms_close   = '}'
	char_range_not     = '!'
	char_range_between = '-'
)

// specials lists the characters that must be escaped to appear literally.
var specials = []byte{
	char_any,
	char_single,
	char_escape,
	char_range_open,
	char_range_close,
	char_terms_open,
	char_terms_close,
}

// Special reports whether c is a glob syntax character.
func Special(c byte) bool {
	return bytes.IndexByte(specials, c) != -1
}

// tokens is a FIFO queue of lexed tokens awaiting delivery via Next.
type tokens []Token

// shift pops and returns the first queued token.
func (i *tokens) shift() (ret Token) {
	ret = (*i)[0]
	copy(*i, (*i)[1:])
	*i = (*i)[:len(*i)-1]
	return
}

// push appends a token to the queue.
func (i *tokens) push(v Token) {
	*i = append(*i, v)
}

// empty reports whether the queue holds no tokens.
func (i *tokens) empty() bool {
	return len(*i) == 0
}

// eof is the sentinel rune returned by peek/read at end of input.
var eof rune = 0

// lexer scans a glob pattern string into a stream of Tokens.
type lexer struct {
	data string // full input; the unread remainder is data[pos:]
	pos  int    // byte offset of the next rune to decode
	err  error  // sticky error; once set, Next always reports it

	tokens     tokens // lexed tokens not yet returned by Next
	termsLevel int    // current nesting depth of "{...}" groups

	lastRune     rune // most recently read rune, kept for unread
	lastRuneSize int  // byte width of lastRune
	hasRune      bool // true while lastRune has been pushed back
}

// NewLexer returns a lexer over source.
func NewLexer(source string) *lexer {
	l := &lexer{
		data:   source,
		tokens: tokens(make([]Token, 0, 4)),
	}
	return l
}

// Next returns the next token, lexing more of the input on demand.
// A sticky error is reported as an Error token on every call.
func (l *lexer) Next() Token {
	if l.err != nil {
		return Token{Error, l.err.Error()}
	}
	if !l.tokens.empty() {
		return l.tokens.shift()
	}

	l.fetchItem()
	return l.Next()
}

// peek decodes the next rune without consuming it; returns eof at end of
// input or (after recording a sticky error) on invalid UTF-8.
func (l *lexer) peek() (r rune, w int) {
	if l.pos == len(l.data) {
		return eof, 0
	}

	r, w = utf8.DecodeRuneInString(l.data[l.pos:])
	if r == utf8.RuneError {
		l.errorf("could not read rune")
		r = eof
		w = 0
	}

	return
}

// read consumes and returns the next rune, honoring a pending unread.
func (l *lexer) read() rune {
	if l.hasRune {
		l.hasRune = false
		l.seek(l.lastRuneSize)
		return l.lastRune
	}

	r, s := l.peek()
	l.seek(s)

	l.lastRune = r
	l.lastRuneSize = s

	return r
}

// seek advances the read position by w bytes (w may be negative).
func (l *lexer) seek(w int) {
	l.pos += w
}

// unread pushes the last read rune back onto the input; only a single
// level of lookback is supported.
func (l *lexer) unread() {
	if l.hasRune {
		l.errorf("could not unread rune")
		return
	}
	l.seek(-l.lastRuneSize)
	l.hasRune = true
}

// errorf records a sticky lexing error.
func (l *lexer) errorf(f string, v ...interface{}) {
	l.err = fmt.Errorf(f, v...)
}

// inTerms reports whether the lexer is inside a "{...}" group.
func (l *lexer) inTerms() bool {
	return l.termsLevel > 0
}

// termsEnter records entering a "{...}" group.
func (l *lexer) termsEnter() {
	l.termsLevel++
}

// termsLeave records leaving a "{...}" group.
func (l *lexer) termsLeave() {
	l.termsLevel--
}

// Characters that terminate a run of literal text; inside "{...}" the
// group terminators "}" and "," also break text.
var inTextBreakers = []rune{char_single, char_any, char_range_open, char_terms_open}
var inTermsBreakers = append(inTextBreakers, char_terms_close, char_comma)

// fetchItem lexes one syntactic item and queues its token(s).
func (l *lexer) fetchItem() {
	r := l.read()
	switch {
	case r == eof:
		l.tokens.push(Token{EOF, ""})

	case r == char_terms_open:
		l.termsEnter()
		l.tokens.push(Token{TermsOpen, string(r)})

	case r == char_comma && l.inTerms():
		l.tokens.push(Token{Separator, string(r)})

	case r == char_terms_close && l.inTerms():
		l.tokens.push(Token{TermsClose, string(r)})
		l.termsLeave()

	case r == char_range_open:
		l.tokens.push(Token{RangeOpen, string(r)})
		l.fetchRange()

	case r == char_single:
		l.tokens.push(Token{Single, string(r)})

	case r == char_any:
		// "**" lexes as Super; a lone "*" as Any.
		if l.read() == char_any {
			l.tokens.push(Token{Super, string(r) + string(r)})
		} else {
			l.unread()
			l.tokens.push(Token{Any, string(r)})
		}

	default:
		l.unread()

		var breakers []rune
		if l.inTerms() {
			breakers = inTermsBreakers
		} else {
			breakers = inTextBreakers
		}

		l.fetchText(breakers)
	}
}

// fetchRange lexes the body of a "[...]" range expression, emitting
// Not / RangeLo / RangeBetween / RangeHi / Text / RangeClose tokens.
func (l *lexer) fetchRange() {
	var wantHi bool
	var wantClose bool
	var seenNot bool
	for {
		r := l.read()
		if r == eof {
			l.errorf("unexpected end of input")
			return
		}

		if wantClose {
			if r != char_range_close {
				l.errorf("expected close range character")
			} else {
				l.tokens.push(Token{RangeClose, string(r)})
			}
			return
		}

		if wantHi {
			l.tokens.push(Token{RangeHi, string(r)})
			wantClose = true
			continue
		}

		// "!" negation is only honored once, at the start of the range
		if !seenNot && r == char_range_not {
			l.tokens.push(Token{Not, string(r)})
			seenNot = true
			continue
		}

		// lookahead for "-": r is the lo bound of a lo-hi range
		if n, w := l.peek(); n == char_range_between {
			l.seek(w)

			l.tokens.push(Token{RangeLo, string(r)})
			l.tokens.push(Token{RangeBetween, string(n)})

			wantHi = true
			continue
		}

		l.unread() // unread first peek and fetch as text
		l.fetchText([]rune{char_range_close})
		wantClose = true
	}
}

// fetchText lexes a run of literal characters, honoring backslash
// escapes, until eof or one of breakers is reached. An empty run emits
// no token.
func (l *lexer) fetchText(breakers []rune) {
	var data []rune
	var escaped bool

reading:
	for {
		r := l.read()
		if r == eof {
			break
		}

		if !escaped {
			if r == char_escape {
				escaped = true
				continue
			}

			if runes.IndexRune(breakers, r) != -1 {
				l.unread()
				break reading
			}
		}

		escaped = false
		data = append(data, r)
	}

	if len(data) > 0 {
		l.tokens.push(Token{Text, string(data)})
	}
}
9,650
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/match/range.go
package match

import (
	"fmt"
	"unicode/utf8"
)

// Range matches exactly one rune falling inside (or, when Not is set,
// outside) the inclusive interval [Lo, Hi].
type Range struct {
	Lo, Hi rune
	Not    bool
}

// NewRange builds a Range matcher over [lo, hi], optionally negated.
func NewRange(lo, hi rune, not bool) Range {
	return Range{lo, hi, not}
}

// Len reports that a Range always consumes a single rune.
func (rng Range) Len() int {
	return lenOne
}

// Match reports whether s is exactly one rune satisfying the range test.
func (rng Range) Match(s string) bool {
	r, w := utf8.DecodeRuneInString(s)
	if w < len(s) {
		// s holds more than one rune
		return false
	}
	hit := rng.Lo <= r && r <= rng.Hi
	return hit != rng.Not
}

// Index scans s for the first rune satisfying the range test and returns
// its byte offset together with a segment for that rune's width.
func (rng Range) Index(s string) (int, []int) {
	for i, r := range s {
		inside := rng.Lo <= r && r <= rng.Hi
		if inside != rng.Not {
			return i, segmentsByRuneLength[utf8.RuneLen(r)]
		}
	}
	return -1, nil
}

// String renders the matcher, e.g. <range:![a,z]>.
func (rng Range) String() string {
	negation := ""
	if rng.Not {
		negation = "!"
	}
	return fmt.Sprintf("<range:%s[%s,%s]>", negation, string(rng.Lo), string(rng.Hi))
}
9,651
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/match/match.go
package match // todo common table of rune's length import ( "fmt" "strings" ) const lenOne = 1 const lenZero = 0 const lenNo = -1 type Matcher interface { Match(string) bool Index(string) (int, []int) Len() int String() string } type Matchers []Matcher func (m Matchers) String() string { var s []string for _, matcher := range m { s = append(s, fmt.Sprint(matcher)) } return fmt.Sprintf("%s", strings.Join(s, ",")) } // appendMerge merges and sorts given already SORTED and UNIQUE segments. func appendMerge(target, sub []int) []int { lt, ls := len(target), len(sub) out := make([]int, 0, lt+ls) for x, y := 0, 0; x < lt || y < ls; { if x >= lt { out = append(out, sub[y:]...) break } if y >= ls { out = append(out, target[x:]...) break } xValue := target[x] yValue := sub[y] switch { case xValue == yValue: out = append(out, xValue) x++ y++ case xValue < yValue: out = append(out, xValue) x++ case yValue < xValue: out = append(out, yValue) y++ } } target = append(target[:0], out...) return target } func reverseSegments(input []int) { l := len(input) m := l / 2 for i := 0; i < m; i++ { input[i], input[l-i-1] = input[l-i-1], input[i] } }
9,652
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/match/contains.go
package match

import (
	"fmt"
	"strings"
)

// Contains matches strings that contain (or, when Not is set, do not
// contain) the fixed substring Needle.
type Contains struct {
	Needle string
	Not    bool
}

// NewContains builds a Contains matcher.
func NewContains(needle string, not bool) Contains {
	return Contains{needle, not}
}

// Match reports whether s contains Needle, inverted by Not.
func (self Contains) Match(s string) bool {
	return strings.Contains(s, self.Needle) != self.Not
}

// Index returns a match at offset 0 with all allowed end offsets.
// In the positive case the match must include the needle, so segment
// offsets start just past its first occurrence; in the negated case the
// match may only extend up to (not including) the needle.
func (self Contains) Index(s string) (int, []int) {
	var offset int

	idx := strings.Index(s, self.Needle)

	if !self.Not {
		if idx == -1 {
			return -1, nil
		}

		offset = idx + len(self.Needle)
		if len(s) <= offset {
			// the needle ends the string: a single end offset
			return 0, []int{offset}
		}
		s = s[offset:]
	} else if idx != -1 {
		// negated: ends may only fall before the needle
		s = s[:idx]
	}

	// every byte position in the remaining span is a valid end
	segments := acquireSegments(len(s) + 1)
	for i := range s {
		segments = append(segments, offset+i)
	}

	return 0, append(segments, offset+len(s))
}

// Len reports a variable match length.
func (self Contains) Len() int {
	return lenNo
}

// String renders the matcher, e.g. <contains:![abc]>.
func (self Contains) String() string {
	var not string
	if self.Not {
		not = "!"
	}
	return fmt.Sprintf("<contains:%s[%s]>", not, self.Needle)
}
9,653
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/match/suffix.go
package match

import (
	"fmt"
	"strings"
)

// Suffix matches any string ending with the fixed suffix.
type Suffix struct {
	Suffix string
}

// NewSuffix constructs a Suffix matcher.
func NewSuffix(s string) Suffix {
	return Suffix{s}
}

// Len reports a variable match length.
func (sx Suffix) Len() int {
	return lenNo
}

// Match reports whether s ends with the suffix text.
func (sx Suffix) Match(s string) bool {
	return strings.HasSuffix(s, sx.Suffix)
}

// Index locates the first occurrence of the suffix text and returns a
// match starting at 0 with a single segment ending just past it.
func (sx Suffix) Index(s string) (int, []int) {
	pos := strings.Index(s, sx.Suffix)
	if pos < 0 {
		return -1, nil
	}
	return 0, []int{pos + len(sx.Suffix)}
}

// String renders the matcher, e.g. <suffix:.go>.
func (sx Suffix) String() string {
	return fmt.Sprintf("<suffix:%s>", sx.Suffix)
}
9,654
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/match/nothing.go
package match import ( "fmt" ) type Nothing struct{} func NewNothing() Nothing { return Nothing{} } func (self Nothing) Match(s string) bool { return len(s) == 0 } func (self Nothing) Index(s string) (int, []int) { return 0, segments0 } func (self Nothing) Len() int { return lenZero } func (self Nothing) String() string { return fmt.Sprintf("<nothing>") }
9,655
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/match/row.go
package match

import (
	"fmt"
)

// Row is a sequence of fixed-length matchers applied back to back; the
// whole row therefore has a fixed total rune length.
type Row struct {
	Matchers    Matchers
	RunesLength int   // total length of the row, in runes
	Segments    []int // single-element segment list: {RunesLength}
}

// NewRow builds a Row from matchers whose combined rune length is len.
func NewRow(len int, m ...Matcher) Row {
	return Row{
		Matchers:    Matchers(m),
		RunesLength: len,
		Segments:    []int{len},
	}
}

// matchAll applies each matcher in turn to the next m.Len() runes of s,
// advancing a byte offset as it goes. It assumes s is long enough (the
// caller checks length first).
func (self Row) matchAll(s string) bool {
	var idx int
	for _, m := range self.Matchers {
		length := m.Len()

		// walk forward `length` runes from idx; next ends up at the byte
		// offset of the last of those runes
		var next, i int
		for next = range s[idx:] {
			i++
			if i == length {
				break
			}
		}

		// i < length means s ran out of runes before this matcher's span
		if i < length || !m.Match(s[idx:idx+next+1]) {
			return false
		}

		idx += next + 1
	}

	return true
}

// lenOk reports whether s contains exactly RunesLength runes, bailing
// out early once the count is exceeded.
func (self Row) lenOk(s string) bool {
	var i int
	for range s {
		i++
		if i > self.RunesLength {
			return false
		}
	}
	return self.RunesLength == i
}

// Match reports whether s has exactly the row's rune length and every
// sub-matcher matches its slice.
func (self Row) Match(s string) bool {
	return self.lenOk(s) && self.matchAll(s)
}

// Len returns the fixed rune length of the row.
func (self Row) Len() (l int) {
	return self.RunesLength
}

// Index slides the row over s and returns the first offset where it
// matches, with the precomputed single segment.
// NOTE(review): len(s[i:]) counts bytes while RunesLength counts runes;
// this is a conservative early-exit (bytes >= runes always holds), not
// an exact comparison.
func (self Row) Index(s string) (int, []int) {
	for i := range s {
		if len(s[i:]) < self.RunesLength {
			break
		}
		if self.matchAll(s[i:]) {
			return i, self.Segments
		}
	}
	return -1, nil
}

// String renders the matcher, e.g. <row_3:[...]>.
func (self Row) String() string {
	return fmt.Sprintf("<row_%d:[%s]>", self.RunesLength, self.Matchers)
}
9,656
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/match/text.go
package match

import (
	"fmt"
	"strings"
	"unicode/utf8"
)

// Text matches one exact literal string.
type Text struct {
	Str         string
	RunesLength int
	BytesLength int
	Segments    []int
}

// NewText precomputes the literal's rune count, byte count, and the
// single segment covering it.
func NewText(s string) Text {
	return Text{
		Str:         s,
		RunesLength: utf8.RuneCountInString(s),
		BytesLength: len(s),
		Segments:    []int{len(s)},
	}
}

// Match reports whether s equals the literal exactly.
func (t Text) Match(s string) bool {
	return s == t.Str
}

// Len returns the literal's length in runes.
func (t Text) Len() int {
	return t.RunesLength
}

// Index finds the first occurrence of the literal inside s.
func (t Text) Index(s string) (int, []int) {
	pos := strings.Index(s, t.Str)
	if pos < 0 {
		return -1, nil
	}
	return pos, t.Segments
}

// String renders the matcher, e.g. <text:`abc`>.
func (t Text) String() string {
	return fmt.Sprintf("<text:`%v`>", t.Str)
}
9,657
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/match/suffix_any.go
package match

import (
	"fmt"
	"strings"

	sutil "github.com/gobwas/glob/util/strings"
)

// SuffixAny matches strings that end with Suffix and contain no
// separator rune before it (the "*suffix" pattern with separators).
type SuffixAny struct {
	Suffix     string
	Separators []rune
}

// NewSuffixAny builds a SuffixAny matcher.
func NewSuffixAny(s string, sep []rune) SuffixAny {
	return SuffixAny{s, sep}
}

// Index finds the first occurrence of Suffix and extends the match
// leftwards to just after the nearest preceding separator (or to the
// start of s when none exists).
func (self SuffixAny) Index(s string) (int, []int) {
	idx := strings.Index(s, self.Suffix)
	if idx == -1 {
		return -1, nil
	}

	// LastIndexAnyRunes returns -1 when no separator precedes the
	// suffix, so +1 yields offset 0 in that case.
	i := sutil.LastIndexAnyRunes(s[:idx], self.Separators) + 1

	return i, []int{idx + len(self.Suffix) - i}
}

// Len reports a variable match length.
func (self SuffixAny) Len() int {
	return lenNo
}

// Match reports whether s ends with Suffix and the part before it holds
// no separator rune.
func (self SuffixAny) Match(s string) bool {
	if !strings.HasSuffix(s, self.Suffix) {
		return false
	}
	return sutil.IndexAnyRunes(s[:len(s)-len(self.Suffix)], self.Separators) == -1
}

// String renders the matcher, e.g. <suffix_any:![/].go>.
func (self SuffixAny) String() string {
	return fmt.Sprintf("<suffix_any:![%s]%s>", string(self.Separators), self.Suffix)
}
9,658
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/match/btree.go
package match

import (
	"fmt"
	"unicode/utf8"
)

// BTree composes three matchers: Left, then Value, then Right. Match
// searches for a position of Value such that the remaining prefix and
// suffix satisfy Left and Right respectively.
type BTree struct {
	Value            Matcher
	Left             Matcher
	Right            Matcher
	ValueLengthRunes int // fixed rune length of Value, or -1
	LeftLengthRunes  int // fixed rune length of Left, or -1 (0 when Left is nil)
	RightLengthRunes int // fixed rune length of Right, or -1 (0 when Right is nil)
	LengthRunes      int // total fixed length, or -1 when any part is variable
}

// NewBTree builds a BTree and precomputes the fixed-length metadata used
// by Match to prune the search.
func NewBTree(Value, Left, Right Matcher) (tree BTree) {
	tree.Value = Value
	tree.Left = Left
	tree.Right = Right

	lenOk := true
	if tree.ValueLengthRunes = Value.Len(); tree.ValueLengthRunes == -1 {
		lenOk = false
	}

	if Left != nil {
		if tree.LeftLengthRunes = Left.Len(); tree.LeftLengthRunes == -1 {
			lenOk = false
		}
	}

	if Right != nil {
		if tree.RightLengthRunes = Right.Len(); tree.RightLengthRunes == -1 {
			lenOk = false
		}
	}

	if lenOk {
		tree.LengthRunes = tree.LeftLengthRunes + tree.ValueLengthRunes + tree.RightLengthRunes
	} else {
		tree.LengthRunes = -1
	}

	return tree
}

// Len returns the total fixed rune length, or -1 when unknown.
func (self BTree) Len() int {
	return self.LengthRunes
}

// todo?
// Index is not implemented for BTree; it always reports no match.
func (self BTree) Index(s string) (int, []int) {
	return -1, nil
}

// Match reports whether s can be split as left+value+right with each
// part matched by the corresponding sub-matcher. It slides Value's match
// position forward rune by rune until a consistent split is found.
func (self BTree) Match(s string) bool {
	inputLen := len(s)

	// self.Length, self.RLen and self.LLen are values meaning the length of runes for each part
	// here we manipulating byte length for better optimizations
	// but these checks still works, cause minLen of 1-rune string is 1 byte.
	if self.LengthRunes != -1 && self.LengthRunes > inputLen {
		return false
	}

	// try to cut unnecessary parts
	// by knowledge of length of right and left part
	var offset, limit int
	if self.LeftLengthRunes >= 0 {
		offset = self.LeftLengthRunes
	}
	if self.RightLengthRunes >= 0 {
		limit = inputLen - self.RightLengthRunes
	} else {
		limit = inputLen
	}

	for offset < limit {
		// search for matching part in substring
		index, segments := self.Value.Index(s[offset:limit])
		if index == -1 {
			releaseSegments(segments)
			return false
		}

		l := s[:offset+index]
		var left bool
		if self.Left != nil {
			left = self.Left.Match(l)
		} else {
			left = l == ""
		}

		if left {
			// try every possible end of the Value match, longest first
			for i := len(segments) - 1; i >= 0; i-- {
				length := segments[i]

				var right bool
				var r string
				// if there is no string for the right branch
				if inputLen <= offset+index+length {
					r = ""
				} else {
					r = s[offset+index+length:]
				}

				if self.Right != nil {
					right = self.Right.Match(r)
				} else {
					right = r == ""
				}

				if right {
					releaseSegments(segments)
					return true
				}
			}
		}

		// advance past the first rune of this candidate and retry
		_, step := utf8.DecodeRuneInString(s[offset+index:])
		offset += index + step

		releaseSegments(segments)
	}

	return false
}

// String renders the tree as <btree:[left<-value->right]>.
func (self BTree) String() string {
	const n string = "<nil>"
	var l, r string
	if self.Left == nil {
		l = n
	} else {
		l = self.Left.String()
	}
	if self.Right == nil {
		r = n
	} else {
		r = self.Right.String()
	}

	return fmt.Sprintf("<btree:[%s<-%s->%s]>", l, self.Value, r)
}
9,659
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/match/any.go
package match

import (
	"fmt"

	"github.com/gobwas/glob/util/strings"
)

// Any matches any run of runes that contains no separator ("*").
type Any struct {
	Separators []rune
}

// NewAny builds an Any matcher over the given separators.
func NewAny(s []rune) Any {
	return Any{s}
}

// Match reports whether s is free of separator runes.
func (a Any) Match(s string) bool {
	return strings.IndexAnyRunes(s, a.Separators) == -1
}

// Index matches at offset 0; every byte offset up to the first separator
// (or the end of s) is a valid match end.
func (a Any) Index(s string) (int, []int) {
	sep := strings.IndexAnyRunes(s, a.Separators)
	if sep == 0 {
		// separator first: only the empty match is possible
		return 0, segments0
	}
	if sep > 0 {
		s = s[:sep]
	}

	segments := acquireSegments(len(s))
	for i := range s {
		segments = append(segments, i)
	}
	return 0, append(segments, len(s))
}

// Len reports a variable match length.
func (a Any) Len() int {
	return lenNo
}

// String renders the matcher, e.g. <any:![/]>.
func (a Any) String() string {
	return fmt.Sprintf("<any:![%s]>", string(a.Separators))
}
9,660
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/match/prefix_any.go
package match

import (
	"fmt"
	"strings"
	"unicode/utf8"

	sutil "github.com/gobwas/glob/util/strings"
)

// PrefixAny matches strings that start with Prefix and contain no
// separator rune after it (the "prefix*" pattern with separators).
type PrefixAny struct {
	Prefix     string
	Separators []rune
}

// NewPrefixAny builds a PrefixAny matcher.
func NewPrefixAny(s string, sep []rune) PrefixAny {
	return PrefixAny{s, sep}
}

// Index finds the first occurrence of Prefix; the match may end after
// the prefix or after any following rune up to the first separator.
func (self PrefixAny) Index(s string) (int, []int) {
	idx := strings.Index(s, self.Prefix)
	if idx == -1 {
		return -1, nil
	}

	n := len(self.Prefix)
	sub := s[idx+n:]
	// truncate the tail at the first separator, if any
	i := sutil.IndexAnyRunes(sub, self.Separators)
	if i > -1 {
		sub = sub[:i]
	}

	seg := acquireSegments(len(sub) + 1)
	seg = append(seg, n)
	for i, r := range sub {
		// segment ends fall on rune boundaries after the prefix
		seg = append(seg, n+i+utf8.RuneLen(r))
	}

	return idx, seg
}

// Len reports a variable match length.
func (self PrefixAny) Len() int {
	return lenNo
}

// Match reports whether s starts with Prefix and the remainder holds no
// separator rune.
func (self PrefixAny) Match(s string) bool {
	if !strings.HasPrefix(s, self.Prefix) {
		return false
	}
	return sutil.IndexAnyRunes(s[len(self.Prefix):], self.Separators) == -1
}

// String renders the matcher, e.g. <prefix_any:abc![/]>.
func (self PrefixAny) String() string {
	return fmt.Sprintf("<prefix_any:%s![%s]>", self.Prefix, string(self.Separators))
}
9,661
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/match/min.go
package match

import (
	"fmt"
	"unicode/utf8"
)

// Min matches any string containing at least Limit runes.
type Min struct {
	Limit int
}

// NewMin builds a Min matcher with the given lower bound.
func NewMin(l int) Min {
	return Min{l}
}

// Match reports whether s holds at least Limit runes, stopping the count
// as soon as the bound is reached.
func (m Min) Match(s string) bool {
	var seen int
	for range s {
		seen++
		if seen >= m.Limit {
			return true
		}
	}
	return false
}

// Index matches at offset 0; every rune boundary at or beyond the
// Limit-th rune is a valid match end.
func (m Min) Index(s string) (int, []int) {
	// quick byte-length reject: fewer bytes than Limit means fewer runes
	c := len(s) - m.Limit + 1
	if c <= 0 {
		return -1, nil
	}

	segments := acquireSegments(c)
	var counted int
	for i, r := range s {
		counted++
		if counted >= m.Limit {
			segments = append(segments, i+utf8.RuneLen(r))
		}
	}

	if len(segments) == 0 {
		return -1, nil
	}
	return 0, segments
}

// Len reports a variable match length.
func (m Min) Len() int {
	return lenNo
}

// String renders the matcher, e.g. <min:2>.
func (m Min) String() string {
	return fmt.Sprintf("<min:%d>", m.Limit)
}
9,662
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/match/max.go
package match

import (
	"fmt"
	"unicode/utf8"
)

// Max matches any string containing at most Limit runes.
type Max struct {
	Limit int
}

// NewMax builds a Max matcher with the given upper bound.
func NewMax(l int) Max {
	return Max{l}
}

// Match reports whether s holds no more than Limit runes, failing as
// soon as the bound is exceeded.
func (m Max) Match(s string) bool {
	var seen int
	for range s {
		seen++
		if seen > m.Limit {
			return false
		}
	}
	return true
}

// Index matches at offset 0; the empty match and every rune boundary up
// to the Limit-th rune are valid match ends.
func (m Max) Index(s string) (int, []int) {
	segments := acquireSegments(m.Limit + 1)
	segments = append(segments, 0)
	var seen int
	for i, r := range s {
		seen++
		if seen > m.Limit {
			break
		}
		segments = append(segments, i+utf8.RuneLen(r))
	}
	return 0, segments
}

// Len reports a variable match length.
func (m Max) Len() int {
	return lenNo
}

// String renders the matcher, e.g. <max:3>.
func (m Max) String() string {
	return fmt.Sprintf("<max:%d>", m.Limit)
}
9,663
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/match/any_of.go
package match

import "fmt"

// AnyOf matches when at least one of its child matchers matches
// (alternation, "{a,b}").
type AnyOf struct {
	Matchers Matchers
}

// NewAnyOf builds an AnyOf over the given alternatives.
func NewAnyOf(m ...Matcher) AnyOf {
	return AnyOf{Matchers(m)}
}

// Add appends another alternative. It never fails; the error return
// mirrors other container matchers' interface.
func (self *AnyOf) Add(m Matcher) error {
	self.Matchers = append(self.Matchers, m)
	return nil
}

// Match reports whether any child matches s.
func (self AnyOf) Match(s string) bool {
	for _, m := range self.Matchers {
		if m.Match(s) {
			return true
		}
	}

	return false
}

// Index returns the leftmost child match; children matching at the same
// offset have their segment lists merged.
func (self AnyOf) Index(s string) (int, []int) {
	index := -1
	segments := acquireSegments(len(s))

	for _, m := range self.Matchers {
		idx, seg := m.Index(s)
		if idx == -1 {
			continue
		}

		if index == -1 || idx < index {
			// new leftmost match: discard previously accumulated segments
			index = idx
			segments = append(segments[:0], seg...)
			continue
		}

		if idx > index {
			continue
		}

		// here idx == index
		segments = appendMerge(segments, seg)
	}

	if index == -1 {
		releaseSegments(segments)
		return -1, nil
	}

	return index, segments
}

// Len returns the common fixed length of the children, or -1 when they
// differ or any child reports a variable length.
// NOTE(review): a child returning -1 while l is still -1 is treated the
// same as "no length seen yet", so a variable-length first child can be
// skipped over — confirm against upstream before relying on this.
func (self AnyOf) Len() (l int) {
	l = -1
	for _, m := range self.Matchers {
		ml := m.Len()
		switch {
		case l == -1:
			l = ml
			continue

		case ml == -1:
			return -1

		case l != ml:
			return -1
		}
	}

	return
}

// String renders the matcher, e.g. <any_of:[...]>.
func (self AnyOf) String() string {
	return fmt.Sprintf("<any_of:[%s]>", self.Matchers)
}
9,664
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/match/prefix_suffix.go
package match

import (
	"fmt"
	"strings"
)

// PrefixSuffix matches strings that start with Prefix and end with
// Suffix (the "prefix*suffix" pattern without separators).
type PrefixSuffix struct {
	Prefix, Suffix string
}

// NewPrefixSuffix builds a PrefixSuffix matcher.
func NewPrefixSuffix(p, s string) PrefixSuffix {
	return PrefixSuffix{p, s}
}

// Index finds the first occurrence of Prefix and returns every end
// offset at which Suffix also occurs, in ascending order.
func (self PrefixSuffix) Index(s string) (int, []int) {
	prefixIdx := strings.Index(s, self.Prefix)
	if prefixIdx == -1 {
		return -1, nil
	}

	suffixLen := len(self.Suffix)
	if suffixLen <= 0 {
		// empty suffix: the match may extend to the end of s
		return prefixIdx, []int{len(s) - prefixIdx}
	}

	if (len(s) - prefixIdx) <= 0 {
		return -1, nil
	}

	segments := acquireSegments(len(s) - prefixIdx)
	// collect suffix end offsets right-to-left, shrinking the window
	for sub := s[prefixIdx:]; ; {
		suffixIdx := strings.LastIndex(sub, self.Suffix)
		if suffixIdx == -1 {
			break
		}

		segments = append(segments, suffixIdx+suffixLen)
		sub = sub[:suffixIdx]
	}

	if len(segments) == 0 {
		releaseSegments(segments)
		return -1, nil
	}

	// offsets were gathered in descending order; callers expect ascending
	reverseSegments(segments)

	return prefixIdx, segments
}

// Len reports a variable match length.
func (self PrefixSuffix) Len() int {
	return lenNo
}

// Match reports whether s has both the prefix and the suffix.
func (self PrefixSuffix) Match(s string) bool {
	return strings.HasPrefix(s, self.Prefix) && strings.HasSuffix(s, self.Suffix)
}

// String renders the matcher, e.g. <prefix_suffix:[a,z]>.
func (self PrefixSuffix) String() string {
	return fmt.Sprintf("<prefix_suffix:[%s,%s]>", self.Prefix, self.Suffix)
}
9,665
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/match/prefix.go
package match

import (
	"fmt"
	"strings"
	"unicode/utf8"
)

// Prefix matches any string starting with the fixed prefix.
type Prefix struct {
	Prefix string
}

// NewPrefix constructs a Prefix matcher.
func NewPrefix(p string) Prefix {
	return Prefix{p}
}

// Index finds the first occurrence of the prefix text; the match may end
// right after the prefix or at any later rune boundary.
func (p Prefix) Index(s string) (int, []int) {
	pos := strings.Index(s, p.Prefix)
	if pos < 0 {
		return -1, nil
	}

	n := len(p.Prefix)
	rest := ""
	if pos+n < len(s) {
		rest = s[pos+n:]
	}

	segments := acquireSegments(len(rest) + 1)
	segments = append(segments, n)
	for i, r := range rest {
		segments = append(segments, n+i+utf8.RuneLen(r))
	}

	return pos, segments
}

// Len reports a variable match length.
func (p Prefix) Len() int {
	return lenNo
}

// Match reports whether s starts with the prefix text.
func (p Prefix) Match(s string) bool {
	return strings.HasPrefix(s, p.Prefix)
}

// String renders the matcher, e.g. <prefix:abc>.
func (p Prefix) String() string {
	return fmt.Sprintf("<prefix:%s>", p.Prefix)
}
9,666
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/match/every_of.go
package match

import (
	"fmt"
)

// EveryOf matches only when all of its child matchers match
// (conjunction of constraints on the same span).
type EveryOf struct {
	Matchers Matchers
}

// NewEveryOf builds an EveryOf over the given constraints.
func NewEveryOf(m ...Matcher) EveryOf {
	return EveryOf{Matchers(m)}
}

// Add appends another constraint. It never fails; the error return
// mirrors other container matchers' interface.
func (self *EveryOf) Add(m Matcher) error {
	self.Matchers = append(self.Matchers, m)
	return nil
}

// Len sums the children's fixed lengths, or returns -1 when any child
// is variable-length.
// NOTE(review): the `l > 0` guard also rejects a legitimate first child
// of length 0 — confirm against upstream before relying on this.
func (self EveryOf) Len() (l int) {
	for _, m := range self.Matchers {
		if ml := m.Len(); l > 0 {
			l += ml
		} else {
			return -1
		}
	}

	return
}

// Index finds the smallest offset at which every child matches, by
// intersecting the children's segment sets (adjusted for offset drift).
func (self EveryOf) Index(s string) (int, []int) {
	var index int
	var offset int

	// make `in` with cap as len(s),
	// cause it is the maximum size of output segments values
	next := acquireSegments(len(s))
	current := acquireSegments(len(s))

	sub := s
	for i, m := range self.Matchers {
		idx, seg := m.Index(sub)
		if idx == -1 {
			releaseSegments(next)
			releaseSegments(current)
			return -1, nil
		}

		if i == 0 {
			// we use copy here instead of `current = seg`
			// cause seg is a slice from reusable buffer `in`
			// and it could be overwritten in next iteration
			current = append(current, seg...)
		} else {
			// clear the next
			next = next[:0]

			// delta rebases this child's segment offsets onto the
			// previously accumulated match position
			delta := index - (idx + offset)
			for _, ex := range current {
				for _, n := range seg {
					if ex+delta == n {
						next = append(next, n)
					}
				}
			}

			if len(next) == 0 {
				releaseSegments(next)
				releaseSegments(current)
				return -1, nil
			}

			current = append(current[:0], next...)
		}

		index = idx + offset
		sub = s[index:]
		offset += idx
	}

	releaseSegments(next)

	return index, current
}

// Match reports whether every child matches s.
func (self EveryOf) Match(s string) bool {
	for _, m := range self.Matchers {
		if !m.Match(s) {
			return false
		}
	}

	return true
}

// String renders the matcher, e.g. <every_of:[...]>.
func (self EveryOf) String() string {
	return fmt.Sprintf("<every_of:[%s]>", self.Matchers)
}
9,667
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/match/segments.go
package match

import (
	"sync"
)

// SomePool is the interface a segment pool satisfies; kept for
// documentation of the pooling contract below.
type SomePool interface {
	Get() []int
	Put([]int)
}

// segmentsPools holds one sync.Pool per power-of-two capacity class,
// indexed by capacity-1. Only classes in [cacheFrom, cacheToAndHigher]
// are populated by init.
var segmentsPools [1024]sync.Pool

// toPowerOfTwo rounds v up to the next power of two (bit-smearing trick).
func toPowerOfTwo(v int) int {
	v--
	v |= v >> 1
	v |= v >> 2
	v |= v >> 4
	v |= v >> 8
	v |= v >> 16
	v++

	return v
}

const (
	cacheFrom             = 16   // smallest pooled capacity
	cacheToAndHigher      = 1024 // largest pooled capacity; bigger requests clamp here
	cacheFromIndex        = 15
	cacheToAndHigherIndex = 1023
)

// Shared single-element segment slices for the five possible byte widths
// of one rune (0 through 4); avoids allocating for one-rune matches.
var (
	segments0 = []int{0}
	segments1 = []int{1}
	segments2 = []int{2}
	segments3 = []int{3}
	segments4 = []int{4}
)

// segmentsByRuneLength maps utf8.RuneLen(r) to its shared segment slice.
var segmentsByRuneLength [5][]int = [5][]int{
	0: segments0,
	1: segments1,
	2: segments2,
	3: segments3,
	4: segments4,
}

func init() {
	// populate one pool per power-of-two class from 1024 down to 16
	for i := cacheToAndHigher; i >= cacheFrom; i >>= 1 {
		func(i int) {
			segmentsPools[i-1] = sync.Pool{New: func() interface{} {
				return make([]int, 0, i)
			}}
		}(i)
	}
}

// getTableIndex maps a requested capacity to its pool index, clamping to
// the smallest and largest pooled classes.
func getTableIndex(c int) int {
	p := toPowerOfTwo(c)
	switch {
	case p >= cacheToAndHigher:
		return cacheToAndHigherIndex
	case p <= cacheFrom:
		return cacheFromIndex
	default:
		return p - 1
	}
}

// acquireSegments returns an empty []int with at least capacity c,
// drawn from a pool for larger sizes.
func acquireSegments(c int) []int {
	// make []int with less capacity than cacheFrom
	// is faster than acquiring it from pool
	if c < cacheFrom {
		return make([]int, 0, c)
	}
	return segmentsPools[getTableIndex(c)].Get().([]int)[:0]
}

// releaseSegments returns a slice obtained from acquireSegments to its
// pool; small slices are simply dropped for the GC.
func releaseSegments(s []int) {
	c := cap(s)

	// make []int with less capacity than cacheFrom
	// is faster than acquiring it from pool
	if c < cacheFrom {
		return
	}
	segmentsPools[getTableIndex(c)].Put(s)
}
9,668
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/match/list.go
package match

import (
	"fmt"
	"unicode/utf8"

	"github.com/gobwas/glob/util/runes"
)

// List matches exactly one rune drawn from (or, when Not is set,
// absent from) the rune set List.
type List struct {
	List []rune
	Not  bool
}

// NewList builds a List matcher, optionally negated.
func NewList(list []rune, not bool) List {
	return List{list, not}
}

// Match reports whether s is a single rune satisfying the set test.
func (lst List) Match(s string) bool {
	r, w := utf8.DecodeRuneInString(s)
	if w < len(s) {
		// s holds more than one rune
		return false
	}

	found := runes.IndexRune(lst.List, r) != -1
	return found != lst.Not
}

// Len reports that a List always consumes a single rune.
func (lst List) Len() int {
	return lenOne
}

// Index scans s for the first rune satisfying the set test and returns
// its byte offset together with a segment for that rune's width.
func (lst List) Index(s string) (int, []int) {
	for i, r := range s {
		if (runes.IndexRune(lst.List, r) != -1) != lst.Not {
			return i, segmentsByRuneLength[utf8.RuneLen(r)]
		}
	}
	return -1, nil
}

// String renders the matcher, e.g. <list:![abc]>.
func (lst List) String() string {
	negation := ""
	if lst.Not {
		negation = "!"
	}

	return fmt.Sprintf("<list:%s[%s]>", negation, string(lst.List))
}
9,669
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/match/super.go
package match import ( "fmt" ) type Super struct{} func NewSuper() Super { return Super{} } func (self Super) Match(s string) bool { return true } func (self Super) Len() int { return lenNo } func (self Super) Index(s string) (int, []int) { segments := acquireSegments(len(s) + 1) for i := range s { segments = append(segments, i) } segments = append(segments, len(s)) return 0, segments } func (self Super) String() string { return fmt.Sprintf("<super>") }
9,670
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/match/single.go
package match

import (
	"fmt"
	"unicode/utf8"

	"github.com/gobwas/glob/util/runes"
)

// Single matches exactly one rune that is not a separator ("?").
type Single struct {
	Separators []rune
}

// NewSingle builds a Single matcher over the given separators.
func NewSingle(s []rune) Single {
	return Single{s}
}

// Match reports whether s is exactly one non-separator rune.
func (sg Single) Match(s string) bool {
	r, w := utf8.DecodeRuneInString(s)
	if w < len(s) {
		// s holds more than one rune
		return false
	}

	return runes.IndexRune(sg.Separators, r) == -1
}

// Len reports that a Single always consumes one rune.
func (sg Single) Len() int {
	return lenOne
}

// Index scans s for the first non-separator rune and returns its byte
// offset together with a segment for that rune's width.
func (sg Single) Index(s string) (int, []int) {
	for i, r := range s {
		if runes.IndexRune(sg.Separators, r) == -1 {
			return i, segmentsByRuneLength[utf8.RuneLen(r)]
		}
	}

	return -1, nil
}

// String renders the matcher, e.g. <single:![/]>.
func (sg Single) String() string {
	return fmt.Sprintf("<single:![%s]>", string(sg.Separators))
}
9,671
0
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob
kubeflow_public_repos/fate-operator/vendor/github.com/gobwas/glob/compiler/compiler.go
package compiler // TODO use constructor with all matchers, and to their structs private // TODO glue multiple Text nodes (like after QuoteMeta) import ( "fmt" "reflect" "github.com/gobwas/glob/match" "github.com/gobwas/glob/syntax/ast" "github.com/gobwas/glob/util/runes" ) func optimizeMatcher(matcher match.Matcher) match.Matcher { switch m := matcher.(type) { case match.Any: if len(m.Separators) == 0 { return match.NewSuper() } case match.AnyOf: if len(m.Matchers) == 1 { return m.Matchers[0] } return m case match.List: if m.Not == false && len(m.List) == 1 { return match.NewText(string(m.List)) } return m case match.BTree: m.Left = optimizeMatcher(m.Left) m.Right = optimizeMatcher(m.Right) r, ok := m.Value.(match.Text) if !ok { return m } var ( leftNil = m.Left == nil rightNil = m.Right == nil ) if leftNil && rightNil { return match.NewText(r.Str) } _, leftSuper := m.Left.(match.Super) lp, leftPrefix := m.Left.(match.Prefix) la, leftAny := m.Left.(match.Any) _, rightSuper := m.Right.(match.Super) rs, rightSuffix := m.Right.(match.Suffix) ra, rightAny := m.Right.(match.Any) switch { case leftSuper && rightSuper: return match.NewContains(r.Str, false) case leftSuper && rightNil: return match.NewSuffix(r.Str) case rightSuper && leftNil: return match.NewPrefix(r.Str) case leftNil && rightSuffix: return match.NewPrefixSuffix(r.Str, rs.Suffix) case rightNil && leftPrefix: return match.NewPrefixSuffix(lp.Prefix, r.Str) case rightNil && leftAny: return match.NewSuffixAny(r.Str, la.Separators) case leftNil && rightAny: return match.NewPrefixAny(r.Str, ra.Separators) } return m } return matcher } func compileMatchers(matchers []match.Matcher) (match.Matcher, error) { if len(matchers) == 0 { return nil, fmt.Errorf("compile error: need at least one matcher") } if len(matchers) == 1 { return matchers[0], nil } if m := glueMatchers(matchers); m != nil { return m, nil } idx := -1 maxLen := -1 var val match.Matcher for i, matcher := range matchers { if l := matcher.Len(); l != 
-1 && l >= maxLen { maxLen = l idx = i val = matcher } } if val == nil { // not found matcher with static length r, err := compileMatchers(matchers[1:]) if err != nil { return nil, err } return match.NewBTree(matchers[0], nil, r), nil } left := matchers[:idx] var right []match.Matcher if len(matchers) > idx+1 { right = matchers[idx+1:] } var l, r match.Matcher var err error if len(left) > 0 { l, err = compileMatchers(left) if err != nil { return nil, err } } if len(right) > 0 { r, err = compileMatchers(right) if err != nil { return nil, err } } return match.NewBTree(val, l, r), nil } func glueMatchers(matchers []match.Matcher) match.Matcher { if m := glueMatchersAsEvery(matchers); m != nil { return m } if m := glueMatchersAsRow(matchers); m != nil { return m } return nil } func glueMatchersAsRow(matchers []match.Matcher) match.Matcher { if len(matchers) <= 1 { return nil } var ( c []match.Matcher l int ) for _, matcher := range matchers { if ml := matcher.Len(); ml == -1 { return nil } else { c = append(c, matcher) l += ml } } return match.NewRow(l, c...) 
} func glueMatchersAsEvery(matchers []match.Matcher) match.Matcher { if len(matchers) <= 1 { return nil } var ( hasAny bool hasSuper bool hasSingle bool min int separator []rune ) for i, matcher := range matchers { var sep []rune switch m := matcher.(type) { case match.Super: sep = []rune{} hasSuper = true case match.Any: sep = m.Separators hasAny = true case match.Single: sep = m.Separators hasSingle = true min++ case match.List: if !m.Not { return nil } sep = m.List hasSingle = true min++ default: return nil } // initialize if i == 0 { separator = sep } if runes.Equal(sep, separator) { continue } return nil } if hasSuper && !hasAny && !hasSingle { return match.NewSuper() } if hasAny && !hasSuper && !hasSingle { return match.NewAny(separator) } if (hasAny || hasSuper) && min > 0 && len(separator) == 0 { return match.NewMin(min) } every := match.NewEveryOf() if min > 0 { every.Add(match.NewMin(min)) if !hasAny && !hasSuper { every.Add(match.NewMax(min)) } } if len(separator) > 0 { every.Add(match.NewContains(string(separator), true)) } return every } func minimizeMatchers(matchers []match.Matcher) []match.Matcher { var done match.Matcher var left, right, count int for l := 0; l < len(matchers); l++ { for r := len(matchers); r > l; r-- { if glued := glueMatchers(matchers[l:r]); glued != nil { var swap bool if done == nil { swap = true } else { cl, gl := done.Len(), glued.Len() swap = cl > -1 && gl > -1 && gl > cl swap = swap || count < r-l } if swap { done = glued left = l right = r count = r - l } } } } if done == nil { return matchers } next := append(append([]match.Matcher{}, matchers[:left]...), done) if right < len(matchers) { next = append(next, matchers[right:]...) 
} if len(next) == len(matchers) { return next } return minimizeMatchers(next) } // minimizeAnyOf tries to apply some heuristics to minimize number of nodes in given tree func minimizeTree(tree *ast.Node) *ast.Node { switch tree.Kind { case ast.KindAnyOf: return minimizeTreeAnyOf(tree) default: return nil } } // minimizeAnyOf tries to find common children of given node of AnyOf pattern // it searches for common children from left and from right // if any common children are found – then it returns new optimized ast tree // else it returns nil func minimizeTreeAnyOf(tree *ast.Node) *ast.Node { if !areOfSameKind(tree.Children, ast.KindPattern) { return nil } commonLeft, commonRight := commonChildren(tree.Children) commonLeftCount, commonRightCount := len(commonLeft), len(commonRight) if commonLeftCount == 0 && commonRightCount == 0 { // there are no common parts return nil } var result []*ast.Node if commonLeftCount > 0 { result = append(result, ast.NewNode(ast.KindPattern, nil, commonLeft...)) } var anyOf []*ast.Node for _, child := range tree.Children { reuse := child.Children[commonLeftCount : len(child.Children)-commonRightCount] var node *ast.Node if len(reuse) == 0 { // this pattern is completely reduced by commonLeft and commonRight patterns // so it become nothing node = ast.NewNode(ast.KindNothing, nil) } else { node = ast.NewNode(ast.KindPattern, nil, reuse...) } anyOf = appendIfUnique(anyOf, node) } switch { case len(anyOf) == 1 && anyOf[0].Kind != ast.KindNothing: result = append(result, anyOf[0]) case len(anyOf) > 1: result = append(result, ast.NewNode(ast.KindAnyOf, nil, anyOf...)) } if commonRightCount > 0 { result = append(result, ast.NewNode(ast.KindPattern, nil, commonRight...)) } return ast.NewNode(ast.KindPattern, nil, result...) 
} func commonChildren(nodes []*ast.Node) (commonLeft, commonRight []*ast.Node) { if len(nodes) <= 1 { return } // find node that has least number of children idx := leastChildren(nodes) if idx == -1 { return } tree := nodes[idx] treeLength := len(tree.Children) // allocate max able size for rightCommon slice // to get ability insert elements in reverse order (from end to start) // without sorting commonRight = make([]*ast.Node, treeLength) lastRight := treeLength // will use this to get results as commonRight[lastRight:] var ( breakLeft bool breakRight bool commonTotal int ) for i, j := 0, treeLength-1; commonTotal < treeLength && j >= 0 && !(breakLeft && breakRight); i, j = i+1, j-1 { treeLeft := tree.Children[i] treeRight := tree.Children[j] for k := 0; k < len(nodes) && !(breakLeft && breakRight); k++ { // skip least children node if k == idx { continue } restLeft := nodes[k].Children[i] restRight := nodes[k].Children[j+len(nodes[k].Children)-treeLength] breakLeft = breakLeft || !treeLeft.Equal(restLeft) // disable searching for right common parts, if left part is already overlapping breakRight = breakRight || (!breakLeft && j <= i) breakRight = breakRight || !treeRight.Equal(restRight) } if !breakLeft { commonTotal++ commonLeft = append(commonLeft, treeLeft) } if !breakRight { commonTotal++ lastRight = j commonRight[j] = treeRight } } commonRight = commonRight[lastRight:] return } func appendIfUnique(target []*ast.Node, val *ast.Node) []*ast.Node { for _, n := range target { if reflect.DeepEqual(n, val) { return target } } return append(target, val) } func areOfSameKind(nodes []*ast.Node, kind ast.Kind) bool { for _, n := range nodes { if n.Kind != kind { return false } } return true } func leastChildren(nodes []*ast.Node) int { min := -1 idx := -1 for i, n := range nodes { if idx == -1 || (len(n.Children) < min) { min = len(n.Children) idx = i } } return idx } func compileTreeChildren(tree *ast.Node, sep []rune) ([]match.Matcher, error) { var matchers 
[]match.Matcher for _, desc := range tree.Children { m, err := compile(desc, sep) if err != nil { return nil, err } matchers = append(matchers, optimizeMatcher(m)) } return matchers, nil } func compile(tree *ast.Node, sep []rune) (m match.Matcher, err error) { switch tree.Kind { case ast.KindAnyOf: // todo this could be faster on pattern_alternatives_combine_lite (see glob_test.go) if n := minimizeTree(tree); n != nil { return compile(n, sep) } matchers, err := compileTreeChildren(tree, sep) if err != nil { return nil, err } return match.NewAnyOf(matchers...), nil case ast.KindPattern: if len(tree.Children) == 0 { return match.NewNothing(), nil } matchers, err := compileTreeChildren(tree, sep) if err != nil { return nil, err } m, err = compileMatchers(minimizeMatchers(matchers)) if err != nil { return nil, err } case ast.KindAny: m = match.NewAny(sep) case ast.KindSuper: m = match.NewSuper() case ast.KindSingle: m = match.NewSingle(sep) case ast.KindNothing: m = match.NewNothing() case ast.KindList: l := tree.Value.(ast.List) m = match.NewList([]rune(l.Chars), l.Not) case ast.KindRange: r := tree.Value.(ast.Range) m = match.NewRange(r.Lo, r.Hi, r.Not) case ast.KindText: t := tree.Value.(ast.Text) m = match.NewText(t.Text) default: return nil, fmt.Errorf("could not compile tree: unknown node type") } return optimizeMatcher(m), nil } func Compile(tree *ast.Node, sep []rune) (match.Matcher, error) { m, err := compile(tree, sep) if err != nil { return nil, err } return m, nil }
9,672
0
kubeflow_public_repos/fate-operator/vendor/github.com/liggitt
kubeflow_public_repos/fate-operator/vendor/github.com/liggitt/tabwriter/README.md
This repo is a drop-in replacement for the golang [text/tabwriter](https://golang.org/pkg/text/tabwriter/) package. It is based on that package at [cf2c2ea8](https://github.com/golang/go/tree/cf2c2ea89d09d486bb018b1817c5874388038c3a/src/text/tabwriter) and inherits its license. The following additional features are supported: * `RememberWidths` flag allows remembering maximum widths seen per column even after Flush() is called. * `RememberedWidths() []int` and `SetRememberedWidths([]int) *Writer` allows obtaining and transferring remembered column width between writers.
9,673
0
kubeflow_public_repos/fate-operator/vendor/github.com/liggitt
kubeflow_public_repos/fate-operator/vendor/github.com/liggitt/tabwriter/LICENSE
Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
9,674
0
kubeflow_public_repos/fate-operator/vendor/github.com/liggitt
kubeflow_public_repos/fate-operator/vendor/github.com/liggitt/tabwriter/tabwriter.go
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package tabwriter implements a write filter (tabwriter.Writer) that // translates tabbed columns in input into properly aligned text. // // It is a drop-in replacement for the golang text/tabwriter package (https://golang.org/pkg/text/tabwriter), // based on that package at https://github.com/golang/go/tree/cf2c2ea89d09d486bb018b1817c5874388038c3a // with support for additional features. // // The package is using the Elastic Tabstops algorithm described at // http://nickgravgaard.com/elastictabstops/index.html. package tabwriter import ( "io" "unicode/utf8" ) // ---------------------------------------------------------------------------- // Filter implementation // A cell represents a segment of text terminated by tabs or line breaks. // The text itself is stored in a separate buffer; cell only describes the // segment's size in bytes, its width in runes, and whether it's an htab // ('\t') terminated cell. // type cell struct { size int // cell size in bytes width int // cell width in runes htab bool // true if the cell is terminated by an htab ('\t') } // A Writer is a filter that inserts padding around tab-delimited // columns in its input to align them in the output. // // The Writer treats incoming bytes as UTF-8-encoded text consisting // of cells terminated by horizontal ('\t') or vertical ('\v') tabs, // and newline ('\n') or formfeed ('\f') characters; both newline and // formfeed act as line breaks. // // Tab-terminated cells in contiguous lines constitute a column. The // Writer inserts padding as needed to make all cells in a column have // the same width, effectively aligning the columns. It assumes that // all characters have the same width, except for tabs for which a // tabwidth must be specified. 
Column cells must be tab-terminated, not // tab-separated: non-tab terminated trailing text at the end of a line // forms a cell but that cell is not part of an aligned column. // For instance, in this example (where | stands for a horizontal tab): // // aaaa|bbb|d // aa |b |dd // a | // aa |cccc|eee // // the b and c are in distinct columns (the b column is not contiguous // all the way). The d and e are not in a column at all (there's no // terminating tab, nor would the column be contiguous). // // The Writer assumes that all Unicode code points have the same width; // this may not be true in some fonts or if the string contains combining // characters. // // If DiscardEmptyColumns is set, empty columns that are terminated // entirely by vertical (or "soft") tabs are discarded. Columns // terminated by horizontal (or "hard") tabs are not affected by // this flag. // // If a Writer is configured to filter HTML, HTML tags and entities // are passed through. The widths of tags and entities are // assumed to be zero (tags) and one (entities) for formatting purposes. // // A segment of text may be escaped by bracketing it with Escape // characters. The tabwriter passes escaped text segments through // unchanged. In particular, it does not interpret any tabs or line // breaks within the segment. If the StripEscape flag is set, the // Escape characters are stripped from the output; otherwise they // are passed through as well. For the purpose of formatting, the // width of the escaped text is always computed excluding the Escape // characters. // // The formfeed character acts like a newline but it also terminates // all columns in the current line (effectively calling Flush). Tab- // terminated cells in the next line start new columns. Unless found // inside an HTML tag or inside an escaped text segment, formfeed // characters appear as newlines in the output. 
// // The Writer must buffer input internally, because proper spacing // of one line may depend on the cells in future lines. Clients must // call Flush when done calling Write. // type Writer struct { // configuration output io.Writer minwidth int tabwidth int padding int padbytes [8]byte flags uint // current state buf []byte // collected text excluding tabs or line breaks pos int // buffer position up to which cell.width of incomplete cell has been computed cell cell // current incomplete cell; cell.width is up to buf[pos] excluding ignored sections endChar byte // terminating char of escaped sequence (Escape for escapes, '>', ';' for HTML tags/entities, or 0) lines [][]cell // list of lines; each line is a list of cells widths []int // list of column widths in runes - re-used during formatting maxwidths []int // list of max column widths in runes } // addLine adds a new line. // flushed is a hint indicating whether the underlying writer was just flushed. // If so, the previous line is not likely to be a good indicator of the new line's cells. func (b *Writer) addLine(flushed bool) { // Grow slice instead of appending, // as that gives us an opportunity // to re-use an existing []cell. if n := len(b.lines) + 1; n <= cap(b.lines) { b.lines = b.lines[:n] b.lines[n-1] = b.lines[n-1][:0] } else { b.lines = append(b.lines, nil) } if !flushed { // The previous line is probably a good indicator // of how many cells the current line will have. // If the current line's capacity is smaller than that, // abandon it and make a new one. if n := len(b.lines); n >= 2 { if prev := len(b.lines[n-2]); prev > cap(b.lines[n-1]) { b.lines[n-1] = make([]cell, 0, prev) } } } } // Reset the current state. 
func (b *Writer) reset() { b.buf = b.buf[:0] b.pos = 0 b.cell = cell{} b.endChar = 0 b.lines = b.lines[0:0] b.widths = b.widths[0:0] b.addLine(true) } // Internal representation (current state): // // - all text written is appended to buf; tabs and line breaks are stripped away // - at any given time there is a (possibly empty) incomplete cell at the end // (the cell starts after a tab or line break) // - cell.size is the number of bytes belonging to the cell so far // - cell.width is text width in runes of that cell from the start of the cell to // position pos; html tags and entities are excluded from this width if html // filtering is enabled // - the sizes and widths of processed text are kept in the lines list // which contains a list of cells for each line // - the widths list is a temporary list with current widths used during // formatting; it is kept in Writer because it's re-used // // |<---------- size ---------->| // | | // |<- width ->|<- ignored ->| | // | | | | // [---processed---tab------------<tag>...</tag>...] // ^ ^ ^ // | | | // buf start of incomplete cell pos // Formatting can be controlled with these flags. const ( // Ignore html tags and treat entities (starting with '&' // and ending in ';') as single characters (width = 1). FilterHTML uint = 1 << iota // Strip Escape characters bracketing escaped text segments // instead of passing them through unchanged with the text. StripEscape // Force right-alignment of cell content. // Default is left-alignment. AlignRight // Handle empty columns as if they were not present in // the input in the first place. DiscardEmptyColumns // Always use tabs for indentation columns (i.e., padding of // leading empty cells on the left) independent of padchar. TabIndent // Print a vertical bar ('|') between columns (after formatting). // Discarded columns appear as zero-width columns ("||"). Debug // Remember maximum widths seen per column even after Flush() is called. 
RememberWidths ) // A Writer must be initialized with a call to Init. The first parameter (output) // specifies the filter output. The remaining parameters control the formatting: // // minwidth minimal cell width including any padding // tabwidth width of tab characters (equivalent number of spaces) // padding padding added to a cell before computing its width // padchar ASCII char used for padding // if padchar == '\t', the Writer will assume that the // width of a '\t' in the formatted output is tabwidth, // and cells are left-aligned independent of align_left // (for correct-looking results, tabwidth must correspond // to the tab width in the viewer displaying the result) // flags formatting control // func (b *Writer) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer { if minwidth < 0 || tabwidth < 0 || padding < 0 { panic("negative minwidth, tabwidth, or padding") } b.output = output b.minwidth = minwidth b.tabwidth = tabwidth b.padding = padding for i := range b.padbytes { b.padbytes[i] = padchar } if padchar == '\t' { // tab padding enforces left-alignment flags &^= AlignRight } b.flags = flags b.reset() return b } // debugging support (keep code around) func (b *Writer) dump() { pos := 0 for i, line := range b.lines { print("(", i, ") ") for _, c := range line { print("[", string(b.buf[pos:pos+c.size]), "]") pos += c.size } print("\n") } print("\n") } // local error wrapper so we can distinguish errors we want to return // as errors from genuine panics (which we don't want to return as errors) type osError struct { err error } func (b *Writer) write0(buf []byte) { n, err := b.output.Write(buf) if n != len(buf) && err == nil { err = io.ErrShortWrite } if err != nil { panic(osError{err}) } } func (b *Writer) writeN(src []byte, n int) { for n > len(src) { b.write0(src) n -= len(src) } b.write0(src[0:n]) } var ( newline = []byte{'\n'} tabs = []byte("\t\t\t\t\t\t\t\t") ) func (b *Writer) writePadding(textw, cellw int, 
useTabs bool) { if b.padbytes[0] == '\t' || useTabs { // padding is done with tabs if b.tabwidth == 0 { return // tabs have no width - can't do any padding } // make cellw the smallest multiple of b.tabwidth cellw = (cellw + b.tabwidth - 1) / b.tabwidth * b.tabwidth n := cellw - textw // amount of padding if n < 0 { panic("internal error") } b.writeN(tabs, (n+b.tabwidth-1)/b.tabwidth) return } // padding is done with non-tab characters b.writeN(b.padbytes[0:], cellw-textw) } var vbar = []byte{'|'} func (b *Writer) writeLines(pos0 int, line0, line1 int) (pos int) { pos = pos0 for i := line0; i < line1; i++ { line := b.lines[i] // if TabIndent is set, use tabs to pad leading empty cells useTabs := b.flags&TabIndent != 0 for j, c := range line { if j > 0 && b.flags&Debug != 0 { // indicate column break b.write0(vbar) } if c.size == 0 { // empty cell if j < len(b.widths) { b.writePadding(c.width, b.widths[j], useTabs) } } else { // non-empty cell useTabs = false if b.flags&AlignRight == 0 { // align left b.write0(b.buf[pos : pos+c.size]) pos += c.size if j < len(b.widths) { b.writePadding(c.width, b.widths[j], false) } } else { // align right if j < len(b.widths) { b.writePadding(c.width, b.widths[j], false) } b.write0(b.buf[pos : pos+c.size]) pos += c.size } } } if i+1 == len(b.lines) { // last buffered line - we don't have a newline, so just write // any outstanding buffered data b.write0(b.buf[pos : pos+b.cell.size]) pos += b.cell.size } else { // not the last line - write newline b.write0(newline) } } return } // Format the text between line0 and line1 (excluding line1); pos // is the buffer position corresponding to the beginning of line0. // Returns the buffer position corresponding to the beginning of // line1 and an error, if any. 
// func (b *Writer) format(pos0 int, line0, line1 int) (pos int) { pos = pos0 column := len(b.widths) for this := line0; this < line1; this++ { line := b.lines[this] if column >= len(line)-1 { continue } // cell exists in this column => this line // has more cells than the previous line // (the last cell per line is ignored because cells are // tab-terminated; the last cell per line describes the // text before the newline/formfeed and does not belong // to a column) // print unprinted lines until beginning of block pos = b.writeLines(pos, line0, this) line0 = this // column block begin width := b.minwidth // minimal column width discardable := true // true if all cells in this column are empty and "soft" for ; this < line1; this++ { line = b.lines[this] if column >= len(line)-1 { break } // cell exists in this column c := line[column] // update width if w := c.width + b.padding; w > width { width = w } // update discardable if c.width > 0 || c.htab { discardable = false } } // column block end // discard empty columns if necessary if discardable && b.flags&DiscardEmptyColumns != 0 { width = 0 } if b.flags&RememberWidths != 0 { if len(b.maxwidths) < len(b.widths) { b.maxwidths = append(b.maxwidths, b.widths[len(b.maxwidths):]...) } switch { case len(b.maxwidths) == len(b.widths): b.maxwidths = append(b.maxwidths, width) case b.maxwidths[len(b.widths)] > width: width = b.maxwidths[len(b.widths)] case b.maxwidths[len(b.widths)] < width: b.maxwidths[len(b.widths)] = width } } // format and print all columns to the right of this column // (we know the widths of this column and all columns to the left) b.widths = append(b.widths, width) // push width pos = b.format(pos, line0, this) b.widths = b.widths[0 : len(b.widths)-1] // pop width line0 = this } // print unprinted lines until end return b.writeLines(pos, line0, line1) } // Append text to current cell. func (b *Writer) append(text []byte) { b.buf = append(b.buf, text...) 
b.cell.size += len(text) } // Update the cell width. func (b *Writer) updateWidth() { b.cell.width += utf8.RuneCount(b.buf[b.pos:]) b.pos = len(b.buf) } // To escape a text segment, bracket it with Escape characters. // For instance, the tab in this string "Ignore this tab: \xff\t\xff" // does not terminate a cell and constitutes a single character of // width one for formatting purposes. // // The value 0xff was chosen because it cannot appear in a valid UTF-8 sequence. // const Escape = '\xff' // Start escaped mode. func (b *Writer) startEscape(ch byte) { switch ch { case Escape: b.endChar = Escape case '<': b.endChar = '>' case '&': b.endChar = ';' } } // Terminate escaped mode. If the escaped text was an HTML tag, its width // is assumed to be zero for formatting purposes; if it was an HTML entity, // its width is assumed to be one. In all other cases, the width is the // unicode width of the text. // func (b *Writer) endEscape() { switch b.endChar { case Escape: b.updateWidth() if b.flags&StripEscape == 0 { b.cell.width -= 2 // don't count the Escape chars } case '>': // tag of zero width case ';': b.cell.width++ // entity, count as one rune } b.pos = len(b.buf) b.endChar = 0 } // Terminate the current cell by adding it to the list of cells of the // current line. Returns the number of cells in that line. // func (b *Writer) terminateCell(htab bool) int { b.cell.htab = htab line := &b.lines[len(b.lines)-1] *line = append(*line, b.cell) b.cell = cell{} return len(*line) } func handlePanic(err *error, op string) { if e := recover(); e != nil { if nerr, ok := e.(osError); ok { *err = nerr.err return } panic("tabwriter: panic during " + op) } } // RememberedWidths returns a copy of the remembered per-column maximum widths. // Requires use of the RememberWidths flag, and is not threadsafe. 
func (b *Writer) RememberedWidths() []int { retval := make([]int, len(b.maxwidths)) copy(retval, b.maxwidths) return retval } // SetRememberedWidths sets the remembered per-column maximum widths. // Requires use of the RememberWidths flag, and is not threadsafe. func (b *Writer) SetRememberedWidths(widths []int) *Writer { b.maxwidths = make([]int, len(widths)) copy(b.maxwidths, widths) return b } // Flush should be called after the last call to Write to ensure // that any data buffered in the Writer is written to output. Any // incomplete escape sequence at the end is considered // complete for formatting purposes. func (b *Writer) Flush() error { return b.flush() } func (b *Writer) flush() (err error) { defer b.reset() // even in the presence of errors defer handlePanic(&err, "Flush") // add current cell if not empty if b.cell.size > 0 { if b.endChar != 0 { // inside escape - terminate it even if incomplete b.endEscape() } b.terminateCell(false) } // format contents of buffer b.format(0, 0, len(b.lines)) return nil } var hbar = []byte("---\n") // Write writes buf to the writer b. // The only errors returned are ones encountered // while writing to the underlying output stream. // func (b *Writer) Write(buf []byte) (n int, err error) { defer handlePanic(&err, "Write") // split text into cells n = 0 for i, ch := range buf { if b.endChar == 0 { // outside escape switch ch { case '\t', '\v', '\n', '\f': // end of cell b.append(buf[n:i]) b.updateWidth() n = i + 1 // ch consumed ncells := b.terminateCell(ch == '\t') if ch == '\n' || ch == '\f' { // terminate line b.addLine(ch == '\f') if ch == '\f' || ncells == 1 { // A '\f' always forces a flush. Otherwise, if the previous // line has only one cell which does not have an impact on // the formatting of the following lines (the last cell per // line is ignored by format()), thus we can flush the // Writer contents. 
if err = b.Flush(); err != nil { return } if ch == '\f' && b.flags&Debug != 0 { // indicate section break b.write0(hbar) } } } case Escape: // start of escaped sequence b.append(buf[n:i]) b.updateWidth() n = i if b.flags&StripEscape != 0 { n++ // strip Escape } b.startEscape(Escape) case '<', '&': // possibly an html tag/entity if b.flags&FilterHTML != 0 { // begin of tag/entity b.append(buf[n:i]) b.updateWidth() n = i b.startEscape(ch) } } } else { // inside escape if ch == b.endChar { // end of tag/entity j := i + 1 if ch == Escape && b.flags&StripEscape != 0 { j = i // strip Escape } b.append(buf[n:j]) n = i + 1 // ch consumed b.endEscape() } } } // append leftover text b.append(buf[n:]) n = len(buf) return } // NewWriter allocates and initializes a new tabwriter.Writer. // The parameters are the same as for the Init function. // func NewWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer { return new(Writer).Init(output, minwidth, tabwidth, padding, padchar, flags) }
9,675
0
kubeflow_public_repos/fate-operator/vendor/github.com/liggitt
kubeflow_public_repos/fate-operator/vendor/github.com/liggitt/tabwriter/.travis.yml
language: go go: - "1.8" - "1.9" - "1.10" - "1.11" - "1.12" - master script: go test -v ./...
9,676
0
kubeflow_public_repos/fate-operator/vendor/github.com/lann
kubeflow_public_repos/fate-operator/vendor/github.com/lann/ps/profile.sh
#!/bin/sh go test -c ./ps.test -test.run=none -test.bench=$2 -test.$1profile=$1.profile
9,677
0
kubeflow_public_repos/fate-operator/vendor/github.com/lann
kubeflow_public_repos/fate-operator/vendor/github.com/lann/ps/README.md
**This is a stable fork of https://github.com/mndrix/ps; it will not introduce breaking changes.** ps == Persistent data structures for Go. See the [full package documentation](http://godoc.org/github.com/lann/ps) Install with go get github.com/lann/ps
9,678
0
kubeflow_public_repos/fate-operator/vendor/github.com/lann
kubeflow_public_repos/fate-operator/vendor/github.com/lann/ps/map.go
// Fully persistent data structures. A persistent data structure is a data
// structure that always preserves the previous version of itself when
// it is modified. Such data structures are effectively immutable,
// as their operations do not update the structure in-place, but instead
// always yield a new structure.
//
// Persistent
// data structures typically share structure among themselves.  This allows
// operations to avoid copying the entire data structure.
package ps

import (
	"bytes"
	"fmt"
)

// Any is a shorthand for Go's verbose interface{} type.
type Any interface{}

// A Map associates unique keys (type string) with values (type Any).
type Map interface {
	// IsNil returns true if the Map is empty
	IsNil() bool

	// Set returns a new map in which key and value are associated.
	// If the key didn't exist before, it's created; otherwise, the
	// associated value is changed.
	// This operation is O(log N) in the number of keys.
	Set(key string, value Any) Map

	// Delete returns a new map with the association for key, if any, removed.
	// This operation is O(log N) in the number of keys.
	Delete(key string) Map

	// Lookup returns the value associated with a key, if any.  If the key
	// exists, the second return value is true; otherwise, false.
	// This operation is O(log N) in the number of keys.
	Lookup(key string) (Any, bool)

	// Size returns the number of key value pairs in the map.
	// This takes O(1) time.
	Size() int

	// ForEach executes a callback on each key value pair in the map.
	ForEach(f func(key string, val Any))

	// Keys returns a slice with all keys in this map.
	// This operation is O(N) in the number of keys.
	Keys() []string

	String() string
}

// Immutable (i.e. persistent) associative array
const childCount = 8
const shiftSize = 3

// tree is one node of an 8-way hash trie: each level consumes shiftSize (3)
// bits of the key's hash (via partialHash % childCount below) to choose one
// of childCount (8) children.
type tree struct {
	count    int    // number of key/value pairs in this subtree; lets Size run in O(1)
	hash     uint64 // hash of the key (used for tree balancing)
	key      string
	value    Any
	children [childCount]*tree
}

var nilMap = &tree{}

// Recursively set nilMap's subtrees to point at itself.
// This eliminates all nil pointers in the map structure.
// All map nodes are created by cloning this structure so
// they avoid the problem too.
func init() {
	for i := range nilMap.children {
		nilMap.children[i] = nilMap
	}
}

// NewMap allocates a new, persistent map from strings to values of
// any type.
// This is currently implemented as a path-copying binary tree.
func NewMap() Map {
	return nilMap
}

func (self *tree) IsNil() bool {
	return self == nilMap
}

// clone returns an exact duplicate of a tree node
func (self *tree) clone() *tree {
	var m tree
	m = *self
	return &m
}

// constants for FNV-1a hash algorithm
const (
	offset64 uint64 = 14695981039346656037
	prime64  uint64 = 1099511628211
)

// hashKey returns a hash code for a given string.
// It folds FNV-1a over the string's runes (not its raw bytes).
func hashKey(key string) uint64 {
	hash := offset64
	for _, codepoint := range key {
		hash ^= uint64(codepoint)
		hash *= prime64
	}
	return hash
}

// Set returns a new map similar to this one but with key and value
// associated.  If the key didn't exist, it's created; otherwise, the
// associated value is changed.
func (self *tree) Set(key string, value Any) Map {
	hash := hashKey(key)
	return setLowLevel(self, hash, hash, key, value)
}

// setLowLevel descends the trie, cloning every node on the path so the
// previous version stays intact (path copying). partialHash is the
// not-yet-consumed remainder of hash.
// NOTE(review): nodes are matched purely by 64-bit hash, never by key;
// two distinct keys whose hashes collide would overwrite each other —
// confirm this is an accepted limitation upstream.
func setLowLevel(self *tree, partialHash, hash uint64, key string, value Any) *tree {
	if self.IsNil() { // an empty tree is easy
		m := self.clone()
		m.count = 1
		m.hash = hash
		m.key = key
		m.value = value
		return m
	}

	if hash != self.hash {
		m := self.clone()
		i := partialHash % childCount
		m.children[i] = setLowLevel(self.children[i], partialHash>>shiftSize, hash, key, value)
		recalculateCount(m)
		return m
	}

	// replacing a key's previous value
	m := self.clone()
	m.value = value
	return m
}

// modifies a map by recalculating its key count based on the counts
// of its subtrees
func recalculateCount(m *tree) {
	count := 0
	for _, t := range m.children {
		count += t.Size()
	}
	m.count = count + 1 // add one to count ourself
}

func (m *tree) Delete(key string) Map {
	hash := hashKey(key)
	newMap, _ := deleteLowLevel(m, hash, hash)
	return newMap
}

// deleteLowLevel removes the node matching hash, returning the new tree and
// whether anything was actually deleted. Like setLowLevel, it clones each
// node on the descent path.
func deleteLowLevel(self *tree, partialHash, hash uint64) (*tree, bool) {
	// empty trees are easy
	if self.IsNil() {
		return self, false
	}

	if hash != self.hash {
		i := partialHash % childCount
		child, found := deleteLowLevel(self.children[i], partialHash>>shiftSize, hash)
		if !found {
			return self, false
		}
		newMap := self.clone()
		newMap.children[i] = child
		recalculateCount(newMap)
		return newMap, true // ? this wasn't in the original code
	}

	// we must delete our own node
	if self.isLeaf() { // we have no children
		return nilMap, true
	}
	/*
		if self.subtreeCount() == 1 { // only one subtree
			for _, t := range self.children {
				if t != nilMap {
					return t, true
				}
			}
			panic("Tree with 1 subtree actually had no subtrees")
		}
	*/

	// find a node to replace us
	i := -1
	size := -1
	for j, t := range self.children {
		if t.Size() > size {
			i = j
			size = t.Size()
		}
	}

	// make chosen leaf smaller
	replacement, child := self.children[i].deleteLeftmost()
	newMap := replacement.clone()
	for j := range self.children {
		if j == i {
			newMap.children[j] = child
		} else {
			newMap.children[j] = self.children[j]
		}
	}
	recalculateCount(newMap)
	return newMap, true
}

// delete the leftmost node in a tree returning the node that
// was deleted and the tree left over after its deletion
func (m *tree) deleteLeftmost() (*tree, *tree) {
	if m.isLeaf() {
		return m, nilMap
	}

	for i, t := range m.children {
		if t != nilMap {
			deleted, child := t.deleteLeftmost()
			newMap := m.clone()
			newMap.children[i] = child
			recalculateCount(newMap)
			return deleted, newMap
		}
	}
	panic("Tree isn't a leaf but also had no children. How does that happen?")
}

// isLeaf returns true if this is a leaf node
func (m *tree) isLeaf() bool {
	return m.Size() == 1
}

// returns the number of child subtrees we have
func (m *tree) subtreeCount() int {
	count := 0
	for _, t := range m.children {
		if t != nilMap {
			count++
		}
	}
	return count
}

func (m *tree) Lookup(key string) (Any, bool) {
	hash := hashKey(key)
	return lookupLowLevel(m, hash, hash)
}

// lookupLowLevel mirrors setLowLevel's descent: follow 3-bit chunks of
// partialHash until a node with a matching full hash is found.
func lookupLowLevel(self *tree, partialHash, hash uint64) (Any, bool) {
	if self.IsNil() { // an empty tree is easy
		return nil, false
	}

	if hash != self.hash {
		i := partialHash % childCount
		return lookupLowLevel(self.children[i], partialHash>>shiftSize, hash)
	}

	// we found it
	return self.value, true
}

func (m *tree) Size() int {
	return m.count
}

func (m *tree) ForEach(f func(key string, val Any)) {
	if m.IsNil() {
		return
	}

	// ourself
	f(m.key, m.value)

	// children
	for _, t := range m.children {
		if t != nilMap {
			t.ForEach(f)
		}
	}
}

func (m *tree) Keys() []string {
	keys := make([]string, m.Size())
	i := 0
	m.ForEach(func(k string, v Any) {
		keys[i] = k
		i++
	})
	return keys
}

// make it easier to display maps for debugging
func (m *tree) String() string {
	keys := m.Keys()
	buf := bytes.NewBufferString("{")
	for _, key := range keys {
		val, _ := m.Lookup(key)
		fmt.Fprintf(buf, "%s: %s, ", key, val)
	}
	fmt.Fprintf(buf, "}\n")
	return buf.String()
}
9,679
0
kubeflow_public_repos/fate-operator/vendor/github.com/lann
kubeflow_public_repos/fate-operator/vendor/github.com/lann/ps/LICENSE
Copyright (c) 2013 Michael Hendricks Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
9,680
0
kubeflow_public_repos/fate-operator/vendor/github.com/lann
kubeflow_public_repos/fate-operator/vendor/github.com/lann/ps/list.go
package ps // List is a persistent list of possibly heterogenous values. type List interface { // IsNil returns true if the list is empty IsNil() bool // Cons returns a new list with val as the head Cons(val Any) List // Head returns the first element of the list; // panics if the list is empty Head() Any // Tail returns a list with all elements except the head; // panics if the list is empty Tail() List // Size returns the list's length. This takes O(1) time. Size() int // ForEach executes a callback for each value in the list. ForEach(f func(Any)) // Reverse returns a list whose elements are in the opposite order as // the original list. Reverse() List } // Immutable (i.e. persistent) list type list struct { depth int // the number of nodes after, and including, this one value Any tail *list } // An empty list shared by all lists var nilList = &list{} // NewList returns a new, empty list. The result is a singly linked // list implementation. All lists share an empty tail, so allocating // empty lists is efficient in time and memory. func NewList() List { return nilList } func (self *list) IsNil() bool { return self == nilList } func (self *list) Size() int { return self.depth } func (tail *list) Cons(val Any) List { var xs list xs.depth = tail.depth + 1 xs.value = val xs.tail = tail return &xs } func (self *list) Head() Any { if self.IsNil() { panic("Called Head() on an empty list") } return self.value } func (self *list) Tail() List { if self.IsNil() { panic("Called Tail() on an empty list") } return self.tail } // ForEach executes a callback for each value in the list func (self *list) ForEach(f func(Any)) { if self.IsNil() { return } f(self.Head()) self.Tail().ForEach(f) } // Reverse returns a list with elements in opposite order as this list func (self *list) Reverse() List { reversed := NewList() self.ForEach(func(v Any) { reversed = reversed.Cons(v) }) return reversed }
9,681
0
kubeflow_public_repos/fate-operator/vendor/github.com/lann
kubeflow_public_repos/fate-operator/vendor/github.com/lann/builder/registry.go
package builder

import (
	"reflect"
	"sync"
)

var (
	// registry maps a builder type to the struct type it builds;
	// all access is guarded by registryMux.
	registry    = make(map[reflect.Type]reflect.Type)
	registryMux sync.RWMutex
)

// RegisterType maps the given builderType to a structType.
// This mapping affects the type of slices returned by Get and is required for
// GetStruct to work.
//
// Returns a Value containing an empty instance of the registered builderType.
//
// RegisterType will panic if builderType's underlying type is not Builder or
// if structType's Kind is not Struct.
func RegisterType(builderType reflect.Type, structType reflect.Type) *reflect.Value {
	registryMux.Lock()
	defer registryMux.Unlock()
	structType.NumField() // Panic if structType is not a struct
	registry[builderType] = structType
	// Convert panics if builderType's underlying type is not Builder,
	// satisfying the panic contract documented above.
	emptyValue := emptyBuilderValue.Convert(builderType)
	return &emptyValue
}

// Register wraps RegisterType, taking instances instead of Types.
//
// Returns an empty instance of the registered builder type which can be used
// as the initial value for builder expressions. See example.
func Register(builderProto, structProto interface{}) interface{} {
	empty := RegisterType(
		reflect.TypeOf(builderProto),
		reflect.TypeOf(structProto),
	).Interface()
	return empty
}

// getBuilderStructType returns the struct type registered for builderType,
// or nil if builderType has never been registered.
func getBuilderStructType(builderType reflect.Type) *reflect.Type {
	registryMux.RLock()
	defer registryMux.RUnlock()
	structType, ok := registry[builderType]
	if !ok {
		return nil
	}
	return &structType
}

// newBuilderStruct returns an addressable zero value of the struct type
// registered for builderType, or nil if builderType is unregistered.
func newBuilderStruct(builderType reflect.Type) *reflect.Value {
	structType := getBuilderStructType(builderType)
	if structType == nil {
		return nil
	}
	newStruct := reflect.New(*structType).Elem()
	return &newStruct
}
9,682
0
kubeflow_public_repos/fate-operator/vendor/github.com/lann
kubeflow_public_repos/fate-operator/vendor/github.com/lann/builder/builder.go
// Package builder provides a method for writing fluent immutable builders.
package builder

import (
	"github.com/lann/ps"
	"go/ast"
	"reflect"
)

// Builder stores a set of named values.
//
// New types can be declared with underlying type Builder and used with the
// functions in this package. See example.
//
// Instances of Builder should be treated as immutable. It is up to the
// implementor to ensure mutable values set on a Builder are not mutated while
// the Builder is in use.
type Builder struct {
	builderMap ps.Map
}

var (
	EmptyBuilder      = Builder{ps.NewMap()}
	emptyBuilderValue = reflect.ValueOf(EmptyBuilder)
)

// getBuilderMap extracts the persistent map from any value whose underlying
// type is Builder, substituting a fresh empty map for the zero value.
// Like the exported functions below, it panics (via convert) if builder's
// underlying type is not Builder.
func getBuilderMap(builder interface{}) ps.Map {
	b := convert(builder, Builder{}).(Builder)

	if b.builderMap == nil {
		return ps.NewMap()
	}

	return b.builderMap
}

// Set returns a copy of the given builder with a new value set for the given
// name.
//
// Set (and all other functions taking a builder in this package) will panic if
// the given builder's underlying type is not Builder.
func Set(builder interface{}, name string, v interface{}) interface{} {
	b := Builder{getBuilderMap(builder).Set(name, v)}
	return convert(b, builder)
}

// Delete returns a copy of the given builder with the given named value unset.
func Delete(builder interface{}, name string) interface{} {
	b := Builder{getBuilderMap(builder).Delete(name)}
	return convert(b, builder)
}

// Append returns a copy of the given builder with new value(s) appended to the
// named list. If the value was previously unset or set with Set (even to e.g.
// slice values), the new value(s) will be appended to an empty list.
func Append(builder interface{}, name string, vs ...interface{}) interface{} {
	return Extend(builder, name, vs)
}

// Extend behaves like Append, except it takes a single slice or array value
// which will be concatenated to the named list.
//
// Unlike a variadic call to Append - which requires a []interface{} value -
// Extend accepts slices or arrays of any type.
//
// Extend will panic if the given value is not a slice, array, or nil.
func Extend(builder interface{}, name string, vs interface{}) interface{} {
	if vs == nil {
		return builder
	}

	maybeList, ok := getBuilderMap(builder).Lookup(name)

	var list ps.List
	if ok {
		list, ok = maybeList.(ps.List)
	}
	if !ok {
		list = ps.NewList()
	}

	forEach(vs, func(v interface{}) {
		list = list.Cons(v)
	})

	return Set(builder, name, list)
}

// listToSlice converts list into a new slice of type arrayType.
// Cons prepends, so the list holds elements newest-first; filling the slice
// from the last index down restores insertion order.
func listToSlice(list ps.List, arrayType reflect.Type) reflect.Value {
	size := list.Size()
	slice := reflect.MakeSlice(arrayType, size, size)
	for i := size - 1; i >= 0; i-- {
		val := reflect.ValueOf(list.Head())
		slice.Index(i).Set(val)
		list = list.Tail()
	}
	return slice
}

// anyArrayType is the fallback slice type for unregistered builders.
var anyArrayType = reflect.TypeOf([]interface{}{})

// Get retrieves a single named value from the given builder.
// If the value has not been set, it returns (nil, false). Otherwise, it will
// return (value, true).
//
// If the named value was last set with Append or Extend, the returned value
// will be a slice. If the given Builder has been registered with Register or
// RegisterType and the given name is an exported field of the registered
// struct, the returned slice will have the same type as that field. Otherwise
// the slice will have type []interface{}. It will panic if the given name is a
// registered struct's exported field and the value set on the Builder is not
// assignable to the field.
func Get(builder interface{}, name string) (interface{}, bool) {
	val, ok := getBuilderMap(builder).Lookup(name)
	if !ok {
		return nil, false
	}

	list, isList := val.(ps.List)
	if isList {
		arrayType := anyArrayType

		if ast.IsExported(name) {
			structType := getBuilderStructType(reflect.TypeOf(builder))
			if structType != nil {
				field, ok := (*structType).FieldByName(name)
				if ok {
					arrayType = field.Type
				}
			}
		}

		val = listToSlice(list, arrayType).Interface()
	}

	return val, true
}

// GetMap returns a map[string]interface{} of the values set in the given
// builder.
//
// See notes on Get regarding returned slices.
func GetMap(builder interface{}) map[string]interface{} {
	m := getBuilderMap(builder)
	structType := getBuilderStructType(reflect.TypeOf(builder))

	ret := make(map[string]interface{}, m.Size())

	m.ForEach(func(name string, val ps.Any) {
		// Same list-to-slice conversion as Get, applied per entry.
		list, isList := val.(ps.List)
		if isList {
			arrayType := anyArrayType
			if structType != nil {
				field, ok := (*structType).FieldByName(name)
				if ok {
					arrayType = field.Type
				}
			}
			val = listToSlice(list, arrayType).Interface()
		}

		ret[name] = val
	})

	return ret
}

// GetStruct builds a new struct from the given registered builder.
// It will return nil if the given builder's type has not been registered with
// Register or RegisterValue.
//
// All values set on the builder with names that start with an uppercase letter
// (i.e. which would be exported if they were identifiers) are assigned to the
// corresponding exported fields of the struct.
//
// GetStruct will panic if any of these "exported" values are not assignable to
// their corresponding struct fields.
func GetStruct(builder interface{}) interface{} {
	structVal := newBuilderStruct(reflect.TypeOf(builder))
	if structVal == nil {
		return nil
	}
	return scanStruct(builder, structVal)
}

// GetStructLike builds a new struct from the given builder with the same type
// as the given struct.
//
// All values set on the builder with names that start with an uppercase letter
// (i.e. which would be exported if they were identifiers) are assigned to the
// corresponding exported fields of the struct.
//
// ScanStruct will panic if any of these "exported" values are not assignable to
// their corresponding struct fields.
func GetStructLike(builder interface{}, strct interface{}) interface{} {
	structVal := reflect.New(reflect.TypeOf(strct)).Elem()
	return scanStruct(builder, &structVal)
}

// scanStruct copies each "exported"-named value from the builder's map into
// the matching field of structVal and returns the resulting struct value.
func scanStruct(builder interface{}, structVal *reflect.Value) interface{} {
	getBuilderMap(builder).ForEach(func(name string, val ps.Any) {
		if ast.IsExported(name) {
			field := structVal.FieldByName(name)

			var value reflect.Value
			switch v := val.(type) {
			case nil:
				switch field.Kind() {
				case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
					value = reflect.Zero(field.Type())
				}
				// nil is not valid for this Type; Set will panic
			case ps.List:
				value = listToSlice(v, field.Type())
			default:
				value = reflect.ValueOf(val)
			}
			field.Set(value)
		}
	})
	return structVal.Interface()
}
9,683
0
kubeflow_public_repos/fate-operator/vendor/github.com/lann
kubeflow_public_repos/fate-operator/vendor/github.com/lann/builder/README.md
# Builder - fluent immutable builders for Go [![GoDoc](https://godoc.org/github.com/lann/builder?status.png)](https://godoc.org/github.com/lann/builder) [![Build Status](https://travis-ci.org/lann/builder.png?branch=master)](https://travis-ci.org/lann/builder) Builder was originally written for [Squirrel](https://github.com/lann/squirrel), a fluent SQL generator. It is probably the best example of Builder in action. Builder helps you write **fluent** DSLs for your libraries with method chaining: ```go resp := ReqBuilder. Url("http://golang.org"). Header("User-Agent", "Builder"). Get() ``` Builder uses **immutable** persistent data structures ([these](https://github.com/mndrix/ps), specifically) so that each step in your method chain can be reused: ```go build := WordBuilder.AddLetters("Build") builder := build.AddLetters("er") building := build.AddLetters("ing") ``` Builder makes it easy to **build** structs using the **builder** pattern (*surprise!*): ```go import "github.com/lann/builder" type Muppet struct { Name string Friends []string } type muppetBuilder builder.Builder func (b muppetBuilder) Name(name string) muppetBuilder { return builder.Set(b, "Name", name).(muppetBuilder) } func (b muppetBuilder) AddFriend(friend string) muppetBuilder { return builder.Append(b, "Friends", friend).(muppetBuilder) } func (b muppetBuilder) Build() Muppet { return builder.GetStruct(b).(Muppet) } var MuppetBuilder = builder.Register(muppetBuilder{}, Muppet{}).(muppetBuilder) ``` ```go MuppetBuilder. Name("Beaker"). AddFriend("Dr. Honeydew"). Build() => Muppet{Name:"Beaker", Friends:[]string{"Dr. Honeydew"}} ``` ## License Builder is released under the [MIT License](http://www.opensource.org/licenses/MIT).
9,684
0
kubeflow_public_repos/fate-operator/vendor/github.com/lann
kubeflow_public_repos/fate-operator/vendor/github.com/lann/builder/reflect.go
package builder

import "reflect"

// convert returns from, re-typed to the dynamic type of to.
// It panics if Go's conversion rules do not allow the conversion.
func convert(from interface{}, to interface{}) interface{} {
	target := reflect.TypeOf(to)
	return reflect.ValueOf(from).Convert(target).Interface()
}

// forEach invokes f once per element of the slice or array s, in order.
// Passing any other kind of value panics with a *reflect.ValueError.
func forEach(s interface{}, f func(interface{})) {
	v := reflect.ValueOf(s)

	switch v.Kind() {
	case reflect.Slice, reflect.Array:
		// supported; fall through to the iteration below
	default:
		panic(&reflect.ValueError{Method: "builder.forEach", Kind: v.Kind()})
	}

	for i := 0; i < v.Len(); i++ {
		f(v.Index(i).Interface())
	}
}
9,685
0
kubeflow_public_repos/fate-operator/vendor/github.com/lann
kubeflow_public_repos/fate-operator/vendor/github.com/lann/builder/LICENSE
MIT License Copyright (c) 2014-2015 Lann Martin Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
9,686
0
kubeflow_public_repos/fate-operator/vendor/github.com/lann
kubeflow_public_repos/fate-operator/vendor/github.com/lann/builder/.travis.yml
language: go go: - '1.8' - '1.9' - '1.10' - tip
9,687
0
kubeflow_public_repos/fate-operator/vendor/github.com/asaskevich
kubeflow_public_repos/fate-operator/vendor/github.com/asaskevich/govalidator/go.mod
module github.com/asaskevich/govalidator go 1.12
9,688
0
kubeflow_public_repos/fate-operator/vendor/github.com/asaskevich
kubeflow_public_repos/fate-operator/vendor/github.com/asaskevich/govalidator/error.go
package govalidator

import (
	"sort"
	"strings"
)

// Errors is an array of multiple errors and conforms to the error interface.
type Errors []error

// Errors returns itself.
func (es Errors) Errors() []error {
	return es
}

// Error joins the messages of all contained errors, sorted
// alphabetically and separated by ";".
func (es Errors) Error() string {
	messages := make([]string, 0, len(es))
	for _, err := range es {
		messages = append(messages, err.Error())
	}
	sort.Strings(messages)
	return strings.Join(messages, ";")
}

// Error encapsulates a name, an error and whether there's a custom error message or not.
type Error struct {
	Name                     string
	Err                      error
	CustomErrorMessageExists bool

	// Validator indicates the name of the validator that failed
	Validator string
	Path      []string
}

// Error renders the error as "Name: message", where Name is prefixed with
// the dot-joined Path when one is present. When a custom error message
// exists, only the wrapped error's message is returned.
func (e Error) Error() string {
	if e.CustomErrorMessageExists {
		return e.Err.Error()
	}

	prefix := e.Name
	if len(e.Path) > 0 {
		prefix = strings.Join(append(e.Path, e.Name), ".")
	}
	return prefix + ": " + e.Err.Error()
}
9,689
0
kubeflow_public_repos/fate-operator/vendor/github.com/asaskevich
kubeflow_public_repos/fate-operator/vendor/github.com/asaskevich/govalidator/README.md
govalidator =========== [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![GoDoc](https://godoc.org/github.com/asaskevich/govalidator?status.png)](https://godoc.org/github.com/asaskevich/govalidator) [![Coverage Status](https://img.shields.io/coveralls/asaskevich/govalidator.svg)](https://coveralls.io/r/asaskevich/govalidator?branch=master) [![wercker status](https://app.wercker.com/status/1ec990b09ea86c910d5f08b0e02c6043/s "wercker status")](https://app.wercker.com/project/bykey/1ec990b09ea86c910d5f08b0e02c6043) [![Build Status](https://travis-ci.org/asaskevich/govalidator.svg?branch=master)](https://travis-ci.org/asaskevich/govalidator) [![Go Report Card](https://goreportcard.com/badge/github.com/asaskevich/govalidator)](https://goreportcard.com/report/github.com/asaskevich/govalidator) [![GoSearch](http://go-search.org/badge?id=github.com%2Fasaskevich%2Fgovalidator)](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) [![Backers on Open Collective](https://opencollective.com/govalidator/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/govalidator/sponsors/badge.svg)](#sponsors) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_shield) A package of validators and sanitizers for strings, structs and collections. Based on [validator.js](https://github.com/chriso/validator.js). #### Installation Make sure that Go is installed on your computer. Type the following command in your terminal: go get github.com/asaskevich/govalidator or you can get specified release of the package with `gopkg.in`: go get gopkg.in/asaskevich/govalidator.v10 After it the package is ready to use. 
#### Import package in your project Add following line in your `*.go` file: ```go import "github.com/asaskevich/govalidator" ``` If you are unhappy to use long `govalidator`, you can do something like this: ```go import ( valid "github.com/asaskevich/govalidator" ) ``` #### Activate behavior to require all fields have a validation tag by default `SetFieldsRequiredByDefault` causes validation to fail when struct fields do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). A good place to activate this is a package init function or the main() function. `SetNilPtrAllowedByRequired` causes validation to pass when struct fields marked by `required` are set to nil. This is disabled by default for consistency, but some packages that need to be able to determine between `nil` and `zero value` state can use this. If disabled, both `nil` and `zero` values cause validation errors. ```go import "github.com/asaskevich/govalidator" func init() { govalidator.SetFieldsRequiredByDefault(true) } ``` Here's some code to explain it: ```go // this struct definition will fail govalidator.ValidateStruct() (and the field values do not matter): type exampleStruct struct { Name string `` Email string `valid:"email"` } // this, however, will only fail when Email is empty or an invalid email address: type exampleStruct2 struct { Name string `valid:"-"` Email string `valid:"email"` } // lastly, this will only fail when Email is an invalid email address but not when it's empty: type exampleStruct2 struct { Name string `valid:"-"` Email string `valid:"email,optional"` } ``` #### Recent breaking changes (see [#123](https://github.com/asaskevich/govalidator/pull/123)) ##### Custom validator function signature A context was added as the second parameter, for structs this is the object being validated – this makes dependent validation possible. 
```go import "github.com/asaskevich/govalidator" // old signature func(i interface{}) bool // new signature func(i interface{}, o interface{}) bool ``` ##### Adding a custom validator This was changed to prevent data races when accessing custom validators. ```go import "github.com/asaskevich/govalidator" // before govalidator.CustomTypeTagMap["customByteArrayValidator"] = func(i interface{}, o interface{}) bool { // ... } // after govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, o interface{}) bool { // ... }) ``` #### List of functions: ```go func Abs(value float64) float64 func BlackList(str, chars string) string func ByteLength(str string, params ...string) bool func CamelCaseToUnderscore(str string) string func Contains(str, substring string) bool func Count(array []interface{}, iterator ConditionIterator) int func Each(array []interface{}, iterator Iterator) func ErrorByField(e error, field string) string func ErrorsByField(e error) map[string]string func Filter(array []interface{}, iterator ConditionIterator) []interface{} func Find(array []interface{}, iterator ConditionIterator) interface{} func GetLine(s string, index int) (string, error) func GetLines(s string) []string func HasLowerCase(str string) bool func HasUpperCase(str string) bool func HasWhitespace(str string) bool func HasWhitespaceOnly(str string) bool func InRange(value interface{}, left interface{}, right interface{}) bool func InRangeFloat32(value, left, right float32) bool func InRangeFloat64(value, left, right float64) bool func InRangeInt(value, left, right interface{}) bool func IsASCII(str string) bool func IsAlpha(str string) bool func IsAlphanumeric(str string) bool func IsBase64(str string) bool func IsByteLength(str string, min, max int) bool func IsCIDR(str string) bool func IsCRC32(str string) bool func IsCRC32b(str string) bool func IsCreditCard(str string) bool func IsDNSName(str string) bool func IsDataURI(str string) bool func IsDialString(str 
string) bool func IsDivisibleBy(str, num string) bool func IsEmail(str string) bool func IsExistingEmail(email string) bool func IsFilePath(str string) (bool, int) func IsFloat(str string) bool func IsFullWidth(str string) bool func IsHalfWidth(str string) bool func IsHash(str string, algorithm string) bool func IsHexadecimal(str string) bool func IsHexcolor(str string) bool func IsHost(str string) bool func IsIP(str string) bool func IsIPv4(str string) bool func IsIPv6(str string) bool func IsISBN(str string, version int) bool func IsISBN10(str string) bool func IsISBN13(str string) bool func IsISO3166Alpha2(str string) bool func IsISO3166Alpha3(str string) bool func IsISO4217(str string) bool func IsISO693Alpha2(str string) bool func IsISO693Alpha3b(str string) bool func IsIn(str string, params ...string) bool func IsInRaw(str string, params ...string) bool func IsInt(str string) bool func IsJSON(str string) bool func IsLatitude(str string) bool func IsLongitude(str string) bool func IsLowerCase(str string) bool func IsMAC(str string) bool func IsMD4(str string) bool func IsMD5(str string) bool func IsMagnetURI(str string) bool func IsMongoID(str string) bool func IsMultibyte(str string) bool func IsNatural(value float64) bool func IsNegative(value float64) bool func IsNonNegative(value float64) bool func IsNonPositive(value float64) bool func IsNotNull(str string) bool func IsNull(str string) bool func IsNumeric(str string) bool func IsPort(str string) bool func IsPositive(value float64) bool func IsPrintableASCII(str string) bool func IsRFC3339(str string) bool func IsRFC3339WithoutZone(str string) bool func IsRGBcolor(str string) bool func IsRequestURI(rawurl string) bool func IsRequestURL(rawurl string) bool func IsRipeMD128(str string) bool func IsRipeMD160(str string) bool func IsRsaPub(str string, params ...string) bool func IsRsaPublicKey(str string, keylen int) bool func IsSHA1(str string) bool func IsSHA256(str string) bool func IsSHA384(str string) 
bool func IsSHA512(str string) bool func IsSSN(str string) bool func IsSemver(str string) bool func IsTiger128(str string) bool func IsTiger160(str string) bool func IsTiger192(str string) bool func IsTime(str string, format string) bool func IsType(v interface{}, params ...string) bool func IsURL(str string) bool func IsUTFDigit(str string) bool func IsUTFLetter(str string) bool func IsUTFLetterNumeric(str string) bool func IsUTFNumeric(str string) bool func IsUUID(str string) bool func IsUUIDv3(str string) bool func IsUUIDv4(str string) bool func IsUUIDv5(str string) bool func IsUnixTime(str string) bool func IsUpperCase(str string) bool func IsVariableWidth(str string) bool func IsWhole(value float64) bool func LeftTrim(str, chars string) string func Map(array []interface{}, iterator ResultIterator) []interface{} func Matches(str, pattern string) bool func MaxStringLength(str string, params ...string) bool func MinStringLength(str string, params ...string) bool func NormalizeEmail(str string) (string, error) func PadBoth(str string, padStr string, padLen int) string func PadLeft(str string, padStr string, padLen int) string func PadRight(str string, padStr string, padLen int) string func PrependPathToErrors(err error, path string) error func Range(str string, params ...string) bool func RemoveTags(s string) string func ReplacePattern(str, pattern, replace string) string func Reverse(s string) string func RightTrim(str, chars string) string func RuneLength(str string, params ...string) bool func SafeFileName(str string) string func SetFieldsRequiredByDefault(value bool) func SetNilPtrAllowedByRequired(value bool) func Sign(value float64) float64 func StringLength(str string, params ...string) bool func StringMatches(s string, params ...string) bool func StripLow(str string, keepNewLines bool) string func ToBoolean(str string) (bool, error) func ToFloat(str string) (float64, error) func ToInt(value interface{}) (res int64, err error) func ToJSON(obj interface{}) 
(string, error) func ToString(obj interface{}) string func Trim(str, chars string) string func Truncate(str string, length int, ending string) string func TruncatingErrorf(str string, args ...interface{}) error func UnderscoreToCamelCase(s string) string func ValidateMap(inputMap map[string]interface{}, validationMap map[string]interface{}) (bool, error) func ValidateStruct(s interface{}) (bool, error) func WhiteList(str, chars string) string type ConditionIterator type CustomTypeValidator type Error func (e Error) Error() string type Errors func (es Errors) Error() string func (es Errors) Errors() []error type ISO3166Entry type ISO693Entry type InterfaceParamValidator type Iterator type ParamValidator type ResultIterator type UnsupportedTypeError func (e *UnsupportedTypeError) Error() string type Validator ``` #### Examples ###### IsURL ```go println(govalidator.IsURL(`http://user@pass:domain.com/path/page`)) ``` ###### IsType ```go println(govalidator.IsType("Bob", "string")) println(govalidator.IsType(1, "int")) i := 1 println(govalidator.IsType(&i, "*int")) ``` IsType can be used through the tag `type` which is essential for map validation: ```go type User struct { Name string `valid:"type(string)"` Age int `valid:"type(int)"` Meta interface{} `valid:"type(string)"` } result, err := govalidator.ValidateStruct(user{"Bob", 20, "meta"}) if err != nil { println("error: " + err.Error()) } println(result) ``` ###### ToString ```go type User struct { FirstName string LastName string } str := govalidator.ToString(&User{"John", "Juan"}) println(str) ``` ###### Each, Map, Filter, Count for slices Each iterates over the slice/array and calls Iterator for every item ```go data := []interface{}{1, 2, 3, 4, 5} var fn govalidator.Iterator = func(value interface{}, index int) { println(value.(int)) } govalidator.Each(data, fn) ``` ```go data := []interface{}{1, 2, 3, 4, 5} var fn govalidator.ResultIterator = func(value interface{}, index int) interface{} { return value.(int) * 
3 } _ = govalidator.Map(data, fn) // result = []interface{}{1, 6, 9, 12, 15} ``` ```go data := []interface{}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} var fn govalidator.ConditionIterator = func(value interface{}, index int) bool { return value.(int)%2 == 0 } _ = govalidator.Filter(data, fn) // result = []interface{}{2, 4, 6, 8, 10} _ = govalidator.Count(data, fn) // result = 5 ``` ###### ValidateStruct [#2](https://github.com/asaskevich/govalidator/pull/2) If you want to validate structs, you can use tag `valid` for any field in your structure. All validators used with this field in one tag are separated by comma. If you want to skip validation, place `-` in your tag. If you need a validator that is not on the list below, you can add it like this: ```go govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool { return str == "duck" }) ``` For completely custom validators (interface-based), see below. Here is a list of available validators for struct fields (validator - used function): ```go "email": IsEmail, "url": IsURL, "dialstring": IsDialString, "requrl": IsRequestURL, "requri": IsRequestURI, "alpha": IsAlpha, "utfletter": IsUTFLetter, "alphanum": IsAlphanumeric, "utfletternum": IsUTFLetterNumeric, "numeric": IsNumeric, "utfnumeric": IsUTFNumeric, "utfdigit": IsUTFDigit, "hexadecimal": IsHexadecimal, "hexcolor": IsHexcolor, "rgbcolor": IsRGBcolor, "lowercase": IsLowerCase, "uppercase": IsUpperCase, "int": IsInt, "float": IsFloat, "null": IsNull, "uuid": IsUUID, "uuidv3": IsUUIDv3, "uuidv4": IsUUIDv4, "uuidv5": IsUUIDv5, "creditcard": IsCreditCard, "isbn10": IsISBN10, "isbn13": IsISBN13, "json": IsJSON, "multibyte": IsMultibyte, "ascii": IsASCII, "printableascii": IsPrintableASCII, "fullwidth": IsFullWidth, "halfwidth": IsHalfWidth, "variablewidth": IsVariableWidth, "base64": IsBase64, "datauri": IsDataURI, "ip": IsIP, "port": IsPort, "ipv4": IsIPv4, "ipv6": IsIPv6, "dns": IsDNSName, "host": IsHost, "mac": IsMAC, "latitude": IsLatitude, "longitude": 
IsLongitude, "ssn": IsSSN, "semver": IsSemver, "rfc3339": IsRFC3339, "rfc3339WithoutZone": IsRFC3339WithoutZone, "ISO3166Alpha2": IsISO3166Alpha2, "ISO3166Alpha3": IsISO3166Alpha3, ``` Validators with parameters ```go "range(min|max)": Range, "length(min|max)": ByteLength, "runelength(min|max)": RuneLength, "stringlength(min|max)": StringLength, "matches(pattern)": StringMatches, "in(string1|string2|...|stringN)": IsIn, "rsapub(keylength)" : IsRsaPub, ``` Validators with parameters for any type ```go "type(type)": IsType, ``` And here is a small example of usage: ```go type Post struct { Title string `valid:"alphanum,required"` Message string `valid:"duck,ascii"` Message2 string `valid:"animal(dog)"` AuthorIP string `valid:"ipv4"` Date string `valid:"-"` } post := &Post{ Title: "My Example Post", Message: "duck", Message2: "dog", AuthorIP: "123.234.54.3", } // Add your own struct validation tags govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool { return str == "duck" }) // Add your own struct validation tags with parameter govalidator.ParamTagMap["animal"] = govalidator.ParamValidator(func(str string, params ...string) bool { species := params[0] return str == species }) govalidator.ParamTagRegexMap["animal"] = regexp.MustCompile("^animal\\((\\w+)\\)$") result, err := govalidator.ValidateStruct(post) if err != nil { println("error: " + err.Error()) } println(result) ``` ###### ValidateMap [#338](https://github.com/asaskevich/govalidator/pull/338) If you want to validate maps, you can use the map to be validated and a validation map that contains the same tags used in ValidateStruct; both maps have to be in the form `map[string]interface{}`. So here is a small example of usage: ```go var mapTemplate = map[string]interface{}{ "name":"required,alpha", "family":"required,alpha", "email":"required,email", "cell-phone":"numeric", "address":map[string]interface{}{ "line1":"required,alphanum", "line2":"alphanum", "postal-code":"numeric", }, } var inputMap = 
map[string]interface{}{ "name":"Bob", "family":"Smith", "email":"[email protected]", "address":map[string]interface{}{ "line1":"", "line2":"", "postal-code":"", }, } result, err := govalidator.ValidateMap(inputMap, mapTemplate) if err != nil { println("error: " + err.Error()) } println(result) ``` ###### WhiteList ```go // Remove all characters from string ignoring characters between "a" and "z" println(govalidator.WhiteList("a3a43a5a4a3a2a23a4a5a4a3a4", "a-z") == "aaaaaaaaaaaa") ``` ###### Custom validation functions Custom validation using your own domain specific validators is also available - here's an example of how to use it: ```go import "github.com/asaskevich/govalidator" type CustomByteArray [6]byte // custom types are supported and can be validated type StructWithCustomByteArray struct { ID CustomByteArray `valid:"customByteArrayValidator,customMinLengthValidator"` // multiple custom validators are possible as well and will be evaluated in sequence Email string `valid:"email"` CustomMinLength int `valid:"-"` } govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, context interface{}) bool { switch v := context.(type) { // you can type switch on the context interface being validated case StructWithCustomByteArray: // you can check and validate against some other field in the context, // return early or not validate against the context at all – your choice case SomeOtherType: // ... default: // expecting some other type? Throw/panic here or continue } switch v := i.(type) { // type switch on the struct field being validated case CustomByteArray: for _, e := range v { // this validator checks that the byte array is not empty, i.e. not all zeroes if e != 0 { return true } } } return false }) govalidator.CustomTypeTagMap.Set("customMinLengthValidator", func(i interface{}, context interface{}) bool { switch v := context.(type) { // this validates a field against the value in another field, i.e. 
dependent validation case StructWithCustomByteArray: return len(v.ID) >= v.CustomMinLength } return false }) ``` ###### Loop over Error() By default .Error() returns all errors in a single String. To access each error you can do this: ```go if err != nil { errs := err.(govalidator.Errors).Errors() for _, e := range errs { fmt.Println(e.Error()) } } ``` ###### Custom error messages Custom error messages are supported via annotations by adding the `~` separator - here's an example of how to use it: ```go type Ticket struct { Id int64 `json:"id"` FirstName string `json:"firstname" valid:"required~First name is blank"` } ``` #### Notes Documentation is available here: [godoc.org](https://godoc.org/github.com/asaskevich/govalidator). Full information about code coverage is also available here: [govalidator on gocover.io](http://gocover.io/github.com/asaskevich/govalidator). #### Support If you do have a contribution to the package, feel free to create a Pull Request or an Issue. #### What to contribute If you don't know what to do, there are some features and functions that need to be done - [ ] Refactor code - [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check - [ ] Create actual list of contributors and projects that currently using this package - [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues) - [ ] Update actual [list of functions](https://github.com/asaskevich/govalidator#list-of-functions) - [ ] Update [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that available for `ValidateStruct` and add new - [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc - [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224) - [ ] Implement fuzzing testing - [ ] Implement some struct/map/array utilities - [ ] Implement map/array validation - [ ] Implement benchmarking - [ ] Implement 
batch of examples - [ ] Look at forks for new features and fixes #### Advice Feel free to create what you want, but keep in mind when you implement new features: - Code must be clear and readable, and the names of variables/constants must clearly describe what they are doing - Public functions must be documented and described in the source file and added to the list of available functions in README.md - There must be unit-tests for any new functions and improvements ## Credits ### Contributors This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)]. #### Special thanks to [contributors](https://github.com/asaskevich/govalidator/graphs/contributors) * [Daniel Lohse](https://github.com/annismckenzie) * [Attila Oláh](https://github.com/attilaolah) * [Daniel Korner](https://github.com/Dadie) * [Steven Wilkin](https://github.com/stevenwilkin) * [Deiwin Sarjas](https://github.com/deiwin) * [Noah Shibley](https://github.com/slugmobile) * [Nathan Davies](https://github.com/nathj07) * [Matt Sanford](https://github.com/mzsanford) * [Simon ccl1115](https://github.com/ccl1115) <a href="https://github.com/asaskevich/govalidator/graphs/contributors"><img src="https://opencollective.com/govalidator/contributors.svg?width=890" /></a> ### Backers Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/govalidator#backer)] <a href="https://opencollective.com/govalidator#backers" target="_blank"><img src="https://opencollective.com/govalidator/backers.svg?width=890"></a> ### Sponsors Support this project by becoming a sponsor. Your logo will show up here with a link to your website. 
[[Become a sponsor](https://opencollective.com/govalidator#sponsor)] <a href="https://opencollective.com/govalidator/sponsor/0/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/0/avatar.svg"></a> <a href="https://opencollective.com/govalidator/sponsor/1/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/1/avatar.svg"></a> <a href="https://opencollective.com/govalidator/sponsor/2/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/2/avatar.svg"></a> <a href="https://opencollective.com/govalidator/sponsor/3/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/3/avatar.svg"></a> <a href="https://opencollective.com/govalidator/sponsor/4/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/4/avatar.svg"></a> <a href="https://opencollective.com/govalidator/sponsor/5/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/5/avatar.svg"></a> <a href="https://opencollective.com/govalidator/sponsor/6/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/6/avatar.svg"></a> <a href="https://opencollective.com/govalidator/sponsor/7/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/7/avatar.svg"></a> <a href="https://opencollective.com/govalidator/sponsor/8/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/8/avatar.svg"></a> <a href="https://opencollective.com/govalidator/sponsor/9/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/9/avatar.svg"></a> ## License [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_large)
9,690
0
kubeflow_public_repos/fate-operator/vendor/github.com/asaskevich
kubeflow_public_repos/fate-operator/vendor/github.com/asaskevich/govalidator/types.go
package govalidator import ( "reflect" "regexp" "sort" "sync" ) // Validator is a wrapper for a validator function that returns bool and accepts string. type Validator func(str string) bool // CustomTypeValidator is a wrapper for validator functions that returns bool and accepts any type. // The second parameter should be the context (in the case of validating a struct: the whole object being validated). type CustomTypeValidator func(i interface{}, o interface{}) bool // ParamValidator is a wrapper for validator functions that accepts additional parameters. type ParamValidator func(str string, params ...string) bool type InterfaceParamValidator func(in interface{}, params ...string) bool type tagOptionsMap map[string]tagOption func (t tagOptionsMap) orderedKeys() []string { var keys []string for k := range t { keys = append(keys, k) } sort.Slice(keys, func(a, b int) bool { return t[keys[a]].order < t[keys[b]].order }) return keys } type tagOption struct { name string customErrorMessage string order int } // UnsupportedTypeError is a wrapper for reflect.Type type UnsupportedTypeError struct { Type reflect.Type } // stringValues is a slice of reflect.Value holding *reflect.StringValue. // It implements the methods to sort by string. type stringValues []reflect.Value // InterfaceParamTagMap is a map of functions accept variants parameters for an interface value var InterfaceParamTagMap = map[string]InterfaceParamValidator{ "type": IsType, } // InterfaceParamTagRegexMap maps interface param tags to their respective regexes. 
var InterfaceParamTagRegexMap = map[string]*regexp.Regexp{ "type": regexp.MustCompile(`^type\((.*)\)$`), } // ParamTagMap is a map of functions accept variants parameters var ParamTagMap = map[string]ParamValidator{ "length": ByteLength, "range": Range, "runelength": RuneLength, "stringlength": StringLength, "matches": StringMatches, "in": IsInRaw, "rsapub": IsRsaPub, "minstringlength": MinStringLength, "maxstringlength": MaxStringLength, } // ParamTagRegexMap maps param tags to their respective regexes. var ParamTagRegexMap = map[string]*regexp.Regexp{ "range": regexp.MustCompile("^range\\((\\d+)\\|(\\d+)\\)$"), "length": regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"), "runelength": regexp.MustCompile("^runelength\\((\\d+)\\|(\\d+)\\)$"), "stringlength": regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"), "in": regexp.MustCompile(`^in\((.*)\)`), "matches": regexp.MustCompile(`^matches\((.+)\)$`), "rsapub": regexp.MustCompile("^rsapub\\((\\d+)\\)$"), "minstringlength": regexp.MustCompile("^minstringlength\\((\\d+)\\)$"), "maxstringlength": regexp.MustCompile("^maxstringlength\\((\\d+)\\)$"), } type customTypeTagMap struct { validators map[string]CustomTypeValidator sync.RWMutex } func (tm *customTypeTagMap) Get(name string) (CustomTypeValidator, bool) { tm.RLock() defer tm.RUnlock() v, ok := tm.validators[name] return v, ok } func (tm *customTypeTagMap) Set(name string, ctv CustomTypeValidator) { tm.Lock() defer tm.Unlock() tm.validators[name] = ctv } // CustomTypeTagMap is a map of functions that can be used as tags for ValidateStruct function. // Use this to validate compound or custom types that need to be handled as a whole, e.g. // `type UUID [16]byte` (this would be handled as an array of bytes). var CustomTypeTagMap = &customTypeTagMap{validators: make(map[string]CustomTypeValidator)} // TagMap is a map of functions, that can be used as tags for ValidateStruct function. 
// TagMap is a map of functions, that can be used as tags for ValidateStruct function.
var TagMap = map[string]Validator{
	"email":              IsEmail,
	"url":                IsURL,
	"dialstring":         IsDialString,
	"requrl":             IsRequestURL,
	"requri":             IsRequestURI,
	"alpha":              IsAlpha,
	"utfletter":          IsUTFLetter,
	"alphanum":           IsAlphanumeric,
	"utfletternum":       IsUTFLetterNumeric,
	"numeric":            IsNumeric,
	"utfnumeric":         IsUTFNumeric,
	"utfdigit":           IsUTFDigit,
	"hexadecimal":        IsHexadecimal,
	"hexcolor":           IsHexcolor,
	"rgbcolor":           IsRGBcolor,
	"lowercase":          IsLowerCase,
	"uppercase":          IsUpperCase,
	"int":                IsInt,
	"float":              IsFloat,
	"null":               IsNull,
	"notnull":            IsNotNull,
	"uuid":               IsUUID,
	"uuidv3":             IsUUIDv3,
	"uuidv4":             IsUUIDv4,
	"uuidv5":             IsUUIDv5,
	"creditcard":         IsCreditCard,
	"isbn10":             IsISBN10,
	"isbn13":             IsISBN13,
	"json":               IsJSON,
	"multibyte":          IsMultibyte,
	"ascii":              IsASCII,
	"printableascii":     IsPrintableASCII,
	"fullwidth":          IsFullWidth,
	"halfwidth":          IsHalfWidth,
	"variablewidth":      IsVariableWidth,
	"base64":             IsBase64,
	"datauri":            IsDataURI,
	"ip":                 IsIP,
	"port":               IsPort,
	"ipv4":               IsIPv4,
	"ipv6":               IsIPv6,
	"dns":                IsDNSName,
	"host":               IsHost,
	"mac":                IsMAC,
	"latitude":           IsLatitude,
	"longitude":          IsLongitude,
	"ssn":                IsSSN,
	"semver":             IsSemver,
	"rfc3339":            IsRFC3339,
	"rfc3339WithoutZone": IsRFC3339WithoutZone,
	"ISO3166Alpha2":      IsISO3166Alpha2,
	"ISO3166Alpha3":      IsISO3166Alpha3,
	"ISO4217":            IsISO4217,
	"IMEI":               IsIMEI,
}

// ISO3166Entry stores country codes
type ISO3166Entry struct {
	EnglishShortName string
	FrenchShortName  string
	Alpha2Code       string
	Alpha3Code       string
	Numeric          string
}

// ISO3166List based on https://www.iso.org/obp/ui/#search/code/ Code Type "Officially Assigned Codes"
// Entries appear ordered by ISO numeric code.
var ISO3166List = []ISO3166Entry{
	{"Afghanistan", "Afghanistan (l')", "AF", "AFG", "004"},
	{"Albania", "Albanie (l')", "AL", "ALB", "008"},
	{"Antarctica", "Antarctique (l')", "AQ", "ATA", "010"},
	{"Algeria", "Algérie (l')", "DZ", "DZA", "012"},
	{"American Samoa", "Samoa américaines (les)", "AS", "ASM", "016"},
	{"Andorra", "Andorre (l')", "AD", "AND", "020"},
	{"Angola", "Angola (l')", "AO", "AGO", "024"},
	{"Antigua and Barbuda", "Antigua-et-Barbuda", "AG", "ATG", "028"},
	{"Azerbaijan", "Azerbaïdjan (l')", "AZ", "AZE", "031"},
	{"Argentina", "Argentine (l')", "AR", "ARG", "032"},
	{"Australia", "Australie (l')", "AU", "AUS", "036"},
	{"Austria", "Autriche (l')", "AT", "AUT", "040"},
	{"Bahamas (the)", "Bahamas (les)", "BS", "BHS", "044"},
	{"Bahrain", "Bahreïn", "BH", "BHR", "048"},
	{"Bangladesh", "Bangladesh (le)", "BD", "BGD", "050"},
	{"Armenia", "Arménie (l')", "AM", "ARM", "051"},
	{"Barbados", "Barbade (la)", "BB", "BRB", "052"},
	{"Belgium", "Belgique (la)", "BE", "BEL", "056"},
	{"Bermuda", "Bermudes (les)", "BM", "BMU", "060"},
	{"Bhutan", "Bhoutan (le)", "BT", "BTN", "064"},
	{"Bolivia (Plurinational State of)", "Bolivie (État plurinational de)", "BO", "BOL", "068"},
	{"Bosnia and Herzegovina", "Bosnie-Herzégovine (la)", "BA", "BIH", "070"},
	{"Botswana", "Botswana (le)", "BW", "BWA", "072"},
	{"Bouvet Island", "Bouvet (l'Île)", "BV", "BVT", "074"},
	{"Brazil", "Brésil (le)", "BR", "BRA", "076"},
	{"Belize", "Belize (le)", "BZ", "BLZ", "084"},
	{"British Indian Ocean Territory (the)", "Indien (le Territoire britannique de l'océan)", "IO", "IOT", "086"},
	{"Solomon Islands", "Salomon (Îles)", "SB", "SLB", "090"},
	{"Virgin Islands (British)", "Vierges britanniques (les Îles)", "VG", "VGB", "092"},
	{"Brunei Darussalam", "Brunéi Darussalam (le)", "BN", "BRN", "096"},
	{"Bulgaria", "Bulgarie (la)", "BG", "BGR", "100"},
	{"Myanmar", "Myanmar (le)", "MM", "MMR", "104"},
	{"Burundi", "Burundi (le)", "BI", "BDI", "108"},
	{"Belarus", "Bélarus (le)", "BY", "BLR", "112"},
	{"Cambodia", "Cambodge (le)", "KH", "KHM", "116"},
	{"Cameroon", "Cameroun (le)", "CM", "CMR", "120"},
	{"Canada", "Canada (le)", "CA", "CAN", "124"},
	{"Cabo Verde", "Cabo Verde", "CV", "CPV", "132"},
	{"Cayman Islands (the)", "Caïmans (les Îles)", "KY", "CYM", "136"},
	{"Central African Republic (the)", "République centrafricaine (la)", "CF", "CAF", "140"},
	{"Sri Lanka", "Sri Lanka", "LK", "LKA", "144"},
	{"Chad", "Tchad (le)", "TD", "TCD", "148"},
	{"Chile", "Chili (le)", "CL", "CHL", "152"},
	{"China", "Chine (la)", "CN", "CHN", "156"},
	{"Taiwan (Province of China)", "Taïwan (Province de Chine)", "TW", "TWN", "158"},
	{"Christmas Island", "Christmas (l'Île)", "CX", "CXR", "162"},
	{"Cocos (Keeling) Islands (the)", "Cocos (les Îles)/ Keeling (les Îles)", "CC", "CCK", "166"},
	{"Colombia", "Colombie (la)", "CO", "COL", "170"},
	{"Comoros (the)", "Comores (les)", "KM", "COM", "174"},
	{"Mayotte", "Mayotte", "YT", "MYT", "175"},
	{"Congo (the)", "Congo (le)", "CG", "COG", "178"},
	{"Congo (the Democratic Republic of the)", "Congo (la République démocratique du)", "CD", "COD", "180"},
	{"Cook Islands (the)", "Cook (les Îles)", "CK", "COK", "184"},
	{"Costa Rica", "Costa Rica (le)", "CR", "CRI", "188"},
	{"Croatia", "Croatie (la)", "HR", "HRV", "191"},
	{"Cuba", "Cuba", "CU", "CUB", "192"},
	{"Cyprus", "Chypre", "CY", "CYP", "196"},
	{"Czech Republic (the)", "tchèque (la République)", "CZ", "CZE", "203"},
	{"Benin", "Bénin (le)", "BJ", "BEN", "204"},
	{"Denmark", "Danemark (le)", "DK", "DNK", "208"},
	{"Dominica", "Dominique (la)", "DM", "DMA", "212"},
	{"Dominican Republic (the)", "dominicaine (la République)", "DO", "DOM", "214"},
	{"Ecuador", "Équateur (l')", "EC", "ECU", "218"},
	{"El Salvador", "El Salvador", "SV", "SLV", "222"},
	{"Equatorial Guinea", "Guinée équatoriale (la)", "GQ", "GNQ", "226"},
	{"Ethiopia", "Éthiopie (l')", "ET", "ETH", "231"},
	{"Eritrea", "Érythrée (l')", "ER", "ERI", "232"},
	{"Estonia", "Estonie (l')", "EE", "EST", "233"},
	{"Faroe Islands (the)", "Féroé (les Îles)", "FO", "FRO", "234"},
	{"Falkland Islands (the) [Malvinas]", "Falkland (les Îles)/Malouines (les Îles)", "FK", "FLK", "238"},
	{"South Georgia and the South Sandwich Islands", "Géorgie du Sud-et-les Îles Sandwich du Sud (la)", "GS", "SGS", "239"},
	{"Fiji", "Fidji (les)", "FJ", "FJI", "242"},
	{"Finland", "Finlande (la)", "FI", "FIN", "246"},
	{"Åland Islands", "Åland(les Îles)", "AX", "ALA", "248"},
	{"France", "France (la)", "FR", "FRA", "250"},
	{"French Guiana", "Guyane française (la)", "GF", "GUF", "254"},
	{"French Polynesia", "Polynésie française (la)", "PF", "PYF", "258"},
	{"French Southern Territories (the)", "Terres australes françaises (les)", "TF", "ATF", "260"},
	{"Djibouti", "Djibouti", "DJ", "DJI", "262"},
	{"Gabon", "Gabon (le)", "GA", "GAB", "266"},
	{"Georgia", "Géorgie (la)", "GE", "GEO", "268"},
	{"Gambia (the)", "Gambie (la)", "GM", "GMB", "270"},
	{"Palestine, State of", "Palestine, État de", "PS", "PSE", "275"},
	{"Germany", "Allemagne (l')", "DE", "DEU", "276"},
	{"Ghana", "Ghana (le)", "GH", "GHA", "288"},
	{"Gibraltar", "Gibraltar", "GI", "GIB", "292"},
	{"Kiribati", "Kiribati", "KI", "KIR", "296"},
	{"Greece", "Grèce (la)", "GR", "GRC", "300"},
	{"Greenland", "Groenland (le)", "GL", "GRL", "304"},
	{"Grenada", "Grenade (la)", "GD", "GRD", "308"},
	{"Guadeloupe", "Guadeloupe (la)", "GP", "GLP", "312"},
	{"Guam", "Guam", "GU", "GUM", "316"},
	{"Guatemala", "Guatemala (le)", "GT", "GTM", "320"},
	{"Guinea", "Guinée (la)", "GN", "GIN", "324"},
	{"Guyana", "Guyana (le)", "GY", "GUY", "328"},
	{"Haiti", "Haïti", "HT", "HTI", "332"},
	{"Heard Island and McDonald Islands", "Heard-et-Îles MacDonald (l'Île)", "HM", "HMD", "334"},
	{"Holy See (the)", "Saint-Siège (le)", "VA", "VAT", "336"},
	{"Honduras", "Honduras (le)", "HN", "HND", "340"},
	{"Hong Kong", "Hong Kong", "HK", "HKG", "344"},
	{"Hungary", "Hongrie (la)", "HU", "HUN", "348"},
	{"Iceland", "Islande (l')", "IS", "ISL", "352"},
	{"India", "Inde (l')", "IN", "IND", "356"},
	{"Indonesia", "Indonésie (l')", "ID", "IDN", "360"},
	{"Iran (Islamic Republic of)", "Iran (République Islamique d')", "IR", "IRN", "364"},
	{"Iraq", "Iraq (l')", "IQ", "IRQ", "368"},
	{"Ireland", "Irlande (l')", "IE", "IRL", "372"},
	{"Israel", "Israël", "IL", "ISR", "376"},
	{"Italy", "Italie (l')", "IT", "ITA", "380"},
	{"Côte d'Ivoire", "Côte d'Ivoire (la)", "CI", "CIV", "384"},
	{"Jamaica", "Jamaïque (la)", "JM", "JAM", "388"},
	{"Japan", "Japon (le)", "JP", "JPN", "392"},
	{"Kazakhstan", "Kazakhstan (le)", "KZ", "KAZ", "398"},
	{"Jordan", "Jordanie (la)", "JO", "JOR", "400"},
	{"Kenya", "Kenya (le)", "KE", "KEN", "404"},
	{"Korea (the Democratic People's Republic of)", "Corée (la République populaire démocratique de)", "KP", "PRK", "408"},
	{"Korea (the Republic of)", "Corée (la République de)", "KR", "KOR", "410"},
	{"Kuwait", "Koweït (le)", "KW", "KWT", "414"},
	{"Kyrgyzstan", "Kirghizistan (le)", "KG", "KGZ", "417"},
	{"Lao People's Democratic Republic (the)", "Lao, République démocratique populaire", "LA", "LAO", "418"},
	{"Lebanon", "Liban (le)", "LB", "LBN", "422"},
	{"Lesotho", "Lesotho (le)", "LS", "LSO", "426"},
	{"Latvia", "Lettonie (la)", "LV", "LVA", "428"},
	{"Liberia", "Libéria (le)", "LR", "LBR", "430"},
	{"Libya", "Libye (la)", "LY", "LBY", "434"},
	{"Liechtenstein", "Liechtenstein (le)", "LI", "LIE", "438"},
	{"Lithuania", "Lituanie (la)", "LT", "LTU", "440"},
	{"Luxembourg", "Luxembourg (le)", "LU", "LUX", "442"},
	{"Macao", "Macao", "MO", "MAC", "446"},
	{"Madagascar", "Madagascar", "MG", "MDG", "450"},
	{"Malawi", "Malawi (le)", "MW", "MWI", "454"},
	{"Malaysia", "Malaisie (la)", "MY", "MYS", "458"},
	{"Maldives", "Maldives (les)", "MV", "MDV", "462"},
	{"Mali", "Mali (le)", "ML", "MLI", "466"},
	{"Malta", "Malte", "MT", "MLT", "470"},
	{"Martinique", "Martinique (la)", "MQ", "MTQ", "474"},
	{"Mauritania", "Mauritanie (la)", "MR", "MRT", "478"},
	{"Mauritius", "Maurice", "MU", "MUS", "480"},
	{"Mexico", "Mexique (le)", "MX", "MEX", "484"},
	{"Monaco", "Monaco", "MC", "MCO", "492"},
	{"Mongolia", "Mongolie (la)", "MN", "MNG", "496"},
	{"Moldova (the Republic of)", "Moldova , République de", "MD", "MDA", "498"},
	{"Montenegro", "Monténégro (le)", "ME", "MNE", "499"},
	{"Montserrat", "Montserrat", "MS", "MSR", "500"},
	{"Morocco", "Maroc (le)", "MA", "MAR", "504"},
	{"Mozambique", "Mozambique (le)", "MZ", "MOZ", "508"},
	{"Oman", "Oman", "OM", "OMN", "512"},
	{"Namibia", "Namibie (la)", "NA", "NAM", "516"},
	{"Nauru", "Nauru", "NR", "NRU", "520"},
	{"Nepal", "Népal (le)", "NP", "NPL", "524"},
	{"Netherlands (the)", "Pays-Bas (les)", "NL", "NLD", "528"},
	{"Curaçao", "Curaçao", "CW", "CUW", "531"},
	{"Aruba", "Aruba", "AW", "ABW", "533"},
	{"Sint Maarten (Dutch part)", "Saint-Martin (partie néerlandaise)", "SX", "SXM", "534"},
	{"Bonaire, Sint Eustatius and Saba", "Bonaire, Saint-Eustache et Saba", "BQ", "BES", "535"},
	{"New Caledonia", "Nouvelle-Calédonie (la)", "NC", "NCL", "540"},
	{"Vanuatu", "Vanuatu (le)", "VU", "VUT", "548"},
	{"New Zealand", "Nouvelle-Zélande (la)", "NZ", "NZL", "554"},
	{"Nicaragua", "Nicaragua (le)", "NI", "NIC", "558"},
	{"Niger (the)", "Niger (le)", "NE", "NER", "562"},
	{"Nigeria", "Nigéria (le)", "NG", "NGA", "566"},
	{"Niue", "Niue", "NU", "NIU", "570"},
	{"Norfolk Island", "Norfolk (l'Île)", "NF", "NFK", "574"},
	{"Norway", "Norvège (la)", "NO", "NOR", "578"},
	{"Northern Mariana Islands (the)", "Mariannes du Nord (les Îles)", "MP", "MNP", "580"},
	{"United States Minor Outlying Islands (the)", "Îles mineures éloignées des États-Unis (les)", "UM", "UMI", "581"},
	{"Micronesia (Federated States of)", "Micronésie (États fédérés de)", "FM", "FSM", "583"},
	{"Marshall Islands (the)", "Marshall (Îles)", "MH", "MHL", "584"},
	{"Palau", "Palaos (les)", "PW", "PLW", "585"},
	{"Pakistan", "Pakistan (le)", "PK", "PAK", "586"},
	{"Panama", "Panama (le)", "PA", "PAN", "591"},
	{"Papua New Guinea", "Papouasie-Nouvelle-Guinée (la)", "PG", "PNG", "598"},
	{"Paraguay", "Paraguay (le)", "PY", "PRY", "600"},
	{"Peru", "Pérou (le)", "PE", "PER", "604"},
	{"Philippines (the)", "Philippines (les)", "PH", "PHL", "608"},
	{"Pitcairn", "Pitcairn", "PN", "PCN", "612"},
	{"Poland", "Pologne (la)", "PL", "POL", "616"},
	{"Portugal", "Portugal (le)", "PT", "PRT", "620"},
	{"Guinea-Bissau", "Guinée-Bissau (la)", "GW", "GNB", "624"},
	{"Timor-Leste", "Timor-Leste (le)", "TL", "TLS", "626"},
	{"Puerto Rico", "Porto Rico", "PR", "PRI", "630"},
	{"Qatar", "Qatar (le)", "QA", "QAT", "634"},
	{"Réunion", "Réunion (La)", "RE", "REU", "638"},
	{"Romania", "Roumanie (la)", "RO", "ROU", "642"},
	{"Russian Federation (the)", "Russie (la Fédération de)", "RU", "RUS", "643"},
	{"Rwanda", "Rwanda (le)", "RW", "RWA", "646"},
	{"Saint Barthélemy", "Saint-Barthélemy", "BL", "BLM", "652"},
	{"Saint Helena, Ascension and Tristan da Cunha", "Sainte-Hélène, Ascension et Tristan da Cunha", "SH", "SHN", "654"},
	{"Saint Kitts and Nevis", "Saint-Kitts-et-Nevis", "KN", "KNA", "659"},
	{"Anguilla", "Anguilla", "AI", "AIA", "660"},
	{"Saint Lucia", "Sainte-Lucie", "LC", "LCA", "662"},
	{"Saint Martin (French part)", "Saint-Martin (partie française)", "MF", "MAF", "663"},
	{"Saint Pierre and Miquelon", "Saint-Pierre-et-Miquelon", "PM", "SPM", "666"},
	{"Saint Vincent and the Grenadines", "Saint-Vincent-et-les Grenadines", "VC", "VCT", "670"},
	{"San Marino", "Saint-Marin", "SM", "SMR", "674"},
	{"Sao Tome and Principe", "Sao Tomé-et-Principe", "ST", "STP", "678"},
	{"Saudi Arabia", "Arabie saoudite (l')", "SA", "SAU", "682"},
	{"Senegal", "Sénégal (le)", "SN", "SEN", "686"},
	{"Serbia", "Serbie (la)", "RS", "SRB", "688"},
	{"Seychelles", "Seychelles (les)", "SC", "SYC", "690"},
	{"Sierra Leone", "Sierra Leone (la)", "SL", "SLE", "694"},
	{"Singapore", "Singapour", "SG", "SGP", "702"},
	{"Slovakia", "Slovaquie (la)", "SK", "SVK", "703"},
	{"Viet Nam", "Viet Nam (le)", "VN", "VNM", "704"},
	{"Slovenia", "Slovénie (la)", "SI", "SVN", "705"},
	{"Somalia", "Somalie (la)", "SO", "SOM", "706"},
	{"South Africa", "Afrique du Sud (l')", "ZA", "ZAF", "710"},
	{"Zimbabwe", "Zimbabwe (le)", "ZW", "ZWE", "716"},
	{"Spain", "Espagne (l')", "ES", "ESP", "724"},
	{"South Sudan", "Soudan du Sud (le)", "SS", "SSD", "728"},
	{"Sudan (the)", "Soudan (le)", "SD", "SDN", "729"},
	{"Western Sahara*", "Sahara occidental (le)*", "EH", "ESH", "732"},
	{"Suriname", "Suriname (le)", "SR", "SUR", "740"},
	{"Svalbard and Jan Mayen", "Svalbard et l'Île Jan Mayen (le)", "SJ", "SJM", "744"},
	{"Swaziland", "Swaziland (le)", "SZ", "SWZ", "748"},
	{"Sweden", "Suède (la)", "SE", "SWE", "752"},
	{"Switzerland", "Suisse (la)", "CH", "CHE", "756"},
	{"Syrian Arab Republic", "République arabe syrienne (la)", "SY", "SYR", "760"},
	{"Tajikistan", "Tadjikistan (le)", "TJ", "TJK", "762"},
	{"Thailand", "Thaïlande (la)", "TH", "THA", "764"},
	{"Togo", "Togo (le)", "TG", "TGO", "768"},
	{"Tokelau", "Tokelau (les)", "TK", "TKL", "772"},
	{"Tonga", "Tonga (les)", "TO", "TON", "776"},
	{"Trinidad and Tobago", "Trinité-et-Tobago (la)", "TT", "TTO", "780"},
	{"United Arab Emirates (the)", "Émirats arabes unis (les)", "AE", "ARE", "784"},
	{"Tunisia", "Tunisie (la)", "TN", "TUN", "788"},
	{"Turkey", "Turquie (la)", "TR", "TUR", "792"},
	{"Turkmenistan", "Turkménistan (le)", "TM", "TKM", "795"},
	{"Turks and Caicos Islands (the)", "Turks-et-Caïcos (les Îles)", "TC", "TCA", "796"},
	{"Tuvalu", "Tuvalu (les)", "TV", "TUV", "798"},
	{"Uganda", "Ouganda (l')", "UG", "UGA", "800"},
	{"Ukraine", "Ukraine (l')", "UA", "UKR", "804"},
	{"Macedonia (the former Yugoslav Republic of)", "Macédoine (l'ex‑République yougoslave de)", "MK", "MKD", "807"},
	{"Egypt", "Égypte (l')", "EG", "EGY", "818"},
	{"United Kingdom of Great Britain and Northern Ireland (the)", "Royaume-Uni de Grande-Bretagne et d'Irlande du Nord (le)", "GB", "GBR", "826"},
	{"Guernsey", "Guernesey", "GG", "GGY", "831"},
	{"Jersey", "Jersey", "JE", "JEY", "832"},
	{"Isle of Man", "Île de Man", "IM", "IMN", "833"},
	{"Tanzania, United Republic of", "Tanzanie, République-Unie de", "TZ", "TZA", "834"},
	{"United States of America (the)", "États-Unis d'Amérique (les)", "US", "USA", "840"},
	{"Virgin Islands (U.S.)", "Vierges des États-Unis (les Îles)", "VI", "VIR", "850"},
	{"Burkina Faso", "Burkina Faso (le)", "BF", "BFA", "854"},
	{"Uruguay", "Uruguay (l')", "UY", "URY", "858"},
	{"Uzbekistan", "Ouzbékistan (l')", "UZ", "UZB", "860"},
	{"Venezuela (Bolivarian Republic of)", "Venezuela (République bolivarienne du)", "VE", "VEN", "862"},
	{"Wallis and Futuna", "Wallis-et-Futuna", "WF", "WLF", "876"},
	{"Samoa", "Samoa (le)", "WS", "WSM", "882"},
	{"Yemen", "Yémen (le)", "YE", "YEM", "887"},
	{"Zambia", "Zambie (la)", "ZM", "ZMB", "894"},
}

// ISO4217List is the list of ISO currency codes
var ISO4217List = []string{
	"AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZN",
	"BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BOV",
	"BRL", "BSD", "BTN", "BWP", "BYN", "BZD", "CAD", "CDF", "CHE", "CHF",
	"CHW", "CLF", "CLP", "CNY", "COP", "COU", "CRC", "CUC", "CUP", "CVE",
	"CZK", "DJF", "DKK", "DOP", "DZD", "EGP", "ERN", "ETB", "EUR", "FJD",
	"FKP", "GBP", "GEL", "GHS", "GIP", "GMD", "GNF", "GTQ", "GYD", "HKD",
	"HNL", "HRK", "HTG", "HUF", "IDR", "ILS", "INR", "IQD", "IRR", "ISK",
	"JMD", "JOD", "JPY", "KES", "KGS", "KHR", "KMF", "KPW", "KRW", "KWD",
	"KYD", "KZT", "LAK", "LBP", "LKR", "LRD", "LSL", "LYD", "MAD", "MDL",
	"MGA", "MKD", "MMK", "MNT", "MOP", "MRO", "MUR", "MVR", "MWK", "MXN",
	"MXV", "MYR", "MZN", "NAD", "NGN", "NIO", "NOK", "NPR", "NZD", "OMR",
	"PAB", "PEN", "PGK", "PHP", "PKR", "PLN", "PYG", "QAR", "RON", "RSD",
	"RUB", "RWF", "SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL",
	"SOS", "SRD", "SSP", "STD", "SVC", "SYP", "SZL", "THB", "TJS", "TMT",
	"TND", "TOP", "TRY", "TTD", "TWD", "TZS", "UAH", "UGX", "USD", "USN",
	"UYI", "UYU", "UZS", "VEF", "VND", "VUV", "WST", "XAF", "XAG", "XAU",
	"XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XOF", "XPD", "XPF", "XPT",
	"XSU", "XTS", "XUA", "XXX", "YER", "ZAR", "ZMW", "ZWL",
}

// ISO693Entry stores ISO language codes
type ISO693Entry struct {
	Alpha3bCode string
	Alpha2Code  string
	English     string
}

// ISO693List based on http://data.okfn.org/data/core/language-codes/r/language-codes-3b2.json
var ISO693List = []ISO693Entry{
	{Alpha3bCode: "aar", Alpha2Code: "aa", English: "Afar"},
	{Alpha3bCode: "abk", Alpha2Code: "ab", English: "Abkhazian"},
	{Alpha3bCode: "afr", Alpha2Code: "af", English: "Afrikaans"},
	{Alpha3bCode: "aka", Alpha2Code: "ak", English: "Akan"},
	{Alpha3bCode: "alb", Alpha2Code: "sq", English: "Albanian"},
	{Alpha3bCode: "amh", Alpha2Code: "am", English: "Amharic"},
	{Alpha3bCode: "ara", Alpha2Code: "ar", English: "Arabic"},
	{Alpha3bCode: "arg", Alpha2Code: "an", English: "Aragonese"},
	{Alpha3bCode: "arm", Alpha2Code: "hy", English: "Armenian"},
	{Alpha3bCode: "asm", Alpha2Code: "as", English: "Assamese"},
	{Alpha3bCode: "ava", Alpha2Code: "av", English: "Avaric"},
	{Alpha3bCode: "ave", Alpha2Code: "ae", English: "Avestan"},
	{Alpha3bCode: "aym", Alpha2Code: "ay", English: "Aymara"},
	{Alpha3bCode: "aze", Alpha2Code: "az", English: "Azerbaijani"},
	{Alpha3bCode: "bak", Alpha2Code: "ba", English: "Bashkir"},
	{Alpha3bCode: "bam", Alpha2Code: "bm", English: "Bambara"},
	{Alpha3bCode: "baq", Alpha2Code: "eu", English: "Basque"},
	{Alpha3bCode: "bel", Alpha2Code: "be", English: "Belarusian"},
	{Alpha3bCode: "ben", Alpha2Code: "bn", English: "Bengali"},
	{Alpha3bCode: "bih", Alpha2Code: "bh", English: "Bihari languages"},
	{Alpha3bCode: "bis", Alpha2Code: "bi", English: "Bislama"},
	{Alpha3bCode: "bos", Alpha2Code: "bs", English: "Bosnian"},
	{Alpha3bCode: "bre", Alpha2Code: "br", English: "Breton"},
	{Alpha3bCode: "bul", Alpha2Code: "bg", English: "Bulgarian"},
	{Alpha3bCode: "bur", Alpha2Code: "my", English: "Burmese"},
	{Alpha3bCode: "cat", Alpha2Code: "ca", English: "Catalan; Valencian"},
	{Alpha3bCode: "cha", Alpha2Code: "ch", English: "Chamorro"},
	{Alpha3bCode: "che", Alpha2Code: "ce", English: "Chechen"},
	{Alpha3bCode: "chi", Alpha2Code: "zh", English: "Chinese"},
	{Alpha3bCode: "chu", Alpha2Code: "cu", English: "Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic"},
	{Alpha3bCode: "chv", Alpha2Code: "cv", English: "Chuvash"},
	{Alpha3bCode: "cor", Alpha2Code: "kw", English: "Cornish"},
	{Alpha3bCode: "cos", Alpha2Code: "co", English: "Corsican"},
	{Alpha3bCode: "cre", Alpha2Code: "cr", English: "Cree"},
	{Alpha3bCode: "cze", Alpha2Code: "cs", English: "Czech"},
	{Alpha3bCode: "dan", Alpha2Code: "da", English: "Danish"},
	{Alpha3bCode: "div", Alpha2Code: "dv", English: "Divehi; Dhivehi; Maldivian"},
	{Alpha3bCode: "dut", Alpha2Code: "nl", English: "Dutch; Flemish"},
	{Alpha3bCode: "dzo", Alpha2Code: "dz", English: "Dzongkha"},
	{Alpha3bCode: "eng", Alpha2Code: "en", English: "English"},
	{Alpha3bCode: "epo", Alpha2Code: "eo", English: "Esperanto"},
	{Alpha3bCode: "est", Alpha2Code: "et", English: "Estonian"},
	{Alpha3bCode: "ewe", Alpha2Code: "ee", English: "Ewe"},
	{Alpha3bCode: "fao", Alpha2Code: "fo", English: "Faroese"},
	{Alpha3bCode: "fij", Alpha2Code: "fj", English: "Fijian"},
	{Alpha3bCode: "fin", Alpha2Code: "fi", English: "Finnish"},
	{Alpha3bCode: "fre", Alpha2Code: "fr", English: "French"},
	{Alpha3bCode: "fry", Alpha2Code: "fy", English: "Western Frisian"},
	{Alpha3bCode: "ful", Alpha2Code: "ff", English: "Fulah"},
	{Alpha3bCode: "geo", Alpha2Code: "ka", English: "Georgian"},
	{Alpha3bCode: "ger", Alpha2Code: "de", English: "German"},
	{Alpha3bCode: "gla", Alpha2Code: "gd", English: "Gaelic; Scottish Gaelic"},
	{Alpha3bCode: "gle", Alpha2Code: "ga", English: "Irish"},
	{Alpha3bCode: "glg", Alpha2Code: "gl", English: "Galician"},
	{Alpha3bCode: "glv", Alpha2Code: "gv", English: "Manx"},
	{Alpha3bCode: "gre", Alpha2Code: "el", English: "Greek, Modern (1453-)"},
	{Alpha3bCode: "grn", Alpha2Code: "gn", English: "Guarani"},
	{Alpha3bCode: "guj", Alpha2Code: "gu", English: "Gujarati"},
	{Alpha3bCode: "hat", Alpha2Code: "ht", English: "Haitian; Haitian Creole"},
	{Alpha3bCode: "hau", Alpha2Code: "ha", English: "Hausa"},
	{Alpha3bCode: "heb", Alpha2Code: "he", English: "Hebrew"},
	{Alpha3bCode: "her", Alpha2Code: "hz", English: "Herero"},
	{Alpha3bCode: "hin", Alpha2Code: "hi", English: "Hindi"},
	{Alpha3bCode: "hmo", Alpha2Code: "ho", English: "Hiri Motu"},
	{Alpha3bCode: "hrv", Alpha2Code: "hr", English: "Croatian"},
	{Alpha3bCode: "hun", Alpha2Code: "hu", English: "Hungarian"},
	{Alpha3bCode: "ibo", Alpha2Code: "ig", English: "Igbo"},
	{Alpha3bCode: "ice", Alpha2Code: "is", English: "Icelandic"},
	{Alpha3bCode: "ido", Alpha2Code: "io", English: "Ido"},
	{Alpha3bCode: "iii", Alpha2Code: "ii", English: "Sichuan Yi; Nuosu"},
	{Alpha3bCode: "iku", Alpha2Code: "iu", English: "Inuktitut"},
	{Alpha3bCode: "ile", Alpha2Code: "ie", English: "Interlingue; Occidental"},
	{Alpha3bCode: "ina", Alpha2Code: "ia", English: "Interlingua (International Auxiliary Language Association)"},
	{Alpha3bCode: "ind", Alpha2Code: "id", English: "Indonesian"},
	{Alpha3bCode: "ipk", Alpha2Code: "ik", English: "Inupiaq"},
	{Alpha3bCode: "ita", Alpha2Code: "it", English: "Italian"},
	{Alpha3bCode: "jav", Alpha2Code: "jv", English: "Javanese"},
	{Alpha3bCode: "jpn", Alpha2Code: "ja", English: "Japanese"},
	{Alpha3bCode: "kal", Alpha2Code: "kl", English: "Kalaallisut; Greenlandic"},
	{Alpha3bCode: "kan", Alpha2Code: "kn", English: "Kannada"},
	{Alpha3bCode: "kas", Alpha2Code: "ks", English: "Kashmiri"},
	{Alpha3bCode: "kau", Alpha2Code: "kr", English: "Kanuri"},
	{Alpha3bCode: "kaz", Alpha2Code: "kk", English: "Kazakh"},
	{Alpha3bCode: "khm", Alpha2Code: "km", English: "Central Khmer"},
	{Alpha3bCode: "kik", Alpha2Code: "ki", English: "Kikuyu; Gikuyu"},
	{Alpha3bCode: "kin", Alpha2Code: "rw", English: "Kinyarwanda"},
	{Alpha3bCode: "kir", Alpha2Code: "ky", English: "Kirghiz; Kyrgyz"},
	{Alpha3bCode: "kom", Alpha2Code: "kv", English: "Komi"},
	{Alpha3bCode: "kon", Alpha2Code: "kg", English: "Kongo"},
	{Alpha3bCode: "kor", Alpha2Code: "ko", English: "Korean"},
	{Alpha3bCode: "kua", Alpha2Code: "kj", English: "Kuanyama; Kwanyama"},
	{Alpha3bCode: "kur", Alpha2Code: "ku", English: "Kurdish"},
	{Alpha3bCode: "lao", Alpha2Code: "lo", English: "Lao"},
	{Alpha3bCode: "lat", Alpha2Code: "la", English: "Latin"},
	{Alpha3bCode: "lav", Alpha2Code: "lv", English: "Latvian"},
	{Alpha3bCode: "lim", Alpha2Code: "li", English: "Limburgan; Limburger; Limburgish"},
	{Alpha3bCode: "lin", Alpha2Code: "ln", English: "Lingala"},
	{Alpha3bCode: "lit", Alpha2Code: "lt", English: "Lithuanian"},
	{Alpha3bCode: "ltz", Alpha2Code: "lb", English: "Luxembourgish; Letzeburgesch"},
	{Alpha3bCode: "lub", Alpha2Code: "lu", English: "Luba-Katanga"},
	{Alpha3bCode: "lug", Alpha2Code: "lg", English: "Ganda"},
	{Alpha3bCode: "mac", Alpha2Code: "mk", English: "Macedonian"},
	{Alpha3bCode: "mah", Alpha2Code: "mh", English: "Marshallese"},
	{Alpha3bCode: "mal", Alpha2Code: "ml", English: "Malayalam"},
	{Alpha3bCode: "mao", Alpha2Code: "mi", English: "Maori"},
	{Alpha3bCode: "mar", Alpha2Code: "mr", English: "Marathi"},
	{Alpha3bCode: "may", Alpha2Code: "ms", English: "Malay"},
	{Alpha3bCode: "mlg", Alpha2Code: "mg", English: "Malagasy"},
	{Alpha3bCode: "mlt", Alpha2Code: "mt", English: "Maltese"},
	{Alpha3bCode: "mon", Alpha2Code: "mn", English: "Mongolian"},
	{Alpha3bCode: "nau", Alpha2Code: "na", English: "Nauru"},
	{Alpha3bCode: "nav", Alpha2Code: "nv", English: "Navajo; Navaho"},
	{Alpha3bCode: "nbl", Alpha2Code: "nr", English: "Ndebele, South; South Ndebele"},
	{Alpha3bCode: "nde", Alpha2Code: "nd", English: "Ndebele, North; North Ndebele"},
	{Alpha3bCode: "ndo", Alpha2Code: "ng", English: "Ndonga"},
	{Alpha3bCode: "nep", Alpha2Code: "ne", English: "Nepali"},
	{Alpha3bCode: "nno", Alpha2Code: "nn", English: "Norwegian Nynorsk; Nynorsk, Norwegian"},
	{Alpha3bCode: "nob", Alpha2Code: "nb", English: "Bokmål, Norwegian; Norwegian Bokmål"},
	{Alpha3bCode: "nor", Alpha2Code: "no", English: "Norwegian"},
	{Alpha3bCode: "nya", Alpha2Code: "ny", English: "Chichewa; Chewa; Nyanja"},
	{Alpha3bCode: "oci", Alpha2Code: "oc", English: "Occitan (post 1500); Provençal"},
	{Alpha3bCode: "oji", Alpha2Code: "oj", English: "Ojibwa"},
	{Alpha3bCode: "ori", Alpha2Code: "or", English: "Oriya"},
	{Alpha3bCode: "orm", Alpha2Code: "om", English: "Oromo"},
	{Alpha3bCode: "oss", Alpha2Code: "os", English: "Ossetian; Ossetic"},
	{Alpha3bCode: "pan", Alpha2Code: "pa", English: "Panjabi; Punjabi"},
	{Alpha3bCode: "per", Alpha2Code: "fa", English: "Persian"},
	{Alpha3bCode: "pli", Alpha2Code: "pi", English: "Pali"},
	{Alpha3bCode: "pol", Alpha2Code: "pl", English: "Polish"},
	{Alpha3bCode: "por", Alpha2Code: "pt", English: "Portuguese"},
	{Alpha3bCode: "pus", Alpha2Code: "ps", English: "Pushto; Pashto"},
	{Alpha3bCode: "que", Alpha2Code: "qu", English: "Quechua"},
	{Alpha3bCode: "roh", Alpha2Code: "rm", English: "Romansh"},
	{Alpha3bCode: "rum", Alpha2Code: "ro", English: "Romanian; Moldavian; Moldovan"},
	{Alpha3bCode: "run", Alpha2Code: "rn", English: "Rundi"},
	{Alpha3bCode: "rus", Alpha2Code: "ru", English: "Russian"},
	{Alpha3bCode: "sag", Alpha2Code: "sg", English: "Sango"},
	{Alpha3bCode: "san", Alpha2Code: "sa", English: "Sanskrit"},
	{Alpha3bCode: "sin", Alpha2Code: "si", English: "Sinhala; Sinhalese"},
	{Alpha3bCode: "slo", Alpha2Code: "sk", English: "Slovak"},
	{Alpha3bCode: "slv", Alpha2Code: "sl", English: "Slovenian"},
	{Alpha3bCode: "sme", Alpha2Code: "se", English: "Northern Sami"},
	{Alpha3bCode: "smo", Alpha2Code: "sm", English: "Samoan"},
	{Alpha3bCode: "sna", Alpha2Code: "sn", English: "Shona"},
	{Alpha3bCode: "snd", Alpha2Code: "sd", English: "Sindhi"},
	{Alpha3bCode: "som", Alpha2Code: "so", English: "Somali"},
	{Alpha3bCode: "sot", Alpha2Code: "st", English: "Sotho, Southern"},
	{Alpha3bCode: "spa", Alpha2Code: "es", English: "Spanish; Castilian"},
	{Alpha3bCode: "srd", Alpha2Code: "sc", English: "Sardinian"},
	{Alpha3bCode: "srp", Alpha2Code: "sr", English: "Serbian"},
	{Alpha3bCode: "ssw", Alpha2Code: "ss", English: "Swati"},
	{Alpha3bCode: "sun", Alpha2Code: "su", English: "Sundanese"},
	{Alpha3bCode: "swa", Alpha2Code: "sw", English: "Swahili"},
	{Alpha3bCode: "swe", Alpha2Code: "sv", English: "Swedish"},
	{Alpha3bCode: "tah", Alpha2Code: "ty", English: "Tahitian"},
	{Alpha3bCode: "tam", Alpha2Code: "ta", English: "Tamil"},
	{Alpha3bCode: "tat", Alpha2Code: "tt", English: "Tatar"},
	{Alpha3bCode: "tel", Alpha2Code: "te", English: "Telugu"},
	{Alpha3bCode: "tgk", Alpha2Code: "tg", English: "Tajik"},
	{Alpha3bCode: "tgl", Alpha2Code: "tl", English: "Tagalog"},
	{Alpha3bCode: "tha", Alpha2Code: "th", English: "Thai"},
	{Alpha3bCode: "tib", Alpha2Code: "bo", English: "Tibetan"},
	{Alpha3bCode: "tir", Alpha2Code: "ti", English: "Tigrinya"},
	{Alpha3bCode: "ton", Alpha2Code: "to", English: "Tonga (Tonga Islands)"},
	{Alpha3bCode: "tsn", Alpha2Code: "tn", English: "Tswana"},
	{Alpha3bCode: "tso", Alpha2Code: "ts", English: "Tsonga"},
	{Alpha3bCode: "tuk", Alpha2Code: "tk", English: "Turkmen"},
	{Alpha3bCode: "tur", Alpha2Code: "tr", English: "Turkish"},
	{Alpha3bCode: "twi", Alpha2Code: "tw", English: "Twi"},
	{Alpha3bCode: "uig", Alpha2Code: "ug", English: "Uighur; Uyghur"},
	{Alpha3bCode: "ukr", Alpha2Code: "uk", English: "Ukrainian"},
	{Alpha3bCode: "urd", Alpha2Code: "ur", English: "Urdu"},
	{Alpha3bCode: "uzb", Alpha2Code: "uz", English: "Uzbek"},
	{Alpha3bCode: "ven", Alpha2Code: "ve", English: "Venda"},
	{Alpha3bCode: "vie", Alpha2Code: "vi", English: "Vietnamese"},
	{Alpha3bCode: "vol", Alpha2Code: "vo", English: "Volapük"},
	{Alpha3bCode: "wel", Alpha2Code: "cy", English: "Welsh"},
	{Alpha3bCode: "wln", Alpha2Code: "wa", English: "Walloon"},
	{Alpha3bCode: "wol", Alpha2Code: "wo", English: "Wolof"},
	{Alpha3bCode: "xho", Alpha2Code: "xh", English: "Xhosa"},
	{Alpha3bCode: "yid", Alpha2Code: "yi", English: "Yiddish"},
	{Alpha3bCode: "yor", Alpha2Code: "yo", English: "Yoruba"},
	{Alpha3bCode: "zha", Alpha2Code: "za", English: "Zhuang; Chuang"},
	{Alpha3bCode: "zul", Alpha2Code: "zu", English: "Zulu"},
}
9,691
0
kubeflow_public_repos/fate-operator/vendor/github.com/asaskevich
kubeflow_public_repos/fate-operator/vendor/github.com/asaskevich/govalidator/numerics.go
package govalidator import ( "math" "reflect" ) // Abs returns absolute value of number func Abs(value float64) float64 { return math.Abs(value) } // Sign returns signum of number: 1 in case of value > 0, -1 in case of value < 0, 0 otherwise func Sign(value float64) float64 { if value > 0 { return 1 } else if value < 0 { return -1 } else { return 0 } } // IsNegative returns true if value < 0 func IsNegative(value float64) bool { return value < 0 } // IsPositive returns true if value > 0 func IsPositive(value float64) bool { return value > 0 } // IsNonNegative returns true if value >= 0 func IsNonNegative(value float64) bool { return value >= 0 } // IsNonPositive returns true if value <= 0 func IsNonPositive(value float64) bool { return value <= 0 } // InRange returns true if value lies between left and right border func InRangeInt(value, left, right interface{}) bool { value64, _ := ToInt(value) left64, _ := ToInt(left) right64, _ := ToInt(right) if left64 > right64 { left64, right64 = right64, left64 } return value64 >= left64 && value64 <= right64 } // InRange returns true if value lies between left and right border func InRangeFloat32(value, left, right float32) bool { if left > right { left, right = right, left } return value >= left && value <= right } // InRange returns true if value lies between left and right border func InRangeFloat64(value, left, right float64) bool { if left > right { left, right = right, left } return value >= left && value <= right } // InRange returns true if value lies between left and right border, generic type to handle int, float32 or float64, all types must the same type func InRange(value interface{}, left interface{}, right interface{}) bool { reflectValue := reflect.TypeOf(value).Kind() reflectLeft := reflect.TypeOf(left).Kind() reflectRight := reflect.TypeOf(right).Kind() if reflectValue == reflect.Int && reflectLeft == reflect.Int && reflectRight == reflect.Int { return InRangeInt(value.(int), left.(int), right.(int)) } else 
if reflectValue == reflect.Float32 && reflectLeft == reflect.Float32 && reflectRight == reflect.Float32 { return InRangeFloat32(value.(float32), left.(float32), right.(float32)) } else if reflectValue == reflect.Float64 && reflectLeft == reflect.Float64 && reflectRight == reflect.Float64 { return InRangeFloat64(value.(float64), left.(float64), right.(float64)) } else { return false } } // IsWhole returns true if value is whole number func IsWhole(value float64) bool { return math.Remainder(value, 1) == 0 } // IsNatural returns true if value is natural number (positive and whole) func IsNatural(value float64) bool { return IsWhole(value) && IsPositive(value) }
9,692
0
kubeflow_public_repos/fate-operator/vendor/github.com/asaskevich
kubeflow_public_repos/fate-operator/vendor/github.com/asaskevich/govalidator/utils.go
package govalidator import ( "errors" "fmt" "html" "math" "path" "regexp" "strings" "unicode" "unicode/utf8" ) // Contains checks if the string contains the substring. func Contains(str, substring string) bool { return strings.Contains(str, substring) } // Matches checks if string matches the pattern (pattern is regular expression) // In case of error return false func Matches(str, pattern string) bool { match, _ := regexp.MatchString(pattern, str) return match } // LeftTrim trims characters from the left side of the input. // If second argument is empty, it will remove leading spaces. func LeftTrim(str, chars string) string { if chars == "" { return strings.TrimLeftFunc(str, unicode.IsSpace) } r, _ := regexp.Compile("^[" + chars + "]+") return r.ReplaceAllString(str, "") } // RightTrim trims characters from the right side of the input. // If second argument is empty, it will remove trailing spaces. func RightTrim(str, chars string) string { if chars == "" { return strings.TrimRightFunc(str, unicode.IsSpace) } r, _ := regexp.Compile("[" + chars + "]+$") return r.ReplaceAllString(str, "") } // Trim trims characters from both sides of the input. // If second argument is empty, it will remove spaces. func Trim(str, chars string) string { return LeftTrim(RightTrim(str, chars), chars) } // WhiteList removes characters that do not appear in the whitelist. func WhiteList(str, chars string) string { pattern := "[^" + chars + "]+" r, _ := regexp.Compile(pattern) return r.ReplaceAllString(str, "") } // BlackList removes characters that appear in the blacklist. func BlackList(str, chars string) string { pattern := "[" + chars + "]+" r, _ := regexp.Compile(pattern) return r.ReplaceAllString(str, "") } // StripLow removes characters with a numerical value < 32 and 127, mostly control characters. // If keep_new_lines is true, newline characters are preserved (\n and \r, hex 0xA and 0xD). 
func StripLow(str string, keepNewLines bool) string { chars := "" if keepNewLines { chars = "\x00-\x09\x0B\x0C\x0E-\x1F\x7F" } else { chars = "\x00-\x1F\x7F" } return BlackList(str, chars) } // ReplacePattern replaces regular expression pattern in string func ReplacePattern(str, pattern, replace string) string { r, _ := regexp.Compile(pattern) return r.ReplaceAllString(str, replace) } // Escape replaces <, >, & and " with HTML entities. var Escape = html.EscapeString func addSegment(inrune, segment []rune) []rune { if len(segment) == 0 { return inrune } if len(inrune) != 0 { inrune = append(inrune, '_') } inrune = append(inrune, segment...) return inrune } // UnderscoreToCamelCase converts from underscore separated form to camel case form. // Ex.: my_func => MyFunc func UnderscoreToCamelCase(s string) string { return strings.Replace(strings.Title(strings.Replace(strings.ToLower(s), "_", " ", -1)), " ", "", -1) } // CamelCaseToUnderscore converts from camel case form to underscore separated form. 
// Ex.: MyFunc => my_func func CamelCaseToUnderscore(str string) string { var output []rune var segment []rune for _, r := range str { // not treat number as separate segment if !unicode.IsLower(r) && string(r) != "_" && !unicode.IsNumber(r) { output = addSegment(output, segment) segment = nil } segment = append(segment, unicode.ToLower(r)) } output = addSegment(output, segment) return string(output) } // Reverse returns reversed string func Reverse(s string) string { r := []rune(s) for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 { r[i], r[j] = r[j], r[i] } return string(r) } // GetLines splits string by "\n" and return array of lines func GetLines(s string) []string { return strings.Split(s, "\n") } // GetLine returns specified line of multiline string func GetLine(s string, index int) (string, error) { lines := GetLines(s) if index < 0 || index >= len(lines) { return "", errors.New("line index out of bounds") } return lines[index], nil } // RemoveTags removes all tags from HTML string func RemoveTags(s string) string { return ReplacePattern(s, "<[^>]*>", "") } // SafeFileName returns safe string that can be used in file names func SafeFileName(str string) string { name := strings.ToLower(str) name = path.Clean(path.Base(name)) name = strings.Trim(name, " ") separators, err := regexp.Compile(`[ &_=+:]`) if err == nil { name = separators.ReplaceAllString(name, "-") } legal, err := regexp.Compile(`[^[:alnum:]-.]`) if err == nil { name = legal.ReplaceAllString(name, "") } for strings.Contains(name, "--") { name = strings.Replace(name, "--", "-", -1) } return name } // NormalizeEmail canonicalize an email address. // The local part of the email address is lowercased for all domains; the hostname is always lowercased and // the local part of the email address is always lowercased for hosts that are known to be case-insensitive (currently only GMail). 
// Normalization follows special rules for known providers: currently, GMail addresses have dots removed in the local part and // are stripped of tags (e.g. [email protected] becomes [email protected]) and all @googlemail.com addresses are // normalized to @gmail.com. func NormalizeEmail(str string) (string, error) { if !IsEmail(str) { return "", fmt.Errorf("%s is not an email", str) } parts := strings.Split(str, "@") parts[0] = strings.ToLower(parts[0]) parts[1] = strings.ToLower(parts[1]) if parts[1] == "gmail.com" || parts[1] == "googlemail.com" { parts[1] = "gmail.com" parts[0] = strings.Split(ReplacePattern(parts[0], `\.`, ""), "+")[0] } return strings.Join(parts, "@"), nil } // Truncate a string to the closest length without breaking words. func Truncate(str string, length int, ending string) string { var aftstr, befstr string if len(str) > length { words := strings.Fields(str) before, present := 0, 0 for i := range words { befstr = aftstr before = present aftstr = aftstr + words[i] + " " present = len(aftstr) if present > length && i != 0 { if (length - before) < (present - length) { return Trim(befstr, " /\\.,\"'#!?&@+-") + ending } return Trim(aftstr, " /\\.,\"'#!?&@+-") + ending } } } return str } // PadLeft pads left side of a string if size of string is less then indicated pad length func PadLeft(str string, padStr string, padLen int) string { return buildPadStr(str, padStr, padLen, true, false) } // PadRight pads right side of a string if size of string is less then indicated pad length func PadRight(str string, padStr string, padLen int) string { return buildPadStr(str, padStr, padLen, false, true) } // PadBoth pads both sides of a string if size of string is less then indicated pad length func PadBoth(str string, padStr string, padLen int) string { return buildPadStr(str, padStr, padLen, true, true) } // PadString either left, right or both sides. 
// Note that padding string can be unicode and more then one character func buildPadStr(str string, padStr string, padLen int, padLeft bool, padRight bool) string { // When padded length is less then the current string size if padLen < utf8.RuneCountInString(str) { return str } padLen -= utf8.RuneCountInString(str) targetLen := padLen targetLenLeft := targetLen targetLenRight := targetLen if padLeft && padRight { targetLenLeft = padLen / 2 targetLenRight = padLen - targetLenLeft } strToRepeatLen := utf8.RuneCountInString(padStr) repeatTimes := int(math.Ceil(float64(targetLen) / float64(strToRepeatLen))) repeatedString := strings.Repeat(padStr, repeatTimes) leftSide := "" if padLeft { leftSide = repeatedString[0:targetLenLeft] } rightSide := "" if padRight { rightSide = repeatedString[0:targetLenRight] } return leftSide + str + rightSide } // TruncatingErrorf removes extra args from fmt.Errorf if not formatted in the str object func TruncatingErrorf(str string, args ...interface{}) error { n := strings.Count(str, "%s") return fmt.Errorf(str, args[:n]...) }
9,693
0
kubeflow_public_repos/fate-operator/vendor/github.com/asaskevich
kubeflow_public_repos/fate-operator/vendor/github.com/asaskevich/govalidator/LICENSE
The MIT License (MIT) Copyright (c) 2014 Alex Saskevich Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
9,694
0
kubeflow_public_repos/fate-operator/vendor/github.com/asaskevich
kubeflow_public_repos/fate-operator/vendor/github.com/asaskevich/govalidator/converter.go
package govalidator import ( "encoding/json" "fmt" "reflect" "strconv" ) // ToString convert the input to a string. func ToString(obj interface{}) string { res := fmt.Sprintf("%v", obj) return string(res) } // ToJSON convert the input to a valid JSON string func ToJSON(obj interface{}) (string, error) { res, err := json.Marshal(obj) if err != nil { res = []byte("") } return string(res), err } // ToFloat convert the input string to a float, or 0.0 if the input is not a float. func ToFloat(str string) (float64, error) { res, err := strconv.ParseFloat(str, 64) if err != nil { res = 0.0 } return res, err } // ToInt convert the input string or any int type to an integer type 64, or 0 if the input is not an integer. func ToInt(value interface{}) (res int64, err error) { val := reflect.ValueOf(value) switch value.(type) { case int, int8, int16, int32, int64: res = val.Int() case uint, uint8, uint16, uint32, uint64: res = int64(val.Uint()) case string: if IsInt(val.String()) { res, err = strconv.ParseInt(val.String(), 0, 64) if err != nil { res = 0 } } else { err = fmt.Errorf("math: square root of negative number %g", value) res = 0 } default: err = fmt.Errorf("math: square root of negative number %g", value) res = 0 } return } // ToBoolean convert the input string to a boolean. func ToBoolean(str string) (bool, error) { return strconv.ParseBool(str) }
9,695
0
kubeflow_public_repos/fate-operator/vendor/github.com/asaskevich
kubeflow_public_repos/fate-operator/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md
#### Support If you do have a contribution to the package, feel free to create a Pull Request or an Issue. #### What to contribute If you don't know what to do, there are some features and functions that need to be done - [ ] Refactor code - [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check - [ ] Create an up-to-date list of contributors and projects that are currently using this package - [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues) - [ ] Update the [list of functions](https://github.com/asaskevich/govalidator#list-of-functions) - [ ] Update the [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that are available for `ValidateStruct` and add new ones - [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc - [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224) - [ ] Implement fuzzing testing - [ ] Implement some struct/map/array utilities - [ ] Implement map/array validation - [ ] Implement benchmarking - [ ] Implement batch of examples - [ ] Look at forks for new features and fixes #### Advice Feel free to create what you want, but keep in mind when you implement new features: - Code must be clear and readable, and names of variables/constants must clearly describe what they are doing - Public functions must be documented and described in the source file and added to README.md to the list of available functions - There must be unit-tests for any new functions and improvements ## Financial contributions We also welcome financial contributions in full transparency on our [open collective](https://opencollective.com/govalidator). Anyone can file an expense. If the expense makes sense for the development of the community, it will be "merged" in the ledger of our open collective by the core contributors and the person who filed the expense will be reimbursed.
## Credits ### Contributors Thank you to all the people who have already contributed to govalidator! <a href="https://github.com/asaskevich/govalidator/graphs/contributors"><img src="https://opencollective.com/govalidator/contributors.svg?width=890" /></a> ### Backers Thank you to all our backers! [[Become a backer](https://opencollective.com/govalidator#backer)] <a href="https://opencollective.com/govalidator#backers" target="_blank"><img src="https://opencollective.com/govalidator/backers.svg?width=890"></a> ### Sponsors Thank you to all our sponsors! (please ask your company to also support this open source project by [becoming a sponsor](https://opencollective.com/govalidator#sponsor)) <a href="https://opencollective.com/govalidator/sponsor/0/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/0/avatar.svg"></a> <a href="https://opencollective.com/govalidator/sponsor/1/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/1/avatar.svg"></a> <a href="https://opencollective.com/govalidator/sponsor/2/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/2/avatar.svg"></a> <a href="https://opencollective.com/govalidator/sponsor/3/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/3/avatar.svg"></a> <a href="https://opencollective.com/govalidator/sponsor/4/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/4/avatar.svg"></a> <a href="https://opencollective.com/govalidator/sponsor/5/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/5/avatar.svg"></a> <a href="https://opencollective.com/govalidator/sponsor/6/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/6/avatar.svg"></a> <a href="https://opencollective.com/govalidator/sponsor/7/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/7/avatar.svg"></a> <a 
href="https://opencollective.com/govalidator/sponsor/8/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/8/avatar.svg"></a> <a href="https://opencollective.com/govalidator/sponsor/9/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/9/avatar.svg"></a>
9,696
0
kubeflow_public_repos/fate-operator/vendor/github.com/asaskevich
kubeflow_public_repos/fate-operator/vendor/github.com/asaskevich/govalidator/arrays.go
package govalidator // Iterator is the function that accepts element of slice/array and its index type Iterator func(interface{}, int) // ResultIterator is the function that accepts element of slice/array and its index and returns any result type ResultIterator func(interface{}, int) interface{} // ConditionIterator is the function that accepts element of slice/array and its index and returns boolean type ConditionIterator func(interface{}, int) bool // Each iterates over the slice and apply Iterator to every item func Each(array []interface{}, iterator Iterator) { for index, data := range array { iterator(data, index) } } // Map iterates over the slice and apply ResultIterator to every item. Returns new slice as a result. func Map(array []interface{}, iterator ResultIterator) []interface{} { var result = make([]interface{}, len(array)) for index, data := range array { result[index] = iterator(data, index) } return result } // Find iterates over the slice and apply ConditionIterator to every item. Returns first item that meet ConditionIterator or nil otherwise. func Find(array []interface{}, iterator ConditionIterator) interface{} { for index, data := range array { if iterator(data, index) { return data } } return nil } // Filter iterates over the slice and apply ConditionIterator to every item. Returns new slice. func Filter(array []interface{}, iterator ConditionIterator) []interface{} { var result = make([]interface{}, 0) for index, data := range array { if iterator(data, index) { result = append(result, data) } } return result } // Count iterates over the slice and apply ConditionIterator to every item. Returns count of items that meets ConditionIterator. func Count(array []interface{}, iterator ConditionIterator) int { count := 0 for index, data := range array { if iterator(data, index) { count = count + 1 } } return count }
9,697
0
kubeflow_public_repos/fate-operator/vendor/github.com/asaskevich
kubeflow_public_repos/fate-operator/vendor/github.com/asaskevich/govalidator/doc.go
// Package govalidator is a package of validators and sanitizers for
// strings, structures and collections.
package govalidator
9,698
0
kubeflow_public_repos/fate-operator/vendor/github.com/asaskevich
kubeflow_public_repos/fate-operator/vendor/github.com/asaskevich/govalidator/patterns.go
package govalidator

import "regexp"

// Basic regular expressions for validating strings.
// NOTE(review): these patterns are load-bearing for the package's Is* validators;
// do not edit a pattern without a matching test elsewhere in the package.
const (
	Email          string = "^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$"
	CreditCard     string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|(222[1-9]|22[3-9][0-9]|2[3-6][0-9]{2}|27[01][0-9]|2720)[0-9]{12}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11}|6[27][0-9]{14})$"
	ISBN10         string = "^(?:[0-9]{9}X|[0-9]{10})$"
	ISBN13         string = "^(?:[0-9]{13})$"
	UUID3          string = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$"
	UUID4          string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
	UUID5          string = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
	UUID           string = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"
	Alpha          string = "^[a-zA-Z]+$"
	Alphanumeric   string = "^[a-zA-Z0-9]+$"
	Numeric        string = "^[0-9]+$"
	Int            string = "^(?:[-+]?(?:0|[1-9][0-9]*))$"
	Float          string = "^(?:[-+]?(?:[0-9]+))?(?:\\.[0-9]*)?(?:[eE][\\+\\-]?(?:[0-9]+))?$"
	Hexadecimal    string = "^[0-9a-fA-F]+$"
	Hexcolor       string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$"
	RGBcolor       string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$"
	ASCII          string = "^[\x00-\x7F]+$"
	Multibyte      string = "[^\x00-\x7F]"
	FullWidth      string = "[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]"
	HalfWidth      string = "[\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]"
	Base64         string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$"
	PrintableASCII string = "^[\x20-\x7E]+$"
	DataURI        string = "^data:.+\\/(.+);base64$"
	MagnetURI      string = "^magnet:\\?xt=urn:[a-zA-Z0-9]+:[a-zA-Z0-9]{32,40}&dn=.+&tr=.+$"
	Latitude       string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$"
	Longitude      string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$"
	DNSName        string = `^([a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*[\._]?$`
	// IP matches both IPv6 (including zone IDs and IPv4-mapped forms) and
	// dotted-quad IPv4 addresses; it is also spliced into URL below.
	IP string = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))`
	// URL* fragments are concatenated into the composite URL pattern below.
	URLSchema    string = `((ftp|tcp|udp|wss?|https?):\/\/)`
	URLUsername  string = `(\S+(:\S*)?@)`
	URLPath      string = `((\/|\?|#)[^\s]*)`
	URLPort      string = `(:(\d{1,5}))`
	URLIP        string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3]|24\d|25[0-5])(\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-5]))`
	URLSubdomain string = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))`
	URL          string = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$`
	SSN          string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$`
	WinPath      string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$`
	UnixPath     string = `^(/[^/\x00]*)+/?$`
	Semver       string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$"
	// tagName is the struct-tag key this package reads (e.g. `valid:"email"`).
	tagName           string = "valid"
	hasLowerCase      string = ".*[[:lower:]]"
	hasUpperCase      string = ".*[[:upper:]]"
	hasWhitespace     string = ".*[[:space:]]"
	hasWhitespaceOnly string = "^[[:space:]]+$"
	IMEI              string = "^[0-9a-f]{14}$|^\\d{15}$|^\\d{18}$"
)

// Used by IsFilePath func
const (
	// Unknown is unresolved OS type
	Unknown = iota
	// Win is Windows type
	Win
	// Unix is *nix OS types
	Unix
)

// Precompiled forms of the patterns above. Compiling once at package scope
// avoids recompilation on every validation call; MustCompile panics at init
// time if a pattern is invalid, which surfaces pattern errors immediately.
var (
	// NOTE(review): userRegexp/hostRegexp/userDotRegexp are presumably used by
	// the e-mail validator — confirm against the validator implementation.
	userRegexp          = regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$")
	hostRegexp          = regexp.MustCompile("^[^\\s]+\\.[^\\s]+$")
	userDotRegexp       = regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})")
	rxEmail             = regexp.MustCompile(Email)
	rxCreditCard        = regexp.MustCompile(CreditCard)
	rxISBN10            = regexp.MustCompile(ISBN10)
	rxISBN13            = regexp.MustCompile(ISBN13)
	rxUUID3             = regexp.MustCompile(UUID3)
	rxUUID4             = regexp.MustCompile(UUID4)
	rxUUID5             = regexp.MustCompile(UUID5)
	rxUUID              = regexp.MustCompile(UUID)
	rxAlpha             = regexp.MustCompile(Alpha)
	rxAlphanumeric      = regexp.MustCompile(Alphanumeric)
	rxNumeric           = regexp.MustCompile(Numeric)
	rxInt               = regexp.MustCompile(Int)
	rxFloat             = regexp.MustCompile(Float)
	rxHexadecimal       = regexp.MustCompile(Hexadecimal)
	rxHexcolor          = regexp.MustCompile(Hexcolor)
	rxRGBcolor          = regexp.MustCompile(RGBcolor)
	rxASCII             = regexp.MustCompile(ASCII)
	rxPrintableASCII    = regexp.MustCompile(PrintableASCII)
	rxMultibyte         = regexp.MustCompile(Multibyte)
	rxFullWidth         = regexp.MustCompile(FullWidth)
	rxHalfWidth         = regexp.MustCompile(HalfWidth)
	rxBase64            = regexp.MustCompile(Base64)
	rxDataURI           = regexp.MustCompile(DataURI)
	rxMagnetURI         = regexp.MustCompile(MagnetURI)
	rxLatitude          = regexp.MustCompile(Latitude)
	rxLongitude         = regexp.MustCompile(Longitude)
	rxDNSName           = regexp.MustCompile(DNSName)
	rxURL               = regexp.MustCompile(URL)
	rxSSN               = regexp.MustCompile(SSN)
	rxWinPath           = regexp.MustCompile(WinPath)
	rxUnixPath          = regexp.MustCompile(UnixPath)
	rxSemver            = regexp.MustCompile(Semver)
	rxHasLowerCase      = regexp.MustCompile(hasLowerCase)
	rxHasUpperCase      = regexp.MustCompile(hasUpperCase)
	rxHasWhitespace     = regexp.MustCompile(hasWhitespace)
	rxHasWhitespaceOnly = regexp.MustCompile(hasWhitespaceOnly)
	rxIMEI              = regexp.MustCompile(IMEI)
)