kubeflow_public_repos/fate-operator/vendor/github.com/ghodss/yaml/.travis.yml
language: go

go:
  - 1.3
  - 1.4

script:
  - go test
  - go build
kubeflow_public_repos/fate-operator/vendor/github.com/ghodss/yaml/fields.go
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package yaml

import (
	"bytes"
	"encoding"
	"encoding/json"
	"reflect"
	"sort"
	"strings"
	"sync"
	"unicode"
	"unicode/utf8"
)

// indirect walks down v allocating pointers as needed,
// until it gets to a non-pointer.
// if it encounters an Unmarshaler, indirect stops and returns that.
// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
	// If v is a named type and is addressable,
	// start with its address, so that if the type has pointer methods,
	// we find them.
	if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
		v = v.Addr()
	}
	for {
		// Load value from interface, but only if the result will be
		// usefully addressable.
		if v.Kind() == reflect.Interface && !v.IsNil() {
			e := v.Elem()
			if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
				v = e
				continue
			}
		}
		if v.Kind() != reflect.Ptr {
			break
		}
		if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
			break
		}
		if v.IsNil() {
			if v.CanSet() {
				v.Set(reflect.New(v.Type().Elem()))
			} else {
				v = reflect.New(v.Type().Elem())
			}
		}
		if v.Type().NumMethod() > 0 {
			if u, ok := v.Interface().(json.Unmarshaler); ok {
				return u, nil, reflect.Value{}
			}
			if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
				return nil, u, reflect.Value{}
			}
		}
		v = v.Elem()
	}
	return nil, nil, v
}

// A field represents a single field found in a struct.
type field struct {
	name      string
	nameBytes []byte                 // []byte(name)
	equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent

	tag       bool
	index     []int
	typ       reflect.Type
	omitEmpty bool
	quoted    bool
}

func fillField(f field) field {
	f.nameBytes = []byte(f.name)
	f.equalFold = foldFunc(f.nameBytes)
	return f
}

// byName sorts field by name, breaking ties with depth,
// then breaking ties with "name came from json tag", then
// breaking ties with index sequence.
type byName []field

func (x byName) Len() int { return len(x) }

func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

func (x byName) Less(i, j int) bool {
	if x[i].name != x[j].name {
		return x[i].name < x[j].name
	}
	if len(x[i].index) != len(x[j].index) {
		return len(x[i].index) < len(x[j].index)
	}
	if x[i].tag != x[j].tag {
		return x[i].tag
	}
	return byIndex(x).Less(i, j)
}

// byIndex sorts field by index sequence.
type byIndex []field

func (x byIndex) Len() int { return len(x) }

func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

func (x byIndex) Less(i, j int) bool {
	for k, xik := range x[i].index {
		if k >= len(x[j].index) {
			return false
		}
		if xik != x[j].index[k] {
			return xik < x[j].index[k]
		}
	}
	return len(x[i].index) < len(x[j].index)
}

// typeFields returns a list of fields that JSON should recognize for the given type.
// The algorithm is breadth-first search over the set of structs to include - the top struct
// and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
	// Anonymous fields to explore at the current level and the next.
	current := []field{}
	next := []field{{typ: t}}

	// Count of queued names for current level and the next.
	count := map[reflect.Type]int{}
	nextCount := map[reflect.Type]int{}

	// Types already visited at an earlier level.
	visited := map[reflect.Type]bool{}

	// Fields found.
	var fields []field

	for len(next) > 0 {
		current, next = next, current[:0]
		count, nextCount = nextCount, map[reflect.Type]int{}

		for _, f := range current {
			if visited[f.typ] {
				continue
			}
			visited[f.typ] = true

			// Scan f.typ for fields to include.
			for i := 0; i < f.typ.NumField(); i++ {
				sf := f.typ.Field(i)
				if sf.PkgPath != "" { // unexported
					continue
				}
				tag := sf.Tag.Get("json")
				if tag == "-" {
					continue
				}
				name, opts := parseTag(tag)
				if !isValidTag(name) {
					name = ""
				}
				index := make([]int, len(f.index)+1)
				copy(index, f.index)
				index[len(f.index)] = i

				ft := sf.Type
				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
					// Follow pointer.
					ft = ft.Elem()
				}

				// Record found field and index sequence.
				if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
					tagged := name != ""
					if name == "" {
						name = sf.Name
					}
					fields = append(fields, fillField(field{
						name:      name,
						tag:       tagged,
						index:     index,
						typ:       ft,
						omitEmpty: opts.Contains("omitempty"),
						quoted:    opts.Contains("string"),
					}))
					if count[f.typ] > 1 {
						// If there were multiple instances, add a second,
						// so that the annihilation code will see a duplicate.
						// It only cares about the distinction between 1 or 2,
						// so don't bother generating any more copies.
						fields = append(fields, fields[len(fields)-1])
					}
					continue
				}

				// Record new anonymous struct to explore in next round.
				nextCount[ft]++
				if nextCount[ft] == 1 {
					next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
				}
			}
		}
	}

	sort.Sort(byName(fields))

	// Delete all fields that are hidden by the Go rules for embedded fields,
	// except that fields with JSON tags are promoted.

	// The fields are sorted in primary order of name, secondary order
	// of field index length. Loop over names; for each name, delete
	// hidden fields by choosing the one dominant field that survives.
	out := fields[:0]
	for advance, i := 0, 0; i < len(fields); i += advance {
		// One iteration per name.
		// Find the sequence of fields with the name of this first field.
		fi := fields[i]
		name := fi.name
		for advance = 1; i+advance < len(fields); advance++ {
			fj := fields[i+advance]
			if fj.name != name {
				break
			}
		}
		if advance == 1 { // Only one field with this name
			out = append(out, fi)
			continue
		}
		dominant, ok := dominantField(fields[i : i+advance])
		if ok {
			out = append(out, dominant)
		}
	}

	fields = out
	sort.Sort(byIndex(fields))

	return fields
}

// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// JSON tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
	// The fields are sorted in increasing index-length order. The winner
	// must therefore be one with the shortest index length. Drop all
	// longer entries, which is easy: just truncate the slice.
	length := len(fields[0].index)
	tagged := -1 // Index of first tagged field.
	for i, f := range fields {
		if len(f.index) > length {
			fields = fields[:i]
			break
		}
		if f.tag {
			if tagged >= 0 {
				// Multiple tagged fields at the same level: conflict.
				// Return no field.
				return field{}, false
			}
			tagged = i
		}
	}
	if tagged >= 0 {
		return fields[tagged], true
	}
	// All remaining fields have the same length. If there's more than one,
	// we have a conflict (two fields named "X" at the same level) and we
	// return no field.
	if len(fields) > 1 {
		return field{}, false
	}
	return fields[0], true
}

var fieldCache struct {
	sync.RWMutex
	m map[reflect.Type][]field
}

// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
	fieldCache.RLock()
	f := fieldCache.m[t]
	fieldCache.RUnlock()
	if f != nil {
		return f
	}

	// Compute fields without lock.
	// Might duplicate effort but won't hold other computations back.
	f = typeFields(t)
	if f == nil {
		f = []field{}
	}

	fieldCache.Lock()
	if fieldCache.m == nil {
		fieldCache.m = map[reflect.Type][]field{}
	}
	fieldCache.m[t] = f
	fieldCache.Unlock()
	return f
}

func isValidTag(s string) bool {
	if s == "" {
		return false
	}
	for _, c := range s {
		switch {
		case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
			// Backslash and quote chars are reserved, but
			// otherwise any punctuation chars are allowed
			// in a tag name.
		default:
			if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
				return false
			}
		}
	}
	return true
}

const (
	caseMask     = ^byte(0x20) // Mask to ignore case in ASCII.
	kelvin       = '\u212a'
	smallLongEss = '\u017f'
)

// foldFunc returns one of four different case folding equivalence
// functions, from most general (and slow) to fastest:
//
// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
// 3) asciiEqualFold, no special, but includes non-letters (including _)
// 4) simpleLetterEqualFold, no specials, no non-letters.
//
// The letters S and K are special because they map to 3 runes, not just 2:
//  * S maps to s and to U+017F 'ſ' Latin small letter long s
//  * k maps to K and to U+212A 'K' Kelvin sign
// See http://play.golang.org/p/tTxjOc0OGo
//
// The returned function is specialized for matching against s and
// should only be given s. It's not curried for performance reasons.
func foldFunc(s []byte) func(s, t []byte) bool {
	nonLetter := false
	special := false // special letter
	for _, b := range s {
		if b >= utf8.RuneSelf {
			return bytes.EqualFold
		}
		upper := b & caseMask
		if upper < 'A' || upper > 'Z' {
			nonLetter = true
		} else if upper == 'K' || upper == 'S' {
			// See above for why these letters are special.
			special = true
		}
	}
	if special {
		return equalFoldRight
	}
	if nonLetter {
		return asciiEqualFold
	}
	return simpleLetterEqualFold
}

// equalFoldRight is a specialization of bytes.EqualFold when s is
// known to be all ASCII (including punctuation), but contains an 's',
// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
// See comments on foldFunc.
func equalFoldRight(s, t []byte) bool {
	for _, sb := range s {
		if len(t) == 0 {
			return false
		}
		tb := t[0]
		if tb < utf8.RuneSelf {
			if sb != tb {
				sbUpper := sb & caseMask
				if 'A' <= sbUpper && sbUpper <= 'Z' {
					if sbUpper != tb&caseMask {
						return false
					}
				} else {
					return false
				}
			}
			t = t[1:]
			continue
		}
		// sb is ASCII and t is not. t must be either kelvin
		// sign or long s; sb must be s, S, k, or K.
		tr, size := utf8.DecodeRune(t)
		switch sb {
		case 's', 'S':
			if tr != smallLongEss {
				return false
			}
		case 'k', 'K':
			if tr != kelvin {
				return false
			}
		default:
			return false
		}
		t = t[size:]
	}
	if len(t) > 0 {
		return false
	}
	return true
}

// asciiEqualFold is a specialization of bytes.EqualFold for use when
// s is all ASCII (but may contain non-letters) and contains no
// special-folding letters.
// See comments on foldFunc.
func asciiEqualFold(s, t []byte) bool {
	if len(s) != len(t) {
		return false
	}
	for i, sb := range s {
		tb := t[i]
		if sb == tb {
			continue
		}
		if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
			if sb&caseMask != tb&caseMask {
				return false
			}
		} else {
			return false
		}
	}
	return true
}

// simpleLetterEqualFold is a specialization of bytes.EqualFold for
// use when s is all ASCII letters (no underscores, etc) and also
// doesn't contain 'k', 'K', 's', or 'S'.
// See comments on foldFunc.
func simpleLetterEqualFold(s, t []byte) bool {
	if len(s) != len(t) {
		return false
	}
	for i, b := range s {
		if b&caseMask != t[i]&caseMask {
			return false
		}
	}
	return true
}

// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string

// parseTag splits a struct field's json tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
	if idx := strings.Index(tag, ","); idx != -1 {
		return tag[:idx], tagOptions(tag[idx+1:])
	}
	return tag, tagOptions("")
}

// Contains reports whether a comma-separated list of options
// contains a particular substr flag. substr must be surrounded by a
// string boundary or commas.
func (o tagOptions) Contains(optionName string) bool {
	if len(o) == 0 {
		return false
	}
	s := string(o)
	for s != "" {
		var next string
		i := strings.Index(s, ",")
		if i >= 0 {
			s, next = s[:i], s[i+1:]
		}
		if s == optionName {
			return true
		}
		s = next
	}
	return false
}
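`parseTag`, `tagOptions`, and friends are unexported, so they can only be exercised from inside package yaml. This illustrative test-style sketch (hypothetical, not part of the vendored file) pins down the tag-splitting behavior:

```go
package yaml

import "testing"

// TestParseTagSketch is an illustrative sketch, not part of the vendored file.
func TestParseTagSketch(t *testing.T) {
	// "renamed,omitempty,string" splits into a name and comma-separated options.
	name, opts := parseTag("renamed,omitempty,string")
	if name != "renamed" {
		t.Fatalf("name = %q, want %q", name, "renamed")
	}
	if !opts.Contains("omitempty") || !opts.Contains("string") {
		t.Fatalf("options not detected in %q", opts)
	}
	// An empty tag yields an empty name and no options.
	if n, o := parseTag(""); n != "" || o.Contains("omitempty") {
		t.Fatalf("unexpected result for empty tag: %q %q", n, o)
	}
}
```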
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/golang-lru/lru.go
package lru

import (
	"sync"

	"github.com/hashicorp/golang-lru/simplelru"
)

// Cache is a thread-safe fixed size LRU cache.
type Cache struct {
	lru  simplelru.LRUCache
	lock sync.RWMutex
}

// New creates an LRU of the given size.
func New(size int) (*Cache, error) {
	return NewWithEvict(size, nil)
}

// NewWithEvict constructs a fixed size cache with the given eviction
// callback.
func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) {
	lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted))
	if err != nil {
		return nil, err
	}
	c := &Cache{
		lru: lru,
	}
	return c, nil
}

// Purge is used to completely clear the cache.
func (c *Cache) Purge() {
	c.lock.Lock()
	c.lru.Purge()
	c.lock.Unlock()
}

// Add adds a value to the cache. Returns true if an eviction occurred.
func (c *Cache) Add(key, value interface{}) (evicted bool) {
	c.lock.Lock()
	evicted = c.lru.Add(key, value)
	c.lock.Unlock()
	return evicted
}

// Get looks up a key's value from the cache.
func (c *Cache) Get(key interface{}) (value interface{}, ok bool) {
	c.lock.Lock()
	value, ok = c.lru.Get(key)
	c.lock.Unlock()
	return value, ok
}

// Contains checks if a key is in the cache, without updating the
// recent-ness or deleting it for being stale.
func (c *Cache) Contains(key interface{}) bool {
	c.lock.RLock()
	containKey := c.lru.Contains(key)
	c.lock.RUnlock()
	return containKey
}

// Peek returns the key value (or undefined if not found) without updating
// the "recently used"-ness of the key.
func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) {
	c.lock.RLock()
	value, ok = c.lru.Peek(key)
	c.lock.RUnlock()
	return value, ok
}

// ContainsOrAdd checks if a key is in the cache without updating the
// recent-ness or deleting it for being stale, and if not, adds the value.
// Returns whether found and whether an eviction occurred.
func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) {
	c.lock.Lock()
	defer c.lock.Unlock()

	if c.lru.Contains(key) {
		return true, false
	}
	evicted = c.lru.Add(key, value)
	return false, evicted
}

// PeekOrAdd checks if a key is in the cache without updating the
// recent-ness or deleting it for being stale, and if not, adds the value.
// Returns whether found and whether an eviction occurred.
func (c *Cache) PeekOrAdd(key, value interface{}) (previous interface{}, ok, evicted bool) {
	c.lock.Lock()
	defer c.lock.Unlock()

	previous, ok = c.lru.Peek(key)
	if ok {
		return previous, true, false
	}

	evicted = c.lru.Add(key, value)
	return nil, false, evicted
}

// Remove removes the provided key from the cache.
func (c *Cache) Remove(key interface{}) (present bool) {
	c.lock.Lock()
	present = c.lru.Remove(key)
	c.lock.Unlock()
	return
}

// Resize changes the cache size.
func (c *Cache) Resize(size int) (evicted int) {
	c.lock.Lock()
	evicted = c.lru.Resize(size)
	c.lock.Unlock()
	return evicted
}

// RemoveOldest removes the oldest item from the cache.
func (c *Cache) RemoveOldest() (key interface{}, value interface{}, ok bool) {
	c.lock.Lock()
	key, value, ok = c.lru.RemoveOldest()
	c.lock.Unlock()
	return
}

// GetOldest returns the oldest entry
func (c *Cache) GetOldest() (key interface{}, value interface{}, ok bool) {
	c.lock.Lock()
	key, value, ok = c.lru.GetOldest()
	c.lock.Unlock()
	return
}

// Keys returns a slice of the keys in the cache, from oldest to newest.
func (c *Cache) Keys() []interface{} {
	c.lock.RLock()
	keys := c.lru.Keys()
	c.lock.RUnlock()
	return keys
}

// Len returns the number of items in the cache.
func (c *Cache) Len() int {
	c.lock.RLock()
	length := c.lru.Len()
	c.lock.RUnlock()
	return length
}
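Taken together, `Cache` is just `simplelru.LRU` behind an `RWMutex`. A minimal usage sketch of the API above, with hypothetical keys and a hypothetical size, assuming the import path used by this vendor tree:

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// NewWithEvict reports evictions through the callback.
	cache, err := lru.NewWithEvict(2, func(key, value interface{}) {
		fmt.Printf("evicted: %v=%v\n", key, value)
	})
	if err != nil {
		panic(err)
	}

	cache.Add("a", 1)
	cache.Add("b", 2)
	cache.Add("c", 3) // evicts "a", the least recently used key

	// ContainsOrAdd checks membership without touching recency,
	// adding only on a miss.
	found, evicted := cache.ContainsOrAdd("b", 20)
	fmt.Println(found, evicted) // true false
}
```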
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/golang-lru/go.mod
module github.com/hashicorp/golang-lru

go 1.12
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/golang-lru/README.md
golang-lru
==========

This provides the `lru` package which implements a fixed-size
thread safe LRU cache. It is based on the cache in Groupcache.

Documentation
=============

Full docs are available on [Godoc](http://godoc.org/github.com/hashicorp/golang-lru)

Example
=======

Using the LRU is very simple:

```go
l, _ := New(128)
for i := 0; i < 256; i++ {
	l.Add(i, nil)
}
if l.Len() != 128 {
	panic(fmt.Sprintf("bad len: %v", l.Len()))
}
```
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/golang-lru/LICENSE
Mozilla Public License, version 2.0

1. Definitions

1.1. "Contributor" means each individual or legal entity that creates, contributes
to the creation of, or owns Covered Software.

1.2. "Contributor Version" means the combination of the Contributions of others
(if any) used by a Contributor and that particular Contributor's Contribution.

1.3. "Contribution" means Covered Software of a particular Contributor.

1.4. "Covered Software" means Source Code Form to which the initial Contributor
has attached the notice in Exhibit A, the Executable Form of such Source Code
Form, and Modifications of such Source Code Form, in each case including
portions thereof.

1.5. "Incompatible With Secondary Licenses" means

a. that the initial Contributor has attached the notice described in Exhibit B
   to the Covered Software; or

b. that the Covered Software was made available under the terms of version 1.1
   or earlier of the License, but not also under the terms of a Secondary
   License.

1.6. "Executable Form" means any form of the work other than Source Code Form.

1.7. "Larger Work" means a work that combines Covered Software with other
material, in a separate file or files, that is not Covered Software.

1.8. "License" means this document.

1.9. "Licensable" means having the right to grant, to the maximum extent
possible, whether at the time of the initial grant or subsequently, any and all
of the rights conveyed by this License.

1.10. "Modifications" means any of the following:

a. any file in Source Code Form that results from an addition to, deletion
   from, or modification of the contents of Covered Software; or

b. any new file in Source Code Form that contains any Covered Software.

1.11. "Patent Claims" of a Contributor means any patent claim(s), including
without limitation, method, process, and apparatus claims, in any patent
Licensable by such Contributor that would be infringed, but for the grant of
the License, by the making, using, selling, offering for sale, having made,
import, or transfer of either its Contributions or its Contributor Version.

1.12. "Secondary License" means either the GNU General Public License, Version
2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General
Public License, Version 3.0, or any later versions of those licenses.

1.13. "Source Code Form" means the form of the work preferred for making
modifications.

1.14. "You" (or "Your") means an individual or a legal entity exercising rights
under this License. For legal entities, "You" includes any entity that
controls, is controlled by, or is under common control with You. For purposes
of this definition, "control" means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or otherwise,
or (b) ownership of more than fifty percent (50%) of the outstanding shares or
beneficial ownership of such entity.

2. License Grants and Conditions

2.1. Grants

Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive
license:

a. under intellectual property rights (other than patent or trademark)
   Licensable by such Contributor to use, reproduce, make available, modify,
   display, perform, distribute, and otherwise exploit its Contributions,
   either on an unmodified basis, with Modifications, or as part of a Larger
   Work; and

b. under Patent Claims of such Contributor to make, use, sell, offer for sale,
   have made, import, and otherwise transfer either its Contributions or its
   Contributor Version.

2.2. Effective Date

The licenses granted in Section 2.1 with respect to any Contribution become
effective for each Contribution on the date the Contributor first distributes
such Contribution.

2.3. Limitations on Grant Scope

The licenses granted in this Section 2 are the only rights granted under this
License. No additional rights or licenses will be implied from the distribution
or licensing of Covered Software under this License. Notwithstanding Section
2.1(b) above, no patent license is granted by a Contributor:

a. for any code that a Contributor has removed from Covered Software; or

b. for infringements caused by: (i) Your and any other third party's
   modifications of Covered Software, or (ii) the combination of its
   Contributions with other software (except as part of its Contributor
   Version); or

c. under Patent Claims infringed by Covered Software in the absence of its
   Contributions.

This License does not grant any rights in the trademarks, service marks, or
logos of any Contributor (except as may be necessary to comply with the notice
requirements in Section 3.4).

2.4. Subsequent Licenses

No Contributor makes additional grants as a result of Your choice to distribute
the Covered Software under a subsequent version of this License (see Section
10.2) or under the terms of a Secondary License (if permitted under the terms
of Section 3.3).

2.5. Representation

Each Contributor represents that the Contributor believes its Contributions are
its original creation(s) or it has sufficient rights to grant the rights to its
Contributions conveyed by this License.

2.6. Fair Use

This License is not intended to limit any rights You have under applicable
copyright doctrines of fair use, fair dealing, or other equivalents.

2.7. Conditions

Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.

3. Responsibilities

3.1. Distribution of Source Form

All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under the
terms of this License. You must inform recipients that the Source Code Form of
the Covered Software is governed by the terms of this License, and how they can
obtain a copy of this License. You may not attempt to alter or restrict the
recipients' rights in the Source Code Form.

3.2. Distribution of Executable Form

If You distribute Covered Software in Executable Form then:

a. such Covered Software must also be made available in Source Code Form, as
   described in Section 3.1, and You must inform recipients of the Executable
   Form how they can obtain a copy of such Source Code Form by reasonable
   means in a timely manner, at a charge no more than the cost of distribution
   to the recipient; and

b. You may distribute such Executable Form under the terms of this License, or
   sublicense it under different terms, provided that the license for the
   Executable Form does not attempt to limit or alter the recipients' rights
   in the Source Code Form under this License.

3.3. Distribution of a Larger Work

You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for the
Covered Software. If the Larger Work is a combination of Covered Software with
a work governed by one or more Secondary Licenses, and the Covered Software is
not Incompatible With Secondary Licenses, this License permits You to
additionally distribute such Covered Software under the terms of such Secondary
License(s), so that the recipient of the Larger Work may, at their option,
further distribute the Covered Software under the terms of either this License
or such Secondary License(s).

3.4. Notices

You may not remove or alter the substance of any license notices (including
copyright notices, patent notices, disclaimers of warranty, or limitations of
liability) contained within the Source Code Form of the Covered Software,
except that You may alter any license notices to the extent required to remedy
known factual inaccuracies.

3.5. Application of Additional Terms

You may choose to offer, and to charge a fee for, warranty, support, indemnity
or liability obligations to one or more recipients of Covered Software.
However, You may do so only on Your own behalf, and not on behalf of any
Contributor. You must make it absolutely clear that any such warranty, support,
indemnity, or liability obligation is offered by You alone, and You hereby
agree to indemnify every Contributor for any liability incurred by such
Contributor as a result of warranty, support, indemnity or liability terms You
offer. You may include additional disclaimers of warranty and limitations of
liability specific to any jurisdiction.

4. Inability to Comply Due to Statute or Regulation

If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute, judicial
order, or regulation then You must: (a) comply with the terms of this License
to the maximum extent possible; and (b) describe the limitations and the code
they affect. Such description must be placed in a text file included with all
distributions of the Covered Software under this License. Except to the extent
prohibited by statute or regulation, such description must be sufficiently
detailed for a recipient of ordinary skill to be able to understand it.

5. Termination

5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant, then
the rights granted under this License from a particular Contributor are
reinstated (a) provisionally, unless and until such Contributor explicitly and
finally terminates Your grants, and (b) on an ongoing basis, if such
Contributor fails to notify You of the non-compliance by some reasonable means
prior to 60 days after You have come back into compliance. Moreover, Your
grants from a particular Contributor are reinstated on an ongoing basis if such
Contributor notifies You of the non-compliance by some reasonable means, this
is the first time You have received notice of non-compliance with this License
from such Contributor, and You become compliant prior to 30 days after Your
receipt of the notice.

5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions, counter-claims, and
cross-claims) alleging that a Contributor Version directly or indirectly
infringes any patent, then the rights granted to You by any and all
Contributors for the Covered Software under Section 2.1 of this License shall
terminate.

5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.

6. Disclaimer of Warranty

Covered Software is provided under this License on an "as is" basis, without
warranty of any kind, either expressed, implied, or statutory, including,
without limitation, warranties that the Covered Software is free of defects,
merchantable, fit for a particular purpose or non-infringing. The entire risk
as to the quality and performance of the Covered Software is with You. Should
any Covered Software prove defective in any respect, You (not any Contributor)
assume the cost of any necessary servicing, repair, or correction. This
disclaimer of warranty constitutes an essential part of this License. No use of
any Covered Software is authorized under this License except under this
disclaimer.

7. Limitation of Liability

Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all other
commercial damages or losses, even if such party shall have been informed of
the possibility of such damages. This limitation of liability shall not apply
to liability for death or personal injury resulting from such party's
negligence to the extent applicable law prohibits such limitation. Some
jurisdictions do not allow the exclusion or limitation of incidental or
consequential damages, so this exclusion and limitation may not apply to You.

8. Litigation

Any litigation relating to this License may be brought only in the courts of a
jurisdiction where the defendant maintains its principal place of business and
such litigation shall be governed by laws of that jurisdiction, without
reference to its conflict-of-law provisions. Nothing in this Section shall
prevent a party's ability to bring cross-claims or counter-claims.

9. Miscellaneous

This License represents the complete agreement concerning the subject matter
hereof. If any provision of this License is held to be unenforceable, such
provision shall be reformed only to the extent necessary to make it
enforceable. Any law or regulation which provides that the language of a
contract shall be construed against the drafter shall not be used to construe
this License against a Contributor.

10. Versions of the License

10.1. New Versions

Mozilla Foundation is the license steward. Except as provided in Section 10.3,
no one other than the license steward has the right to modify or publish new
versions of this License. Each version will be given a distinguishing version
number.

10.2. Effect of New Versions

You may distribute the Covered Software under the terms of the version of the
License under which You originally received the Covered Software, or under the
terms of any subsequent version published by the license steward.

10.3. Modified Versions

If you create software not governed by this License, and you want to create a
new license for such software, you may create and use a modified version of
this License if you rename the license and remove any references to the name of
the license steward (except to note that such modified license differs from
this License).

10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses

If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the notice
described in Exhibit B of this License must be attached.

Exhibit A - Source Code Form License Notice

This Source Code Form is subject to the terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not distributed with this file, You can obtain
one at http://mozilla.org/MPL/2.0/.

If it is not possible or desirable to put the notice in a particular file, then
You may include the notice in a location (such as a LICENSE file in a relevant
directory) where a recipient would be likely to look for such a notice.

You may add additional accurate notices of copyright ownership.

Exhibit B - "Incompatible With Secondary Licenses" Notice

This Source Code Form is "Incompatible With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/golang-lru/arc.go
package lru

import (
	"sync"

	"github.com/hashicorp/golang-lru/simplelru"
)

// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC).
// ARC is an enhancement over the standard LRU cache in that it tracks both
// frequency and recency of use. This avoids a burst in access to new
// entries from evicting the frequently used older entries. It adds some
// additional tracking overhead to a standard LRU cache, computationally
// it is roughly 2x the cost, and the extra memory overhead is linear
// with the size of the cache. ARC has been patented by IBM, but is
// similar to the TwoQueueCache (2Q) which requires setting parameters.
type ARCCache struct {
	size int // Size is the total capacity of the cache
	p    int // P is the dynamic preference towards T1 or T2

	t1 simplelru.LRUCache // T1 is the LRU for recently accessed items
	b1 simplelru.LRUCache // B1 is the LRU for evictions from t1

	t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items
	b2 simplelru.LRUCache // B2 is the LRU for evictions from t2

	lock sync.RWMutex
}

// NewARC creates an ARC of the given size
func NewARC(size int) (*ARCCache, error) {
	// Create the sub LRUs
	b1, err := simplelru.NewLRU(size, nil)
	if err != nil {
		return nil, err
	}
	b2, err := simplelru.NewLRU(size, nil)
	if err != nil {
		return nil, err
	}
	t1, err := simplelru.NewLRU(size, nil)
	if err != nil {
		return nil, err
	}
	t2, err := simplelru.NewLRU(size, nil)
	if err != nil {
		return nil, err
	}

	// Initialize the ARC
	c := &ARCCache{
		size: size,
		p:    0,
		t1:   t1,
		b1:   b1,
		t2:   t2,
		b2:   b2,
	}
	return c, nil
}

// Get looks up a key's value from the cache.
func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) {
	c.lock.Lock()
	defer c.lock.Unlock()

	// If the value is contained in T1 (recent), then
	// promote it to T2 (frequent)
	if val, ok := c.t1.Peek(key); ok {
		c.t1.Remove(key)
		c.t2.Add(key, val)
		return val, ok
	}

	// Check if the value is contained in T2 (frequent)
	if val, ok := c.t2.Get(key); ok {
		return val, ok
	}

	// No hit
	return nil, false
}

// Add adds a value to the cache.
func (c *ARCCache) Add(key, value interface{}) {
	c.lock.Lock()
	defer c.lock.Unlock()

	// Check if the value is contained in T1 (recent), and potentially
	// promote it to frequent T2
	if c.t1.Contains(key) {
		c.t1.Remove(key)
		c.t2.Add(key, value)
		return
	}

	// Check if the value is already in T2 (frequent) and update it
	if c.t2.Contains(key) {
		c.t2.Add(key, value)
		return
	}

	// Check if this value was recently evicted as part of the
	// recently used list
	if c.b1.Contains(key) {
		// T1 set is too small, increase P appropriately
		delta := 1
		b1Len := c.b1.Len()
		b2Len := c.b2.Len()
		if b2Len > b1Len {
			delta = b2Len / b1Len
		}
		if c.p+delta >= c.size {
			c.p = c.size
		} else {
			c.p += delta
		}

		// Potentially need to make room in the cache
		if c.t1.Len()+c.t2.Len() >= c.size {
			c.replace(false)
		}

		// Remove from B1
		c.b1.Remove(key)

		// Add the key to the frequently used list
		c.t2.Add(key, value)
		return
	}

	// Check if this value was recently evicted as part of the
	// frequently used list
	if c.b2.Contains(key) {
		// T2 set is too small, decrease P appropriately
		delta := 1
		b1Len := c.b1.Len()
		b2Len := c.b2.Len()
		if b1Len > b2Len {
			delta = b1Len / b2Len
		}
		if delta >= c.p {
			c.p = 0
		} else {
			c.p -= delta
		}

		// Potentially need to make room in the cache
		if c.t1.Len()+c.t2.Len() >= c.size {
			c.replace(true)
		}

		// Remove from B2
		c.b2.Remove(key)

		// Add the key to the frequently used list
		c.t2.Add(key, value)
		return
	}

	// Potentially need to make room in the cache
	if c.t1.Len()+c.t2.Len() >= c.size {
		c.replace(false)
	}

	// Keep the size of the ghost buffers trim
	if c.b1.Len() > c.size-c.p {
		c.b1.RemoveOldest()
	}
	if c.b2.Len() > c.p {
		c.b2.RemoveOldest()
	}

	// Add to the recently seen list
	c.t1.Add(key, value)
}

// replace is used to adaptively evict from either T1 or T2
// based on the current learned value of P
func (c *ARCCache) replace(b2ContainsKey bool) {
	t1Len := c.t1.Len()
	if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) {
		k, _, ok := c.t1.RemoveOldest()
		if ok {
			c.b1.Add(k, nil)
		}
	} else {
		k, _, ok := c.t2.RemoveOldest()
		if ok {
			c.b2.Add(k, nil)
		}
	}
}

// Len returns the number of cached entries
func (c *ARCCache) Len() int {
	c.lock.RLock()
	defer c.lock.RUnlock()
	return c.t1.Len() + c.t2.Len()
}

// Keys returns all the cached keys
func (c *ARCCache) Keys() []interface{} {
	c.lock.RLock()
	defer c.lock.RUnlock()
	k1 := c.t1.Keys()
	k2 := c.t2.Keys()
	return append(k1, k2...)
}

// Remove is used to purge a key from the cache
func (c *ARCCache) Remove(key interface{}) {
	c.lock.Lock()
	defer c.lock.Unlock()
	if c.t1.Remove(key) {
		return
	}
	if c.t2.Remove(key) {
		return
	}
	if c.b1.Remove(key) {
		return
	}
	if c.b2.Remove(key) {
		return
	}
}

// Purge is used to clear the cache
func (c *ARCCache) Purge() {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.t1.Purge()
	c.t2.Purge()
	c.b1.Purge()
	c.b2.Purge()
}

// Contains is used to check if the cache contains a key
// without updating recency or frequency.
func (c *ARCCache) Contains(key interface{}) bool {
	c.lock.RLock()
	defer c.lock.RUnlock()
	return c.t1.Contains(key) || c.t2.Contains(key)
}

// Peek is used to inspect the cache value of a key
// without updating recency or frequency.
func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) {
	c.lock.RLock()
	defer c.lock.RUnlock()
	if val, ok := c.t1.Peek(key); ok {
		return val, ok
	}
	return c.t2.Peek(key)
}
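A short usage sketch of the ARC API above (hypothetical key and size). The promotion from the recent list T1 to the frequent list T2 on the second access follows directly from the `Get` implementation:

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	arc, err := lru.NewARC(128)
	if err != nil {
		panic(err)
	}

	arc.Add("config", "v1") // first touch: lands in T1 (recent)
	if v, ok := arc.Get("config"); ok {
		fmt.Println("hit:", v) // second touch: promoted to T2 (frequent)
	}
	fmt.Println("entries:", arc.Len()) // 1
}
```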
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/golang-lru/doc.go
// Package lru provides three different LRU caches of varying sophistication.
//
// Cache is a simple LRU cache. It is based on the
// LRU implementation in groupcache:
// https://github.com/golang/groupcache/tree/master/lru
//
// TwoQueueCache tracks frequently used and recently used entries separately.
// This avoids a burst of accesses from taking out frequently used entries,
// at the cost of about 2x computational overhead and some extra bookkeeping.
//
// ARCCache is an adaptive replacement cache. It tracks recent evictions as
// well as recent usage in both the frequent and recent caches. Its
// computational overhead is comparable to TwoQueueCache, but the memory
// overhead is linear with the size of the cache.
//
// ARC has been patented by IBM, so do not use it if that is problematic for
// your program.
//
// All caches in this package take locks while operating, and are therefore
// thread-safe for consumers.
package lru
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/golang-lru/2q.go
package lru import ( "fmt" "sync" "github.com/hashicorp/golang-lru/simplelru" ) const ( // Default2QRecentRatio is the ratio of the 2Q cache dedicated // to recently added entries that have only been accessed once. Default2QRecentRatio = 0.25 // Default2QGhostEntries is the default ratio of ghost // entries kept to track entries recently evicted Default2QGhostEntries = 0.50 ) // TwoQueueCache is a thread-safe fixed size 2Q cache. // 2Q is an enhancement over the standard LRU cache // in that it tracks both frequently and recently used // entries separately. This avoids a burst in access to new // entries from evicting frequently used entries. It adds some // additional tracking overhead to the standard LRU cache, and is // computationally about 2x the cost, and adds some metadata over // head. The ARCCache is similar, but does not require setting any // parameters. type TwoQueueCache struct { size int recentSize int recent simplelru.LRUCache frequent simplelru.LRUCache recentEvict simplelru.LRUCache lock sync.RWMutex } // New2Q creates a new TwoQueueCache using the default // values for the parameters. func New2Q(size int) (*TwoQueueCache, error) { return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries) } // New2QParams creates a new TwoQueueCache using the provided // parameter values. func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) { if size <= 0 { return nil, fmt.Errorf("invalid size") } if recentRatio < 0.0 || recentRatio > 1.0 { return nil, fmt.Errorf("invalid recent ratio") } if ghostRatio < 0.0 || ghostRatio > 1.0 { return nil, fmt.Errorf("invalid ghost ratio") } // Determine the sub-sizes recentSize := int(float64(size) * recentRatio) evictSize := int(float64(size) * ghostRatio) // Allocate the LRUs recent, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } frequent, err := simplelru.NewLRU(size, nil) if err != nil { return nil, err } recentEvict, err := simplelru.NewLRU(evictSize, nil) if err != nil { return nil, err } // Initialize the cache c := &TwoQueueCache{ size: size, recentSize: recentSize, recent: recent, frequent: frequent, recentEvict: recentEvict, } return c, nil } // Get looks up a key's value from the cache. func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) { c.lock.Lock() defer c.lock.Unlock() // Check if this is a frequent value if val, ok := c.frequent.Get(key); ok { return val, ok } // If the value is contained in recent, then we // promote it to frequent if val, ok := c.recent.Peek(key); ok { c.recent.Remove(key) c.frequent.Add(key, val) return val, ok } // No hit return nil, false } // Add adds a value to the cache. 
func (c *TwoQueueCache) Add(key, value interface{}) { c.lock.Lock() defer c.lock.Unlock() // Check if the value is frequently used already, // and just update the value if c.frequent.Contains(key) { c.frequent.Add(key, value) return } // Check if the value is recently used, and promote // the value into the frequent list if c.recent.Contains(key) { c.recent.Remove(key) c.frequent.Add(key, value) return } // If the value was recently evicted, add it to the // frequently used list if c.recentEvict.Contains(key) { c.ensureSpace(true) c.recentEvict.Remove(key) c.frequent.Add(key, value) return } // Add to the recently seen list c.ensureSpace(false) c.recent.Add(key, value) return } // ensureSpace is used to ensure we have space in the cache func (c *TwoQueueCache) ensureSpace(recentEvict bool) { // If we have space, nothing to do recentLen := c.recent.Len() freqLen := c.frequent.Len() if recentLen+freqLen < c.size { return } // If the recent buffer is larger than // the target, evict from there if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) { k, _, _ := c.recent.RemoveOldest() c.recentEvict.Add(k, nil) return } // Remove from the frequent list otherwise c.frequent.RemoveOldest() } // Len returns the number of items in the cache. func (c *TwoQueueCache) Len() int { c.lock.RLock() defer c.lock.RUnlock() return c.recent.Len() + c.frequent.Len() } // Keys returns a slice of the keys in the cache. // The frequently used keys are first in the returned slice. func (c *TwoQueueCache) Keys() []interface{} { c.lock.RLock() defer c.lock.RUnlock() k1 := c.frequent.Keys() k2 := c.recent.Keys() return append(k1, k2...) } // Remove removes the provided key from the cache. func (c *TwoQueueCache) Remove(key interface{}) { c.lock.Lock() defer c.lock.Unlock() if c.frequent.Remove(key) { return } if c.recent.Remove(key) { return } if c.recentEvict.Remove(key) { return } } // Purge is used to completely clear the cache. func (c *TwoQueueCache) Purge() { c.lock.Lock() defer c.lock.Unlock() c.recent.Purge() c.frequent.Purge() c.recentEvict.Purge() } // Contains is used to check if the cache contains a key // without updating recency or frequency. func (c *TwoQueueCache) Contains(key interface{}) bool { c.lock.RLock() defer c.lock.RUnlock() return c.frequent.Contains(key) || c.recent.Contains(key) } // Peek is used to inspect the cache value of a key // without updating recency or frequency. func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) { c.lock.RLock() defer c.lock.RUnlock() if val, ok := c.frequent.Peek(key); ok { return val, ok } return c.recent.Peek(key) }
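A usage sketch of the 2Q API above with hypothetical values; `New2QParams(128, 0.25, 0.50)` is equivalent to `New2Q(128)` given the defaults defined at the top of the file:

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	cache, err := lru.New2QParams(128, 0.25, 0.50)
	if err != nil {
		panic(err)
	}

	cache.Add(1, "one")       // first touch: recent list
	cache.Add(1, "one again") // second touch: promoted to the frequent list

	if v, ok := cache.Get(1); ok {
		fmt.Println(v) // "one again"
	}
}
```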
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
package simplelru

import (
	"container/list"
	"errors"
)

// EvictCallback is used to get a callback when a cache entry is evicted
type EvictCallback func(key interface{}, value interface{})

// LRU implements a non-thread safe fixed size LRU cache
type LRU struct {
	size      int
	evictList *list.List
	items     map[interface{}]*list.Element
	onEvict   EvictCallback
}

// entry is used to hold a value in the evictList
type entry struct {
	key   interface{}
	value interface{}
}

// NewLRU constructs an LRU of the given size
func NewLRU(size int, onEvict EvictCallback) (*LRU, error) {
	if size <= 0 {
		return nil, errors.New("Must provide a positive size")
	}
	c := &LRU{
		size:      size,
		evictList: list.New(),
		items:     make(map[interface{}]*list.Element),
		onEvict:   onEvict,
	}
	return c, nil
}

// Purge is used to completely clear the cache.
func (c *LRU) Purge() {
	for k, v := range c.items {
		if c.onEvict != nil {
			c.onEvict(k, v.Value.(*entry).value)
		}
		delete(c.items, k)
	}
	c.evictList.Init()
}

// Add adds a value to the cache. Returns true if an eviction occurred.
func (c *LRU) Add(key, value interface{}) (evicted bool) {
	// Check for existing item
	if ent, ok := c.items[key]; ok {
		c.evictList.MoveToFront(ent)
		ent.Value.(*entry).value = value
		return false
	}

	// Add new item
	ent := &entry{key, value}
	entry := c.evictList.PushFront(ent)
	c.items[key] = entry

	// Verify size not exceeded
	evict := c.evictList.Len() > c.size
	if evict {
		c.removeOldest()
	}
	return evict
}

// Get looks up a key's value from the cache.
func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
	if ent, ok := c.items[key]; ok {
		c.evictList.MoveToFront(ent)
		if ent.Value.(*entry) == nil {
			return nil, false
		}
		return ent.Value.(*entry).value, true
	}
	return
}

// Contains checks if a key is in the cache, without updating the recent-ness
// or deleting it for being stale.
func (c *LRU) Contains(key interface{}) (ok bool) {
	_, ok = c.items[key]
	return ok
}

// Peek returns the key value (or undefined if not found) without updating
// the "recently used"-ness of the key.
func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) {
	var ent *list.Element
	if ent, ok = c.items[key]; ok {
		return ent.Value.(*entry).value, true
	}
	return nil, ok
}

// Remove removes the provided key from the cache, returning if the
// key was contained.
func (c *LRU) Remove(key interface{}) (present bool) {
	if ent, ok := c.items[key]; ok {
		c.removeElement(ent)
		return true
	}
	return false
}

// RemoveOldest removes the oldest item from the cache.
func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) {
	ent := c.evictList.Back()
	if ent != nil {
		c.removeElement(ent)
		kv := ent.Value.(*entry)
		return kv.key, kv.value, true
	}
	return nil, nil, false
}

// GetOldest returns the oldest entry
func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) {
	ent := c.evictList.Back()
	if ent != nil {
		kv := ent.Value.(*entry)
		return kv.key, kv.value, true
	}
	return nil, nil, false
}

// Keys returns a slice of the keys in the cache, from oldest to newest.
func (c *LRU) Keys() []interface{} {
	keys := make([]interface{}, len(c.items))
	i := 0
	for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() {
		keys[i] = ent.Value.(*entry).key
		i++
	}
	return keys
}

// Len returns the number of items in the cache.
func (c *LRU) Len() int {
	return c.evictList.Len()
}

// Resize changes the cache size.
func (c *LRU) Resize(size int) (evicted int) {
	diff := c.Len() - size
	if diff < 0 {
		diff = 0
	}
	for i := 0; i < diff; i++ {
		c.removeOldest()
	}
	c.size = size
	return diff
}

// removeOldest removes the oldest item from the cache.
func (c *LRU) removeOldest() {
	ent := c.evictList.Back()
	if ent != nil {
		c.removeElement(ent)
	}
}

// removeElement is used to remove a given list element from the cache
func (c *LRU) removeElement(e *list.Element) {
	c.evictList.Remove(e)
	kv := e.Value.(*entry)
	delete(c.items, kv.key)
	if c.onEvict != nil {
		c.onEvict(kv.key, kv.value)
	}
}
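simplelru is the non-thread-safe core that the `lru.Cache`, `TwoQueueCache`, and `ARCCache` wrappers above build on. A sketch of using it directly, with hypothetical keys and sizes, for single-goroutine code that wants to skip the locking:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/simplelru"
)

func main() {
	// Not safe for concurrent use; wrap it (as lru.Cache does) if you need locking.
	l, err := simplelru.NewLRU(2, func(key, value interface{}) {
		fmt.Printf("evict %v=%v\n", key, value)
	})
	if err != nil {
		panic(err)
	}

	l.Add("a", 1)
	l.Add("b", 2)
	l.Add("c", 3)        // evicts "a"
	fmt.Println(l.Len()) // 2

	// Shrinking the cache evicts the oldest entries and reports how many.
	fmt.Println(l.Resize(1)) // evicts "b", prints 1
}
```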
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
package simplelru

// LRUCache is the interface for simple LRU cache.
type LRUCache interface {
	// Adds a value to the cache, returns true if an eviction occurred and
	// updates the "recently used"-ness of the key.
	Add(key, value interface{}) bool

	// Returns key's value from the cache and
	// updates the "recently used"-ness of the key. #value, isFound
	Get(key interface{}) (value interface{}, ok bool)

	// Checks if a key exists in cache without updating the recent-ness.
	Contains(key interface{}) (ok bool)

	// Returns key's value without updating the "recently used"-ness of the key.
	Peek(key interface{}) (value interface{}, ok bool)

	// Removes a key from the cache.
	Remove(key interface{}) bool

	// Removes the oldest entry from cache.
	RemoveOldest() (interface{}, interface{}, bool)

	// Returns the oldest entry from the cache. #key, value, isFound
	GetOldest() (interface{}, interface{}, bool)

	// Returns a slice of the keys in the cache, from oldest to newest.
	Keys() []interface{}

	// Returns the number of items in the cache.
	Len() int

	// Clears all cache entries.
	Purge()

	// Resizes cache, returning number evicted
	Resize(int) int
}
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/hcl.go
// Package hcl decodes HCL into usable Go structures.
//
// hcl input can come in either pure HCL format or JSON format.
// It can be parsed into an AST, and then decoded into a structure,
// or it can be decoded directly from a string into a structure.
//
// If you choose to parse HCL into a raw AST, the benefit is that you
// can write custom visitor implementations to implement custom
// semantic checks. By default, HCL does not perform any semantic
// checks.
package hcl
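The package comment mentions decoding directly from a string into a structure. A sketch of what that looks like, assuming the v1 `hcl.Decode(out interface{}, in string) error` helper and `hcl:"..."` field tags; Decode lives in this package's decoder and is not shown in this excerpt:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

// Config is a hypothetical target structure.
type Config struct {
	Listen  string `hcl:"listen"`
	Verbose bool   `hcl:"verbose"`
}

func main() {
	input := `
listen  = "0.0.0.0:8080"
verbose = true
`
	var cfg Config
	// Decode parses the input (HCL or JSON) and fills cfg in one step.
	if err := hcl.Decode(&cfg, input); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {Listen:0.0.0.0:8080 Verbose:true}
}
```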
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/go.mod
module github.com/hashicorp/hcl

require github.com/davecgh/go-spew v1.1.1
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/README.md
# HCL

[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl)

HCL (HashiCorp Configuration Language) is a configuration language built
by HashiCorp. The goal of HCL is to build a structured configuration language
that is both human and machine friendly for use with command-line tools, but
specifically targeted towards DevOps tools, servers, etc.

HCL is also fully JSON compatible. That is, JSON can be used as completely
valid input to a system expecting HCL. This helps make systems interoperable
with other systems.

HCL is heavily inspired by [libucl](https://github.com/vstakhov/libucl),
nginx configuration, and other similar languages.

## Why?

A common question when viewing HCL is to ask the question: why not JSON,
YAML, etc.?

Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com)
used a variety of configuration languages from full programming languages
such as Ruby to complete data structure languages such as JSON. What we
learned is that some people wanted human-friendly configuration languages
and some people wanted machine-friendly languages.

JSON fits a nice balance in this, but is fairly verbose and most importantly
doesn't support comments. With YAML, we found that beginners had a really
hard time determining what the actual structure was, and ended up guessing
more often than not whether to use a hyphen, colon, etc. in order to
represent some configuration key.

Full programming languages such as Ruby enable complex behavior a
configuration language shouldn't usually allow, and also force people to
learn some set of Ruby.

Because of this, we decided to create our own configuration language that is
JSON-compatible. Our configuration language (HCL) is designed to be written
and modified by humans. The API for HCL allows JSON as an input so that it is
also machine-friendly (machines can generate JSON instead of trying to
generate HCL).

Our goal with HCL is not to alienate other configuration languages. It is
instead to provide HCL as a specialized language for our tools, and JSON as
the interoperability layer.

## Syntax

For a complete grammar, please see the parser itself. A high-level overview
of the syntax and grammar is listed here.

* Single line comments start with `#` or `//`

* Multi-line comments are wrapped in `/*` and `*/`. Nested block comments
  are not allowed. A multi-line comment (also known as a block comment)
  terminates at the first `*/` found.

* Values are assigned with the syntax `key = value` (whitespace doesn't
  matter). The value can be any primitive: a string, number, boolean,
  object, or list.

* Strings are double-quoted and can contain any UTF-8 characters.
  Example: `"Hello, World"`

* Multi-line strings start with `<<EOF` at the end of a line, and end
  with `EOF` on its own line ([here documents](https://en.wikipedia.org/wiki/Here_document)).
  Any text may be used in place of `EOF`. Example:

```
<<FOO
hello
world
FOO
```

* Numbers are assumed to be base 10. If you prefix a number with 0x,
  it is treated as a hexadecimal. If it is prefixed with 0, it is
  treated as an octal. Numbers can be in scientific notation: "1e10".

* Boolean values: `true`, `false`

* Arrays can be made by wrapping values in `[]`. Example:
  `["foo", "bar", 42]`. Arrays can contain primitives, other arrays,
  and objects. As an alternative, lists of objects can be created with
  repeated blocks, using this structure:

  ```hcl
  service {
      key = "value"
  }

  service {
      key = "value"
  }
  ```

Objects and nested objects are created using the structure shown below:

```
variable "ami" {
    description = "the AMI to use"
}
```

This would be equivalent to the following json:

```json
{
  "variable": {
    "ami": {
      "description": "the AMI to use"
    }
  }
}
```

## Thanks

Thanks to:

* [@vstakhov](https://github.com/vstakhov) - The original libucl parser
  and syntax that HCL was based off of.

* [@fatih](https://github.com/fatih) - The rewritten HCL parser
  in pure Go (no goyacc) and support for a printer.
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/Makefile
TEST?=./...

default: test

fmt: generate
	go fmt ./...

test: generate
	go get -t ./...
	go test $(TEST) $(TESTARGS)

generate:
	go generate ./...

updatedeps:
	go get -u golang.org/x/tools/cmd/stringer

.PHONY: default generate test updatedeps
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/parse.go
package hcl import ( "fmt" "github.com/hashicorp/hcl/hcl/ast" hclParser "github.com/hashicorp/hcl/hcl/parser" jsonParser "github.com/hashicorp/hcl/json/parser" ) // ParseBytes accepts as input byte slice and returns ast tree. // // Input can be either JSON or HCL func ParseBytes(in []byte) (*ast.File, error) { return parse(in) } // ParseString accepts input as a string and returns ast tree. func ParseString(input string) (*ast.File, error) { return parse([]byte(input)) } func parse(in []byte) (*ast.File, error) { switch lexMode(in) { case lexModeHcl: return hclParser.Parse(in) case lexModeJson: return jsonParser.Parse(in) } return nil, fmt.Errorf("unknown config format") } // Parse parses the given input and returns the root object. // // The input format can be either HCL or JSON. func Parse(input string) (*ast.File, error) { return parse([]byte(input)) }
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/lex.go
package hcl import ( "unicode" "unicode/utf8" ) type lexModeValue byte const ( lexModeUnknown lexModeValue = iota lexModeHcl lexModeJson ) // lexMode returns whether we're going to be parsing in JSON // mode or HCL mode. func lexMode(v []byte) lexModeValue { var ( r rune w int offset int ) for { r, w = utf8.DecodeRune(v[offset:]) offset += w if unicode.IsSpace(r) { continue } if r == '{' { return lexModeJson } break } return lexModeHcl }
8,716
0
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/LICENSE
Mozilla Public License, version 2.0

1. Definitions

1.1. “Contributor” means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software.

1.2. “Contributor Version” means the combination of the Contributions of others (if any) used by a Contributor and that particular Contributor’s Contribution.

1.3. “Contribution” means Covered Software of a particular Contributor.

1.4. “Covered Software” means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof.

1.5. “Incompatible With Secondary Licenses” means

a. that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or

b. that the Covered Software was made available under the terms of version 1.1 or earlier of the License, but not also under the terms of a Secondary License.

1.6. “Executable Form” means any form of the work other than Source Code Form.

1.7. “Larger Work” means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software.

1.8. “License” means this document.

1.9. “Licensable” means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently, any and all of the rights conveyed by this License.

1.10. “Modifications” means any of the following:

a. any file in Source Code Form that results from an addition to, deletion from, or modification of the contents of Covered Software; or

b. any new file in Source Code Form that contains any Covered Software.

1.11. “Patent Claims” of a Contributor means any patent claim(s), including without limitation, method, process, and apparatus claims, in any patent Licensable by such Contributor that would be infringed, but for the grant of the License, by the making, using, selling, offering for sale, having made, import, or transfer of either its Contributions or its Contributor Version.

1.12. “Secondary License” means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses.

1.13. “Source Code Form” means the form of the work preferred for making modifications.

1.14. “You” (or “Your”) means an individual or a legal entity exercising rights under this License. For legal entities, “You” includes any entity that controls, is controlled by, or is under common control with You. For purposes of this definition, “control” means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity.

2. License Grants and Conditions

2.1. Grants

Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license:

a. under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its Contributions, either on an unmodified basis, with Modifications, or as part of a Larger Work; and

b. under Patent Claims of such Contributor to make, use, sell, offer for sale, have made, import, and otherwise transfer either its Contributions or its Contributor Version.

2.2. Effective Date

The licenses granted in Section 2.1 with respect to any Contribution become effective for each Contribution on the date the Contributor first distributes such Contribution.

2.3. Limitations on Grant Scope

The licenses granted in this Section 2 are the only rights granted under this License. No additional rights or licenses will be implied from the distribution or licensing of Covered Software under this License. Notwithstanding Section 2.1(b) above, no patent license is granted by a Contributor:

a. for any code that a Contributor has removed from Covered Software; or

b. for infringements caused by: (i) Your and any other third party’s modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or

c. under Patent Claims infringed by Covered Software in the absence of its Contributions.

This License does not grant any rights in the trademarks, service marks, or logos of any Contributor (except as may be necessary to comply with the notice requirements in Section 3.4).

2.4. Subsequent Licenses

No Contributor makes additional grants as a result of Your choice to distribute the Covered Software under a subsequent version of this License (see Section 10.2) or under the terms of a Secondary License (if permitted under the terms of Section 3.3).

2.5. Representation

Each Contributor represents that the Contributor believes its Contributions are its original creation(s) or it has sufficient rights to grant the rights to its Contributions conveyed by this License.

2.6. Fair Use

This License is not intended to limit any rights You have under applicable copyright doctrines of fair use, fair dealing, or other equivalents.

2.7. Conditions

Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in Section 2.1.

3. Responsibilities

3.1. Distribution of Source Form

All distribution of Covered Software in Source Code Form, including any Modifications that You create or to which You contribute, must be under the terms of this License. You must inform recipients that the Source Code Form of the Covered Software is governed by the terms of this License, and how they can obtain a copy of this License. You may not attempt to alter or restrict the recipients’ rights in the Source Code Form.

3.2. Distribution of Executable Form

If You distribute Covered Software in Executable Form then:

a. such Covered Software must also be made available in Source Code Form, as described in Section 3.1, and You must inform recipients of the Executable Form how they can obtain a copy of such Source Code Form by reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and

b. You may distribute such Executable Form under the terms of this License, or sublicense it under different terms, provided that the license for the Executable Form does not attempt to limit or alter the recipients’ rights in the Source Code Form under this License.

3.3. Distribution of a Larger Work

You may create and distribute a Larger Work under terms of Your choice, provided that You also comply with the requirements of this License for the Covered Software. If the Larger Work is a combination of Covered Software with a work governed by one or more Secondary Licenses, and the Covered Software is not Incompatible With Secondary Licenses, this License permits You to additionally distribute such Covered Software under the terms of such Secondary License(s), so that the recipient of the Larger Work may, at their option, further distribute the Covered Software under the terms of either this License or such Secondary License(s).

3.4. Notices

You may not remove or alter the substance of any license notices (including copyright notices, patent notices, disclaimers of warranty, or limitations of liability) contained within the Source Code Form of the Covered Software, except that You may alter any license notices to the extent required to remedy known factual inaccuracies.

3.5. Application of Additional Terms

You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, You may do so only on Your own behalf, and not on behalf of any Contributor. You must make it absolutely clear that any such warranty, support, indemnity, or liability obligation is offered by You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any jurisdiction.

4. Inability to Comply Due to Statute or Regulation

If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Software due to statute, judicial order, or regulation then You must: (a) comply with the terms of this License to the maximum extent possible; and (b) describe the limitations and the code they affect. Such description must be placed in a text file included with all distributions of the Covered Software under this License. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it.

5. Termination

5.1. The rights granted under this License will terminate automatically if You fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor explicitly and finally terminates Your grants, and (b) on an ongoing basis, if such Contributor fails to notify You of the non-compliance by some reasonable means prior to 60 days after You have come back into compliance. Moreover, Your grants from a particular Contributor are reinstated on an ongoing basis if such Contributor notifies You of the non-compliance by some reasonable means, this is the first time You have received notice of non-compliance with this License from such Contributor, and You become compliant prior to 30 days after Your receipt of the notice.

5.2. If You initiate litigation against any entity by asserting a patent infringement claim (excluding declaratory judgment actions, counter-claims, and cross-claims) alleging that a Contributor Version directly or indirectly infringes any patent, then the rights granted to You by any and all Contributors for the Covered Software under Section 2.1 of this License shall terminate.

5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or Your distributors under this License prior to termination shall survive termination.

6. Disclaimer of Warranty

Covered Software is provided under this License on an “as is” basis, without warranty of any kind, either expressed, implied, or statutory, including, without limitation, warranties that the Covered Software is free of defects, merchantable, fit for a particular purpose or non-infringing. The entire risk as to the quality and performance of the Covered Software is with You. Should any Covered Software prove defective in any respect, You (not any Contributor) assume the cost of any necessary servicing, repair, or correction. This disclaimer of warranty constitutes an essential part of this License. No use of any Covered Software is authorized under this License except under this disclaimer.

7. Limitation of Liability

Under no circumstances and under no legal theory, whether tort (including negligence), contract, or otherwise, shall any Contributor, or anyone who distributes Covered Software as permitted above, be liable to You for any direct, indirect, special, incidental, or consequential damages of any character including, without limitation, damages for lost profits, loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. This limitation of liability shall not apply to liability for death or personal injury resulting from such party’s negligence to the extent applicable law prohibits such limitation. Some jurisdictions do not allow the exclusion or limitation of incidental or consequential damages, so this exclusion and limitation may not apply to You.

8. Litigation

Any litigation relating to this License may be brought only in the courts of a jurisdiction where the defendant maintains its principal place of business and such litigation shall be governed by laws of that jurisdiction, without reference to its conflict-of-law provisions. Nothing in this Section shall prevent a party’s ability to bring cross-claims or counter-claims.

9. Miscellaneous

This License represents the complete agreement concerning the subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not be used to construe this License against a Contributor.

10. Versions of the License

10.1. New Versions

Mozilla Foundation is the license steward. Except as provided in Section 10.3, no one other than the license steward has the right to modify or publish new versions of this License. Each version will be given a distinguishing version number.

10.2. Effect of New Versions

You may distribute the Covered Software under the terms of the version of the License under which You originally received the Covered Software, or under the terms of any subsequent version published by the license steward.

10.3. Modified Versions

If you create software not governed by this License, and you want to create a new license for such software, you may create and use a modified version of this License if you rename the license and remove any references to the name of the license steward (except to note that such modified license differs from this License).

10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses

If You choose to distribute Source Code Form that is Incompatible With Secondary Licenses under the terms of this version of the License, the notice described in Exhibit B of this License must be attached.

Exhibit A - Source Code Form License Notice

This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice.

You may add additional accurate notices of copyright ownership.

Exhibit B - “Incompatible With Secondary Licenses” Notice

This Source Code Form is “Incompatible With Secondary Licenses”, as defined by the Mozilla Public License, v. 2.0.
8,717
0
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/appveyor.yml
version: "build-{branch}-{build}" image: Visual Studio 2015 clone_folder: c:\gopath\src\github.com\hashicorp\hcl environment: GOPATH: c:\gopath init: - git config --global core.autocrlf false install: - cmd: >- echo %Path% go version go env go get -t ./... build_script: - cmd: go test -v ./...
8,718
0
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/go.sum
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
8,719
0
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/decoder.go
package hcl import ( "errors" "fmt" "reflect" "sort" "strconv" "strings" "github.com/hashicorp/hcl/hcl/ast" "github.com/hashicorp/hcl/hcl/parser" "github.com/hashicorp/hcl/hcl/token" ) // This is the tag to use with structures to have settings for HCL const tagName = "hcl" var ( // nodeType holds a reference to the type of ast.Node nodeType reflect.Type = findNodeType() ) // Unmarshal accepts a byte slice as input and writes the // data to the value pointed to by v. func Unmarshal(bs []byte, v interface{}) error { root, err := parse(bs) if err != nil { return err } return DecodeObject(v, root) } // Decode reads the given input and decodes it into the structure // given by `out`. func Decode(out interface{}, in string) error { obj, err := Parse(in) if err != nil { return err } return DecodeObject(out, obj) } // DecodeObject is a lower-level version of Decode. It decodes a // raw Object into the given output. func DecodeObject(out interface{}, n ast.Node) error { val := reflect.ValueOf(out) if val.Kind() != reflect.Ptr { return errors.New("result must be a pointer") } // If we have the file, we really decode the root node if f, ok := n.(*ast.File); ok { n = f.Node } var d decoder return d.decode("root", n, val.Elem()) } type decoder struct { stack []reflect.Kind } func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error { k := result // If we have an interface with a valid value, we use that // for the check. if result.Kind() == reflect.Interface { elem := result.Elem() if elem.IsValid() { k = elem } } // Push current onto stack unless it is an interface. if k.Kind() != reflect.Interface { d.stack = append(d.stack, k.Kind()) // Schedule a pop defer func() { d.stack = d.stack[:len(d.stack)-1] }() } switch k.Kind() { case reflect.Bool: return d.decodeBool(name, node, result) case reflect.Float32, reflect.Float64: return d.decodeFloat(name, node, result) case reflect.Int, reflect.Int32, reflect.Int64: return d.decodeInt(name, node, result) case reflect.Interface: // When we see an interface, we make our own thing return d.decodeInterface(name, node, result) case reflect.Map: return d.decodeMap(name, node, result) case reflect.Ptr: return d.decodePtr(name, node, result) case reflect.Slice: return d.decodeSlice(name, node, result) case reflect.String: return d.decodeString(name, node, result) case reflect.Struct: return d.decodeStruct(name, node, result) default: return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()), } } } func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error { switch n := node.(type) { case *ast.LiteralType: if n.Token.Type == token.BOOL { v, err := strconv.ParseBool(n.Token.Text) if err != nil { return err } result.Set(reflect.ValueOf(v)) return nil } } return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: unknown type %T", name, node), } } func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error { switch n := node.(type) { case *ast.LiteralType: if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER { v, err := strconv.ParseFloat(n.Token.Text, 64) if err != nil { return err } result.Set(reflect.ValueOf(v).Convert(result.Type())) return nil } } return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: unknown type %T", name, node), } } func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error { switch n := node.(type) { case *ast.LiteralType: switch n.Token.Type { case token.NUMBER: v, err 
:= strconv.ParseInt(n.Token.Text, 0, 0) if err != nil { return err } if result.Kind() == reflect.Interface { result.Set(reflect.ValueOf(int(v))) } else { result.SetInt(v) } return nil case token.STRING: v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0) if err != nil { return err } if result.Kind() == reflect.Interface { result.Set(reflect.ValueOf(int(v))) } else { result.SetInt(v) } return nil } } return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: unknown type %T", name, node), } } func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error { // When we see an ast.Node, we retain the value to enable deferred decoding. // Very useful in situations where we want to preserve ast.Node information // like Pos if result.Type() == nodeType && result.CanSet() { result.Set(reflect.ValueOf(node)) return nil } var set reflect.Value redecode := true // For testing types, ObjectType should just be treated as a list. We // set this to a temporary var because we want to pass in the real node. testNode := node if ot, ok := node.(*ast.ObjectType); ok { testNode = ot.List } switch n := testNode.(type) { case *ast.ObjectList: // If we're at the root or we're directly within a slice, then we // decode objects into map[string]interface{}, otherwise we decode // them into lists. if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { var temp map[string]interface{} tempVal := reflect.ValueOf(temp) result := reflect.MakeMap( reflect.MapOf( reflect.TypeOf(""), tempVal.Type().Elem())) set = result } else { var temp []map[string]interface{} tempVal := reflect.ValueOf(temp) result := reflect.MakeSlice( reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items)) set = result } case *ast.ObjectType: // If we're at the root or we're directly within a slice, then we // decode objects into map[string]interface{}, otherwise we decode // them into lists. if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { var temp map[string]interface{} tempVal := reflect.ValueOf(temp) result := reflect.MakeMap( reflect.MapOf( reflect.TypeOf(""), tempVal.Type().Elem())) set = result } else { var temp []map[string]interface{} tempVal := reflect.ValueOf(temp) result := reflect.MakeSlice( reflect.SliceOf(tempVal.Type().Elem()), 0, 1) set = result } case *ast.ListType: var temp []interface{} tempVal := reflect.ValueOf(temp) result := reflect.MakeSlice( reflect.SliceOf(tempVal.Type().Elem()), 0, 0) set = result case *ast.LiteralType: switch n.Token.Type { case token.BOOL: var result bool set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) case token.FLOAT: var result float64 set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) case token.NUMBER: var result int set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) case token.STRING, token.HEREDOC: set = reflect.Indirect(reflect.New(reflect.TypeOf(""))) default: return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node), } } default: return fmt.Errorf( "%s: cannot decode into interface: %T", name, node) } // Set the result to what its supposed to be, then reset // result so we don't reflect into this method anymore. result.Set(set) if redecode { // Revisit the node so that we can use the newly instantiated // thing and populate it. 
if err := d.decode(name, node, result); err != nil { return err } } return nil } func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error { if item, ok := node.(*ast.ObjectItem); ok { node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} } if ot, ok := node.(*ast.ObjectType); ok { node = ot.List } n, ok := node.(*ast.ObjectList) if !ok { return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: not an object type for map (%T)", name, node), } } // If we have an interface, then we can address the interface, // but not the slice itself, so get the element but set the interface set := result if result.Kind() == reflect.Interface { result = result.Elem() } resultType := result.Type() resultElemType := resultType.Elem() resultKeyType := resultType.Key() if resultKeyType.Kind() != reflect.String { return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: map must have string keys", name), } } // Make a map if it is nil resultMap := result if result.IsNil() { resultMap = reflect.MakeMap( reflect.MapOf(resultKeyType, resultElemType)) } // Go through each element and decode it. done := make(map[string]struct{}) for _, item := range n.Items { if item.Val == nil { continue } // github.com/hashicorp/terraform/issue/5740 if len(item.Keys) == 0 { return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: map must have string keys", name), } } // Get the key we're dealing with, which is the first item keyStr := item.Keys[0].Token.Value().(string) // If we've already processed this key, then ignore it if _, ok := done[keyStr]; ok { continue } // Determine the value. If we have more than one key, then we // get the objectlist of only these keys. itemVal := item.Val if len(item.Keys) > 1 { itemVal = n.Filter(keyStr) done[keyStr] = struct{}{} } // Make the field name fieldName := fmt.Sprintf("%s.%s", name, keyStr) // Get the key/value as reflection values key := reflect.ValueOf(keyStr) val := reflect.Indirect(reflect.New(resultElemType)) // If we have a pre-existing value in the map, use that oldVal := resultMap.MapIndex(key) if oldVal.IsValid() { val.Set(oldVal) } // Decode! if err := d.decode(fieldName, itemVal, val); err != nil { return err } // Set the value on the map resultMap.SetMapIndex(key, val) } // Set the final map if we can set.Set(resultMap) return nil } func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error { // Create an element of the concrete (non pointer) type and decode // into that. Then set the value of the pointer to this type. 
resultType := result.Type() resultElemType := resultType.Elem() val := reflect.New(resultElemType) if err := d.decode(name, node, reflect.Indirect(val)); err != nil { return err } result.Set(val) return nil } func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error { // If we have an interface, then we can address the interface, // but not the slice itself, so get the element but set the interface set := result if result.Kind() == reflect.Interface { result = result.Elem() } // Create the slice if it isn't nil resultType := result.Type() resultElemType := resultType.Elem() if result.IsNil() { resultSliceType := reflect.SliceOf(resultElemType) result = reflect.MakeSlice( resultSliceType, 0, 0) } // Figure out the items we'll be copying into the slice var items []ast.Node switch n := node.(type) { case *ast.ObjectList: items = make([]ast.Node, len(n.Items)) for i, item := range n.Items { items[i] = item } case *ast.ObjectType: items = []ast.Node{n} case *ast.ListType: items = n.List default: return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("unknown slice type: %T", node), } } for i, item := range items { fieldName := fmt.Sprintf("%s[%d]", name, i) // Decode val := reflect.Indirect(reflect.New(resultElemType)) // if item is an object that was decoded from ambiguous JSON and // flattened, make sure it's expanded if it needs to decode into a // defined structure. item := expandObject(item, val) if err := d.decode(fieldName, item, val); err != nil { return err } // Append it onto the slice result = reflect.Append(result, val) } set.Set(result) return nil } // expandObject detects if an ambiguous JSON object was flattened to a List which // should be decoded into a struct, and expands the ast to properly deocode. func expandObject(node ast.Node, result reflect.Value) ast.Node { item, ok := node.(*ast.ObjectItem) if !ok { return node } elemType := result.Type() // our target type must be a struct switch elemType.Kind() { case reflect.Ptr: switch elemType.Elem().Kind() { case reflect.Struct: //OK default: return node } case reflect.Struct: //OK default: return node } // A list value will have a key and field name. If it had more fields, // it wouldn't have been flattened. if len(item.Keys) != 2 { return node } keyToken := item.Keys[0].Token item.Keys = item.Keys[1:] // we need to un-flatten the ast enough to decode newNode := &ast.ObjectItem{ Keys: []*ast.ObjectKey{ &ast.ObjectKey{ Token: keyToken, }, }, Val: &ast.ObjectType{ List: &ast.ObjectList{ Items: []*ast.ObjectItem{item}, }, }, } return newNode } func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error { switch n := node.(type) { case *ast.LiteralType: switch n.Token.Type { case token.NUMBER: result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type())) return nil case token.STRING, token.HEREDOC: result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type())) return nil } } return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: unknown type for string %T", name, node), } } func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error { var item *ast.ObjectItem if it, ok := node.(*ast.ObjectItem); ok { item = it node = it.Val } if ot, ok := node.(*ast.ObjectType); ok { node = ot.List } // Handle the special case where the object itself is a literal. Previously // the yacc parser would always ensure top-level elements were arrays. 
The new // parser does not make the same guarantees, thus we need to convert any // top-level literal elements into a list. if _, ok := node.(*ast.LiteralType); ok && item != nil { node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} } list, ok := node.(*ast.ObjectList) if !ok { return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node), } } // This slice will keep track of all the structs we'll be decoding. // There can be more than one struct if there are embedded structs // that are squashed. structs := make([]reflect.Value, 1, 5) structs[0] = result // Compile the list of all the fields that we're going to be decoding // from all the structs. type field struct { field reflect.StructField val reflect.Value } fields := []field{} for len(structs) > 0 { structVal := structs[0] structs = structs[1:] structType := structVal.Type() for i := 0; i < structType.NumField(); i++ { fieldType := structType.Field(i) tagParts := strings.Split(fieldType.Tag.Get(tagName), ",") // Ignore fields with tag name "-" if tagParts[0] == "-" { continue } if fieldType.Anonymous { fieldKind := fieldType.Type.Kind() if fieldKind != reflect.Struct { return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: unsupported type to struct: %s", fieldType.Name, fieldKind), } } // We have an embedded field. We "squash" the fields down // if specified in the tag. squash := false for _, tag := range tagParts[1:] { if tag == "squash" { squash = true break } } if squash { structs = append( structs, result.FieldByName(fieldType.Name)) continue } } // Normal struct field, store it away fields = append(fields, field{fieldType, structVal.Field(i)}) } } usedKeys := make(map[string]struct{}) decodedFields := make([]string, 0, len(fields)) decodedFieldsVal := make([]reflect.Value, 0) unusedKeysVal := make([]reflect.Value, 0) for _, f := range fields { field, fieldValue := f.field, f.val if !fieldValue.IsValid() { // This should never happen panic("field is not valid") } // If we can't set the field, then it is unexported or something, // and we just continue onwards. if !fieldValue.CanSet() { continue } fieldName := field.Name tagValue := field.Tag.Get(tagName) tagParts := strings.SplitN(tagValue, ",", 2) if len(tagParts) >= 2 { switch tagParts[1] { case "decodedFields": decodedFieldsVal = append(decodedFieldsVal, fieldValue) continue case "key": if item == nil { return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: %s asked for 'key', impossible", name, fieldName), } } fieldValue.SetString(item.Keys[0].Token.Value().(string)) continue case "unusedKeys": unusedKeysVal = append(unusedKeysVal, fieldValue) continue } } if tagParts[0] != "" { fieldName = tagParts[0] } // Determine the element we'll use to decode. If it is a single // match (only object with the field), then we decode it exactly. // If it is a prefix match, then we decode the matches. filter := list.Filter(fieldName) prefixMatches := filter.Children() matches := filter.Elem() if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 { continue } // Track the used key usedKeys[fieldName] = struct{}{} // Create the field name and decode. We range over the elements // because we actually want the value. 
fieldName = fmt.Sprintf("%s.%s", name, fieldName) if len(prefixMatches.Items) > 0 { if err := d.decode(fieldName, prefixMatches, fieldValue); err != nil { return err } } for _, match := range matches.Items { var decodeNode ast.Node = match.Val if ot, ok := decodeNode.(*ast.ObjectType); ok { decodeNode = &ast.ObjectList{Items: ot.List.Items} } if err := d.decode(fieldName, decodeNode, fieldValue); err != nil { return err } } decodedFields = append(decodedFields, field.Name) } if len(decodedFieldsVal) > 0 { // Sort it so that it is deterministic sort.Strings(decodedFields) for _, v := range decodedFieldsVal { v.Set(reflect.ValueOf(decodedFields)) } } return nil } // findNodeType returns the type of ast.Node func findNodeType() reflect.Type { var nodeContainer struct { Node ast.Node } value := reflect.ValueOf(nodeContainer).FieldByName("Node") return value.Type() }
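For orientation, a minimal usage sketch of the decoder above (not part of the vendored file): it decodes an HCL document into a struct via the `hcl` struct tag that decoder.go registers as tagName. The Config type, its fields, and the input string are hypothetical examples, not anything defined by the library.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

// Config is an illustrative target type; the "hcl" tags correspond to
// the tagName constant declared in decoder.go.
type Config struct {
	Name    string `hcl:"name"`
	Workers int    `hcl:"workers"`
}

func main() {
	src := "name = \"demo\"\nworkers = 4"

	var c Config
	// Decode parses the input and then runs DecodeObject on the root node.
	if err := hcl.Decode(&c, src); err != nil {
		panic(err)
	}
	fmt.Println(c.Name, c.Workers) // demo 4
}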
8,720
0
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/.travis.yml
sudo: false

language: go

go:
  - 1.x
  - tip

branches:
  only:
    - master

script: make test
8,721
0
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/hcl
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
// Package ast declares the types used to represent syntax trees for HCL
// (HashiCorp Configuration Language)
package ast

import (
	"fmt"
	"strings"

	"github.com/hashicorp/hcl/hcl/token"
)

// Node is an element in the abstract syntax tree.
type Node interface {
	node()
	Pos() token.Pos
}

func (File) node()         {}
func (ObjectList) node()   {}
func (ObjectKey) node()    {}
func (ObjectItem) node()   {}
func (Comment) node()      {}
func (CommentGroup) node() {}
func (ObjectType) node()   {}
func (LiteralType) node()  {}
func (ListType) node()     {}

// File represents a single HCL file
type File struct {
	Node     Node            // usually a *ObjectList
	Comments []*CommentGroup // list of all comments in the source
}

func (f *File) Pos() token.Pos {
	return f.Node.Pos()
}

// ObjectList represents a list of ObjectItems. An HCL file itself is an
// ObjectList.
type ObjectList struct {
	Items []*ObjectItem
}

func (o *ObjectList) Add(item *ObjectItem) {
	o.Items = append(o.Items, item)
}

// Filter filters out the objects with the given key list as a prefix.
//
// The returned list of objects contain ObjectItems where the keys have
// this prefix already stripped off. This might result in objects with
// zero-length key lists if they have no children.
//
// If no matches are found, an empty ObjectList (non-nil) is returned.
func (o *ObjectList) Filter(keys ...string) *ObjectList {
	var result ObjectList
	for _, item := range o.Items {
		// If there aren't enough keys, then ignore this
		if len(item.Keys) < len(keys) {
			continue
		}

		match := true
		for i, key := range item.Keys[:len(keys)] {
			key := key.Token.Value().(string)
			if key != keys[i] && !strings.EqualFold(key, keys[i]) {
				match = false
				break
			}
		}
		if !match {
			continue
		}

		// Strip off the prefix from the children
		newItem := *item
		newItem.Keys = newItem.Keys[len(keys):]
		result.Add(&newItem)
	}

	return &result
}

// Children returns further nested objects (key length > 0) within this
// ObjectList. This should be used with Filter to get at child items.
func (o *ObjectList) Children() *ObjectList {
	var result ObjectList
	for _, item := range o.Items {
		if len(item.Keys) > 0 {
			result.Add(item)
		}
	}

	return &result
}

// Elem returns items in the list that are direct element assignments
// (key length == 0). This should be used with Filter to get at elements.
func (o *ObjectList) Elem() *ObjectList {
	var result ObjectList
	for _, item := range o.Items {
		if len(item.Keys) == 0 {
			result.Add(item)
		}
	}

	return &result
}

func (o *ObjectList) Pos() token.Pos {
	// always returns the uninitialized position
	return o.Items[0].Pos()
}

// ObjectItem represents a HCL Object Item. An item is represented with a key
// (or keys). It can be an assignment or an object (both normal and nested)
type ObjectItem struct {
	// keys is only one length long if it's of type assignment. If it's a
	// nested object it can be larger than one. In that case "assign" is
	// invalid as there is no assignments for a nested object.
	Keys []*ObjectKey

	// assign contains the position of "=", if any
	Assign token.Pos

	// val is the item itself. It can be an object, list, number, bool or a
	// string. If key length is larger than one, val can be only of type
	// Object.
	Val Node

	LeadComment *CommentGroup // associated lead comment
	LineComment *CommentGroup // associated line comment
}

func (o *ObjectItem) Pos() token.Pos {
	// I'm not entirely sure what causes this, but removing this causes
	// a test failure. We should investigate at some point.
	if len(o.Keys) == 0 {
		return token.Pos{}
	}

	return o.Keys[0].Pos()
}

// ObjectKeys are either an identifier or of type string.
type ObjectKey struct {
	Token token.Token
}

func (o *ObjectKey) Pos() token.Pos {
	return o.Token.Pos
}

// LiteralType represents a literal of basic type. Valid types are:
// token.NUMBER, token.FLOAT, token.BOOL and token.STRING
type LiteralType struct {
	Token token.Token

	// comment types, only used when in a list
	LeadComment *CommentGroup
	LineComment *CommentGroup
}

func (l *LiteralType) Pos() token.Pos {
	return l.Token.Pos
}

// ListType represents a HCL List type
type ListType struct {
	Lbrack token.Pos // position of "["
	Rbrack token.Pos // position of "]"
	List   []Node    // the elements in lexical order
}

func (l *ListType) Pos() token.Pos {
	return l.Lbrack
}

func (l *ListType) Add(node Node) {
	l.List = append(l.List, node)
}

// ObjectType represents a HCL Object Type
type ObjectType struct {
	Lbrace token.Pos   // position of "{"
	Rbrace token.Pos   // position of "}"
	List   *ObjectList // the nodes in lexical order
}

func (o *ObjectType) Pos() token.Pos {
	return o.Lbrace
}

// Comment node represents a single //, # style or /*- style comment
type Comment struct {
	Start token.Pos // position of / or #
	Text  string
}

func (c *Comment) Pos() token.Pos {
	return c.Start
}

// CommentGroup node represents a sequence of comments with no other tokens and
// no empty lines between.
type CommentGroup struct {
	List []*Comment // len(List) > 0
}

func (c *CommentGroup) Pos() token.Pos {
	return c.List[0].Pos()
}

//-------------------------------------------------------------------
// GoStringer
//-------------------------------------------------------------------

func (o *ObjectKey) GoString() string  { return fmt.Sprintf("*%#v", *o) }
func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) }
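Since Filter, Children, and Elem are easy to mix up, here is a small, self-contained sketch of how they compose; the configuration keys used are made up for illustration.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	src := "port = 80\nservice \"web\" { port = 81 }"

	f, err := parser.Parse([]byte(src))
	if err != nil {
		panic(err)
	}
	list := f.Node.(*ast.ObjectList)

	// Filter strips the matched prefix, so a direct assignment ends up
	// with zero keys (an Elem) while a nested object keeps its label keys
	// (a Child).
	fmt.Println(len(list.Filter("port").Elem().Items))        // 1
	fmt.Println(len(list.Filter("service").Children().Items)) // 1
}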
8,722
0
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/hcl
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
package ast

import "fmt"

// WalkFunc describes a function to be called for each node during a Walk. The
// returned node can be used to rewrite the AST. Walking stops when the
// returned bool is false.
type WalkFunc func(Node) (Node, bool)

// Walk traverses an AST in depth-first order: It starts by calling fn(node);
// node must not be nil. If fn returns true, Walk invokes fn recursively for
// each of the non-nil children of node, followed by a call of fn(nil). The
// returned node of fn can be used to rewrite the passed node to fn.
func Walk(node Node, fn WalkFunc) Node {
	rewritten, ok := fn(node)
	if !ok {
		return rewritten
	}

	switch n := node.(type) {
	case *File:
		n.Node = Walk(n.Node, fn)
	case *ObjectList:
		for i, item := range n.Items {
			n.Items[i] = Walk(item, fn).(*ObjectItem)
		}
	case *ObjectKey:
		// nothing to do
	case *ObjectItem:
		for i, k := range n.Keys {
			n.Keys[i] = Walk(k, fn).(*ObjectKey)
		}
		if n.Val != nil {
			n.Val = Walk(n.Val, fn)
		}
	case *LiteralType:
		// nothing to do
	case *ListType:
		for i, l := range n.List {
			n.List[i] = Walk(l, fn)
		}
	case *ObjectType:
		n.List = Walk(n.List, fn).(*ObjectList)
	default:
		// should we panic here?
		fmt.Printf("unknown type: %T\n", n)
	}

	fn(nil)
	return rewritten
}
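As a sketch of how Walk is typically driven (the input document and the counting logic are illustrative, not part of the library):

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	f, err := parser.Parse([]byte("a = 1\nb = \"two\""))
	if err != nil {
		panic(err)
	}

	// Count literal nodes; returning the node unchanged with ok=true
	// keeps the AST intact and continues the traversal. Note the callback
	// also receives nil after each node's children are visited.
	count := 0
	ast.Walk(f.Node, func(n ast.Node) (ast.Node, bool) {
		if _, ok := n.(*ast.LiteralType); ok {
			count++
		}
		return n, true
	})
	fmt.Println("literals:", count) // literals: 2
}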
8,723
0
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/hcl
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
package strconv

import (
	"errors"
	"unicode/utf8"
)

// ErrSyntax indicates that a value does not have the right syntax for the target type.
var ErrSyntax = errors.New("invalid syntax")

// Unquote interprets s as a single-quoted, double-quoted,
// or backquoted Go string literal, returning the string value
// that s quotes. (If s is single-quoted, it would be a Go
// character literal; Unquote returns the corresponding
// one-character string.)
func Unquote(s string) (t string, err error) {
	n := len(s)
	if n < 2 {
		return "", ErrSyntax
	}
	quote := s[0]
	if quote != s[n-1] {
		return "", ErrSyntax
	}
	s = s[1 : n-1]

	if quote != '"' {
		return "", ErrSyntax
	}
	if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') {
		return "", ErrSyntax
	}

	// Is it trivial? Avoid allocation.
	if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') {
		switch quote {
		case '"':
			return s, nil
		case '\'':
			r, size := utf8.DecodeRuneInString(s)
			if size == len(s) && (r != utf8.RuneError || size != 1) {
				return s, nil
			}
		}
	}

	var runeTmp [utf8.UTFMax]byte
	buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
	for len(s) > 0 {
		// If we're starting a '${}' then let it through un-unquoted.
		// Specifically: we don't unquote any characters within the `${}`
		// section.
		if s[0] == '$' && len(s) > 1 && s[1] == '{' {
			buf = append(buf, '$', '{')
			s = s[2:]

			// Continue reading until we find the closing brace, copying as-is
			braces := 1
			for len(s) > 0 && braces > 0 {
				r, size := utf8.DecodeRuneInString(s)
				if r == utf8.RuneError {
					return "", ErrSyntax
				}

				s = s[size:]

				n := utf8.EncodeRune(runeTmp[:], r)
				buf = append(buf, runeTmp[:n]...)

				switch r {
				case '{':
					braces++
				case '}':
					braces--
				}
			}
			if braces != 0 {
				return "", ErrSyntax
			}
			if len(s) == 0 {
				// If there's no string left, we're done!
				break
			} else {
				// If there's more left, we need to pop back up to the top of the loop
				// in case there's another interpolation in this string.
				continue
			}
		}

		if s[0] == '\n' {
			return "", ErrSyntax
		}

		c, multibyte, ss, err := unquoteChar(s, quote)
		if err != nil {
			return "", err
		}
		s = ss
		if c < utf8.RuneSelf || !multibyte {
			buf = append(buf, byte(c))
		} else {
			n := utf8.EncodeRune(runeTmp[:], c)
			buf = append(buf, runeTmp[:n]...)
		}
		if quote == '\'' && len(s) != 0 {
			// single-quoted must be single character
			return "", ErrSyntax
		}
	}
	return string(buf), nil
}

// contains reports whether the string contains the byte c.
func contains(s string, c byte) bool {
	for i := 0; i < len(s); i++ {
		if s[i] == c {
			return true
		}
	}
	return false
}

func unhex(b byte) (v rune, ok bool) {
	c := rune(b)
	switch {
	case '0' <= c && c <= '9':
		return c - '0', true
	case 'a' <= c && c <= 'f':
		return c - 'a' + 10, true
	case 'A' <= c && c <= 'F':
		return c - 'A' + 10, true
	}
	return
}

func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
	// easy cases
	switch c := s[0]; {
	case c == quote && (quote == '\'' || quote == '"'):
		err = ErrSyntax
		return
	case c >= utf8.RuneSelf:
		r, size := utf8.DecodeRuneInString(s)
		return r, true, s[size:], nil
	case c != '\\':
		return rune(s[0]), false, s[1:], nil
	}

	// hard case: c is backslash
	if len(s) <= 1 {
		err = ErrSyntax
		return
	}
	c := s[1]
	s = s[2:]

	switch c {
	case 'a':
		value = '\a'
	case 'b':
		value = '\b'
	case 'f':
		value = '\f'
	case 'n':
		value = '\n'
	case 'r':
		value = '\r'
	case 't':
		value = '\t'
	case 'v':
		value = '\v'
	case 'x', 'u', 'U':
		n := 0
		switch c {
		case 'x':
			n = 2
		case 'u':
			n = 4
		case 'U':
			n = 8
		}
		var v rune
		if len(s) < n {
			err = ErrSyntax
			return
		}
		for j := 0; j < n; j++ {
			x, ok := unhex(s[j])
			if !ok {
				err = ErrSyntax
				return
			}
			v = v<<4 | x
		}
		s = s[n:]
		if c == 'x' {
			// single-byte string, possibly not UTF-8
			value = v
			break
		}
		if v > utf8.MaxRune {
			err = ErrSyntax
			return
		}
		value = v
		multibyte = true
	case '0', '1', '2', '3', '4', '5', '6', '7':
		v := rune(c) - '0'
		if len(s) < 2 {
			err = ErrSyntax
			return
		}
		for j := 0; j < 2; j++ { // one digit already; two more
			x := rune(s[j]) - '0'
			if x < 0 || x > 7 {
				err = ErrSyntax
				return
			}
			v = (v << 3) | x
		}
		s = s[2:]
		if v > 255 {
			err = ErrSyntax
			return
		}
		value = v
	case '\\':
		value = '\\'
	case '\'', '"':
		if c != quote {
			err = ErrSyntax
			return
		}
		value = rune(c)
	default:
		err = ErrSyntax
		return
	}
	tail = s
	return
}
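A small sketch of the behavior described above: escape sequences outside an interpolation are processed, while everything inside `${}` is copied through verbatim. The input string is arbitrary, and the package is aliased because its name collides with the standard library's strconv.

package main

import (
	"fmt"

	hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
)

func main() {
	// The \n escape is decoded, but the ${var.foo} body is left as-is.
	s, err := hclstrconv.Unquote(`"a\n${var.foo}"`)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", s) // "a\n${var.foo}"
}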
8,724
0
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/hcl
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/hcl/parser/error.go
package parser

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/token"
)

// PosError is a parse error that contains a position.
type PosError struct {
	Pos token.Pos
	Err error
}

func (e *PosError) Error() string {
	return fmt.Sprintf("At %s: %s", e.Pos, e.Err)
}
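A brief sketch of how PosError is constructed and rendered; the position values and message are arbitrary examples.

package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/hcl/hcl/parser"
	"github.com/hashicorp/hcl/hcl/token"
)

func main() {
	err := &parser.PosError{
		Pos: token.Pos{Line: 3, Column: 7},
		Err: errors.New("unexpected token"),
	}
	// Error() formats as "At <pos>: <err>".
	fmt.Println(err)
}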
8,725
0
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/hcl
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
// Package parser implements a parser for HCL (HashiCorp Configuration // Language) package parser import ( "bytes" "errors" "fmt" "strings" "github.com/hashicorp/hcl/hcl/ast" "github.com/hashicorp/hcl/hcl/scanner" "github.com/hashicorp/hcl/hcl/token" ) type Parser struct { sc *scanner.Scanner // Last read token tok token.Token commaPrev token.Token comments []*ast.CommentGroup leadComment *ast.CommentGroup // last lead comment lineComment *ast.CommentGroup // last line comment enableTrace bool indent int n int // buffer size (max = 1) } func newParser(src []byte) *Parser { return &Parser{ sc: scanner.New(src), } } // Parse returns the fully parsed source and returns the abstract syntax tree. func Parse(src []byte) (*ast.File, error) { // normalize all line endings // since the scanner and output only work with "\n" line endings, we may // end up with dangling "\r" characters in the parsed data. src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1) p := newParser(src) return p.Parse() } var errEofToken = errors.New("EOF token found") // Parse returns the fully parsed source and returns the abstract syntax tree. func (p *Parser) Parse() (*ast.File, error) { f := &ast.File{} var err, scerr error p.sc.Error = func(pos token.Pos, msg string) { scerr = &PosError{Pos: pos, Err: errors.New(msg)} } f.Node, err = p.objectList(false) if scerr != nil { return nil, scerr } if err != nil { return nil, err } f.Comments = p.comments return f, nil } // objectList parses a list of items within an object (generally k/v pairs). // The parameter" obj" tells this whether to we are within an object (braces: // '{', '}') or just at the top level. If we're within an object, we end // at an RBRACE. func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) { defer un(trace(p, "ParseObjectList")) node := &ast.ObjectList{} for { if obj { tok := p.scan() p.unscan() if tok.Type == token.RBRACE { break } } n, err := p.objectItem() if err == errEofToken { break // we are finished } // we don't return a nil node, because might want to use already // collected items. if err != nil { return node, err } node.Add(n) // object lists can be optionally comma-delimited e.g. 
when a list of maps // is being expressed, so a comma is allowed here - it's simply consumed tok := p.scan() if tok.Type != token.COMMA { p.unscan() } } return node, nil } func (p *Parser) consumeComment() (comment *ast.Comment, endline int) { endline = p.tok.Pos.Line // count the endline if it's multiline comment, ie starting with /* if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' { // don't use range here - no need to decode Unicode code points for i := 0; i < len(p.tok.Text); i++ { if p.tok.Text[i] == '\n' { endline++ } } } comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text} p.tok = p.sc.Scan() return } func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) { var list []*ast.Comment endline = p.tok.Pos.Line for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n { var comment *ast.Comment comment, endline = p.consumeComment() list = append(list, comment) } // add comment group to the comments list comments = &ast.CommentGroup{List: list} p.comments = append(p.comments, comments) return } // objectItem parses a single object item func (p *Parser) objectItem() (*ast.ObjectItem, error) { defer un(trace(p, "ParseObjectItem")) keys, err := p.objectKey() if len(keys) > 0 && err == errEofToken { // We ignore eof token here since it is an error if we didn't // receive a value (but we did receive a key) for the item. err = nil } if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE { // This is a strange boolean statement, but what it means is: // We have keys with no value, and we're likely in an object // (since RBrace ends an object). For this, we set err to nil so // we continue and get the error below of having the wrong value // type. err = nil // Reset the token type so we don't think it completed fine. See // objectType which uses p.tok.Type to check if we're done with // the object. p.tok.Type = token.EOF } if err != nil { return nil, err } o := &ast.ObjectItem{ Keys: keys, } if p.leadComment != nil { o.LeadComment = p.leadComment p.leadComment = nil } switch p.tok.Type { case token.ASSIGN: o.Assign = p.tok.Pos o.Val, err = p.object() if err != nil { return nil, err } case token.LBRACE: o.Val, err = p.objectType() if err != nil { return nil, err } default: keyStr := make([]string, 0, len(keys)) for _, k := range keys { keyStr = append(keyStr, k.Token.Text) } return nil, &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf( "key '%s' expected start of object ('{') or assignment ('=')", strings.Join(keyStr, " ")), } } // key=#comment // val if p.lineComment != nil { o.LineComment, p.lineComment = p.lineComment, nil } // do a look-ahead for line comment p.scan() if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil { o.LineComment = p.lineComment p.lineComment = nil } p.unscan() return o, nil } // objectKey parses an object key and returns a ObjectKey AST func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { keyCount := 0 keys := make([]*ast.ObjectKey, 0) for { tok := p.scan() switch tok.Type { case token.EOF: // It is very important to also return the keys here as well as // the error. This is because we need to be able to tell if we // did parse keys prior to finding the EOF, or if we just found // a bare EOF. return keys, errEofToken case token.ASSIGN: // assignment or object only, but not nested objects. 
this is not // allowed: `foo bar = {}` if keyCount > 1 { return nil, &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type), } } if keyCount == 0 { return nil, &PosError{ Pos: p.tok.Pos, Err: errors.New("no object keys found!"), } } return keys, nil case token.LBRACE: var err error // If we have no keys, then it is a syntax error. i.e. {{}} is not // allowed. if len(keys) == 0 { err = &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type), } } // object return keys, err case token.IDENT, token.STRING: keyCount++ keys = append(keys, &ast.ObjectKey{Token: p.tok}) case token.ILLEGAL: return keys, &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf("illegal character"), } default: return keys, &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type), } } } } // object parses any type of object, such as number, bool, string, object or // list. func (p *Parser) object() (ast.Node, error) { defer un(trace(p, "ParseType")) tok := p.scan() switch tok.Type { case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC: return p.literalType() case token.LBRACE: return p.objectType() case token.LBRACK: return p.listType() case token.COMMENT: // implement comment case token.EOF: return nil, errEofToken } return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf("Unknown token: %+v", tok), } } // objectType parses an object type and returns a ObjectType AST func (p *Parser) objectType() (*ast.ObjectType, error) { defer un(trace(p, "ParseObjectType")) // we assume that the currently scanned token is a LBRACE o := &ast.ObjectType{ Lbrace: p.tok.Pos, } l, err := p.objectList(true) // if we hit RBRACE, we are good to go (means we parsed all Items), if it's // not a RBRACE, it's an syntax error and we just return it. 
if err != nil && p.tok.Type != token.RBRACE { return nil, err } // No error, scan and expect the ending to be a brace if tok := p.scan(); tok.Type != token.RBRACE { return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type), } } o.List = l o.Rbrace = p.tok.Pos // advanced via parseObjectList return o, nil } // listType parses a list type and returns a ListType AST func (p *Parser) listType() (*ast.ListType, error) { defer un(trace(p, "ParseListType")) // we assume that the currently scanned token is a LBRACK l := &ast.ListType{ Lbrack: p.tok.Pos, } needComma := false for { tok := p.scan() if needComma { switch tok.Type { case token.COMMA, token.RBRACK: default: return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf( "error parsing list, expected comma or list end, got: %s", tok.Type), } } } switch tok.Type { case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: node, err := p.literalType() if err != nil { return nil, err } // If there is a lead comment, apply it if p.leadComment != nil { node.LeadComment = p.leadComment p.leadComment = nil } l.Add(node) needComma = true case token.COMMA: // get next list item or we are at the end // do a look-ahead for line comment p.scan() if p.lineComment != nil && len(l.List) > 0 { lit, ok := l.List[len(l.List)-1].(*ast.LiteralType) if ok { lit.LineComment = p.lineComment l.List[len(l.List)-1] = lit p.lineComment = nil } } p.unscan() needComma = false continue case token.LBRACE: // Looks like a nested object, so parse it out node, err := p.objectType() if err != nil { return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf( "error while trying to parse object within list: %s", err), } } l.Add(node) needComma = true case token.LBRACK: node, err := p.listType() if err != nil { return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf( "error while trying to parse list within list: %s", err), } } l.Add(node) case token.RBRACK: // finished l.Rbrack = p.tok.Pos return l, nil default: return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type), } } } } // literalType parses a literal type and returns a LiteralType AST func (p *Parser) literalType() (*ast.LiteralType, error) { defer un(trace(p, "ParseLiteral")) return &ast.LiteralType{ Token: p.tok, }, nil } // scan returns the next token from the underlying scanner. If a token has // been unscanned then read that instead. In the process, it collects any // comment groups encountered, and remembers the last lead and line comments. func (p *Parser) scan() token.Token { // If we have a token on the buffer, then return it. if p.n != 0 { p.n = 0 return p.tok } // Otherwise read the next token from the scanner and Save it to the buffer // in case we unscan later. prev := p.tok p.tok = p.sc.Scan() if p.tok.Type == token.COMMENT { var comment *ast.CommentGroup var endline int // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n", // p.tok.Pos.Line, prev.Pos.Line, endline) if p.tok.Pos.Line == prev.Pos.Line { // The comment is on same line as the previous token; it // cannot be a lead comment but may be a line comment. comment, endline = p.consumeCommentGroup(0) if p.tok.Pos.Line != endline { // The next token is on a different line, thus // the last comment group is a line comment. 
p.lineComment = comment } } // consume successor comments, if any endline = -1 for p.tok.Type == token.COMMENT { comment, endline = p.consumeCommentGroup(1) } if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE { switch p.tok.Type { case token.RBRACE, token.RBRACK: // Do not count for these cases default: // The next token is following on the line immediately after the // comment group, thus the last comment group is a lead comment. p.leadComment = comment } } } return p.tok } // unscan pushes the previously read token back onto the buffer. func (p *Parser) unscan() { p.n = 1 } // ---------------------------------------------------------------------------- // Parsing support func (p *Parser) printTrace(a ...interface{}) { if !p.enableTrace { return } const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " const n = len(dots) fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) i := 2 * p.indent for i > n { fmt.Print(dots) i -= n } // i <= n fmt.Print(dots[0:i]) fmt.Println(a...) } func trace(p *Parser, msg string) *Parser { p.printTrace(msg, "(") p.indent++ return p } // Usage pattern: defer un(trace(p, "...")) func un(p *Parser) { p.indent-- p.printTrace(")") }
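To make the comment handling above concrete, a minimal sketch (the input is illustrative): Parse returns an *ast.File whose Comments slice holds every comment group the parser consumed, in addition to attaching lead and line comments to the items they document.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	src := "# lead comment\nport = 8080 # line comment\n"

	f, err := parser.Parse([]byte(src))
	if err != nil {
		panic(err)
	}
	// Print every comment the parser collected.
	for _, group := range f.Comments {
		for _, c := range group.List {
			fmt.Println(c.Text)
		}
	}
}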
8,726
0
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/hcl
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
// Package scanner implements a scanner for HCL (HashiCorp Configuration // Language) source text. package scanner import ( "bytes" "fmt" "os" "regexp" "unicode" "unicode/utf8" "github.com/hashicorp/hcl/hcl/token" ) // eof represents a marker rune for the end of the reader. const eof = rune(0) // Scanner defines a lexical scanner type Scanner struct { buf *bytes.Buffer // Source buffer for advancing and scanning src []byte // Source buffer for immutable access // Source Position srcPos token.Pos // current position prevPos token.Pos // previous position, used for peek() method lastCharLen int // length of last character in bytes lastLineLen int // length of last line in characters (for correct column reporting) tokStart int // token text start position tokEnd int // token text end position // Error is called for each error encountered. If no Error // function is set, the error is reported to os.Stderr. Error func(pos token.Pos, msg string) // ErrorCount is incremented by one for each error encountered. ErrorCount int // tokPos is the start position of most recently scanned token; set by // Scan. The Filename field is always left untouched by the Scanner. If // an error is reported (via Error) and Position is invalid, the scanner is // not inside a token. tokPos token.Pos } // New creates and initializes a new instance of Scanner using src as // its source content. func New(src []byte) *Scanner { // even though we accept a src, we read from a io.Reader compatible type // (*bytes.Buffer). So in the future we might easily change it to streaming // read. b := bytes.NewBuffer(src) s := &Scanner{ buf: b, src: src, } // srcPosition always starts with 1 s.srcPos.Line = 1 return s } // next reads the next rune from the bufferred reader. Returns the rune(0) if // an error occurs (or io.EOF is returned). func (s *Scanner) next() rune { ch, size, err := s.buf.ReadRune() if err != nil { // advance for error reporting s.srcPos.Column++ s.srcPos.Offset += size s.lastCharLen = size return eof } // remember last position s.prevPos = s.srcPos s.srcPos.Column++ s.lastCharLen = size s.srcPos.Offset += size if ch == utf8.RuneError && size == 1 { s.err("illegal UTF-8 encoding") return ch } if ch == '\n' { s.srcPos.Line++ s.lastLineLen = s.srcPos.Column s.srcPos.Column = 0 } if ch == '\x00' { s.err("unexpected null character (0x00)") return eof } if ch == '\uE123' { s.err("unicode code point U+E123 reserved for internal use") return utf8.RuneError } // debug // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) return ch } // unread unreads the previous read Rune and updates the source position func (s *Scanner) unread() { if err := s.buf.UnreadRune(); err != nil { panic(err) // this is user fault, we should catch it } s.srcPos = s.prevPos // put back last position } // peek returns the next rune without advancing the reader. func (s *Scanner) peek() rune { peek, _, err := s.buf.ReadRune() if err != nil { return eof } s.buf.UnreadRune() return peek } // Scan scans the next token and returns the token. 
func (s *Scanner) Scan() token.Token { ch := s.next() // skip white space for isWhitespace(ch) { ch = s.next() } var tok token.Type // token text markings s.tokStart = s.srcPos.Offset - s.lastCharLen // token position, initial next() is moving the offset by one(size of rune // actually), though we are interested with the starting point s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen if s.srcPos.Column > 0 { // common case: last character was not a '\n' s.tokPos.Line = s.srcPos.Line s.tokPos.Column = s.srcPos.Column } else { // last character was a '\n' // (we cannot be at the beginning of the source // since we have called next() at least once) s.tokPos.Line = s.srcPos.Line - 1 s.tokPos.Column = s.lastLineLen } switch { case isLetter(ch): tok = token.IDENT lit := s.scanIdentifier() if lit == "true" || lit == "false" { tok = token.BOOL } case isDecimal(ch): tok = s.scanNumber(ch) default: switch ch { case eof: tok = token.EOF case '"': tok = token.STRING s.scanString() case '#', '/': tok = token.COMMENT s.scanComment(ch) case '.': tok = token.PERIOD ch = s.peek() if isDecimal(ch) { tok = token.FLOAT ch = s.scanMantissa(ch) ch = s.scanExponent(ch) } case '<': tok = token.HEREDOC s.scanHeredoc() case '[': tok = token.LBRACK case ']': tok = token.RBRACK case '{': tok = token.LBRACE case '}': tok = token.RBRACE case ',': tok = token.COMMA case '=': tok = token.ASSIGN case '+': tok = token.ADD case '-': if isDecimal(s.peek()) { ch := s.next() tok = s.scanNumber(ch) } else { tok = token.SUB } default: s.err("illegal char") } } // finish token ending s.tokEnd = s.srcPos.Offset // create token literal var tokenText string if s.tokStart >= 0 { tokenText = string(s.src[s.tokStart:s.tokEnd]) } s.tokStart = s.tokEnd // ensure idempotency of tokenText() call return token.Token{ Type: tok, Pos: s.tokPos, Text: tokenText, } } func (s *Scanner) scanComment(ch rune) { // single line comments if ch == '#' || (ch == '/' && s.peek() != '*') { if ch == '/' && s.peek() != '/' { s.err("expected '/' for comment") return } ch = s.next() for ch != '\n' && ch >= 0 && ch != eof { ch = s.next() } if ch != eof && ch >= 0 { s.unread() } return } // be sure we get the character after /* This allows us to find comment's // that are not erminated if ch == '/' { s.next() ch = s.next() // read character after "/*" } // look for /* - style comments for { if ch < 0 || ch == eof { s.err("comment not terminated") break } ch0 := ch ch = s.next() if ch0 == '*' && ch == '/' { break } } } // scanNumber scans a HCL number definition starting with the given rune func (s *Scanner) scanNumber(ch rune) token.Type { if ch == '0' { // check for hexadecimal, octal or float ch = s.next() if ch == 'x' || ch == 'X' { // hexadecimal ch = s.next() found := false for isHexadecimal(ch) { ch = s.next() found = true } if !found { s.err("illegal hexadecimal number") } if ch != eof { s.unread() } return token.NUMBER } // now it's either something like: 0421(octal) or 0.1231(float) illegalOctal := false for isDecimal(ch) { ch = s.next() if ch == '8' || ch == '9' { // this is just a possibility. For example 0159 is illegal, but // 0159.23 is valid. So we mark a possible illegal octal. If // the next character is not a period, we'll print the error. illegalOctal = true } } if ch == 'e' || ch == 'E' { ch = s.scanExponent(ch) return token.FLOAT } if ch == '.' 
{ ch = s.scanFraction(ch) if ch == 'e' || ch == 'E' { ch = s.next() ch = s.scanExponent(ch) } return token.FLOAT } if illegalOctal { s.err("illegal octal number") } if ch != eof { s.unread() } return token.NUMBER } s.scanMantissa(ch) ch = s.next() // seek forward if ch == 'e' || ch == 'E' { ch = s.scanExponent(ch) return token.FLOAT } if ch == '.' { ch = s.scanFraction(ch) if ch == 'e' || ch == 'E' { ch = s.next() ch = s.scanExponent(ch) } return token.FLOAT } if ch != eof { s.unread() } return token.NUMBER } // scanMantissa scans the mantissa beginning from the rune. It returns the next // non decimal rune. It's used to determine wheter it's a fraction or exponent. func (s *Scanner) scanMantissa(ch rune) rune { scanned := false for isDecimal(ch) { ch = s.next() scanned = true } if scanned && ch != eof { s.unread() } return ch } // scanFraction scans the fraction after the '.' rune func (s *Scanner) scanFraction(ch rune) rune { if ch == '.' { ch = s.peek() // we peek just to see if we can move forward ch = s.scanMantissa(ch) } return ch } // scanExponent scans the remaining parts of an exponent after the 'e' or 'E' // rune. func (s *Scanner) scanExponent(ch rune) rune { if ch == 'e' || ch == 'E' { ch = s.next() if ch == '-' || ch == '+' { ch = s.next() } ch = s.scanMantissa(ch) } return ch } // scanHeredoc scans a heredoc string func (s *Scanner) scanHeredoc() { // Scan the second '<' in example: '<<EOF' if s.next() != '<' { s.err("heredoc expected second '<', didn't see it") return } // Get the original offset so we can read just the heredoc ident offs := s.srcPos.Offset // Scan the identifier ch := s.next() // Indented heredoc syntax if ch == '-' { ch = s.next() } for isLetter(ch) || isDigit(ch) { ch = s.next() } // If we reached an EOF then that is not good if ch == eof { s.err("heredoc not terminated") return } // Ignore the '\r' in Windows line endings if ch == '\r' { if s.peek() == '\n' { ch = s.next() } } // If we didn't reach a newline then that is also not good if ch != '\n' { s.err("invalid characters in heredoc anchor") return } // Read the identifier identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen] if len(identBytes) == 0 || (len(identBytes) == 1 && identBytes[0] == '-') { s.err("zero-length heredoc anchor") return } var identRegexp *regexp.Regexp if identBytes[0] == '-' { identRegexp = regexp.MustCompile(fmt.Sprintf(`^[[:space:]]*%s\r*\z`, identBytes[1:])) } else { identRegexp = regexp.MustCompile(fmt.Sprintf(`^[[:space:]]*%s\r*\z`, identBytes)) } // Read the actual string value lineStart := s.srcPos.Offset for { ch := s.next() // Special newline handling. if ch == '\n' { // Math is fast, so we first compare the byte counts to see if we have a chance // of seeing the same identifier - if the length is less than the number of bytes // in the identifier, this cannot be a valid terminator. 
lineBytesLen := s.srcPos.Offset - s.lastCharLen - lineStart if lineBytesLen >= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) { break } // Not an anchor match, record the start of a new line lineStart = s.srcPos.Offset } if ch == eof { s.err("heredoc not terminated") return } } return } // scanString scans a quoted string func (s *Scanner) scanString() { braces := 0 for { // '"' opening already consumed // read character after quote ch := s.next() if (ch == '\n' && braces == 0) || ch < 0 || ch == eof { s.err("literal not terminated") return } if ch == '"' && braces == 0 { break } // If we're going into a ${} then we can ignore quotes for awhile if braces == 0 && ch == '$' && s.peek() == '{' { braces++ s.next() } else if braces > 0 && ch == '{' { braces++ } if braces > 0 && ch == '}' { braces-- } if ch == '\\' { s.scanEscape() } } return } // scanEscape scans an escape sequence func (s *Scanner) scanEscape() rune { // http://en.cppreference.com/w/cpp/language/escape ch := s.next() // read character after '/' switch ch { case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': // nothing to do case '0', '1', '2', '3', '4', '5', '6', '7': // octal notation ch = s.scanDigits(ch, 8, 3) case 'x': // hexademical notation ch = s.scanDigits(s.next(), 16, 2) case 'u': // universal character name ch = s.scanDigits(s.next(), 16, 4) case 'U': // universal character name ch = s.scanDigits(s.next(), 16, 8) default: s.err("illegal char escape") } return ch } // scanDigits scans a rune with the given base for n times. For example an // octal notation \184 would yield in scanDigits(ch, 8, 3) func (s *Scanner) scanDigits(ch rune, base, n int) rune { start := n for n > 0 && digitVal(ch) < base { ch = s.next() if ch == eof { // If we see an EOF, we halt any more scanning of digits // immediately. break } n-- } if n > 0 { s.err("illegal char escape") } if n != start && ch != eof { // we scanned all digits, put the last non digit char back, // only if we read anything at all s.unread() } return ch } // scanIdentifier scans an identifier and returns the literal string func (s *Scanner) scanIdentifier() string { offs := s.srcPos.Offset - s.lastCharLen ch := s.next() for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' { ch = s.next() } if ch != eof { s.unread() // we got identifier, put back latest char } return string(s.src[offs:s.srcPos.Offset]) } // recentPosition returns the position of the character immediately after the // character or token returned by the last call to Scan. func (s *Scanner) recentPosition() (pos token.Pos) { pos.Offset = s.srcPos.Offset - s.lastCharLen switch { case s.srcPos.Column > 0: // common case: last character was not a '\n' pos.Line = s.srcPos.Line pos.Column = s.srcPos.Column case s.lastLineLen > 0: // last character was a '\n' // (we cannot be at the beginning of the source // since we have called next() at least once) pos.Line = s.srcPos.Line - 1 pos.Column = s.lastLineLen default: // at the beginning of the source pos.Line = 1 pos.Column = 1 } return } // err prints the error of any scanning to s.Error function. 
// If the function is not defined, by default it prints them to os.Stderr
func (s *Scanner) err(msg string) {
	s.ErrorCount++
	pos := s.recentPosition()

	if s.Error != nil {
		s.Error(pos, msg)
		return
	}

	fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
}

// isLetter returns true if the given rune is a letter
func isLetter(ch rune) bool {
	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
}

// isDigit returns true if the given rune is a decimal digit
func isDigit(ch rune) bool {
	return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
}

// isDecimal returns true if the given rune is a decimal digit
func isDecimal(ch rune) bool {
	return '0' <= ch && ch <= '9'
}

// isHexadecimal returns true if the given rune is a hexadecimal digit
func isHexadecimal(ch rune) bool {
	return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
}

// isWhitespace returns true if the rune is a space, tab, newline or carriage return
func isWhitespace(ch rune) bool {
	return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
}

// digitVal returns the integer value of a given octal, decimal or hexadecimal rune
func digitVal(ch rune) int {
	switch {
	case '0' <= ch && ch <= '9':
		return int(ch - '0')
	case 'a' <= ch && ch <= 'f':
		return int(ch - 'a' + 10)
	case 'A' <= ch && ch <= 'F':
		return int(ch - 'A' + 10)
	}
	return 16 // larger than any legal digit val
}
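A minimal sketch of driving the scanner above end to end: create it with New, call Scan until an EOF token comes back, and print each token. The sample input is made up for illustration; any valid HCL works.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/scanner"
	"github.com/hashicorp/hcl/hcl/token"
)

func main() {
	src := []byte("name = \"demo\"\ncount = 3\n")

	s := scanner.New(src)
	for {
		tok := s.Scan()
		if tok.Type == token.EOF {
			break
		}
		// Token.String prints "pos type text", e.g. `1:1 IDENT name`.
		fmt.Println(tok.String())
	}
}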
8,727
0
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/hcl
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go
// Package printer implements printing of AST nodes to HCL format. package printer import ( "bytes" "io" "text/tabwriter" "github.com/hashicorp/hcl/hcl/ast" "github.com/hashicorp/hcl/hcl/parser" ) var DefaultConfig = Config{ SpacesWidth: 2, } // A Config node controls the output of Fprint. type Config struct { SpacesWidth int // if set, it will use spaces instead of tabs for alignment } func (c *Config) Fprint(output io.Writer, node ast.Node) error { p := &printer{ cfg: *c, comments: make([]*ast.CommentGroup, 0), standaloneComments: make([]*ast.CommentGroup, 0), // enableTrace: true, } p.collectComments(node) if _, err := output.Write(p.unindent(p.output(node))); err != nil { return err } // flush tabwriter, if any var err error if tw, _ := output.(*tabwriter.Writer); tw != nil { err = tw.Flush() } return err } // Fprint "pretty-prints" an HCL node to output // It calls Config.Fprint with default settings. func Fprint(output io.Writer, node ast.Node) error { return DefaultConfig.Fprint(output, node) } // Format formats src HCL and returns the result. func Format(src []byte) ([]byte, error) { node, err := parser.Parse(src) if err != nil { return nil, err } var buf bytes.Buffer if err := DefaultConfig.Fprint(&buf, node); err != nil { return nil, err } // Add trailing newline to result buf.WriteString("\n") return buf.Bytes(), nil }
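Format is the one-call entry point defined above: parse, pretty-print, and append a trailing newline. A small sketch with a made-up, deliberately cramped input:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/hcl/printer"
)

func main() {
	src := []byte("settings{retries=3}")

	out, err := printer.Format(src)
	if err != nil {
		log.Fatal(err)
	}
	// Prints the normalized form:
	// settings {
	//   retries = 3
	// }
	fmt.Print(string(out))
}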
8,728
0
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/hcl
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go
package printer import ( "bytes" "fmt" "sort" "github.com/hashicorp/hcl/hcl/ast" "github.com/hashicorp/hcl/hcl/token" ) const ( blank = byte(' ') newline = byte('\n') tab = byte('\t') infinity = 1 << 30 // offset or line ) var ( unindent = []byte("\uE123") // in the private use space ) type printer struct { cfg Config prev token.Pos comments []*ast.CommentGroup // may be nil, contains all comments standaloneComments []*ast.CommentGroup // contains all standalone comments (not assigned to any node) enableTrace bool indentTrace int } type ByPosition []*ast.CommentGroup func (b ByPosition) Len() int { return len(b) } func (b ByPosition) Swap(i, j int) { b[i], b[j] = b[j], b[i] } func (b ByPosition) Less(i, j int) bool { return b[i].Pos().Before(b[j].Pos()) } // collectComments comments all standalone comments which are not lead or line // comment func (p *printer) collectComments(node ast.Node) { // first collect all comments. This is already stored in // ast.File.(comments) ast.Walk(node, func(nn ast.Node) (ast.Node, bool) { switch t := nn.(type) { case *ast.File: p.comments = t.Comments return nn, false } return nn, true }) standaloneComments := make(map[token.Pos]*ast.CommentGroup, 0) for _, c := range p.comments { standaloneComments[c.Pos()] = c } // next remove all lead and line comments from the overall comment map. // This will give us comments which are standalone, comments which are not // assigned to any kind of node. ast.Walk(node, func(nn ast.Node) (ast.Node, bool) { switch t := nn.(type) { case *ast.LiteralType: if t.LeadComment != nil { for _, comment := range t.LeadComment.List { if _, ok := standaloneComments[comment.Pos()]; ok { delete(standaloneComments, comment.Pos()) } } } if t.LineComment != nil { for _, comment := range t.LineComment.List { if _, ok := standaloneComments[comment.Pos()]; ok { delete(standaloneComments, comment.Pos()) } } } case *ast.ObjectItem: if t.LeadComment != nil { for _, comment := range t.LeadComment.List { if _, ok := standaloneComments[comment.Pos()]; ok { delete(standaloneComments, comment.Pos()) } } } if t.LineComment != nil { for _, comment := range t.LineComment.List { if _, ok := standaloneComments[comment.Pos()]; ok { delete(standaloneComments, comment.Pos()) } } } } return nn, true }) for _, c := range standaloneComments { p.standaloneComments = append(p.standaloneComments, c) } sort.Sort(ByPosition(p.standaloneComments)) } // output prints creates b printable HCL output and returns it. func (p *printer) output(n interface{}) []byte { var buf bytes.Buffer switch t := n.(type) { case *ast.File: // File doesn't trace so we add the tracing here defer un(trace(p, "File")) return p.output(t.Node) case *ast.ObjectList: defer un(trace(p, "ObjectList")) var index int for { // Determine the location of the next actual non-comment // item. If we're at the end, the next item is at "infinity" var nextItem token.Pos if index != len(t.Items) { nextItem = t.Items[index].Pos() } else { nextItem = token.Pos{Offset: infinity, Line: infinity} } // Go through the standalone comments in the file and print out // the comments that we should be for this object item. for _, c := range p.standaloneComments { // Go through all the comments in the group. The group // should be printed together, not separated by double newlines. printed := false newlinePrinted := false for _, comment := range c.List { // We only care about comments after the previous item // we've printed so that comments are printed in the // correct locations (between two objects for example). 
// And before the next item. if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) { // if we hit the end add newlines so we can print the comment // we don't do this if prev is invalid which means the // beginning of the file since the first comment should // be at the first line. if !newlinePrinted && p.prev.IsValid() && index == len(t.Items) { buf.Write([]byte{newline, newline}) newlinePrinted = true } // Write the actual comment. buf.WriteString(comment.Text) buf.WriteByte(newline) // Set printed to true to note that we printed something printed = true } } // If we're not at the last item, write a new line so // that there is a newline separating this comment from // the next object. if printed && index != len(t.Items) { buf.WriteByte(newline) } } if index == len(t.Items) { break } buf.Write(p.output(t.Items[index])) if index != len(t.Items)-1 { // Always write a newline to separate us from the next item buf.WriteByte(newline) // Need to determine if we're going to separate the next item // with a blank line. The logic here is simple, though there // are a few conditions: // // 1. The next object is more than one line away anyways, // so we need an empty line. // // 2. The next object is not a "single line" object, so // we need an empty line. // // 3. This current object is not a single line object, // so we need an empty line. current := t.Items[index] next := t.Items[index+1] if next.Pos().Line != t.Items[index].Pos().Line+1 || !p.isSingleLineObject(next) || !p.isSingleLineObject(current) { buf.WriteByte(newline) } } index++ } case *ast.ObjectKey: buf.WriteString(t.Token.Text) case *ast.ObjectItem: p.prev = t.Pos() buf.Write(p.objectItem(t)) case *ast.LiteralType: buf.Write(p.literalType(t)) case *ast.ListType: buf.Write(p.list(t)) case *ast.ObjectType: buf.Write(p.objectType(t)) default: fmt.Printf(" unknown type: %T\n", n) } return buf.Bytes() } func (p *printer) literalType(lit *ast.LiteralType) []byte { result := []byte(lit.Token.Text) switch lit.Token.Type { case token.HEREDOC: // Clear the trailing newline from heredocs if result[len(result)-1] == '\n' { result = result[:len(result)-1] } // Poison lines 2+ so that we don't indent them result = p.heredocIndent(result) case token.STRING: // If this is a multiline string, poison lines 2+ so we don't // indent them. if bytes.IndexRune(result, '\n') >= 0 { result = p.heredocIndent(result) } } return result } // objectItem returns the printable HCL form of an object item. An object type // starts with one/multiple keys and has a value. The value might be of any // type. func (p *printer) objectItem(o *ast.ObjectItem) []byte { defer un(trace(p, fmt.Sprintf("ObjectItem: %s", o.Keys[0].Token.Text))) var buf bytes.Buffer if o.LeadComment != nil { for _, comment := range o.LeadComment.List { buf.WriteString(comment.Text) buf.WriteByte(newline) } } // If key and val are on different lines, treat line comments like lead comments. 
if o.LineComment != nil && o.Val.Pos().Line != o.Keys[0].Pos().Line { for _, comment := range o.LineComment.List { buf.WriteString(comment.Text) buf.WriteByte(newline) } } for i, k := range o.Keys { buf.WriteString(k.Token.Text) buf.WriteByte(blank) // reach end of key if o.Assign.IsValid() && i == len(o.Keys)-1 && len(o.Keys) == 1 { buf.WriteString("=") buf.WriteByte(blank) } } buf.Write(p.output(o.Val)) if o.LineComment != nil && o.Val.Pos().Line == o.Keys[0].Pos().Line { buf.WriteByte(blank) for _, comment := range o.LineComment.List { buf.WriteString(comment.Text) } } return buf.Bytes() } // objectType returns the printable HCL form of an object type. An object type // begins with a brace and ends with a brace. func (p *printer) objectType(o *ast.ObjectType) []byte { defer un(trace(p, "ObjectType")) var buf bytes.Buffer buf.WriteString("{") var index int var nextItem token.Pos var commented, newlinePrinted bool for { // Determine the location of the next actual non-comment // item. If we're at the end, the next item is the closing brace if index != len(o.List.Items) { nextItem = o.List.Items[index].Pos() } else { nextItem = o.Rbrace } // Go through the standalone comments in the file and print out // the comments that we should be for this object item. for _, c := range p.standaloneComments { printed := false var lastCommentPos token.Pos for _, comment := range c.List { // We only care about comments after the previous item // we've printed so that comments are printed in the // correct locations (between two objects for example). // And before the next item. if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) { // If there are standalone comments and the initial newline has not // been printed yet, do it now. if !newlinePrinted { newlinePrinted = true buf.WriteByte(newline) } // add newline if it's between other printed nodes if index > 0 { commented = true buf.WriteByte(newline) } // Store this position lastCommentPos = comment.Pos() // output the comment itself buf.Write(p.indent(p.heredocIndent([]byte(comment.Text)))) // Set printed to true to note that we printed something printed = true /* if index != len(o.List.Items) { buf.WriteByte(newline) // do not print on the end } */ } } // Stuff to do if we had comments if printed { // Always write a newline buf.WriteByte(newline) // If there is another item in the object and our comment // didn't hug it directly, then make sure there is a blank // line separating them. if nextItem != o.Rbrace && nextItem.Line != lastCommentPos.Line+1 { buf.WriteByte(newline) } } } if index == len(o.List.Items) { p.prev = o.Rbrace break } // At this point we are sure that it's not a totally empty block: print // the initial newline if it hasn't been printed yet by the previous // block about standalone comments. if !newlinePrinted { buf.WriteByte(newline) newlinePrinted = true } // check if we have adjacent one liner items. If yes we'll going to align // the comments. 
var aligned []*ast.ObjectItem for _, item := range o.List.Items[index:] { // we don't group one line lists if len(o.List.Items) == 1 { break } // one means a oneliner with out any lead comment // two means a oneliner with lead comment // anything else might be something else cur := lines(string(p.objectItem(item))) if cur > 2 { break } curPos := item.Pos() nextPos := token.Pos{} if index != len(o.List.Items)-1 { nextPos = o.List.Items[index+1].Pos() } prevPos := token.Pos{} if index != 0 { prevPos = o.List.Items[index-1].Pos() } // fmt.Println("DEBUG ----------------") // fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos) // fmt.Printf("cur = %+v curPos: %s\n", cur, curPos) // fmt.Printf("next = %+v nextPos: %s\n", next, nextPos) if curPos.Line+1 == nextPos.Line { aligned = append(aligned, item) index++ continue } if curPos.Line-1 == prevPos.Line { aligned = append(aligned, item) index++ // finish if we have a new line or comment next. This happens // if the next item is not adjacent if curPos.Line+1 != nextPos.Line { break } continue } break } // put newlines if the items are between other non aligned items. // newlines are also added if there is a standalone comment already, so // check it too if !commented && index != len(aligned) { buf.WriteByte(newline) } if len(aligned) >= 1 { p.prev = aligned[len(aligned)-1].Pos() items := p.alignedItems(aligned) buf.Write(p.indent(items)) } else { p.prev = o.List.Items[index].Pos() buf.Write(p.indent(p.objectItem(o.List.Items[index]))) index++ } buf.WriteByte(newline) } buf.WriteString("}") return buf.Bytes() } func (p *printer) alignedItems(items []*ast.ObjectItem) []byte { var buf bytes.Buffer // find the longest key and value length, needed for alignment var longestKeyLen int // longest key length var longestValLen int // longest value length for _, item := range items { key := len(item.Keys[0].Token.Text) val := len(p.output(item.Val)) if key > longestKeyLen { longestKeyLen = key } if val > longestValLen { longestValLen = val } } for i, item := range items { if item.LeadComment != nil { for _, comment := range item.LeadComment.List { buf.WriteString(comment.Text) buf.WriteByte(newline) } } for i, k := range item.Keys { keyLen := len(k.Token.Text) buf.WriteString(k.Token.Text) for i := 0; i < longestKeyLen-keyLen+1; i++ { buf.WriteByte(blank) } // reach end of key if i == len(item.Keys)-1 && len(item.Keys) == 1 { buf.WriteString("=") buf.WriteByte(blank) } } val := p.output(item.Val) valLen := len(val) buf.Write(val) if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil { for i := 0; i < longestValLen-valLen+1; i++ { buf.WriteByte(blank) } for _, comment := range item.LineComment.List { buf.WriteString(comment.Text) } } // do not print for the last item if i != len(items)-1 { buf.WriteByte(newline) } } return buf.Bytes() } // list returns the printable HCL form of an list type. 
func (p *printer) list(l *ast.ListType) []byte { if p.isSingleLineList(l) { return p.singleLineList(l) } var buf bytes.Buffer buf.WriteString("[") buf.WriteByte(newline) var longestLine int for _, item := range l.List { // for now we assume that the list only contains literal types if lit, ok := item.(*ast.LiteralType); ok { lineLen := len(lit.Token.Text) if lineLen > longestLine { longestLine = lineLen } } } haveEmptyLine := false for i, item := range l.List { // If we have a lead comment, then we want to write that first leadComment := false if lit, ok := item.(*ast.LiteralType); ok && lit.LeadComment != nil { leadComment = true // Ensure an empty line before every element with a // lead comment (except the first item in a list). if !haveEmptyLine && i != 0 { buf.WriteByte(newline) } for _, comment := range lit.LeadComment.List { buf.Write(p.indent([]byte(comment.Text))) buf.WriteByte(newline) } } // also indent each line val := p.output(item) curLen := len(val) buf.Write(p.indent(val)) // if this item is a heredoc, then we output the comma on // the next line. This is the only case this happens. comma := []byte{','} if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC { buf.WriteByte(newline) comma = p.indent(comma) } buf.Write(comma) if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil { // if the next item doesn't have any comments, do not align buf.WriteByte(blank) // align one space for i := 0; i < longestLine-curLen; i++ { buf.WriteByte(blank) } for _, comment := range lit.LineComment.List { buf.WriteString(comment.Text) } } buf.WriteByte(newline) // Ensure an empty line after every element with a // lead comment (except the first item in a list). haveEmptyLine = leadComment && i != len(l.List)-1 if haveEmptyLine { buf.WriteByte(newline) } } buf.WriteString("]") return buf.Bytes() } // isSingleLineList returns true if: // * they were previously formatted entirely on one line // * they consist entirely of literals // * there are either no heredoc strings or the list has exactly one element // * there are no line comments func (printer) isSingleLineList(l *ast.ListType) bool { for _, item := range l.List { if item.Pos().Line != l.Lbrack.Line { return false } lit, ok := item.(*ast.LiteralType) if !ok { return false } if lit.Token.Type == token.HEREDOC && len(l.List) != 1 { return false } if lit.LineComment != nil { return false } } return true } // singleLineList prints a simple single line list. // For a definition of "simple", see isSingleLineList above. func (p *printer) singleLineList(l *ast.ListType) []byte { buf := &bytes.Buffer{} buf.WriteString("[") for i, item := range l.List { if i != 0 { buf.WriteString(", ") } // Output the item itself buf.Write(p.output(item)) // The heredoc marker needs to be at the end of line. if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC { buf.WriteByte(newline) } } buf.WriteString("]") return buf.Bytes() } // indent indents the lines of the given buffer for each non-empty line func (p *printer) indent(buf []byte) []byte { var prefix []byte if p.cfg.SpacesWidth != 0 { for i := 0; i < p.cfg.SpacesWidth; i++ { prefix = append(prefix, blank) } } else { prefix = []byte{tab} } var res []byte bol := true for _, c := range buf { if bol && c != '\n' { res = append(res, prefix...) 
} res = append(res, c) bol = c == '\n' } return res } // unindent removes all the indentation from the tombstoned lines func (p *printer) unindent(buf []byte) []byte { var res []byte for i := 0; i < len(buf); i++ { skip := len(buf)-i <= len(unindent) if !skip { skip = !bytes.Equal(unindent, buf[i:i+len(unindent)]) } if skip { res = append(res, buf[i]) continue } // We have a marker. we have to backtrace here and clean out // any whitespace ahead of our tombstone up to a \n for j := len(res) - 1; j >= 0; j-- { if res[j] == '\n' { break } res = res[:j] } // Skip the entire unindent marker i += len(unindent) - 1 } return res } // heredocIndent marks all the 2nd and further lines as unindentable func (p *printer) heredocIndent(buf []byte) []byte { var res []byte bol := false for _, c := range buf { if bol && c != '\n' { res = append(res, unindent...) } res = append(res, c) bol = c == '\n' } return res } // isSingleLineObject tells whether the given object item is a single // line object such as "obj {}". // // A single line object: // // * has no lead comments (hence multi-line) // * has no assignment // * has no values in the stanza (within {}) // func (p *printer) isSingleLineObject(val *ast.ObjectItem) bool { // If there is a lead comment, can't be one line if val.LeadComment != nil { return false } // If there is assignment, we always break by line if val.Assign.IsValid() { return false } // If it isn't an object type, then its not a single line object ot, ok := val.Val.(*ast.ObjectType) if !ok { return false } // If the object has no items, it is single line! return len(ot.List.Items) == 0 } func lines(txt string) int { endline := 1 for i := 0; i < len(txt); i++ { if txt[i] == '\n' { endline++ } } return endline } // ---------------------------------------------------------------------------- // Tracing support func (p *printer) printTrace(a ...interface{}) { if !p.enableTrace { return } const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " const n = len(dots) i := 2 * p.indentTrace for i > n { fmt.Print(dots) i -= n } // i <= n fmt.Print(dots[0:i]) fmt.Println(a...) } func trace(p *printer, msg string) *printer { p.printTrace(msg, "(") p.indentTrace++ return p } // Usage pattern: defer un(trace(p, "...")) func un(p *printer) { p.indentTrace-- p.printTrace(")") }
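The printer internals above are normally reached through Config rather than called directly; setting SpacesWidth switches indentation from a tab to spaces. A sketch, assuming the hcl/hcl/parser package from this same vendor tree:

package main

import (
	"log"
	"os"

	"github.com/hashicorp/hcl/hcl/parser"
	"github.com/hashicorp/hcl/hcl/printer"
)

func main() {
	node, err := parser.Parse([]byte("settings { retries = 3 }"))
	if err != nil {
		log.Fatal(err)
	}

	// Four-space indentation instead of DefaultConfig's two.
	cfg := &printer.Config{SpacesWidth: 4}
	if err := cfg.Fprint(os.Stdout, node); err != nil {
		log.Fatal(err)
	}
}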
8,729
0
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/hcl
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/hcl/token/token.go
// Package token defines constants representing the lexical tokens for HCL // (HashiCorp Configuration Language) package token import ( "fmt" "strconv" "strings" hclstrconv "github.com/hashicorp/hcl/hcl/strconv" ) // Token defines a single HCL token which can be obtained via the Scanner type Token struct { Type Type Pos Pos Text string JSON bool } // Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) type Type int const ( // Special tokens ILLEGAL Type = iota EOF COMMENT identifier_beg IDENT // literals literal_beg NUMBER // 12345 FLOAT // 123.45 BOOL // true,false STRING // "abc" HEREDOC // <<FOO\nbar\nFOO literal_end identifier_end operator_beg LBRACK // [ LBRACE // { COMMA // , PERIOD // . RBRACK // ] RBRACE // } ASSIGN // = ADD // + SUB // - operator_end ) var tokens = [...]string{ ILLEGAL: "ILLEGAL", EOF: "EOF", COMMENT: "COMMENT", IDENT: "IDENT", NUMBER: "NUMBER", FLOAT: "FLOAT", BOOL: "BOOL", STRING: "STRING", LBRACK: "LBRACK", LBRACE: "LBRACE", COMMA: "COMMA", PERIOD: "PERIOD", HEREDOC: "HEREDOC", RBRACK: "RBRACK", RBRACE: "RBRACE", ASSIGN: "ASSIGN", ADD: "ADD", SUB: "SUB", } // String returns the string corresponding to the token tok. func (t Type) String() string { s := "" if 0 <= t && t < Type(len(tokens)) { s = tokens[t] } if s == "" { s = "token(" + strconv.Itoa(int(t)) + ")" } return s } // IsIdentifier returns true for tokens corresponding to identifiers and basic // type literals; it returns false otherwise. func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end } // IsLiteral returns true for tokens corresponding to basic type literals; it // returns false otherwise. func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end } // IsOperator returns true for tokens corresponding to operators and // delimiters; it returns false otherwise. func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end } // String returns the token's literal text. Note that this is only // applicable for certain token types, such as token.IDENT, // token.STRING, etc.. func (t Token) String() string { return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text) } // Value returns the properly typed value for this token. The type of // the returned interface{} is guaranteed based on the Type field. // // This can only be called for literal types. If it is called for any other // type, this will panic. func (t Token) Value() interface{} { switch t.Type { case BOOL: if t.Text == "true" { return true } else if t.Text == "false" { return false } panic("unknown bool value: " + t.Text) case FLOAT: v, err := strconv.ParseFloat(t.Text, 64) if err != nil { panic(err) } return float64(v) case NUMBER: v, err := strconv.ParseInt(t.Text, 0, 64) if err != nil { panic(err) } return int64(v) case IDENT: return t.Text case HEREDOC: return unindentHeredoc(t.Text) case STRING: // Determine the Unquote method to use. If it came from JSON, // then we need to use the built-in unquote since we have to // escape interpolations there. 
f := hclstrconv.Unquote if t.JSON { f = strconv.Unquote } // This case occurs if json null is used if t.Text == "" { return "" } v, err := f(t.Text) if err != nil { panic(fmt.Sprintf("unquote %s err: %s", t.Text, err)) } return v default: panic(fmt.Sprintf("unimplemented Value for type: %s", t.Type)) } } // unindentHeredoc returns the string content of a HEREDOC if it is started with << // and the content of a HEREDOC with the hanging indent removed if it is started with // a <<-, and the terminating line is at least as indented as the least indented line. func unindentHeredoc(heredoc string) string { // We need to find the end of the marker idx := strings.IndexByte(heredoc, '\n') if idx == -1 { panic("heredoc doesn't contain newline") } unindent := heredoc[2] == '-' // We can optimize if the heredoc isn't marked for indentation if !unindent { return string(heredoc[idx+1 : len(heredoc)-idx+1]) } // We need to unindent each line based on the indentation level of the marker lines := strings.Split(string(heredoc[idx+1:len(heredoc)-idx+2]), "\n") whitespacePrefix := lines[len(lines)-1] isIndented := true for _, v := range lines { if strings.HasPrefix(v, whitespacePrefix) { continue } isIndented = false break } // If all lines are not at least as indented as the terminating mark, return the // heredoc as is, but trim the leading space from the marker on the final line. if !isIndented { return strings.TrimRight(string(heredoc[idx+1:len(heredoc)-idx+1]), " \t") } unindentedLines := make([]string, len(lines)) for k, v := range lines { if k == len(lines)-1 { unindentedLines[k] = "" break } unindentedLines[k] = strings.TrimPrefix(v, whitespacePrefix) } return strings.Join(unindentedLines, "\n") }
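Token.Value's conversions can be exercised by constructing tokens by hand. A small sketch; note that NUMBER parsing runs strconv.ParseInt with base 0, so 0x and leading-zero octal prefixes in the token text are honored:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/token"
)

func main() {
	n := token.Token{Type: token.NUMBER, Text: "0x10"}
	fmt.Println(n.Value()) // int64(16): base-0 ParseInt honors the 0x prefix

	b := token.Token{Type: token.BOOL, Text: "true"}
	fmt.Println(b.Value()) // true

	s := token.Token{Type: token.STRING, Text: `"hi"`}
	fmt.Println(s.Value()) // hi (unquoted via hclstrconv, since JSON is false)
}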
8,730
0
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/hcl
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/hcl/token/position.go
package token import "fmt" // Pos describes an arbitrary source position // including the file, line, and column location. // A Position is valid if the line number is > 0. type Pos struct { Filename string // filename, if any Offset int // offset, starting at 0 Line int // line number, starting at 1 Column int // column number, starting at 1 (character count) } // IsValid returns true if the position is valid. func (p *Pos) IsValid() bool { return p.Line > 0 } // String returns a string in one of several forms: // // file:line:column valid position with file name // line:column valid position without file name // file invalid position with file name // - invalid position without file name func (p Pos) String() string { s := p.Filename if p.IsValid() { if s != "" { s += ":" } s += fmt.Sprintf("%d:%d", p.Line, p.Column) } if s == "" { s = "-" } return s } // Before reports whether the position p is before u. func (p Pos) Before(u Pos) bool { return u.Offset > p.Offset || u.Line > p.Line } // After reports whether the position p is after u. func (p Pos) After(u Pos) bool { return u.Offset < p.Offset || u.Line < p.Line }
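Pos is a plain value type; a quick sketch of its String forms and the ordering helpers:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/token"
)

func main() {
	a := token.Pos{Filename: "main.hcl", Offset: 0, Line: 1, Column: 1}
	b := token.Pos{Filename: "main.hcl", Offset: 12, Line: 2, Column: 3}

	fmt.Println(a)           // main.hcl:1:1
	fmt.Println(a.Before(b)) // true
	fmt.Println(b.After(a))  // true

	var zero token.Pos
	fmt.Println(zero.IsValid(), zero) // false - (an invalid position prints "-")
}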
8,731
0
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/json
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/json/parser/flatten.go
package parser import "github.com/hashicorp/hcl/hcl/ast" // flattenObjects takes an AST node, walks it, and flattens func flattenObjects(node ast.Node) { ast.Walk(node, func(n ast.Node) (ast.Node, bool) { // We only care about lists, because this is what we modify list, ok := n.(*ast.ObjectList) if !ok { return n, true } // Rebuild the item list items := make([]*ast.ObjectItem, 0, len(list.Items)) frontier := make([]*ast.ObjectItem, len(list.Items)) copy(frontier, list.Items) for len(frontier) > 0 { // Pop the current item n := len(frontier) item := frontier[n-1] frontier = frontier[:n-1] switch v := item.Val.(type) { case *ast.ObjectType: items, frontier = flattenObjectType(v, item, items, frontier) case *ast.ListType: items, frontier = flattenListType(v, item, items, frontier) default: items = append(items, item) } } // Reverse the list since the frontier model runs things backwards for i := len(items)/2 - 1; i >= 0; i-- { opp := len(items) - 1 - i items[i], items[opp] = items[opp], items[i] } // Done! Set the original items list.Items = items return n, true }) } func flattenListType( ot *ast.ListType, item *ast.ObjectItem, items []*ast.ObjectItem, frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { // If the list is empty, keep the original list if len(ot.List) == 0 { items = append(items, item) return items, frontier } // All the elements of this object must also be objects! for _, subitem := range ot.List { if _, ok := subitem.(*ast.ObjectType); !ok { items = append(items, item) return items, frontier } } // Great! We have a match go through all the items and flatten for _, elem := range ot.List { // Add it to the frontier so that we can recurse frontier = append(frontier, &ast.ObjectItem{ Keys: item.Keys, Assign: item.Assign, Val: elem, LeadComment: item.LeadComment, LineComment: item.LineComment, }) } return items, frontier } func flattenObjectType( ot *ast.ObjectType, item *ast.ObjectItem, items []*ast.ObjectItem, frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { // If the list has no items we do not have to flatten anything if ot.List.Items == nil { items = append(items, item) return items, frontier } // All the elements of this object must also be objects! for _, subitem := range ot.List.Items { if _, ok := subitem.Val.(*ast.ObjectType); !ok { items = append(items, item) return items, frontier } } // Great! We have a match go through all the items and flatten for _, subitem := range ot.List.Items { // Copy the new key keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys)) copy(keys, item.Keys) copy(keys[len(item.Keys):], subitem.Keys) // Add it to the frontier so that we can recurse frontier = append(frontier, &ast.ObjectItem{ Keys: keys, Assign: item.Assign, Val: subitem.Val, LeadComment: item.LeadComment, LineComment: item.LineComment, }) } return items, frontier }
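The effect of flattenObjects is easiest to see through the JSON parser, which calls it on every parse. A sketch, assuming the hcl/hcl/ast package from this same vendor tree; a chain of nested single-key objects collapses into one multi-key item:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/hcl/ast"
	jsonparser "github.com/hashicorp/hcl/json/parser"
)

func main() {
	f, err := jsonparser.Parse([]byte(`{"outer": {"inner": {"leaf": 1}}}`))
	if err != nil {
		log.Fatal(err)
	}

	// After flattening there is a single item whose key list is
	// ["outer", "inner"], mirroring HCL's `outer inner { leaf = 1 }` shape.
	list := f.Node.(*ast.ObjectList)
	for _, item := range list.Items {
		for _, k := range item.Keys {
			fmt.Print(k.Token.Text, " ")
		}
		fmt.Println()
	}
}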
8,732
0
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/json
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/json/parser/parser.go
package parser import ( "errors" "fmt" "github.com/hashicorp/hcl/hcl/ast" hcltoken "github.com/hashicorp/hcl/hcl/token" "github.com/hashicorp/hcl/json/scanner" "github.com/hashicorp/hcl/json/token" ) type Parser struct { sc *scanner.Scanner // Last read token tok token.Token commaPrev token.Token enableTrace bool indent int n int // buffer size (max = 1) } func newParser(src []byte) *Parser { return &Parser{ sc: scanner.New(src), } } // Parse returns the fully parsed source and returns the abstract syntax tree. func Parse(src []byte) (*ast.File, error) { p := newParser(src) return p.Parse() } var errEofToken = errors.New("EOF token found") // Parse returns the fully parsed source and returns the abstract syntax tree. func (p *Parser) Parse() (*ast.File, error) { f := &ast.File{} var err, scerr error p.sc.Error = func(pos token.Pos, msg string) { scerr = fmt.Errorf("%s: %s", pos, msg) } // The root must be an object in JSON object, err := p.object() if scerr != nil { return nil, scerr } if err != nil { return nil, err } // We make our final node an object list so it is more HCL compatible f.Node = object.List // Flatten it, which finds patterns and turns them into more HCL-like // AST trees. flattenObjects(f.Node) return f, nil } func (p *Parser) objectList() (*ast.ObjectList, error) { defer un(trace(p, "ParseObjectList")) node := &ast.ObjectList{} for { n, err := p.objectItem() if err == errEofToken { break // we are finished } // we don't return a nil node, because might want to use already // collected items. if err != nil { return node, err } node.Add(n) // Check for a followup comma. If it isn't a comma, then we're done if tok := p.scan(); tok.Type != token.COMMA { break } } return node, nil } // objectItem parses a single object item func (p *Parser) objectItem() (*ast.ObjectItem, error) { defer un(trace(p, "ParseObjectItem")) keys, err := p.objectKey() if err != nil { return nil, err } o := &ast.ObjectItem{ Keys: keys, } switch p.tok.Type { case token.COLON: pos := p.tok.Pos o.Assign = hcltoken.Pos{ Filename: pos.Filename, Offset: pos.Offset, Line: pos.Line, Column: pos.Column, } o.Val, err = p.objectValue() if err != nil { return nil, err } } return o, nil } // objectKey parses an object key and returns a ObjectKey AST func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { keyCount := 0 keys := make([]*ast.ObjectKey, 0) for { tok := p.scan() switch tok.Type { case token.EOF: return nil, errEofToken case token.STRING: keyCount++ keys = append(keys, &ast.ObjectKey{ Token: p.tok.HCLToken(), }) case token.COLON: // If we have a zero keycount it means that we never got // an object key, i.e. `{ :`. This is a syntax error. if keyCount == 0 { return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) } // Done return keys, nil case token.ILLEGAL: return nil, errors.New("illegal") default: return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) } } } // object parses any type of object, such as number, bool, string, object or // list. func (p *Parser) objectValue() (ast.Node, error) { defer un(trace(p, "ParseObjectValue")) tok := p.scan() switch tok.Type { case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING: return p.literalType() case token.LBRACE: return p.objectType() case token.LBRACK: return p.listType() case token.EOF: return nil, errEofToken } return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok) } // object parses any type of object, such as number, bool, string, object or // list. 
func (p *Parser) object() (*ast.ObjectType, error) { defer un(trace(p, "ParseType")) tok := p.scan() switch tok.Type { case token.LBRACE: return p.objectType() case token.EOF: return nil, errEofToken } return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok) } // objectType parses an object type and returns a ObjectType AST func (p *Parser) objectType() (*ast.ObjectType, error) { defer un(trace(p, "ParseObjectType")) // we assume that the currently scanned token is a LBRACE o := &ast.ObjectType{} l, err := p.objectList() // if we hit RBRACE, we are good to go (means we parsed all Items), if it's // not a RBRACE, it's an syntax error and we just return it. if err != nil && p.tok.Type != token.RBRACE { return nil, err } o.List = l return o, nil } // listType parses a list type and returns a ListType AST func (p *Parser) listType() (*ast.ListType, error) { defer un(trace(p, "ParseListType")) // we assume that the currently scanned token is a LBRACK l := &ast.ListType{} for { tok := p.scan() switch tok.Type { case token.NUMBER, token.FLOAT, token.STRING: node, err := p.literalType() if err != nil { return nil, err } l.Add(node) case token.COMMA: continue case token.LBRACE: node, err := p.objectType() if err != nil { return nil, err } l.Add(node) case token.BOOL: // TODO(arslan) should we support? not supported by HCL yet case token.LBRACK: // TODO(arslan) should we support nested lists? Even though it's // written in README of HCL, it's not a part of the grammar // (not defined in parse.y) case token.RBRACK: // finished return l, nil default: return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type) } } } // literalType parses a literal type and returns a LiteralType AST func (p *Parser) literalType() (*ast.LiteralType, error) { defer un(trace(p, "ParseLiteral")) return &ast.LiteralType{ Token: p.tok.HCLToken(), }, nil } // scan returns the next token from the underlying scanner. If a token has // been unscanned then read that instead. func (p *Parser) scan() token.Token { // If we have a token on the buffer, then return it. if p.n != 0 { p.n = 0 return p.tok } p.tok = p.sc.Scan() return p.tok } // unscan pushes the previously read token back onto the buffer. func (p *Parser) unscan() { p.n = 1 } // ---------------------------------------------------------------------------- // Parsing support func (p *Parser) printTrace(a ...interface{}) { if !p.enableTrace { return } const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " const n = len(dots) fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) i := 2 * p.indent for i > n { fmt.Print(dots) i -= n } // i <= n fmt.Print(dots[0:i]) fmt.Println(a...) } func trace(p *Parser, msg string) *Parser { p.printTrace(msg, "(") p.indent++ return p } // Usage pattern: defer un(trace(p, "...")) func un(p *Parser) { p.indent-- p.printTrace(")") }
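End to end, the parser above turns a JSON document into the same AST shape the HCL parser produces. A minimal sketch:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/json/parser"
)

func main() {
	// The root must be a JSON object, as enforced by Parse.
	f, err := parser.Parse([]byte(`{"name": "demo", "count": 3}`))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("root node: %T\n", f.Node) // *ast.ObjectList
}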
8,733
0
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/json
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
package scanner import ( "bytes" "fmt" "os" "unicode" "unicode/utf8" "github.com/hashicorp/hcl/json/token" ) // eof represents a marker rune for the end of the reader. const eof = rune(0) // Scanner defines a lexical scanner type Scanner struct { buf *bytes.Buffer // Source buffer for advancing and scanning src []byte // Source buffer for immutable access // Source Position srcPos token.Pos // current position prevPos token.Pos // previous position, used for peek() method lastCharLen int // length of last character in bytes lastLineLen int // length of last line in characters (for correct column reporting) tokStart int // token text start position tokEnd int // token text end position // Error is called for each error encountered. If no Error // function is set, the error is reported to os.Stderr. Error func(pos token.Pos, msg string) // ErrorCount is incremented by one for each error encountered. ErrorCount int // tokPos is the start position of most recently scanned token; set by // Scan. The Filename field is always left untouched by the Scanner. If // an error is reported (via Error) and Position is invalid, the scanner is // not inside a token. tokPos token.Pos } // New creates and initializes a new instance of Scanner using src as // its source content. func New(src []byte) *Scanner { // even though we accept a src, we read from a io.Reader compatible type // (*bytes.Buffer). So in the future we might easily change it to streaming // read. b := bytes.NewBuffer(src) s := &Scanner{ buf: b, src: src, } // srcPosition always starts with 1 s.srcPos.Line = 1 return s } // next reads the next rune from the bufferred reader. Returns the rune(0) if // an error occurs (or io.EOF is returned). func (s *Scanner) next() rune { ch, size, err := s.buf.ReadRune() if err != nil { // advance for error reporting s.srcPos.Column++ s.srcPos.Offset += size s.lastCharLen = size return eof } if ch == utf8.RuneError && size == 1 { s.srcPos.Column++ s.srcPos.Offset += size s.lastCharLen = size s.err("illegal UTF-8 encoding") return ch } // remember last position s.prevPos = s.srcPos s.srcPos.Column++ s.lastCharLen = size s.srcPos.Offset += size if ch == '\n' { s.srcPos.Line++ s.lastLineLen = s.srcPos.Column s.srcPos.Column = 0 } // debug // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) return ch } // unread unreads the previous read Rune and updates the source position func (s *Scanner) unread() { if err := s.buf.UnreadRune(); err != nil { panic(err) // this is user fault, we should catch it } s.srcPos = s.prevPos // put back last position } // peek returns the next rune without advancing the reader. func (s *Scanner) peek() rune { peek, _, err := s.buf.ReadRune() if err != nil { return eof } s.buf.UnreadRune() return peek } // Scan scans the next token and returns the token. 
func (s *Scanner) Scan() token.Token { ch := s.next() // skip white space for isWhitespace(ch) { ch = s.next() } var tok token.Type // token text markings s.tokStart = s.srcPos.Offset - s.lastCharLen // token position, initial next() is moving the offset by one(size of rune // actually), though we are interested with the starting point s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen if s.srcPos.Column > 0 { // common case: last character was not a '\n' s.tokPos.Line = s.srcPos.Line s.tokPos.Column = s.srcPos.Column } else { // last character was a '\n' // (we cannot be at the beginning of the source // since we have called next() at least once) s.tokPos.Line = s.srcPos.Line - 1 s.tokPos.Column = s.lastLineLen } switch { case isLetter(ch): lit := s.scanIdentifier() if lit == "true" || lit == "false" { tok = token.BOOL } else if lit == "null" { tok = token.NULL } else { s.err("illegal char") } case isDecimal(ch): tok = s.scanNumber(ch) default: switch ch { case eof: tok = token.EOF case '"': tok = token.STRING s.scanString() case '.': tok = token.PERIOD ch = s.peek() if isDecimal(ch) { tok = token.FLOAT ch = s.scanMantissa(ch) ch = s.scanExponent(ch) } case '[': tok = token.LBRACK case ']': tok = token.RBRACK case '{': tok = token.LBRACE case '}': tok = token.RBRACE case ',': tok = token.COMMA case ':': tok = token.COLON case '-': if isDecimal(s.peek()) { ch := s.next() tok = s.scanNumber(ch) } else { s.err("illegal char") } default: s.err("illegal char: " + string(ch)) } } // finish token ending s.tokEnd = s.srcPos.Offset // create token literal var tokenText string if s.tokStart >= 0 { tokenText = string(s.src[s.tokStart:s.tokEnd]) } s.tokStart = s.tokEnd // ensure idempotency of tokenText() call return token.Token{ Type: tok, Pos: s.tokPos, Text: tokenText, } } // scanNumber scans a HCL number definition starting with the given rune func (s *Scanner) scanNumber(ch rune) token.Type { zero := ch == '0' pos := s.srcPos s.scanMantissa(ch) ch = s.next() // seek forward if ch == 'e' || ch == 'E' { ch = s.scanExponent(ch) return token.FLOAT } if ch == '.' { ch = s.scanFraction(ch) if ch == 'e' || ch == 'E' { ch = s.next() ch = s.scanExponent(ch) } return token.FLOAT } if ch != eof { s.unread() } // If we have a larger number and this is zero, error if zero && pos != s.srcPos { s.err("numbers cannot start with 0") } return token.NUMBER } // scanMantissa scans the mantissa beginning from the rune. It returns the next // non decimal rune. It's used to determine wheter it's a fraction or exponent. func (s *Scanner) scanMantissa(ch rune) rune { scanned := false for isDecimal(ch) { ch = s.next() scanned = true } if scanned && ch != eof { s.unread() } return ch } // scanFraction scans the fraction after the '.' rune func (s *Scanner) scanFraction(ch rune) rune { if ch == '.' { ch = s.peek() // we peek just to see if we can move forward ch = s.scanMantissa(ch) } return ch } // scanExponent scans the remaining parts of an exponent after the 'e' or 'E' // rune. 
func (s *Scanner) scanExponent(ch rune) rune { if ch == 'e' || ch == 'E' { ch = s.next() if ch == '-' || ch == '+' { ch = s.next() } ch = s.scanMantissa(ch) } return ch } // scanString scans a quoted string func (s *Scanner) scanString() { braces := 0 for { // '"' opening already consumed // read character after quote ch := s.next() if ch == '\n' || ch < 0 || ch == eof { s.err("literal not terminated") return } if ch == '"' { break } // If we're going into a ${} then we can ignore quotes for awhile if braces == 0 && ch == '$' && s.peek() == '{' { braces++ s.next() } else if braces > 0 && ch == '{' { braces++ } if braces > 0 && ch == '}' { braces-- } if ch == '\\' { s.scanEscape() } } return } // scanEscape scans an escape sequence func (s *Scanner) scanEscape() rune { // http://en.cppreference.com/w/cpp/language/escape ch := s.next() // read character after '/' switch ch { case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': // nothing to do case '0', '1', '2', '3', '4', '5', '6', '7': // octal notation ch = s.scanDigits(ch, 8, 3) case 'x': // hexademical notation ch = s.scanDigits(s.next(), 16, 2) case 'u': // universal character name ch = s.scanDigits(s.next(), 16, 4) case 'U': // universal character name ch = s.scanDigits(s.next(), 16, 8) default: s.err("illegal char escape") } return ch } // scanDigits scans a rune with the given base for n times. For example an // octal notation \184 would yield in scanDigits(ch, 8, 3) func (s *Scanner) scanDigits(ch rune, base, n int) rune { for n > 0 && digitVal(ch) < base { ch = s.next() n-- } if n > 0 { s.err("illegal char escape") } // we scanned all digits, put the last non digit char back s.unread() return ch } // scanIdentifier scans an identifier and returns the literal string func (s *Scanner) scanIdentifier() string { offs := s.srcPos.Offset - s.lastCharLen ch := s.next() for isLetter(ch) || isDigit(ch) || ch == '-' { ch = s.next() } if ch != eof { s.unread() // we got identifier, put back latest char } return string(s.src[offs:s.srcPos.Offset]) } // recentPosition returns the position of the character immediately after the // character or token returned by the last call to Scan. func (s *Scanner) recentPosition() (pos token.Pos) { pos.Offset = s.srcPos.Offset - s.lastCharLen switch { case s.srcPos.Column > 0: // common case: last character was not a '\n' pos.Line = s.srcPos.Line pos.Column = s.srcPos.Column case s.lastLineLen > 0: // last character was a '\n' // (we cannot be at the beginning of the source // since we have called next() at least once) pos.Line = s.srcPos.Line - 1 pos.Column = s.lastLineLen default: // at the beginning of the source pos.Line = 1 pos.Column = 1 } return } // err prints the error of any scanning to s.Error function. 
// If the function is not defined, by default it prints them to os.Stderr
func (s *Scanner) err(msg string) {
	s.ErrorCount++
	pos := s.recentPosition()

	if s.Error != nil {
		s.Error(pos, msg)
		return
	}

	fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
}

// isLetter returns true if the given rune is a letter
func isLetter(ch rune) bool {
	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
}

// isDigit returns true if the given rune is a decimal digit
func isDigit(ch rune) bool {
	return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
}

// isDecimal returns true if the given rune is a decimal digit
func isDecimal(ch rune) bool {
	return '0' <= ch && ch <= '9'
}

// isHexadecimal returns true if the given rune is a hexadecimal digit
func isHexadecimal(ch rune) bool {
	return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
}

// isWhitespace returns true if the rune is a space, tab, newline or carriage return
func isWhitespace(ch rune) bool {
	return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
}

// digitVal returns the integer value of a given octal, decimal or hexadecimal rune
func digitVal(ch rune) int {
	switch {
	case '0' <= ch && ch <= '9':
		return int(ch - '0')
	case 'a' <= ch && ch <= 'f':
		return int(ch - 'a' + 10)
	case 'A' <= ch && ch <= 'F':
		return int(ch - 'A' + 10)
	}
	return 16 // larger than any legal digit val
}
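The JSON scanner mirrors the HCL one but with JSON's stricter alphabet (colons, no comments, no heredocs). A short sketch of tokenizing a document:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/json/scanner"
	"github.com/hashicorp/hcl/json/token"
)

func main() {
	s := scanner.New([]byte(`{"enabled": true}`))
	for {
		tok := s.Scan()
		if tok.Type == token.EOF {
			break
		}
		// Emits LBRACE, STRING "enabled", COLON, BOOL true, RBRACE in turn.
		fmt.Println(tok.Type, tok.Text)
	}
}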
8,734
0
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/json
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/json/token/token.go
package token import ( "fmt" "strconv" hcltoken "github.com/hashicorp/hcl/hcl/token" ) // Token defines a single HCL token which can be obtained via the Scanner type Token struct { Type Type Pos Pos Text string } // Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) type Type int const ( // Special tokens ILLEGAL Type = iota EOF identifier_beg literal_beg NUMBER // 12345 FLOAT // 123.45 BOOL // true,false STRING // "abc" NULL // null literal_end identifier_end operator_beg LBRACK // [ LBRACE // { COMMA // , PERIOD // . COLON // : RBRACK // ] RBRACE // } operator_end ) var tokens = [...]string{ ILLEGAL: "ILLEGAL", EOF: "EOF", NUMBER: "NUMBER", FLOAT: "FLOAT", BOOL: "BOOL", STRING: "STRING", NULL: "NULL", LBRACK: "LBRACK", LBRACE: "LBRACE", COMMA: "COMMA", PERIOD: "PERIOD", COLON: "COLON", RBRACK: "RBRACK", RBRACE: "RBRACE", } // String returns the string corresponding to the token tok. func (t Type) String() string { s := "" if 0 <= t && t < Type(len(tokens)) { s = tokens[t] } if s == "" { s = "token(" + strconv.Itoa(int(t)) + ")" } return s } // IsIdentifier returns true for tokens corresponding to identifiers and basic // type literals; it returns false otherwise. func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end } // IsLiteral returns true for tokens corresponding to basic type literals; it // returns false otherwise. func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end } // IsOperator returns true for tokens corresponding to operators and // delimiters; it returns false otherwise. func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end } // String returns the token's literal text. Note that this is only // applicable for certain token types, such as token.IDENT, // token.STRING, etc.. func (t Token) String() string { return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text) } // HCLToken converts this token to an HCL token. // // The token type must be a literal type or this will panic. func (t Token) HCLToken() hcltoken.Token { switch t.Type { case BOOL: return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text} case FLOAT: return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text} case NULL: return hcltoken.Token{Type: hcltoken.STRING, Text: ""} case NUMBER: return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text} case STRING: return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true} default: panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type)) } }
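HCLToken is the bridge between the two token packages; note the two special cases (null becomes an empty HCL string, and strings carry the JSON flag so unquoting later uses the right rules). A sketch:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/json/token"
)

func main() {
	s := token.Token{Type: token.STRING, Text: `"hi"`}
	h := s.HCLToken()
	fmt.Println(h.Type, h.JSON) // STRING true

	n := token.Token{Type: token.NULL, Text: "null"}
	fmt.Printf("%q\n", n.HCLToken().Text) // "" - json null maps to an empty string
}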
8,735
0
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/json
kubeflow_public_repos/fate-operator/vendor/github.com/hashicorp/hcl/json/token/position.go
package token import "fmt" // Pos describes an arbitrary source position // including the file, line, and column location. // A Position is valid if the line number is > 0. type Pos struct { Filename string // filename, if any Offset int // offset, starting at 0 Line int // line number, starting at 1 Column int // column number, starting at 1 (character count) } // IsValid returns true if the position is valid. func (p *Pos) IsValid() bool { return p.Line > 0 } // String returns a string in one of several forms: // // file:line:column valid position with file name // line:column valid position without file name // file invalid position with file name // - invalid position without file name func (p Pos) String() string { s := p.Filename if p.IsValid() { if s != "" { s += ":" } s += fmt.Sprintf("%d:%d", p.Line, p.Column) } if s == "" { s = "-" } return s } // Before reports whether the position p is before u. func (p Pos) Before(u Pos) bool { return u.Offset > p.Offset || u.Line > p.Line } // After reports whether the position p is after u. func (p Pos) After(u Pos) bool { return u.Offset < p.Offset || u.Line < p.Line }
8,736
0
kubeflow_public_repos/fate-operator/vendor/github.com/satori
kubeflow_public_repos/fate-operator/vendor/github.com/satori/go.uuid/uuid.go
// Copyright (C) 2013-2018 by Maxim Bublis <[email protected]> // // Permission is hereby granted, free of charge, to any person obtaining // a copy of this software and associated documentation files (the // "Software"), to deal in the Software without restriction, including // without limitation the rights to use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so, subject to // the following conditions: // // The above copyright notice and this permission notice shall be // included in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE // LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION // OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION // WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. // Package uuid provides implementation of Universally Unique Identifier (UUID). // Supported versions are 1, 3, 4 and 5 (as specified in RFC 4122) and // version 2 (as specified in DCE 1.1). package uuid import ( "bytes" "encoding/hex" ) // Size of a UUID in bytes. const Size = 16 // UUID representation compliant with specification // described in RFC 4122. type UUID [Size]byte // UUID versions const ( _ byte = iota V1 V2 V3 V4 V5 ) // UUID layout variants. const ( VariantNCS byte = iota VariantRFC4122 VariantMicrosoft VariantFuture ) // UUID DCE domains. const ( DomainPerson = iota DomainGroup DomainOrg ) // String parse helpers. var ( urnPrefix = []byte("urn:uuid:") byteGroups = []int{8, 4, 4, 4, 12} ) // Nil is special form of UUID that is specified to have all // 128 bits set to zero. var Nil = UUID{} // Predefined namespace UUIDs. var ( NamespaceDNS = Must(FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) NamespaceURL = Must(FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) NamespaceOID = Must(FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) NamespaceX500 = Must(FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) ) // Equal returns true if u1 and u2 equals, otherwise returns false. func Equal(u1 UUID, u2 UUID) bool { return bytes.Equal(u1[:], u2[:]) } // Version returns algorithm version used to generate UUID. func (u UUID) Version() byte { return u[6] >> 4 } // Variant returns UUID layout variant. func (u UUID) Variant() byte { switch { case (u[8] >> 7) == 0x00: return VariantNCS case (u[8] >> 6) == 0x02: return VariantRFC4122 case (u[8] >> 5) == 0x06: return VariantMicrosoft case (u[8] >> 5) == 0x07: fallthrough default: return VariantFuture } } // Bytes returns bytes slice representation of UUID. func (u UUID) Bytes() []byte { return u[:] } // Returns canonical string representation of UUID: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. func (u UUID) String() string { buf := make([]byte, 36) hex.Encode(buf[0:8], u[0:4]) buf[8] = '-' hex.Encode(buf[9:13], u[4:6]) buf[13] = '-' hex.Encode(buf[14:18], u[6:8]) buf[18] = '-' hex.Encode(buf[19:23], u[8:10]) buf[23] = '-' hex.Encode(buf[24:], u[10:]) return string(buf) } // SetVersion sets version bits. func (u *UUID) SetVersion(v byte) { u[6] = (u[6] & 0x0f) | (v << 4) } // SetVariant sets variant bits. 
func (u *UUID) SetVariant(v byte) { switch v { case VariantNCS: u[8] = (u[8]&(0xff>>1) | (0x00 << 7)) case VariantRFC4122: u[8] = (u[8]&(0xff>>2) | (0x02 << 6)) case VariantMicrosoft: u[8] = (u[8]&(0xff>>3) | (0x06 << 5)) case VariantFuture: fallthrough default: u[8] = (u[8]&(0xff>>3) | (0x07 << 5)) } } // Must is a helper that wraps a call to a function returning (UUID, error) // and panics if the error is non-nil. It is intended for use in variable // initializations such as // var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000")) func Must(u UUID, err error) UUID { if err != nil { panic(err) } return u }
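An illustrative sketch (not part of the vendored source) of the accessors defined in this file, using only identifiers declared above; the import alias follows the package name `uuid`:

```go
package main

import (
	"fmt"

	uuid "github.com/satori/go.uuid"
)

func main() {
	u := uuid.NamespaceDNS // predefined version-1, RFC 4122-variant UUID

	fmt.Println(u.Version() == uuid.V1)             // true: version lives in the high nibble of byte 6
	fmt.Println(u.Variant() == uuid.VariantRFC4122) // true: variant lives in the top bits of byte 8
	fmt.Println(u)                                  // 6ba7b810-9dad-11d1-80b4-00c04fd430c8
	fmt.Println(uuid.Equal(uuid.Nil, uuid.UUID{}))  // true: Nil is the all-zero UUID
}
```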
8,737
0
kubeflow_public_repos/fate-operator/vendor/github.com/satori
kubeflow_public_repos/fate-operator/vendor/github.com/satori/go.uuid/README.md
# UUID package for Go language

[![Build Status](https://travis-ci.org/satori/go.uuid.png?branch=master)](https://travis-ci.org/satori/go.uuid)
[![Coverage Status](https://coveralls.io/repos/github/satori/go.uuid/badge.svg?branch=master)](https://coveralls.io/github/satori/go.uuid)
[![GoDoc](http://godoc.org/github.com/satori/go.uuid?status.png)](http://godoc.org/github.com/satori/go.uuid)

This package provides a pure Go implementation of Universally Unique Identifiers (UUID). Both creation and parsing of UUIDs are supported, with 100% test coverage and benchmarks out of the box.

Supported versions:
* Version 1, based on timestamp and MAC address (RFC 4122)
* Version 2, based on timestamp, MAC address and POSIX UID/GID (DCE 1.1)
* Version 3, based on MD5 hashing (RFC 4122)
* Version 4, based on random numbers (RFC 4122)
* Version 5, based on SHA-1 hashing (RFC 4122)

## Installation

Use the `go` command:

	$ go get github.com/satori/go.uuid

## Requirements

The UUID package requires Go >= 1.2.

## Example

```go
package main

import (
	"fmt"
	"github.com/satori/go.uuid"
)

func main() {
	// Creating UUID Version 4
	u1 := uuid.NewV4()
	fmt.Printf("UUIDv4: %s\n", u1)

	// Parsing UUID from string input
	u2, err := uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
	if err != nil {
		fmt.Printf("Something went wrong: %s", err)
		return
	}
	fmt.Printf("Successfully parsed: %s", u2)
}
```

## Documentation

[Documentation](http://godoc.org/github.com/satori/go.uuid) is hosted at the GoDoc project.

## Links

* [RFC 4122](http://tools.ietf.org/html/rfc4122)
* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01)

## Copyright

Copyright (C) 2013-2018 by Maxim Bublis <[email protected]>.

UUID package released under MIT License. See [LICENSE](https://github.com/satori/go.uuid/blob/master/LICENSE) for details.
8,738
0
kubeflow_public_repos/fate-operator/vendor/github.com/satori
kubeflow_public_repos/fate-operator/vendor/github.com/satori/go.uuid/sql.go
// Copyright (C) 2013-2018 by Maxim Bublis <[email protected]> // // Permission is hereby granted, free of charge, to any person obtaining // a copy of this software and associated documentation files (the // "Software"), to deal in the Software without restriction, including // without limitation the rights to use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so, subject to // the following conditions: // // The above copyright notice and this permission notice shall be // included in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE // LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION // OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION // WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. package uuid import ( "database/sql/driver" "fmt" ) // Value implements the driver.Valuer interface. func (u UUID) Value() (driver.Value, error) { return u.String(), nil } // Scan implements the sql.Scanner interface. // A 16-byte slice is handled by UnmarshalBinary, while // a longer byte slice or a string is handled by UnmarshalText. func (u *UUID) Scan(src interface{}) error { switch src := src.(type) { case []byte: if len(src) == Size { return u.UnmarshalBinary(src) } return u.UnmarshalText(src) case string: return u.UnmarshalText([]byte(src)) } return fmt.Errorf("uuid: cannot convert %T to UUID", src) } // NullUUID can be used with the standard sql package to represent a // UUID value that can be NULL in the database type NullUUID struct { UUID UUID Valid bool } // Value implements the driver.Valuer interface. func (u NullUUID) Value() (driver.Value, error) { if !u.Valid { return nil, nil } // Delegate to UUID Value function return u.UUID.Value() } // Scan implements the sql.Scanner interface. func (u *NullUUID) Scan(src interface{}) error { if src == nil { u.UUID, u.Valid = Nil, false return nil } // Delegate to UUID Scan function u.Valid = true return u.UUID.Scan(src) }
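A hedged sketch (added for illustration, not part of the vendored file) of the `sql.Scanner`/`driver.Valuer` plumbing above, exercised directly rather than through a live database:

```go
package main

import (
	"fmt"

	uuid "github.com/satori/go.uuid"
)

func main() {
	// Scan accepts a string, a 16-byte binary slice, or text bytes.
	var u uuid.UUID
	if err := u.Scan("6ba7b810-9dad-11d1-80b4-00c04fd430c8"); err != nil {
		panic(err)
	}
	fmt.Println(u)

	// NullUUID distinguishes a database NULL from the zero-valued UUID.
	var nu uuid.NullUUID
	_ = nu.Scan(nil)
	fmt.Println(nu.Valid, nu.UUID == uuid.Nil) // false true

	// Value stores the canonical string form in the database.
	v, _ := u.Value()
	fmt.Println(v) // 6ba7b810-9dad-11d1-80b4-00c04fd430c8
}
```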
8,739
0
kubeflow_public_repos/fate-operator/vendor/github.com/satori
kubeflow_public_repos/fate-operator/vendor/github.com/satori/go.uuid/LICENSE
Copyright (C) 2013-2018 by Maxim Bublis <[email protected]> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
8,740
0
kubeflow_public_repos/fate-operator/vendor/github.com/satori
kubeflow_public_repos/fate-operator/vendor/github.com/satori/go.uuid/generator.go
// Copyright (C) 2013-2018 by Maxim Bublis <[email protected]> // // Permission is hereby granted, free of charge, to any person obtaining // a copy of this software and associated documentation files (the // "Software"), to deal in the Software without restriction, including // without limitation the rights to use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so, subject to // the following conditions: // // The above copyright notice and this permission notice shall be // included in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE // LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION // OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION // WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. package uuid import ( "crypto/md5" "crypto/rand" "crypto/sha1" "encoding/binary" "hash" "net" "os" "sync" "time" ) // Difference in 100-nanosecond intervals between // UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970). const epochStart = 122192928000000000 var ( global = newDefaultGenerator() epochFunc = unixTimeFunc posixUID = uint32(os.Getuid()) posixGID = uint32(os.Getgid()) ) // NewV1 returns UUID based on current timestamp and MAC address. func NewV1() UUID { return global.NewV1() } // NewV2 returns DCE Security UUID based on POSIX UID/GID. func NewV2(domain byte) UUID { return global.NewV2(domain) } // NewV3 returns UUID based on MD5 hash of namespace UUID and name. func NewV3(ns UUID, name string) UUID { return global.NewV3(ns, name) } // NewV4 returns random generated UUID. func NewV4() UUID { return global.NewV4() } // NewV5 returns UUID based on SHA-1 hash of namespace UUID and name. func NewV5(ns UUID, name string) UUID { return global.NewV5(ns, name) } // Generator provides interface for generating UUIDs. type Generator interface { NewV1() UUID NewV2(domain byte) UUID NewV3(ns UUID, name string) UUID NewV4() UUID NewV5(ns UUID, name string) UUID } // Default generator implementation. type generator struct { storageOnce sync.Once storageMutex sync.Mutex lastTime uint64 clockSequence uint16 hardwareAddr [6]byte } func newDefaultGenerator() Generator { return &generator{} } // NewV1 returns UUID based on current timestamp and MAC address. func (g *generator) NewV1() UUID { u := UUID{} timeNow, clockSeq, hardwareAddr := g.getStorage() binary.BigEndian.PutUint32(u[0:], uint32(timeNow)) binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) binary.BigEndian.PutUint16(u[8:], clockSeq) copy(u[10:], hardwareAddr) u.SetVersion(V1) u.SetVariant(VariantRFC4122) return u } // NewV2 returns DCE Security UUID based on POSIX UID/GID. 
func (g *generator) NewV2(domain byte) UUID { u := UUID{} timeNow, clockSeq, hardwareAddr := g.getStorage() switch domain { case DomainPerson: binary.BigEndian.PutUint32(u[0:], posixUID) case DomainGroup: binary.BigEndian.PutUint32(u[0:], posixGID) } binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) binary.BigEndian.PutUint16(u[8:], clockSeq) u[9] = domain copy(u[10:], hardwareAddr) u.SetVersion(V2) u.SetVariant(VariantRFC4122) return u } // NewV3 returns UUID based on MD5 hash of namespace UUID and name. func (g *generator) NewV3(ns UUID, name string) UUID { u := newFromHash(md5.New(), ns, name) u.SetVersion(V3) u.SetVariant(VariantRFC4122) return u } // NewV4 returns random generated UUID. func (g *generator) NewV4() UUID { u := UUID{} g.safeRandom(u[:]) u.SetVersion(V4) u.SetVariant(VariantRFC4122) return u } // NewV5 returns UUID based on SHA-1 hash of namespace UUID and name. func (g *generator) NewV5(ns UUID, name string) UUID { u := newFromHash(sha1.New(), ns, name) u.SetVersion(V5) u.SetVariant(VariantRFC4122) return u } func (g *generator) initStorage() { g.initClockSequence() g.initHardwareAddr() } func (g *generator) initClockSequence() { buf := make([]byte, 2) g.safeRandom(buf) g.clockSequence = binary.BigEndian.Uint16(buf) } func (g *generator) initHardwareAddr() { interfaces, err := net.Interfaces() if err == nil { for _, iface := range interfaces { if len(iface.HardwareAddr) >= 6 { copy(g.hardwareAddr[:], iface.HardwareAddr) return } } } // Initialize hardwareAddr randomly in case // of real network interfaces absence g.safeRandom(g.hardwareAddr[:]) // Set multicast bit as recommended in RFC 4122 g.hardwareAddr[0] |= 0x01 } func (g *generator) safeRandom(dest []byte) { if _, err := rand.Read(dest); err != nil { panic(err) } } // Returns UUID v1/v2 storage state. // Returns epoch timestamp, clock sequence, and hardware address. func (g *generator) getStorage() (uint64, uint16, []byte) { g.storageOnce.Do(g.initStorage) g.storageMutex.Lock() defer g.storageMutex.Unlock() timeNow := epochFunc() // Clock changed backwards since last UUID generation. // Should increase clock sequence. if timeNow <= g.lastTime { g.clockSequence++ } g.lastTime = timeNow return timeNow, g.clockSequence, g.hardwareAddr[:] } // Returns difference in 100-nanosecond intervals between // UUID epoch (October 15, 1582) and current time. // This is default epoch calculation function. func unixTimeFunc() uint64 { return epochStart + uint64(time.Now().UnixNano()/100) } // Returns UUID based on hashing of namespace UUID and name. func newFromHash(h hash.Hash, ns UUID, name string) UUID { u := UUID{} h.Write(ns[:]) h.Write([]byte(name)) copy(u[:], h.Sum(nil)) return u }
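An illustrative sketch (not part of the vendored file) of the package-level constructors defined above. Note that in this vendored revision the constructors return a `UUID` directly and panic only if `crypto/rand` fails (see `safeRandom`); later forks of this library changed the signatures to return an error as well:

```go
package main

import (
	"fmt"

	uuid "github.com/satori/go.uuid"
)

func main() {
	// Time- and MAC-based.
	fmt.Println(uuid.NewV1())

	// Name-based versions are deterministic for a given namespace and name.
	u3 := uuid.NewV3(uuid.NamespaceDNS, "example.org")
	u5 := uuid.NewV5(uuid.NamespaceDNS, "example.org")
	fmt.Println(u3.Version() == uuid.V3, u5.Version() == uuid.V5) // true true

	// Purely random.
	fmt.Println(uuid.NewV4())
}
```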
8,741
0
kubeflow_public_repos/fate-operator/vendor/github.com/satori
kubeflow_public_repos/fate-operator/vendor/github.com/satori/go.uuid/codec.go
// Copyright (C) 2013-2018 by Maxim Bublis <[email protected]> // // Permission is hereby granted, free of charge, to any person obtaining // a copy of this software and associated documentation files (the // "Software"), to deal in the Software without restriction, including // without limitation the rights to use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so, subject to // the following conditions: // // The above copyright notice and this permission notice shall be // included in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE // LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION // OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION // WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. package uuid import ( "bytes" "encoding/hex" "fmt" ) // FromBytes returns UUID converted from raw byte slice input. // It will return error if the slice isn't 16 bytes long. func FromBytes(input []byte) (u UUID, err error) { err = u.UnmarshalBinary(input) return } // FromBytesOrNil returns UUID converted from raw byte slice input. // Same behavior as FromBytes, but returns a Nil UUID on error. func FromBytesOrNil(input []byte) UUID { uuid, err := FromBytes(input) if err != nil { return Nil } return uuid } // FromString returns UUID parsed from string input. // Input is expected in a form accepted by UnmarshalText. func FromString(input string) (u UUID, err error) { err = u.UnmarshalText([]byte(input)) return } // FromStringOrNil returns UUID parsed from string input. // Same behavior as FromString, but returns a Nil UUID on error. func FromStringOrNil(input string) UUID { uuid, err := FromString(input) if err != nil { return Nil } return uuid } // MarshalText implements the encoding.TextMarshaler interface. // The encoding is the same as returned by String. func (u UUID) MarshalText() (text []byte, err error) { text = []byte(u.String()) return } // UnmarshalText implements the encoding.TextUnmarshaler interface. 
// Following formats are supported: // "6ba7b810-9dad-11d1-80b4-00c04fd430c8", // "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", // "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" // "6ba7b8109dad11d180b400c04fd430c8" // ABNF for supported UUID text representation follows: // uuid := canonical | hashlike | braced | urn // plain := canonical | hashlike // canonical := 4hexoct '-' 2hexoct '-' 2hexoct '-' 6hexoct // hashlike := 12hexoct // braced := '{' plain '}' // urn := URN ':' UUID-NID ':' plain // URN := 'urn' // UUID-NID := 'uuid' // 12hexoct := 6hexoct 6hexoct // 6hexoct := 4hexoct 2hexoct // 4hexoct := 2hexoct 2hexoct // 2hexoct := hexoct hexoct // hexoct := hexdig hexdig // hexdig := '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | // 'a' | 'b' | 'c' | 'd' | 'e' | 'f' | // 'A' | 'B' | 'C' | 'D' | 'E' | 'F' func (u *UUID) UnmarshalText(text []byte) (err error) { switch len(text) { case 32: return u.decodeHashLike(text) case 36: return u.decodeCanonical(text) case 38: return u.decodeBraced(text) case 41: fallthrough case 45: return u.decodeURN(text) default: return fmt.Errorf("uuid: incorrect UUID length: %s", text) } } // decodeCanonical decodes UUID string in format // "6ba7b810-9dad-11d1-80b4-00c04fd430c8". func (u *UUID) decodeCanonical(t []byte) (err error) { if t[8] != '-' || t[13] != '-' || t[18] != '-' || t[23] != '-' { return fmt.Errorf("uuid: incorrect UUID format %s", t) } src := t[:] dst := u[:] for i, byteGroup := range byteGroups { if i > 0 { src = src[1:] // skip dash } _, err = hex.Decode(dst[:byteGroup/2], src[:byteGroup]) if err != nil { return } src = src[byteGroup:] dst = dst[byteGroup/2:] } return } // decodeHashLike decodes UUID string in format // "6ba7b8109dad11d180b400c04fd430c8". func (u *UUID) decodeHashLike(t []byte) (err error) { src := t[:] dst := u[:] if _, err = hex.Decode(dst, src); err != nil { return err } return } // decodeBraced decodes UUID string in format // "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" or in format // "{6ba7b8109dad11d180b400c04fd430c8}". func (u *UUID) decodeBraced(t []byte) (err error) { l := len(t) if t[0] != '{' || t[l-1] != '}' { return fmt.Errorf("uuid: incorrect UUID format %s", t) } return u.decodePlain(t[1 : l-1]) } // decodeURN decodes UUID string in format // "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in format // "urn:uuid:6ba7b8109dad11d180b400c04fd430c8". func (u *UUID) decodeURN(t []byte) (err error) { total := len(t) urn_uuid_prefix := t[:9] if !bytes.Equal(urn_uuid_prefix, urnPrefix) { return fmt.Errorf("uuid: incorrect UUID format: %s", t) } return u.decodePlain(t[9:total]) } // decodePlain decodes UUID string in canonical format // "6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in hash-like format // "6ba7b8109dad11d180b400c04fd430c8". func (u *UUID) decodePlain(t []byte) (err error) { switch len(t) { case 32: return u.decodeHashLike(t) case 36: return u.decodeCanonical(t) default: return fmt.Errorf("uuid: incorrrect UUID length: %s", t) } } // MarshalBinary implements the encoding.BinaryMarshaler interface. func (u UUID) MarshalBinary() (data []byte, err error) { data = u.Bytes() return } // UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. // It will return error if the slice isn't 16 bytes long. func (u *UUID) UnmarshalBinary(data []byte) (err error) { if len(data) != Size { err = fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data)) return } copy(u[:], data) return }
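A short sketch (added for illustration, not part of the vendored file) of the four textual forms that `UnmarshalText` dispatches on by length, plus the error-swallowing `*OrNil` variant:

```go
package main

import (
	"fmt"

	uuid "github.com/satori/go.uuid"
)

func main() {
	forms := []string{
		"6ba7b810-9dad-11d1-80b4-00c04fd430c8",          // canonical, 36 bytes
		"6ba7b8109dad11d180b400c04fd430c8",              // hash-like, 32 bytes
		"{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",        // braced, 38 bytes
		"urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8", // URN, 45 bytes
	}
	for _, s := range forms {
		u, err := uuid.FromString(s)
		fmt.Println(u, err) // all parse to the same UUID, err == nil
	}

	// The *OrNil variants swallow errors and return the Nil UUID instead.
	fmt.Println(uuid.FromBytesOrNil([]byte("too short")))
}
```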
8,742
0
kubeflow_public_repos/fate-operator/vendor/github.com/satori
kubeflow_public_repos/fate-operator/vendor/github.com/satori/go.uuid/.travis.yml
language: go sudo: false go: - 1.2 - 1.3 - 1.4 - 1.5 - 1.6 - 1.7 - 1.8 - 1.9 - tip matrix: allow_failures: - go: tip fast_finish: true before_install: - go get github.com/mattn/goveralls - go get golang.org/x/tools/cmd/cover script: - $HOME/gopath/bin/goveralls -service=travis-ci notifications: email: false
8,743
0
kubeflow_public_repos/fate-operator/vendor/github.com/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/pkg/errors/go113.go
// +build go1.13 package errors import ( stderrors "errors" ) // Is reports whether any error in err's chain matches target. // // The chain consists of err itself followed by the sequence of errors obtained by // repeatedly calling Unwrap. // // An error is considered to match a target if it is equal to that target or if // it implements a method Is(error) bool such that Is(target) returns true. func Is(err, target error) bool { return stderrors.Is(err, target) } // As finds the first error in err's chain that matches target, and if so, sets // target to that error value and returns true. // // The chain consists of err itself followed by the sequence of errors obtained by // repeatedly calling Unwrap. // // An error matches target if the error's concrete value is assignable to the value // pointed to by target, or if the error has a method As(interface{}) bool such that // As(target) returns true. In the latter case, the As method is responsible for // setting target. // // As will panic if target is not a non-nil pointer to either a type that implements // error, or to any interface type. As returns false if err is nil. func As(err error, target interface{}) bool { return stderrors.As(err, target) } // Unwrap returns the result of calling the Unwrap method on err, if err's // type contains an Unwrap method returning error. // Otherwise, Unwrap returns nil. func Unwrap(err error) error { return stderrors.Unwrap(err) }
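Because `Is`, `As`, and `Unwrap` simply delegate to the standard library, they interoperate with errors produced by this package's `Wrap` (whose `withStack`/`withMessage` types implement `Unwrap`). A minimal sketch, not part of the vendored file; the file path is illustrative:

```go
package main

import (
	"fmt"
	"os"

	"github.com/pkg/errors"
)

func main() {
	_, err := os.Open("/definitely/not/there")
	wrapped := errors.Wrap(err, "open failed")

	// Is walks the chain through withMessage and withStack via Unwrap.
	fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true

	// As finds the first *os.PathError in the chain.
	var pathErr *os.PathError
	if errors.As(wrapped, &pathErr) {
		fmt.Println(pathErr.Path) // /definitely/not/there
	}

	// Unwrap peels off exactly one layer.
	fmt.Println(errors.Unwrap(wrapped) != nil) // true
}
```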
8,744
0
kubeflow_public_repos/fate-operator/vendor/github.com/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/pkg/errors/stack.go
package errors import ( "fmt" "io" "path" "runtime" "strconv" "strings" ) // Frame represents a program counter inside a stack frame. // For historical reasons if Frame is interpreted as a uintptr // its value represents the program counter + 1. type Frame uintptr // pc returns the program counter for this frame; // multiple frames may have the same PC value. func (f Frame) pc() uintptr { return uintptr(f) - 1 } // file returns the full path to the file that contains the // function for this Frame's pc. func (f Frame) file() string { fn := runtime.FuncForPC(f.pc()) if fn == nil { return "unknown" } file, _ := fn.FileLine(f.pc()) return file } // line returns the line number of source code of the // function for this Frame's pc. func (f Frame) line() int { fn := runtime.FuncForPC(f.pc()) if fn == nil { return 0 } _, line := fn.FileLine(f.pc()) return line } // name returns the name of this function, if known. func (f Frame) name() string { fn := runtime.FuncForPC(f.pc()) if fn == nil { return "unknown" } return fn.Name() } // Format formats the frame according to the fmt.Formatter interface. // // %s source file // %d source line // %n function name // %v equivalent to %s:%d // // Format accepts flags that alter the printing of some verbs, as follows: // // %+s function name and path of source file relative to the compile time // GOPATH separated by \n\t (<funcname>\n\t<path>) // %+v equivalent to %+s:%d func (f Frame) Format(s fmt.State, verb rune) { switch verb { case 's': switch { case s.Flag('+'): io.WriteString(s, f.name()) io.WriteString(s, "\n\t") io.WriteString(s, f.file()) default: io.WriteString(s, path.Base(f.file())) } case 'd': io.WriteString(s, strconv.Itoa(f.line())) case 'n': io.WriteString(s, funcname(f.name())) case 'v': f.Format(s, 's') io.WriteString(s, ":") f.Format(s, 'd') } } // MarshalText formats a stacktrace Frame as a text string. The output is the // same as that of fmt.Sprintf("%+v", f), but without newlines or tabs. func (f Frame) MarshalText() ([]byte, error) { name := f.name() if name == "unknown" { return []byte(name), nil } return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil } // StackTrace is stack of Frames from innermost (newest) to outermost (oldest). type StackTrace []Frame // Format formats the stack of Frames according to the fmt.Formatter interface. // // %s lists source files for each Frame in the stack // %v lists the source file and line number for each Frame in the stack // // Format accepts flags that alter the printing of some verbs, as follows: // // %+v Prints filename, function, and line number for each Frame in the stack. func (st StackTrace) Format(s fmt.State, verb rune) { switch verb { case 'v': switch { case s.Flag('+'): for _, f := range st { io.WriteString(s, "\n") f.Format(s, verb) } case s.Flag('#'): fmt.Fprintf(s, "%#v", []Frame(st)) default: st.formatSlice(s, verb) } case 's': st.formatSlice(s, verb) } } // formatSlice will format this StackTrace into the given buffer as a slice of // Frame, only valid when called with '%s' or '%v'. func (st StackTrace) formatSlice(s fmt.State, verb rune) { io.WriteString(s, "[") for i, f := range st { if i > 0 { io.WriteString(s, " ") } f.Format(s, verb) } io.WriteString(s, "]") } // stack represents a stack of program counters. 
type stack []uintptr func (s *stack) Format(st fmt.State, verb rune) { switch verb { case 'v': switch { case st.Flag('+'): for _, pc := range *s { f := Frame(pc) fmt.Fprintf(st, "\n%+v", f) } } } } func (s *stack) StackTrace() StackTrace { f := make([]Frame, len(*s)) for i := 0; i < len(f); i++ { f[i] = Frame((*s)[i]) } return f } func callers() *stack { const depth = 32 var pcs [depth]uintptr n := runtime.Callers(3, pcs[:]) var st stack = pcs[0:n] return &st } // funcname removes the path prefix component of a function's name reported by func.Name(). func funcname(name string) string { i := strings.LastIndex(name, "/") name = name[i+1:] i = strings.Index(name, ".") return name[i+1:] }
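A sketch (added for illustration, not part of the vendored file) of retrieving the recorded stack through the interface the package documents but does not export, and printing each `Frame` with the `%+s:%d` verbs described above:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// stackTracer mirrors the interface documented by the package; it is not
// exported, so callers declare it locally.
type stackTracer interface {
	StackTrace() errors.StackTrace
}

func main() {
	err := errors.New("boom") // records the call stack via callers()

	if st, ok := err.(stackTracer); ok {
		for _, f := range st.StackTrace() {
			// %+s prints the function name and full file path, %d the line.
			fmt.Printf("%+s:%d\n", f, f)
		}
	}
}
```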
8,745
0
kubeflow_public_repos/fate-operator/vendor/github.com/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/pkg/errors/README.md
# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge) Package errors provides simple error handling primitives. `go get github.com/pkg/errors` The traditional error handling idiom in Go is roughly akin to ```go if err != nil { return err } ``` which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. ## Adding context to an error The errors.Wrap function returns a new error that adds context to the original error. For example ```go _, err := ioutil.ReadAll(r) if err != nil { return errors.Wrap(err, "read failed") } ``` ## Retrieving the cause of an error Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. ```go type causer interface { Cause() error } ``` `errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example: ```go switch err := errors.Cause(err).(type) { case *MyError: // handle specifically default: // unknown error } ``` [Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). ## Roadmap With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows: - 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible) - 1.0. Final release. ## Contributing Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports. Before sending a PR, please discuss your change by raising an issue. ## License BSD-2-Clause
8,746
0
kubeflow_public_repos/fate-operator/vendor/github.com/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/pkg/errors/Makefile
PKGS := github.com/pkg/errors SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS)) GO := go check: test vet gofmt misspell unconvert staticcheck ineffassign unparam test: $(GO) test $(PKGS) vet: | test $(GO) vet $(PKGS) staticcheck: $(GO) get honnef.co/go/tools/cmd/staticcheck staticcheck -checks all $(PKGS) misspell: $(GO) get github.com/client9/misspell/cmd/misspell misspell \ -locale GB \ -error \ *.md *.go unconvert: $(GO) get github.com/mdempsky/unconvert unconvert -v $(PKGS) ineffassign: $(GO) get github.com/gordonklaus/ineffassign find $(SRCDIRS) -name '*.go' | xargs ineffassign pedantic: check errcheck unparam: $(GO) get mvdan.cc/unparam unparam ./... errcheck: $(GO) get github.com/kisielk/errcheck errcheck $(PKGS) gofmt: @echo Checking code is gofmted @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)"
8,747
0
kubeflow_public_repos/fate-operator/vendor/github.com/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/pkg/errors/LICENSE
Copyright (c) 2015, Dave Cheney <[email protected]> All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
8,748
0
kubeflow_public_repos/fate-operator/vendor/github.com/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/pkg/errors/appveyor.yml
version: build-{build}.{branch} clone_folder: C:\gopath\src\github.com\pkg\errors shallow_clone: true # for startup speed environment: GOPATH: C:\gopath platform: - x64 # http://www.appveyor.com/docs/installed-software install: # some helpful output for debugging builds - go version - go env # pre-installed MinGW at C:\MinGW is 32bit only # but MSYS2 at C:\msys64 has mingw64 - set PATH=C:\msys64\mingw64\bin;%PATH% - gcc --version - g++ --version build_script: - go install -v ./... test_script: - set PATH=C:\gopath\bin;%PATH% - go test -v ./... #artifacts: # - path: '%GOPATH%\bin\*.exe' deploy: off
8,749
0
kubeflow_public_repos/fate-operator/vendor/github.com/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/pkg/errors/errors.go
// Package errors provides simple error handling primitives. // // The traditional error handling idiom in Go is roughly akin to // // if err != nil { // return err // } // // which when applied recursively up the call stack results in error reports // without context or debugging information. The errors package allows // programmers to add context to the failure path in their code in a way // that does not destroy the original value of the error. // // Adding context to an error // // The errors.Wrap function returns a new error that adds context to the // original error by recording a stack trace at the point Wrap is called, // together with the supplied message. For example // // _, err := ioutil.ReadAll(r) // if err != nil { // return errors.Wrap(err, "read failed") // } // // If additional control is required, the errors.WithStack and // errors.WithMessage functions destructure errors.Wrap into its component // operations: annotating an error with a stack trace and with a message, // respectively. // // Retrieving the cause of an error // // Using errors.Wrap constructs a stack of errors, adding context to the // preceding error. Depending on the nature of the error it may be necessary // to reverse the operation of errors.Wrap to retrieve the original error // for inspection. Any error value which implements this interface // // type causer interface { // Cause() error // } // // can be inspected by errors.Cause. errors.Cause will recursively retrieve // the topmost error that does not implement causer, which is assumed to be // the original cause. For example: // // switch err := errors.Cause(err).(type) { // case *MyError: // // handle specifically // default: // // unknown error // } // // Although the causer interface is not exported by this package, it is // considered a part of its stable public interface. // // Formatted printing of errors // // All error values returned from this package implement fmt.Formatter and can // be formatted by the fmt package. The following verbs are supported: // // %s print the error. If the error has a Cause it will be // printed recursively. // %v see %s // %+v extended format. Each Frame of the error's StackTrace will // be printed in detail. // // Retrieving the stack trace of an error or wrapper // // New, Errorf, Wrap, and Wrapf record a stack trace at the point they are // invoked. This information can be retrieved with the following interface: // // type stackTracer interface { // StackTrace() errors.StackTrace // } // // The returned errors.StackTrace type is defined as // // type StackTrace []Frame // // The Frame type represents a call site in the stack trace. Frame supports // the fmt.Formatter interface that can be used for printing information about // the stack trace of this error. For example: // // if err, ok := err.(stackTracer); ok { // for _, f := range err.StackTrace() { // fmt.Printf("%+s:%d\n", f, f) // } // } // // Although the stackTracer interface is not exported by this package, it is // considered a part of its stable public interface. // // See the documentation for Frame.Format for more details. package errors import ( "fmt" "io" ) // New returns an error with the supplied message. // New also records the stack trace at the point it was called. func New(message string) error { return &fundamental{ msg: message, stack: callers(), } } // Errorf formats according to a format specifier and returns the string // as a value that satisfies error. // Errorf also records the stack trace at the point it was called. 
func Errorf(format string, args ...interface{}) error { return &fundamental{ msg: fmt.Sprintf(format, args...), stack: callers(), } } // fundamental is an error that has a message and a stack, but no caller. type fundamental struct { msg string *stack } func (f *fundamental) Error() string { return f.msg } func (f *fundamental) Format(s fmt.State, verb rune) { switch verb { case 'v': if s.Flag('+') { io.WriteString(s, f.msg) f.stack.Format(s, verb) return } fallthrough case 's': io.WriteString(s, f.msg) case 'q': fmt.Fprintf(s, "%q", f.msg) } } // WithStack annotates err with a stack trace at the point WithStack was called. // If err is nil, WithStack returns nil. func WithStack(err error) error { if err == nil { return nil } return &withStack{ err, callers(), } } type withStack struct { error *stack } func (w *withStack) Cause() error { return w.error } // Unwrap provides compatibility for Go 1.13 error chains. func (w *withStack) Unwrap() error { return w.error } func (w *withStack) Format(s fmt.State, verb rune) { switch verb { case 'v': if s.Flag('+') { fmt.Fprintf(s, "%+v", w.Cause()) w.stack.Format(s, verb) return } fallthrough case 's': io.WriteString(s, w.Error()) case 'q': fmt.Fprintf(s, "%q", w.Error()) } } // Wrap returns an error annotating err with a stack trace // at the point Wrap is called, and the supplied message. // If err is nil, Wrap returns nil. func Wrap(err error, message string) error { if err == nil { return nil } err = &withMessage{ cause: err, msg: message, } return &withStack{ err, callers(), } } // Wrapf returns an error annotating err with a stack trace // at the point Wrapf is called, and the format specifier. // If err is nil, Wrapf returns nil. func Wrapf(err error, format string, args ...interface{}) error { if err == nil { return nil } err = &withMessage{ cause: err, msg: fmt.Sprintf(format, args...), } return &withStack{ err, callers(), } } // WithMessage annotates err with a new message. // If err is nil, WithMessage returns nil. func WithMessage(err error, message string) error { if err == nil { return nil } return &withMessage{ cause: err, msg: message, } } // WithMessagef annotates err with the format specifier. // If err is nil, WithMessagef returns nil. func WithMessagef(err error, format string, args ...interface{}) error { if err == nil { return nil } return &withMessage{ cause: err, msg: fmt.Sprintf(format, args...), } } type withMessage struct { cause error msg string } func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } func (w *withMessage) Cause() error { return w.cause } // Unwrap provides compatibility for Go 1.13 error chains. func (w *withMessage) Unwrap() error { return w.cause } func (w *withMessage) Format(s fmt.State, verb rune) { switch verb { case 'v': if s.Flag('+') { fmt.Fprintf(s, "%+v\n", w.Cause()) io.WriteString(s, w.msg) return } fallthrough case 's', 'q': io.WriteString(s, w.Error()) } } // Cause returns the underlying cause of the error, if possible. // An error value has a cause if it implements the following // interface: // // type causer interface { // Cause() error // } // // If the error does not implement Cause, the original error will // be returned. If the error is nil, nil will be returned without further // investigation. func Cause(err error) error { type causer interface { Cause() error } for err != nil { cause, ok := err.(causer) if !ok { break } err = cause.Cause() } return err }
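A minimal end-to-end sketch (not part of the vendored file) tying together `Wrap`, `Cause`, the `%+v` verb, and the nil pass-through behavior documented above:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func readConfig() error {
	return errors.New("file missing") // fundamental: message plus stack
}

func main() {
	err := errors.Wrap(readConfig(), "load config")

	fmt.Println(err)               // "load config: file missing"
	fmt.Println(errors.Cause(err)) // "file missing": walks the causer chain
	fmt.Printf("%+v\n", err)       // messages plus the recorded stack traces

	// Wrap and friends pass nil through untouched.
	fmt.Println(errors.Wrap(nil, "noop") == nil) // true
}
```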
8,750
0
kubeflow_public_repos/fate-operator/vendor/github.com/pkg
kubeflow_public_repos/fate-operator/vendor/github.com/pkg/errors/.travis.yml
language: go go_import_path: github.com/pkg/errors go: - 1.11.x - 1.12.x - 1.13.x - tip script: - make check
8,751
0
kubeflow_public_repos/fate-operator/vendor/github.com/imdario
kubeflow_public_repos/fate-operator/vendor/github.com/imdario/mergo/merge.go
// Copyright 2013 Dario Castañé. All rights reserved. // Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Based on src/pkg/reflect/deepequal.go from official // golang's stdlib. package mergo import ( "fmt" "reflect" ) func hasMergeableFields(dst reflect.Value) (exported bool) { for i, n := 0, dst.NumField(); i < n; i++ { field := dst.Type().Field(i) if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { exported = exported || hasMergeableFields(dst.Field(i)) } else if isExportedComponent(&field) { exported = exported || len(field.PkgPath) == 0 } } return } func isExportedComponent(field *reflect.StructField) bool { pkgPath := field.PkgPath if len(pkgPath) > 0 { return false } c := field.Name[0] if 'a' <= c && c <= 'z' || c == '_' { return false } return true } type Config struct { Overwrite bool AppendSlice bool TypeCheck bool Transformers Transformers overwriteWithEmptyValue bool overwriteSliceWithEmptyValue bool sliceDeepCopy bool debug bool } type Transformers interface { Transformer(reflect.Type) func(dst, src reflect.Value) error } // Traverses recursively both values, assigning src's fields values to dst. // The map argument tracks comparisons that have already been seen, which allows // short circuiting on recursive types. func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { overwrite := config.Overwrite typeCheck := config.TypeCheck overwriteWithEmptySrc := config.overwriteWithEmptyValue overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue sliceDeepCopy := config.sliceDeepCopy if !src.IsValid() { return } if dst.CanAddr() { addr := dst.UnsafeAddr() h := 17 * addr seen := visited[h] typ := dst.Type() for p := seen; p != nil; p = p.next { if p.ptr == addr && p.typ == typ { return nil } } // Remember, remember... 
visited[h] = &visit{addr, typ, seen} } if config.Transformers != nil && !isEmptyValue(dst) { if fn := config.Transformers.Transformer(dst.Type()); fn != nil { err = fn(dst, src) return } } switch dst.Kind() { case reflect.Struct: if hasMergeableFields(dst) { for i, n := 0, dst.NumField(); i < n; i++ { if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { return } } } else { if (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { dst.Set(src) } } case reflect.Map: if dst.IsNil() && !src.IsNil() { dst.Set(reflect.MakeMap(dst.Type())) } if src.Kind() != reflect.Map { if overwrite { dst.Set(src) } return } for _, key := range src.MapKeys() { srcElement := src.MapIndex(key) if !srcElement.IsValid() { continue } dstElement := dst.MapIndex(key) switch srcElement.Kind() { case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: if srcElement.IsNil() { if overwrite { dst.SetMapIndex(key, srcElement) } continue } fallthrough default: if !srcElement.CanInterface() { continue } switch reflect.TypeOf(srcElement.Interface()).Kind() { case reflect.Struct: fallthrough case reflect.Ptr: fallthrough case reflect.Map: srcMapElm := srcElement dstMapElm := dstElement if srcMapElm.CanInterface() { srcMapElm = reflect.ValueOf(srcMapElm.Interface()) if dstMapElm.IsValid() { dstMapElm = reflect.ValueOf(dstMapElm.Interface()) } } if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil { return } case reflect.Slice: srcSlice := reflect.ValueOf(srcElement.Interface()) var dstSlice reflect.Value if !dstElement.IsValid() || dstElement.IsNil() { dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len()) } else { dstSlice = reflect.ValueOf(dstElement.Interface()) } if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { if typeCheck && srcSlice.Type() != dstSlice.Type() { return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) } dstSlice = srcSlice } else if config.AppendSlice { if srcSlice.Type() != dstSlice.Type() { return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) } dstSlice = reflect.AppendSlice(dstSlice, srcSlice) } else if sliceDeepCopy { i := 0 for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ { srcElement := srcSlice.Index(i) dstElement := dstSlice.Index(i) if srcElement.CanInterface() { srcElement = reflect.ValueOf(srcElement.Interface()) } if dstElement.CanInterface() { dstElement = reflect.ValueOf(dstElement.Interface()) } if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { return } } } dst.SetMapIndex(key, dstSlice) } } if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) { continue } if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) { if dst.IsNil() { dst.Set(reflect.MakeMap(dst.Type())) } dst.SetMapIndex(key, srcElement) } } case reflect.Slice: if !dst.CanSet() { break } if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { dst.Set(src) } else if config.AppendSlice { if src.Type() != dst.Type() { return fmt.Errorf("cannot append two slice 
with different type (%s, %s)", src.Type(), dst.Type()) } dst.Set(reflect.AppendSlice(dst, src)) } else if sliceDeepCopy { for i := 0; i < src.Len() && i < dst.Len(); i++ { srcElement := src.Index(i) dstElement := dst.Index(i) if srcElement.CanInterface() { srcElement = reflect.ValueOf(srcElement.Interface()) } if dstElement.CanInterface() { dstElement = reflect.ValueOf(dstElement.Interface()) } if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { return } } } case reflect.Ptr: fallthrough case reflect.Interface: if isReflectNil(src) { if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) { dst.Set(src) } break } if src.Kind() != reflect.Interface { if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { if dst.CanSet() && (overwrite || isEmptyValue(dst)) { dst.Set(src) } } else if src.Kind() == reflect.Ptr { if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { return } } else if dst.Elem().Type() == src.Type() { if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { return } } else { return ErrDifferentArgumentsTypes } break } if dst.IsNil() || overwrite { if dst.CanSet() && (overwrite || isEmptyValue(dst)) { dst.Set(src) } break } if dst.Elem().Kind() == src.Elem().Kind() { if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { return } break } default: mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) if mustSet { if dst.CanSet() { dst.Set(src) } else { dst = src } } } return } // Merge will fill any empty for value type attributes on the dst struct using corresponding // src attributes if they themselves are not empty. dst and src must be valid same-type structs // and dst must be a pointer to struct. // It won't merge unexported (private) fields and will do recursively any exported field. func Merge(dst, src interface{}, opts ...func(*Config)) error { return merge(dst, src, opts...) } // MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by // non-empty src attribute values. // Deprecated: use Merge(…) with WithOverride func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { return merge(dst, src, append(opts, WithOverride)...) } // WithTransformers adds transformers to merge, allowing to customize the merging of some types. func WithTransformers(transformers Transformers) func(*Config) { return func(config *Config) { config.Transformers = transformers } } // WithOverride will make merge override non-empty dst attributes with non-empty src attributes values. func WithOverride(config *Config) { config.Overwrite = true } // WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values. func WithOverwriteWithEmptyValue(config *Config) { config.Overwrite = true config.overwriteWithEmptyValue = true } // WithOverrideEmptySlice will make merge override empty dst slice with empty src slice. func WithOverrideEmptySlice(config *Config) { config.overwriteSliceWithEmptyValue = true } // WithAppendSlice will make merge append slices instead of overwriting it. func WithAppendSlice(config *Config) { config.AppendSlice = true } // WithTypeCheck will make merge check types while overwriting it (must be used with WithOverride). func WithTypeCheck(config *Config) { config.TypeCheck = true } // WithSliceDeepCopy will merge slice element one by one with Overwrite flag. 
func WithSliceDeepCopy(config *Config) { config.sliceDeepCopy = true config.Overwrite = true } func merge(dst, src interface{}, opts ...func(*Config)) error { if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { return ErrNonPointerAgument } var ( vDst, vSrc reflect.Value err error ) config := &Config{} for _, opt := range opts { opt(config) } if vDst, vSrc, err = resolveValues(dst, src); err != nil { return err } if vDst.Type() != vSrc.Type() { return ErrDifferentArgumentsTypes } return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) } // IsReflectNil is the reflect value provided nil func isReflectNil(v reflect.Value) bool { k := v.Kind() switch k { case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr: // Both interface and slice are nil if first word is 0. // Both are always bigger than a word; assume flagIndir. return v.IsNil() default: return false } }
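A usage sketch (added for illustration, not part of the vendored file) of the option functions defined above; the `Settings` struct and its values are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type Settings struct {
	Host string
	Port int
	Tags []string
}

func main() {
	src := Settings{Host: "example.org", Port: 8080, Tags: []string{"b"}}

	// Default: only zero-valued fields in dst are filled from src.
	dst := Settings{Host: "localhost", Tags: []string{"a"}}
	_ = mergo.Merge(&dst, src)
	fmt.Println(dst) // {localhost 8080 [a]}

	// WithOverride replaces non-empty dst values; WithAppendSlice appends
	// slices instead of overwriting them.
	dst2 := Settings{Host: "localhost", Tags: []string{"a"}}
	_ = mergo.Merge(&dst2, src, mergo.WithOverride, mergo.WithAppendSlice)
	fmt.Println(dst2) // {example.org 8080 [a b]}
}
```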
8,752
0
kubeflow_public_repos/fate-operator/vendor/github.com/imdario
kubeflow_public_repos/fate-operator/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md
# Contributor Covenant Code of Conduct ## Our Pledge In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. ## Our Standards Examples of behavior that contributes to creating a positive environment include: * Using welcoming and inclusive language * Being respectful of differing viewpoints and experiences * Gracefully accepting constructive criticism * Focusing on what is best for the community * Showing empathy towards other community members Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or advances * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic address, without explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting ## Our Responsibilities Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. ## Scope This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at [email protected]. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] [homepage]: http://contributor-covenant.org [version]: http://contributor-covenant.org/version/1/4/
8,753
0
kubeflow_public_repos/fate-operator/vendor/github.com/imdario
kubeflow_public_repos/fate-operator/vendor/github.com/imdario/mergo/go.mod
module github.com/imdario/mergo go 1.13 require gopkg.in/yaml.v2 v2.3.0
8,754
0
kubeflow_public_repos/fate-operator/vendor/github.com/imdario
kubeflow_public_repos/fate-operator/vendor/github.com/imdario/mergo/README.md
# Mergo

[![GoDoc][3]][4]
[![GitHub release][5]][6]
[![GoCard][7]][8]
[![Build Status][1]][2]
[![Coverage Status][9]][10]
[![Sourcegraph][11]][12]
[![FOSSA Status][13]][14]
[![GoCenter Kudos][15]][16]

[1]: https://travis-ci.org/imdario/mergo.png
[2]: https://travis-ci.org/imdario/mergo
[3]: https://godoc.org/github.com/imdario/mergo?status.svg
[4]: https://godoc.org/github.com/imdario/mergo
[5]: https://img.shields.io/github/release/imdario/mergo.svg
[6]: https://github.com/imdario/mergo/releases
[7]: https://goreportcard.com/badge/imdario/mergo
[8]: https://goreportcard.com/report/github.com/imdario/mergo
[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master
[10]: https://coveralls.io/github/imdario/mergo?branch=master
[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg
[12]: https://sourcegraph.com/github.com/imdario/mergo?badge
[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield
[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield
[15]: https://search.gocenter.io/api/ui/badge/github.com%2Fimdario%2Fmergo
[16]: https://search.gocenter.io/github.com/imdario/mergo

A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.

Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields, but it merges any exported field recursively. It also won't merge structs inside maps (because they are not addressable using Go reflection).

Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche.

## Status

It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild).

### Important note

Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for Go modules.

Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed the `Merge()` and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code. If you were using Mergo before April 6th, 2015, please check that your project works as intended after updating your local copy with `go get -u github.com/imdario/mergo`. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).

### Donations

If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software.
:heart_eyes: <a href='https://ko-fi.com/B0B58839' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://az743702.vo.msecnd.net/cdn/kofi1.png?v=0' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a> [![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo) [![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo) <a href="https://liberapay.com/dario/donate"><img alt="Donate using Liberapay" src="https://liberapay.com/assets/widgets/donate.svg"></a> ### Mergo in the wild - [moby/moby](https://github.com/moby/moby) - [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) - [vmware/dispatch](https://github.com/vmware/dispatch) - [Shopify/themekit](https://github.com/Shopify/themekit) - [imdario/zas](https://github.com/imdario/zas) - [matcornic/hermes](https://github.com/matcornic/hermes) - [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go) - [kataras/iris](https://github.com/kataras/iris) - [michaelsauter/crane](https://github.com/michaelsauter/crane) - [go-task/task](https://github.com/go-task/task) - [sensu/uchiwa](https://github.com/sensu/uchiwa) - [ory/hydra](https://github.com/ory/hydra) - [sisatech/vcli](https://github.com/sisatech/vcli) - [dairycart/dairycart](https://github.com/dairycart/dairycart) - [projectcalico/felix](https://github.com/projectcalico/felix) - [resin-os/balena](https://github.com/resin-os/balena) - [go-kivik/kivik](https://github.com/go-kivik/kivik) - [Telefonica/govice](https://github.com/Telefonica/govice) - [supergiant/supergiant](https://github.com/supergiant/supergiant) - [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce) - [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy) - [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel) - [EagerIO/Stout](https://github.com/EagerIO/Stout) - [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api) - [russross/canvasassignments](https://github.com/russross/canvasassignments) - [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api) - [casualjim/exeggutor](https://github.com/casualjim/exeggutor) - [divshot/gitling](https://github.com/divshot/gitling) - [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl) - [andrerocker/deploy42](https://github.com/andrerocker/deploy42) - [elwinar/rambler](https://github.com/elwinar/rambler) - [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman) - [jfbus/impressionist](https://github.com/jfbus/impressionist) - [Jmeyering/zealot](https://github.com/Jmeyering/zealot) - [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host) - [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go) - [thoas/picfit](https://github.com/thoas/picfit) - [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) - [jnuthong/item_search](https://github.com/jnuthong/item_search) - [bukalapak/snowboard](https://github.com/bukalapak/snowboard) - [janoszen/containerssh](https://github.com/janoszen/containerssh) ## Install go get github.com/imdario/mergo // use in your .go code import ( "github.com/imdario/mergo" ) ## Usage You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will merge any exported one recursively. It won't merge empty struct values, as [they are zero values](https://golang.org/ref/spec#The_zero_value) too.
Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). ```go if err := mergo.Merge(&dst, src); err != nil { // ... } ``` Also, you can merge overwriting values using the transformer `WithOverride`. ```go if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { // ... } ``` Additionally, you can map a `map[string]interface{}` to a struct (and vice versa, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field. ```go if err := mergo.Map(&dst, srcMap); err != nil { // ... } ``` Warning: if you map a struct to a map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will just be assigned as values. Here is a nice example: ```go package main import ( "fmt" "github.com/imdario/mergo" ) type Foo struct { A string B int64 } func main() { src := Foo{ A: "one", B: 2, } dest := Foo{ A: "two", } mergo.Merge(&dest, src) fmt.Println(dest) // Will print // {two 2} } ``` Note: if tests are failing due to a missing package, please execute: go get gopkg.in/yaml.v2 ### Transformers Transformers let you merge specific types differently from the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; Mergo can't detect it as a zero value via reflection, but its `IsZero` method can return true because all its fields hold zero values. How can we merge a non-zero `time.Time`? ```go package main import ( "fmt" "github.com/imdario/mergo" "reflect" "time" ) type timeTransformer struct { } func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { if typ == reflect.TypeOf(time.Time{}) { return func(dst, src reflect.Value) error { if dst.CanSet() { isZero := dst.MethodByName("IsZero") result := isZero.Call([]reflect.Value{}) if result[0].Bool() { dst.Set(src) } } return nil } } return nil } type Snapshot struct { Time time.Time // ... } func main() { src := Snapshot{time.Now()} dest := Snapshot{} mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) fmt.Println(dest) // Will print // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } } ``` ## Contact me If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) ## About Written by [Dario Castañé](http://dario.im).
## Top Contributors [![0](https://sourcerer.io/fame/imdario/imdario/mergo/images/0)](https://sourcerer.io/fame/imdario/imdario/mergo/links/0) [![1](https://sourcerer.io/fame/imdario/imdario/mergo/images/1)](https://sourcerer.io/fame/imdario/imdario/mergo/links/1) [![2](https://sourcerer.io/fame/imdario/imdario/mergo/images/2)](https://sourcerer.io/fame/imdario/imdario/mergo/links/2) [![3](https://sourcerer.io/fame/imdario/imdario/mergo/images/3)](https://sourcerer.io/fame/imdario/imdario/mergo/links/3) [![4](https://sourcerer.io/fame/imdario/imdario/mergo/images/4)](https://sourcerer.io/fame/imdario/imdario/mergo/links/4) [![5](https://sourcerer.io/fame/imdario/imdario/mergo/images/5)](https://sourcerer.io/fame/imdario/imdario/mergo/links/5) [![6](https://sourcerer.io/fame/imdario/imdario/mergo/images/6)](https://sourcerer.io/fame/imdario/imdario/mergo/links/6) [![7](https://sourcerer.io/fame/imdario/imdario/mergo/images/7)](https://sourcerer.io/fame/imdario/imdario/mergo/links/7) ## License [BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large)
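The README above shows `Map()` going from a map into a struct; below is a minimal sketch of the opposite, struct-to-map direction it also mentions. The `Config` type is hypothetical and used only for this illustration.

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

// Config is a hypothetical type used only for this illustration.
type Config struct {
	Host string
	Port int
}

func main() {
	src := Config{Host: "localhost", Port: 8080}
	dst := map[string]interface{}{}

	// Exported fields are copied into the map with a lowercased
	// first letter: "Host" becomes "host".
	if err := mergo.Map(&dst, src); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(dst) // map[host:localhost port:8080]
}
```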
8,755
0
kubeflow_public_repos/fate-operator/vendor/github.com/imdario
kubeflow_public_repos/fate-operator/vendor/github.com/imdario/mergo/map.go
// Copyright 2014 Dario Castañé. All rights reserved. // Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Based on src/pkg/reflect/deepequal.go from official // golang's stdlib. package mergo import ( "fmt" "reflect" "unicode" "unicode/utf8" ) func changeInitialCase(s string, mapper func(rune) rune) string { if s == "" { return s } r, n := utf8.DecodeRuneInString(s) return string(mapper(r)) + s[n:] } func isExported(field reflect.StructField) bool { r, _ := utf8.DecodeRuneInString(field.Name) return r >= 'A' && r <= 'Z' } // Traverses recursively both values, assigning src's fields values to dst. // The map argument tracks comparisons that have already been seen, which allows // short circuiting on recursive types. func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { overwrite := config.Overwrite if dst.CanAddr() { addr := dst.UnsafeAddr() h := 17 * addr seen := visited[h] typ := dst.Type() for p := seen; p != nil; p = p.next { if p.ptr == addr && p.typ == typ { return nil } } // Remember, remember... visited[h] = &visit{addr, typ, seen} } zeroValue := reflect.Value{} switch dst.Kind() { case reflect.Map: dstMap := dst.Interface().(map[string]interface{}) for i, n := 0, src.NumField(); i < n; i++ { srcType := src.Type() field := srcType.Field(i) if !isExported(field) { continue } fieldName := field.Name fieldName = changeInitialCase(fieldName, unicode.ToLower) if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) { dstMap[fieldName] = src.Field(i).Interface() } } case reflect.Ptr: if dst.IsNil() { v := reflect.New(dst.Type().Elem()) dst.Set(v) } dst = dst.Elem() fallthrough case reflect.Struct: srcMap := src.Interface().(map[string]interface{}) for key := range srcMap { config.overwriteWithEmptyValue = true srcValue := srcMap[key] fieldName := changeInitialCase(key, unicode.ToUpper) dstElement := dst.FieldByName(fieldName) if dstElement == zeroValue { // We discard it because the field doesn't exist. continue } srcElement := reflect.ValueOf(srcValue) dstKind := dstElement.Kind() srcKind := srcElement.Kind() if srcKind == reflect.Ptr && dstKind != reflect.Ptr { srcElement = srcElement.Elem() srcKind = reflect.TypeOf(srcElement.Interface()).Kind() } else if dstKind == reflect.Ptr { // Can this work? I guess it can't. if srcKind != reflect.Ptr && srcElement.CanAddr() { srcPtr := srcElement.Addr() srcElement = reflect.ValueOf(srcPtr) srcKind = reflect.Ptr } } if !srcElement.IsValid() { continue } if srcKind == dstKind { if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { return } } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface { if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { return } } else if srcKind == reflect.Map { if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil { return } } else { return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) } } } return } // Map sets fields' values in dst from src. // src can be a map with string keys or a struct. dst must be the opposite: // if src is a map, dst must be a valid pointer to struct. If src is a struct, // dst must be map[string]interface{}. // It won't merge unexported (private) fields and will do recursively // any exported field. 
// If dst is a map, keys will be src fields' names in lower camel case. // Keys in src that don't match a field in dst will be skipped. This // doesn't apply if dst is a map. // This is a separate method from Merge because it is cleaner and it keeps sane // semantics: merging equal types, mapping different (restricted) types. func Map(dst, src interface{}, opts ...func(*Config)) error { return _map(dst, src, opts...) } // MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by // non-empty src attribute values. // Deprecated: Use Map(…) with WithOverride func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { return _map(dst, src, append(opts, WithOverride)...) } func _map(dst, src interface{}, opts ...func(*Config)) error { if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { return ErrNonPointerAgument } var ( vDst, vSrc reflect.Value err error ) config := &Config{} for _, opt := range opts { opt(config) } if vDst, vSrc, err = resolveValues(dst, src); err != nil { return err } // To be friction-less, we redirect equal-type arguments // to deepMerge. Only because arguments can be anything. if vSrc.Kind() == vDst.Kind() { return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) } switch vSrc.Kind() { case reflect.Struct: if vDst.Kind() != reflect.Map { return ErrExpectedMapAsDestination } case reflect.Map: if vDst.Kind() != reflect.Struct { return ErrExpectedStructAsDestination } default: return ErrNotSupported } return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config) }
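To make the `Map` doc comment above concrete, here is a minimal sketch of the map-to-struct direction; the `Server` type is hypothetical. Keys are capitalized to find the matching exported fields, and keys without a matching field are skipped.

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

// Server is a hypothetical type used only for this illustration.
type Server struct {
	Host string
	Port int
}

func main() {
	src := map[string]interface{}{
		"host":  "example.org", // matched as "Host"
		"port":  443,           // matched as "Port"
		"extra": true,          // no matching field, so it is skipped
	}
	var dst Server

	if err := mergo.Map(&dst, src); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(dst) // {example.org 443}
}
```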
8,756
0
kubeflow_public_repos/fate-operator/vendor/github.com/imdario
kubeflow_public_repos/fate-operator/vendor/github.com/imdario/mergo/LICENSE
Copyright (c) 2013 Dario Castañé. All rights reserved. Copyright (c) 2012 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
8,757
0
kubeflow_public_repos/fate-operator/vendor/github.com/imdario
kubeflow_public_repos/fate-operator/vendor/github.com/imdario/mergo/doc.go
// Copyright 2013 Dario Castañé. All rights reserved. // Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. /* A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will merge any exported one recursively. It also won't merge structs inside maps (because they are not addressable using Go reflection). Status It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc. Important note Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 stable but not bug-free. Also, this version adds support for go modules. Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code. If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). Install Do your usual installation procedure: go get github.com/imdario/mergo // use in your .go code import ( "github.com/imdario/mergo" ) Usage You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will merge any exported one recursively. It won't merge empty struct values, as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). if err := mergo.Merge(&dst, src); err != nil { // ... } Also, you can merge overwriting values using the transformer WithOverride. if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { // ... } Additionally, you can map a map[string]interface{} to a struct (and vice versa, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field. if err := mergo.Map(&dst, srcMap); err != nil { // ... } Warning: if you map a struct to a map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will just be assigned as values. Here is a nice example: package main import ( "fmt" "github.com/imdario/mergo" ) type Foo struct { A string B int64 } func main() { src := Foo{ A: "one", B: 2, } dest := Foo{ A: "two", } mergo.Merge(&dest, src) fmt.Println(dest) // Will print // {two 2} } Transformers Transformers let you merge specific types differently from the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; Mergo can't detect it as a zero value via reflection, but its IsZero method can return true because all its fields hold zero values. How can we merge a non-zero time.Time?
package main import ( "fmt" "github.com/imdario/mergo" "reflect" "time" ) type timeTransformer struct { } func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { if typ == reflect.TypeOf(time.Time{}) { return func(dst, src reflect.Value) error { if dst.CanSet() { isZero := dst.MethodByName("IsZero") result := isZero.Call([]reflect.Value{}) if result[0].Bool() { dst.Set(src) } } return nil } } return nil } type Snapshot struct { Time time.Time // ... } func main() { src := Snapshot{time.Now()} dest := Snapshot{} mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) fmt.Println(dest) // Will print // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } } Contact me If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario About Written by Dario Castañé: https://da.rio.hn License BSD 3-Clause license, as Go language. */ package mergo
8,758
0
kubeflow_public_repos/fate-operator/vendor/github.com/imdario
kubeflow_public_repos/fate-operator/vendor/github.com/imdario/mergo/go.sum
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
8,759
0
kubeflow_public_repos/fate-operator/vendor/github.com/imdario
kubeflow_public_repos/fate-operator/vendor/github.com/imdario/mergo/mergo.go
// Copyright 2013 Dario Castañé. All rights reserved. // Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Based on src/pkg/reflect/deepequal.go from official // golang's stdlib. package mergo import ( "errors" "reflect" ) // Errors reported by Mergo when it finds invalid arguments. var ( ErrNilArguments = errors.New("src and dst must not be nil") ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") ErrNotSupported = errors.New("only structs and maps are supported") ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") ErrNonPointerAgument = errors.New("dst must be a pointer") ) // During deepMerge, must keep track of checks that are // in progress. The comparison algorithm assumes that all // checks in progress are true when it reencounters them. // Visited are stored in a map indexed by 17 * a1 + a2; type visit struct { ptr uintptr typ reflect.Type next *visit } // From src/pkg/encoding/json/encode.go. func isEmptyValue(v reflect.Value) bool { switch v.Kind() { case reflect.Array, reflect.Map, reflect.Slice, reflect.String: return v.Len() == 0 case reflect.Bool: return !v.Bool() case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return v.Int() == 0 case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: return v.Uint() == 0 case reflect.Float32, reflect.Float64: return v.Float() == 0 case reflect.Interface, reflect.Ptr: if v.IsNil() { return true } return isEmptyValue(v.Elem()) case reflect.Func: return v.IsNil() case reflect.Invalid: return true } return false } func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { if dst == nil || src == nil { err = ErrNilArguments return } vDst = reflect.ValueOf(dst).Elem() if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map { err = ErrNotSupported return } vSrc = reflect.ValueOf(src) // We check if vSrc is a pointer to dereference it. if vSrc.Kind() == reflect.Ptr { vSrc = vSrc.Elem() } return }
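The `isEmptyValue` helper above is what decides whether a destination field is eligible to receive a value during a merge. A minimal sketch of the resulting default-filling behavior, assuming a hypothetical `Options` type:

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

// Options is a hypothetical type used only for this illustration.
type Options struct {
	Name    string
	Retries int
	Debug   bool
}

func main() {
	dst := Options{Name: "custom"} // Retries and Debug hold zero values
	defaults := Options{Name: "default", Retries: 3, Debug: true}

	// Only fields reported empty by isEmptyValue are filled in,
	// so the non-zero Name in dst survives the merge.
	if err := mergo.Merge(&dst, defaults); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(dst) // {custom 3 true}
}
```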
8,760
0
kubeflow_public_repos/fate-operator/vendor/github.com/imdario
kubeflow_public_repos/fate-operator/vendor/github.com/imdario/mergo/.travis.yml
language: go install: - go get -t - go get golang.org/x/tools/cmd/cover - go get github.com/mattn/goveralls script: - go test -race -v ./... after_script: - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN
8,761
0
kubeflow_public_repos/fate-operator/vendor/github.com/imdario
kubeflow_public_repos/fate-operator/vendor/github.com/imdario/mergo/.deepsource.toml
version = 1 test_patterns = [ "*_test.go" ] [[analyzers]] name = "go" enabled = true [analyzers.meta] import_path = "github.com/imdario/mergo"
8,762
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/not_go112.go
// +build !go1.12 package zerolog const contextCallerSkipFrameCount = 3
8,763
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/encoder_cbor.go
// +build binary_log package zerolog // This file contains bindings to do binary encoding. import ( "github.com/rs/zerolog/internal/cbor" ) var ( _ encoder = (*cbor.Encoder)(nil) enc = cbor.Encoder{} ) func appendJSON(dst []byte, j []byte) []byte { return cbor.AppendEmbeddedJSON(dst, j) } // decodeIfBinaryToString - converts a binary formatted log msg to a // JSON formatted String Log message. func decodeIfBinaryToString(in []byte) string { return cbor.DecodeIfBinaryToString(in) } func decodeObjectToStr(in []byte) string { return cbor.DecodeObjectToStr(in) } // decodeIfBinaryToBytes - converts a binary formatted log msg to a // JSON formatted Bytes Log message. func decodeIfBinaryToBytes(in []byte) []byte { return cbor.DecodeIfBinaryToBytes(in) }
8,764
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/array.go
package zerolog import ( "net" "sync" "time" ) var arrayPool = &sync.Pool{ New: func() interface{} { return &Array{ buf: make([]byte, 0, 500), } }, } // Array is used to prepopulate an array of items // which can be re-used to add to log messages. type Array struct { buf []byte } func putArray(a *Array) { // Proper usage of a sync.Pool requires each entry to have approximately // the same memory cost. To obtain this property when the stored type // contains a variably-sized buffer, we add a hard limit on the maximum buffer // to place back in the pool. // // See https://golang.org/issue/23199 const maxSize = 1 << 16 // 64KiB if cap(a.buf) > maxSize { return } arrayPool.Put(a) } // Arr creates an array to be added to an Event or Context. func Arr() *Array { a := arrayPool.Get().(*Array) a.buf = a.buf[:0] return a } // MarshalZerologArray method here is no-op - since data is // already in the needed format. func (*Array) MarshalZerologArray(*Array) { } func (a *Array) write(dst []byte) []byte { dst = enc.AppendArrayStart(dst) if len(a.buf) > 0 { dst = append(append(dst, a.buf...)) } dst = enc.AppendArrayEnd(dst) putArray(a) return dst } // Object marshals an object that implement the LogObjectMarshaler // interface and append append it to the array. func (a *Array) Object(obj LogObjectMarshaler) *Array { e := Dict() obj.MarshalZerologObject(e) e.buf = enc.AppendEndMarker(e.buf) a.buf = append(enc.AppendArrayDelim(a.buf), e.buf...) putEvent(e) return a } // Str append append the val as a string to the array. func (a *Array) Str(val string) *Array { a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), val) return a } // Bytes append append the val as a string to the array. func (a *Array) Bytes(val []byte) *Array { a.buf = enc.AppendBytes(enc.AppendArrayDelim(a.buf), val) return a } // Hex append append the val as a hex string to the array. func (a *Array) Hex(val []byte) *Array { a.buf = enc.AppendHex(enc.AppendArrayDelim(a.buf), val) return a } // RawJSON adds already encoded JSON to the array. func (a *Array) RawJSON(val []byte) *Array { a.buf = appendJSON(enc.AppendArrayDelim(a.buf), val) return a } // Err serializes and appends the err to the array. func (a *Array) Err(err error) *Array { switch m := ErrorMarshalFunc(err).(type) { case LogObjectMarshaler: e := newEvent(nil, 0) e.buf = e.buf[:0] e.appendObject(m) a.buf = append(enc.AppendArrayDelim(a.buf), e.buf...) putEvent(e) case error: if m == nil || isNilValue(m) { a.buf = enc.AppendNil(enc.AppendArrayDelim(a.buf)) } else { a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), m.Error()) } case string: a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), m) default: a.buf = enc.AppendInterface(enc.AppendArrayDelim(a.buf), m) } return a } // Bool append append the val as a bool to the array. func (a *Array) Bool(b bool) *Array { a.buf = enc.AppendBool(enc.AppendArrayDelim(a.buf), b) return a } // Int append append i as a int to the array. func (a *Array) Int(i int) *Array { a.buf = enc.AppendInt(enc.AppendArrayDelim(a.buf), i) return a } // Int8 append append i as a int8 to the array. func (a *Array) Int8(i int8) *Array { a.buf = enc.AppendInt8(enc.AppendArrayDelim(a.buf), i) return a } // Int16 append append i as a int16 to the array. func (a *Array) Int16(i int16) *Array { a.buf = enc.AppendInt16(enc.AppendArrayDelim(a.buf), i) return a } // Int32 append append i as a int32 to the array. 
func (a *Array) Int32(i int32) *Array { a.buf = enc.AppendInt32(enc.AppendArrayDelim(a.buf), i) return a } // Int64 appends i as an int64 to the array. func (a *Array) Int64(i int64) *Array { a.buf = enc.AppendInt64(enc.AppendArrayDelim(a.buf), i) return a } // Uint appends i as a uint to the array. func (a *Array) Uint(i uint) *Array { a.buf = enc.AppendUint(enc.AppendArrayDelim(a.buf), i) return a } // Uint8 appends i as a uint8 to the array. func (a *Array) Uint8(i uint8) *Array { a.buf = enc.AppendUint8(enc.AppendArrayDelim(a.buf), i) return a } // Uint16 appends i as a uint16 to the array. func (a *Array) Uint16(i uint16) *Array { a.buf = enc.AppendUint16(enc.AppendArrayDelim(a.buf), i) return a } // Uint32 appends i as a uint32 to the array. func (a *Array) Uint32(i uint32) *Array { a.buf = enc.AppendUint32(enc.AppendArrayDelim(a.buf), i) return a } // Uint64 appends i as a uint64 to the array. func (a *Array) Uint64(i uint64) *Array { a.buf = enc.AppendUint64(enc.AppendArrayDelim(a.buf), i) return a } // Float32 appends f as a float32 to the array. func (a *Array) Float32(f float32) *Array { a.buf = enc.AppendFloat32(enc.AppendArrayDelim(a.buf), f) return a } // Float64 appends f as a float64 to the array. func (a *Array) Float64(f float64) *Array { a.buf = enc.AppendFloat64(enc.AppendArrayDelim(a.buf), f) return a } // Time appends t formatted as a string using zerolog.TimeFieldFormat. func (a *Array) Time(t time.Time) *Array { a.buf = enc.AppendTime(enc.AppendArrayDelim(a.buf), t, TimeFieldFormat) return a } // Dur appends d to the array. func (a *Array) Dur(d time.Duration) *Array { a.buf = enc.AppendDuration(enc.AppendArrayDelim(a.buf), d, DurationFieldUnit, DurationFieldInteger) return a } // Interface appends i marshaled using reflection. func (a *Array) Interface(i interface{}) *Array { if obj, ok := i.(LogObjectMarshaler); ok { return a.Object(obj) } a.buf = enc.AppendInterface(enc.AppendArrayDelim(a.buf), i) return a } // IPAddr adds an IPv4 or IPv6 address to the array. func (a *Array) IPAddr(ip net.IP) *Array { a.buf = enc.AppendIPAddr(enc.AppendArrayDelim(a.buf), ip) return a } // IPPrefix adds an IPv4 or IPv6 Prefix (IP + mask) to the array. func (a *Array) IPPrefix(pfx net.IPNet) *Array { a.buf = enc.AppendIPPrefix(enc.AppendArrayDelim(a.buf), pfx) return a } // MACAddr adds a MAC (Ethernet) address to the array. func (a *Array) MACAddr(ha net.HardwareAddr) *Array { a.buf = enc.AppendMACAddr(enc.AppendArrayDelim(a.buf), ha) return a }
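A short sketch of how the `Array` type above is typically consumed: `Arr()` builds the buffer, and attaching it to an event via `Event.Array` triggers `write`, which wraps the buffer in array markers and returns it to the pool. The field values here are illustrative.

```go
package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	logger := zerolog.New(os.Stdout)

	// Arr pre-populates a JSON array that is emitted under "peers".
	logger.Info().
		Array("peers", zerolog.Arr().Str("10.0.0.1").Str("10.0.0.2").Int(2)).
		Msg("cluster joined")
	// Output (approximately):
	// {"level":"info","peers":["10.0.0.1","10.0.0.2",2],"message":"cluster joined"}
}
```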
8,765
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/go.mod
module github.com/rs/zerolog require ( github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e github.com/pkg/errors v0.8.1 github.com/rs/xid v1.2.1 golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74 )
8,766
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/go112.go
// +build go1.12 package zerolog // Since go 1.12, some auto generated init functions are hidden from // runtime.Caller. const contextCallerSkipFrameCount = 2
8,767
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/context.go
package zerolog import ( "io/ioutil" "math" "net" "time" ) // Context configures a new sub-logger with contextual fields. type Context struct { l Logger } // Logger returns the logger with the context previously set. func (c Context) Logger() Logger { return c.l } // Fields is a helper function to use a map to set fields using type assertion. func (c Context) Fields(fields map[string]interface{}) Context { c.l.context = appendFields(c.l.context, fields) return c } // Dict adds the field key with the dict to the logger context. func (c Context) Dict(key string, dict *Event) Context { dict.buf = enc.AppendEndMarker(dict.buf) c.l.context = append(enc.AppendKey(c.l.context, key), dict.buf...) putEvent(dict) return c } // Array adds the field key with an array to the event context. // Use zerolog.Arr() to create the array or pass a type that // implement the LogArrayMarshaler interface. func (c Context) Array(key string, arr LogArrayMarshaler) Context { c.l.context = enc.AppendKey(c.l.context, key) if arr, ok := arr.(*Array); ok { c.l.context = arr.write(c.l.context) return c } var a *Array if aa, ok := arr.(*Array); ok { a = aa } else { a = Arr() arr.MarshalZerologArray(a) } c.l.context = a.write(c.l.context) return c } // Object marshals an object that implement the LogObjectMarshaler interface. func (c Context) Object(key string, obj LogObjectMarshaler) Context { e := newEvent(levelWriterAdapter{ioutil.Discard}, 0) e.Object(key, obj) c.l.context = enc.AppendObjectData(c.l.context, e.buf) putEvent(e) return c } // EmbedObject marshals and Embeds an object that implement the LogObjectMarshaler interface. func (c Context) EmbedObject(obj LogObjectMarshaler) Context { e := newEvent(levelWriterAdapter{ioutil.Discard}, 0) e.EmbedObject(obj) c.l.context = enc.AppendObjectData(c.l.context, e.buf) putEvent(e) return c } // Str adds the field key with val as a string to the logger context. func (c Context) Str(key, val string) Context { c.l.context = enc.AppendString(enc.AppendKey(c.l.context, key), val) return c } // Strs adds the field key with val as a string to the logger context. func (c Context) Strs(key string, vals []string) Context { c.l.context = enc.AppendStrings(enc.AppendKey(c.l.context, key), vals) return c } // Bytes adds the field key with val as a []byte to the logger context. func (c Context) Bytes(key string, val []byte) Context { c.l.context = enc.AppendBytes(enc.AppendKey(c.l.context, key), val) return c } // Hex adds the field key with val as a hex string to the logger context. func (c Context) Hex(key string, val []byte) Context { c.l.context = enc.AppendHex(enc.AppendKey(c.l.context, key), val) return c } // RawJSON adds already encoded JSON to context. // // No sanity check is performed on b; it must not contain carriage returns and // be valid JSON. func (c Context) RawJSON(key string, b []byte) Context { c.l.context = appendJSON(enc.AppendKey(c.l.context, key), b) return c } // AnErr adds the field key with serialized err to the logger context. func (c Context) AnErr(key string, err error) Context { switch m := ErrorMarshalFunc(err).(type) { case nil: return c case LogObjectMarshaler: return c.Object(key, m) case error: if m == nil || isNilValue(m) { return c } else { return c.Str(key, m.Error()) } case string: return c.Str(key, m) default: return c.Interface(key, m) } } // Errs adds the field key with errs as an array of serialized errors to the // logger context. 
func (c Context) Errs(key string, errs []error) Context { arr := Arr() for _, err := range errs { switch m := ErrorMarshalFunc(err).(type) { case LogObjectMarshaler: arr = arr.Object(m) case error: if m == nil || isNilValue(m) { arr = arr.Interface(nil) } else { arr = arr.Str(m.Error()) } case string: arr = arr.Str(m) default: arr = arr.Interface(m) } } return c.Array(key, arr) } // Err adds the field "error" with serialized err to the logger context. func (c Context) Err(err error) Context { return c.AnErr(ErrorFieldName, err) } // Bool adds the field key with val as a bool to the logger context. func (c Context) Bool(key string, b bool) Context { c.l.context = enc.AppendBool(enc.AppendKey(c.l.context, key), b) return c } // Bools adds the field key with val as a []bool to the logger context. func (c Context) Bools(key string, b []bool) Context { c.l.context = enc.AppendBools(enc.AppendKey(c.l.context, key), b) return c } // Int adds the field key with i as a int to the logger context. func (c Context) Int(key string, i int) Context { c.l.context = enc.AppendInt(enc.AppendKey(c.l.context, key), i) return c } // Ints adds the field key with i as a []int to the logger context. func (c Context) Ints(key string, i []int) Context { c.l.context = enc.AppendInts(enc.AppendKey(c.l.context, key), i) return c } // Int8 adds the field key with i as a int8 to the logger context. func (c Context) Int8(key string, i int8) Context { c.l.context = enc.AppendInt8(enc.AppendKey(c.l.context, key), i) return c } // Ints8 adds the field key with i as a []int8 to the logger context. func (c Context) Ints8(key string, i []int8) Context { c.l.context = enc.AppendInts8(enc.AppendKey(c.l.context, key), i) return c } // Int16 adds the field key with i as a int16 to the logger context. func (c Context) Int16(key string, i int16) Context { c.l.context = enc.AppendInt16(enc.AppendKey(c.l.context, key), i) return c } // Ints16 adds the field key with i as a []int16 to the logger context. func (c Context) Ints16(key string, i []int16) Context { c.l.context = enc.AppendInts16(enc.AppendKey(c.l.context, key), i) return c } // Int32 adds the field key with i as a int32 to the logger context. func (c Context) Int32(key string, i int32) Context { c.l.context = enc.AppendInt32(enc.AppendKey(c.l.context, key), i) return c } // Ints32 adds the field key with i as a []int32 to the logger context. func (c Context) Ints32(key string, i []int32) Context { c.l.context = enc.AppendInts32(enc.AppendKey(c.l.context, key), i) return c } // Int64 adds the field key with i as a int64 to the logger context. func (c Context) Int64(key string, i int64) Context { c.l.context = enc.AppendInt64(enc.AppendKey(c.l.context, key), i) return c } // Ints64 adds the field key with i as a []int64 to the logger context. func (c Context) Ints64(key string, i []int64) Context { c.l.context = enc.AppendInts64(enc.AppendKey(c.l.context, key), i) return c } // Uint adds the field key with i as a uint to the logger context. func (c Context) Uint(key string, i uint) Context { c.l.context = enc.AppendUint(enc.AppendKey(c.l.context, key), i) return c } // Uints adds the field key with i as a []uint to the logger context. func (c Context) Uints(key string, i []uint) Context { c.l.context = enc.AppendUints(enc.AppendKey(c.l.context, key), i) return c } // Uint8 adds the field key with i as a uint8 to the logger context. 
func (c Context) Uint8(key string, i uint8) Context { c.l.context = enc.AppendUint8(enc.AppendKey(c.l.context, key), i) return c } // Uints8 adds the field key with i as a []uint8 to the logger context. func (c Context) Uints8(key string, i []uint8) Context { c.l.context = enc.AppendUints8(enc.AppendKey(c.l.context, key), i) return c } // Uint16 adds the field key with i as a uint16 to the logger context. func (c Context) Uint16(key string, i uint16) Context { c.l.context = enc.AppendUint16(enc.AppendKey(c.l.context, key), i) return c } // Uints16 adds the field key with i as a []uint16 to the logger context. func (c Context) Uints16(key string, i []uint16) Context { c.l.context = enc.AppendUints16(enc.AppendKey(c.l.context, key), i) return c } // Uint32 adds the field key with i as a uint32 to the logger context. func (c Context) Uint32(key string, i uint32) Context { c.l.context = enc.AppendUint32(enc.AppendKey(c.l.context, key), i) return c } // Uints32 adds the field key with i as a []uint32 to the logger context. func (c Context) Uints32(key string, i []uint32) Context { c.l.context = enc.AppendUints32(enc.AppendKey(c.l.context, key), i) return c } // Uint64 adds the field key with i as a uint64 to the logger context. func (c Context) Uint64(key string, i uint64) Context { c.l.context = enc.AppendUint64(enc.AppendKey(c.l.context, key), i) return c } // Uints64 adds the field key with i as a []uint64 to the logger context. func (c Context) Uints64(key string, i []uint64) Context { c.l.context = enc.AppendUints64(enc.AppendKey(c.l.context, key), i) return c } // Float32 adds the field key with f as a float32 to the logger context. func (c Context) Float32(key string, f float32) Context { c.l.context = enc.AppendFloat32(enc.AppendKey(c.l.context, key), f) return c } // Floats32 adds the field key with f as a []float32 to the logger context. func (c Context) Floats32(key string, f []float32) Context { c.l.context = enc.AppendFloats32(enc.AppendKey(c.l.context, key), f) return c } // Float64 adds the field key with f as a float64 to the logger context. func (c Context) Float64(key string, f float64) Context { c.l.context = enc.AppendFloat64(enc.AppendKey(c.l.context, key), f) return c } // Floats64 adds the field key with f as a []float64 to the logger context. func (c Context) Floats64(key string, f []float64) Context { c.l.context = enc.AppendFloats64(enc.AppendKey(c.l.context, key), f) return c } type timestampHook struct{} func (ts timestampHook) Run(e *Event, level Level, msg string) { e.Timestamp() } var th = timestampHook{} // Timestamp adds the current local time as UNIX timestamp to the logger context with the "time" key. // To customize the key name, change zerolog.TimestampFieldName. // // NOTE: It won't dedupe the "time" key if the *Context has one already. func (c Context) Timestamp() Context { c.l = c.l.Hook(th) return c } // Time adds the field key with t formatted as a string using zerolog.TimeFieldFormat. func (c Context) Time(key string, t time.Time) Context { c.l.context = enc.AppendTime(enc.AppendKey(c.l.context, key), t, TimeFieldFormat) return c } // Times adds the field key with t formatted as a string using zerolog.TimeFieldFormat. func (c Context) Times(key string, t []time.Time) Context { c.l.context = enc.AppendTimes(enc.AppendKey(c.l.context, key), t, TimeFieldFormat) return c } // Dur adds the field key with d divided by unit and stored as a float.
func (c Context) Dur(key string, d time.Duration) Context { c.l.context = enc.AppendDuration(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger) return c } // Durs adds the field key with d divided by unit and stored as a float. func (c Context) Durs(key string, d []time.Duration) Context { c.l.context = enc.AppendDurations(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger) return c } // Interface adds the field key with i marshaled using reflection. func (c Context) Interface(key string, i interface{}) Context { c.l.context = enc.AppendInterface(enc.AppendKey(c.l.context, key), i) return c } type callerHook struct { callerSkipFrameCount int } func newCallerHook(skipFrameCount int) callerHook { return callerHook{callerSkipFrameCount: skipFrameCount} } func (ch callerHook) Run(e *Event, level Level, msg string) { switch ch.callerSkipFrameCount { case useGlobalSkipFrameCount: // Extra frames to skip (added by hook infra). e.caller(CallerSkipFrameCount + contextCallerSkipFrameCount) default: // Extra frames to skip (added by hook infra). e.caller(ch.callerSkipFrameCount + contextCallerSkipFrameCount) } } // useGlobalSkipFrameCount acts as a flag to inform callerHook.Run // to use the global CallerSkipFrameCount. const useGlobalSkipFrameCount = math.MinInt32 // ch is the default caller hook using the global CallerSkipFrameCount. var ch = newCallerHook(useGlobalSkipFrameCount) // Caller adds the file:line of the caller with the zerolog.CallerFieldName key. func (c Context) Caller() Context { c.l = c.l.Hook(ch) return c } // CallerWithSkipFrameCount adds the file:line of the caller with the zerolog.CallerFieldName key. // The specified skipFrameCount int will override the global CallerSkipFrameCount for this context's respective logger. // If set to -1 the global CallerSkipFrameCount will be used. func (c Context) CallerWithSkipFrameCount(skipFrameCount int) Context { c.l = c.l.Hook(newCallerHook(skipFrameCount)) return c } type stackTraceHook struct{} func (sh stackTraceHook) Run(e *Event, level Level, msg string) { e.Stack() } var sh = stackTraceHook{} // Stack enables stack trace printing for the error passed to Err(). func (c Context) Stack() Context { c.l = c.l.Hook(sh) return c } // IPAddr adds an IPv4 or IPv6 Address to the context. func (c Context) IPAddr(key string, ip net.IP) Context { c.l.context = enc.AppendIPAddr(enc.AppendKey(c.l.context, key), ip) return c } // IPPrefix adds an IPv4 or IPv6 Prefix (address and mask) to the context. func (c Context) IPPrefix(key string, pfx net.IPNet) Context { c.l.context = enc.AppendIPPrefix(enc.AppendKey(c.l.context, key), pfx) return c } // MACAddr adds a MAC address to the context. func (c Context) MACAddr(key string, ha net.HardwareAddr) Context { c.l.context = enc.AppendMACAddr(enc.AppendKey(c.l.context, key), ha) return c }
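Putting the `Context` methods above together, here is a minimal sketch of building a sub-logger: each field call appends to the logger's context buffer, and `Logger()` freezes it so the fields are serialized once and prepended to every event. The field names are illustrative.

```go
package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	// With() returns a Context; Timestamp() installs the timestamp
	// hook, and Str/Int append pre-encoded fields.
	logger := zerolog.New(os.Stdout).
		With().
		Timestamp().
		Str("component", "ingest").
		Int("shard", 3).
		Logger()

	logger.Info().Msg("ready")
	// Output (approximately):
	// {"level":"info","component":"ingest","shard":3,"time":...,"message":"ready"}
}
```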
8,768
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/console.go
package zerolog import ( "bytes" "encoding/json" "fmt" "io" "os" "sort" "strconv" "strings" "sync" "time" ) const ( colorBlack = iota + 30 colorRed colorGreen colorYellow colorBlue colorMagenta colorCyan colorWhite colorBold = 1 colorDarkGray = 90 ) var ( consoleBufPool = sync.Pool{ New: func() interface{} { return bytes.NewBuffer(make([]byte, 0, 100)) }, } ) const ( consoleDefaultTimeFormat = time.Kitchen ) // Formatter transforms the input into a formatted string. type Formatter func(interface{}) string // ConsoleWriter parses the JSON input and writes it in an // (optionally) colorized, human-friendly format to Out. type ConsoleWriter struct { // Out is the output destination. Out io.Writer // NoColor disables the colorized output. NoColor bool // TimeFormat specifies the format for timestamp in output. TimeFormat string // PartsOrder defines the order of parts in output. PartsOrder []string FormatTimestamp Formatter FormatLevel Formatter FormatCaller Formatter FormatMessage Formatter FormatFieldName Formatter FormatFieldValue Formatter FormatErrFieldName Formatter FormatErrFieldValue Formatter } // NewConsoleWriter creates and initializes a new ConsoleWriter. func NewConsoleWriter(options ...func(w *ConsoleWriter)) ConsoleWriter { w := ConsoleWriter{ Out: os.Stdout, TimeFormat: consoleDefaultTimeFormat, PartsOrder: consoleDefaultPartsOrder(), } for _, opt := range options { opt(&w) } return w } // Write transforms the JSON input with formatters and appends to w.Out. func (w ConsoleWriter) Write(p []byte) (n int, err error) { if w.PartsOrder == nil { w.PartsOrder = consoleDefaultPartsOrder() } var buf = consoleBufPool.Get().(*bytes.Buffer) defer func() { buf.Reset() consoleBufPool.Put(buf) }() var evt map[string]interface{} p = decodeIfBinaryToBytes(p) d := json.NewDecoder(bytes.NewReader(p)) d.UseNumber() err = d.Decode(&evt) if err != nil { return n, fmt.Errorf("cannot decode event: %s", err) } for _, p := range w.PartsOrder { w.writePart(buf, evt, p) } w.writeFields(evt, buf) err = buf.WriteByte('\n') if err != nil { return n, err } _, err = buf.WriteTo(w.Out) return len(p), err } // writeFields appends formatted key-value pairs to buf. func (w ConsoleWriter) writeFields(evt map[string]interface{}, buf *bytes.Buffer) { var fields = make([]string, 0, len(evt)) for field := range evt { switch field { case LevelFieldName, TimestampFieldName, MessageFieldName, CallerFieldName: continue } fields = append(fields, field) } sort.Strings(fields) if len(fields) > 0 { buf.WriteByte(' ') } // Move the "error" field to the front ei := sort.Search(len(fields), func(i int) bool { return fields[i] >= ErrorFieldName }) if ei < len(fields) && fields[ei] == ErrorFieldName { fields[ei] = "" fields = append([]string{ErrorFieldName}, fields...) 
var xfields = make([]string, 0, len(fields)) for _, field := range fields { if field == "" { // Skip empty fields continue } xfields = append(xfields, field) } fields = xfields } for i, field := range fields { var fn Formatter var fv Formatter if field == ErrorFieldName { if w.FormatErrFieldName == nil { fn = consoleDefaultFormatErrFieldName(w.NoColor) } else { fn = w.FormatErrFieldName } if w.FormatErrFieldValue == nil { fv = consoleDefaultFormatErrFieldValue(w.NoColor) } else { fv = w.FormatErrFieldValue } } else { if w.FormatFieldName == nil { fn = consoleDefaultFormatFieldName(w.NoColor) } else { fn = w.FormatFieldName } if w.FormatFieldValue == nil { fv = consoleDefaultFormatFieldValue } else { fv = w.FormatFieldValue } } buf.WriteString(fn(field)) switch fValue := evt[field].(type) { case string: if needsQuote(fValue) { buf.WriteString(fv(strconv.Quote(fValue))) } else { buf.WriteString(fv(fValue)) } case json.Number: buf.WriteString(fv(fValue)) default: b, err := json.Marshal(fValue) if err != nil { fmt.Fprintf(buf, colorize("[error: %v]", colorRed, w.NoColor), err) } else { fmt.Fprint(buf, fv(b)) } } if i < len(fields)-1 { // Skip space for last field buf.WriteByte(' ') } } } // writePart appends a formatted part to buf. func (w ConsoleWriter) writePart(buf *bytes.Buffer, evt map[string]interface{}, p string) { var f Formatter switch p { case LevelFieldName: if w.FormatLevel == nil { f = consoleDefaultFormatLevel(w.NoColor) } else { f = w.FormatLevel } case TimestampFieldName: if w.FormatTimestamp == nil { f = consoleDefaultFormatTimestamp(w.TimeFormat, w.NoColor) } else { f = w.FormatTimestamp } case MessageFieldName: if w.FormatMessage == nil { f = consoleDefaultFormatMessage } else { f = w.FormatMessage } case CallerFieldName: if w.FormatCaller == nil { f = consoleDefaultFormatCaller(w.NoColor) } else { f = w.FormatCaller } default: if w.FormatFieldValue == nil { f = consoleDefaultFormatFieldValue } else { f = w.FormatFieldValue } } var s = f(evt[p]) if len(s) > 0 { buf.WriteString(s) if p != w.PartsOrder[len(w.PartsOrder)-1] { // Skip space for last part buf.WriteByte(' ') } } } // needsQuote returns true when the string s should be quoted in output. func needsQuote(s string) bool { for i := range s { if s[i] < 0x20 || s[i] > 0x7e || s[i] == ' ' || s[i] == '\\' || s[i] == '"' { return true } } return false } // colorize returns the string s wrapped in ANSI code c, unless disabled is true. 
func colorize(s interface{}, c int, disabled bool) string { if disabled { return fmt.Sprintf("%s", s) } return fmt.Sprintf("\x1b[%dm%v\x1b[0m", c, s) } // ----- DEFAULT FORMATTERS --------------------------------------------------- func consoleDefaultPartsOrder() []string { return []string{ TimestampFieldName, LevelFieldName, CallerFieldName, MessageFieldName, } } func consoleDefaultFormatTimestamp(timeFormat string, noColor bool) Formatter { if timeFormat == "" { timeFormat = consoleDefaultTimeFormat } return func(i interface{}) string { t := "<nil>" switch tt := i.(type) { case string: ts, err := time.Parse(TimeFieldFormat, tt) if err != nil { t = tt } else { t = ts.Format(timeFormat) } case json.Number: i, err := tt.Int64() if err != nil { t = tt.String() } else { var sec, nsec int64 = i, 0 switch TimeFieldFormat { case TimeFormatUnixMs: nsec = int64(time.Duration(i) * time.Millisecond) sec = 0 case TimeFormatUnixMicro: nsec = int64(time.Duration(i) * time.Microsecond) sec = 0 } ts := time.Unix(sec, nsec).UTC() t = ts.Format(timeFormat) } } return colorize(t, colorDarkGray, noColor) } } func consoleDefaultFormatLevel(noColor bool) Formatter { return func(i interface{}) string { var l string if ll, ok := i.(string); ok { switch ll { case "trace": l = colorize("TRC", colorMagenta, noColor) case "debug": l = colorize("DBG", colorYellow, noColor) case "info": l = colorize("INF", colorGreen, noColor) case "warn": l = colorize("WRN", colorRed, noColor) case "error": l = colorize(colorize("ERR", colorRed, noColor), colorBold, noColor) case "fatal": l = colorize(colorize("FTL", colorRed, noColor), colorBold, noColor) case "panic": l = colorize(colorize("PNC", colorRed, noColor), colorBold, noColor) default: l = colorize("???", colorBold, noColor) } } else { if i == nil { l = colorize("???", colorBold, noColor) } else { l = strings.ToUpper(fmt.Sprintf("%s", i))[0:3] } } return l } } func consoleDefaultFormatCaller(noColor bool) Formatter { return func(i interface{}) string { var c string if cc, ok := i.(string); ok { c = cc } if len(c) > 0 { cwd, err := os.Getwd() if err == nil { c = strings.TrimPrefix(c, cwd) c = strings.TrimPrefix(c, "/") } c = colorize(c, colorBold, noColor) + colorize(" >", colorCyan, noColor) } return c } } func consoleDefaultFormatMessage(i interface{}) string { if i == nil { return "" } return fmt.Sprintf("%s", i) } func consoleDefaultFormatFieldName(noColor bool) Formatter { return func(i interface{}) string { return colorize(fmt.Sprintf("%s=", i), colorCyan, noColor) } } func consoleDefaultFormatFieldValue(i interface{}) string { return fmt.Sprintf("%s", i) } func consoleDefaultFormatErrFieldName(noColor bool) Formatter { return func(i interface{}) string { return colorize(fmt.Sprintf("%s=", i), colorRed, noColor) } } func consoleDefaultFormatErrFieldValue(noColor bool) Formatter { return func(i interface{}) string { return colorize(fmt.Sprintf("%s", i), colorRed, noColor) } }
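A minimal sketch of wiring the `ConsoleWriter` above into a logger via the functional options accepted by `NewConsoleWriter`; the writer re-parses each JSON event and prints it using the part order and formatters defined above. The log message itself is illustrative.

```go
package main

import (
	"os"
	"time"

	"github.com/rs/zerolog"
)

func main() {
	w := zerolog.NewConsoleWriter(func(cw *zerolog.ConsoleWriter) {
		cw.Out = os.Stderr
		cw.TimeFormat = time.RFC3339 // overrides the time.Kitchen default
		cw.NoColor = true            // plain output, no ANSI escape codes
	})

	logger := zerolog.New(w).With().Timestamp().Logger()
	logger.Warn().Str("path", "/tmp").Msg("disk almost full")
}
```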
8,769
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/log.go
// Package zerolog provides a lightweight logging library dedicated to JSON logging. // // A global Logger can be used for simple logging: // // import "github.com/rs/zerolog/log" // // log.Info().Msg("hello world") // // Output: {"time":1494567715,"level":"info","message":"hello world"} // // NOTE: To import the global logger, import the "log" subpackage "github.com/rs/zerolog/log". // // Fields can be added to log messages: // // log.Info().Str("foo", "bar").Msg("hello world") // // Output: {"time":1494567715,"level":"info","message":"hello world","foo":"bar"} // // Create a logger instance to manage different outputs: // // logger := zerolog.New(os.Stderr).With().Timestamp().Logger() // logger.Info(). // Str("foo", "bar"). // Msg("hello world") // // Output: {"time":1494567715,"level":"info","message":"hello world","foo":"bar"} // // Sub-loggers let you chain loggers with additional context: // // sublogger := log.With().Str("component", "foo").Logger() // sublogger.Info().Msg("hello world") // // Output: {"time":1494567715,"level":"info","message":"hello world","component":"foo"} // // Level logging // // zerolog.SetGlobalLevel(zerolog.InfoLevel) // // log.Debug().Msg("filtered out message") // log.Info().Msg("routed message") // // if e := log.Debug(); e.Enabled() { // // Compute log output only if enabled. // value := compute() // e.Str("foo", value).Msg("some debug message") // } // // Output: {"level":"info","time":1494567715,"message":"routed message"} // // Customize automatic field names: // // log.TimestampFieldName = "t" // log.LevelFieldName = "p" // log.MessageFieldName = "m" // // log.Info().Msg("hello world") // // Output: {"t":1494567715,"p":"info","m":"hello world"} // // Log with no level and message: // // log.Log().Str("foo","bar").Msg("") // // Output: {"time":1494567715,"foo":"bar"} // // Add contextual fields to global Logger: // // log.Logger = log.With().Str("foo", "bar").Logger() // // Sample logs: // // sampled := log.Sample(&zerolog.BasicSampler{N: 10}) // sampled.Info().Msg("will be logged every 10 messages") // // Log with contextual hooks: // // // Create the hook: // type SeverityHook struct{} // // func (h SeverityHook) Run(e *zerolog.Event, level zerolog.Level, msg string) { // if level != zerolog.NoLevel { // e.Str("severity", level.String()) // } // } // // // And use it: // var h SeverityHook // log := zerolog.New(os.Stdout).Hook(h) // log.Warn().Msg("") // // Output: {"level":"warn","severity":"warn"} // // // Caveats // // There is no fields deduplication out-of-the-box. // Using the same key multiple times creates a new key in the final JSON each time. // // logger := zerolog.New(os.Stderr).With().Timestamp().Logger() // logger.Info(). // Timestamp(). // Msg("dup") // // Output: {"level":"info","time":1494567715,"time":1494567715,"message":"dup"} // // In this case, many consumers will take the last value, // but this is not guaranteed; check yours if in doubt. package zerolog import ( "fmt" "io" "io/ioutil" "os" "strconv" ) // Level defines log levels. type Level int8 const ( // DebugLevel defines debug log level. DebugLevel Level = iota // InfoLevel defines info log level. InfoLevel // WarnLevel defines warn log level. WarnLevel // ErrorLevel defines error log level. ErrorLevel // FatalLevel defines fatal log level. FatalLevel // PanicLevel defines panic log level. PanicLevel // NoLevel defines an absent log level. NoLevel // Disabled disables the logger. Disabled // TraceLevel defines trace log level.
TraceLevel Level = -1 ) func (l Level) String() string { switch l { case TraceLevel: return "trace" case DebugLevel: return "debug" case InfoLevel: return "info" case WarnLevel: return "warn" case ErrorLevel: return "error" case FatalLevel: return "fatal" case PanicLevel: return "panic" case NoLevel: return "" } return "" } // ParseLevel converts a level string into a zerolog Level value. // returns an error if the input string does not match known values. func ParseLevel(levelStr string) (Level, error) { switch levelStr { case LevelFieldMarshalFunc(TraceLevel): return TraceLevel, nil case LevelFieldMarshalFunc(DebugLevel): return DebugLevel, nil case LevelFieldMarshalFunc(InfoLevel): return InfoLevel, nil case LevelFieldMarshalFunc(WarnLevel): return WarnLevel, nil case LevelFieldMarshalFunc(ErrorLevel): return ErrorLevel, nil case LevelFieldMarshalFunc(FatalLevel): return FatalLevel, nil case LevelFieldMarshalFunc(PanicLevel): return PanicLevel, nil case LevelFieldMarshalFunc(NoLevel): return NoLevel, nil } return NoLevel, fmt.Errorf("Unknown Level String: '%s', defaulting to NoLevel", levelStr) } // A Logger represents an active logging object that generates lines // of JSON output to an io.Writer. Each logging operation makes a single // call to the Writer's Write method. There is no guarantee on access // serialization to the Writer. If your Writer is not thread safe, // you may consider a sync wrapper. type Logger struct { w LevelWriter level Level sampler Sampler context []byte hooks []Hook } // New creates a root logger with given output writer. If the output writer implements // the LevelWriter interface, the WriteLevel method will be called instead of the Write // one. // // Each logging operation makes a single call to the Writer's Write method. There is no // guarantee on access serialization to the Writer. If your Writer is not thread safe, // you may consider using sync wrapper. func New(w io.Writer) Logger { if w == nil { w = ioutil.Discard } lw, ok := w.(LevelWriter) if !ok { lw = levelWriterAdapter{w} } return Logger{w: lw, level: TraceLevel} } // Nop returns a disabled logger for which all operation are no-op. func Nop() Logger { return New(nil).Level(Disabled) } // Output duplicates the current logger and sets w as its output. func (l Logger) Output(w io.Writer) Logger { l2 := New(w) l2.level = l.level l2.sampler = l.sampler if len(l.hooks) > 0 { l2.hooks = append(l2.hooks, l.hooks...) } if l.context != nil { l2.context = make([]byte, len(l.context), cap(l.context)) copy(l2.context, l.context) } return l2 } // With creates a child logger with the field added to its context. func (l Logger) With() Context { context := l.context l.context = make([]byte, 0, 500) if context != nil { l.context = append(l.context, context...) } else { // This is needed for AppendKey to not check len of input // thus making it inlinable l.context = enc.AppendBeginMarker(l.context) } return Context{l} } // UpdateContext updates the internal logger's context. // // Use this method with caution. If unsure, prefer the With method. func (l *Logger) UpdateContext(update func(c Context) Context) { if l == disabledLogger { return } if cap(l.context) == 0 { l.context = make([]byte, 0, 500) } c := update(Context{*l}) l.context = c.l.context } // Level creates a child logger with the minimum accepted level set to level. func (l Logger) Level(lvl Level) Logger { l.level = lvl return l } // GetLevel returns the current Level of l. 
func (l Logger) GetLevel() Level {
    return l.level
}

// Sample returns a logger with the sampler s.
func (l Logger) Sample(s Sampler) Logger {
    l.sampler = s
    return l
}

// Hook returns a logger with the hook h.
func (l Logger) Hook(h Hook) Logger {
    l.hooks = append(l.hooks, h)
    return l
}

// Trace starts a new message with trace level.
//
// You must call Msg on the returned event in order to send the event.
func (l *Logger) Trace() *Event {
    return l.newEvent(TraceLevel, nil)
}

// Debug starts a new message with debug level.
//
// You must call Msg on the returned event in order to send the event.
func (l *Logger) Debug() *Event {
    return l.newEvent(DebugLevel, nil)
}

// Info starts a new message with info level.
//
// You must call Msg on the returned event in order to send the event.
func (l *Logger) Info() *Event {
    return l.newEvent(InfoLevel, nil)
}

// Warn starts a new message with warn level.
//
// You must call Msg on the returned event in order to send the event.
func (l *Logger) Warn() *Event {
    return l.newEvent(WarnLevel, nil)
}

// Error starts a new message with error level.
//
// You must call Msg on the returned event in order to send the event.
func (l *Logger) Error() *Event {
    return l.newEvent(ErrorLevel, nil)
}

// Err starts a new message with error level with err as a field if not nil or
// with info level if err is nil.
//
// You must call Msg on the returned event in order to send the event.
func (l *Logger) Err(err error) *Event {
    if err != nil {
        return l.Error().Err(err)
    }

    return l.Info()
}

// Fatal starts a new message with fatal level. The os.Exit(1) function
// is called by the Msg method, which terminates the program immediately.
//
// You must call Msg on the returned event in order to send the event.
func (l *Logger) Fatal() *Event {
    return l.newEvent(FatalLevel, func(msg string) { os.Exit(1) })
}

// Panic starts a new message with panic level. The panic() function
// is called by the Msg method, which stops the ordinary flow of a goroutine.
//
// You must call Msg on the returned event in order to send the event.
func (l *Logger) Panic() *Event {
    return l.newEvent(PanicLevel, func(msg string) { panic(msg) })
}

// WithLevel starts a new message with level. Unlike the Fatal and Panic
// methods, WithLevel does not terminate the program or stop the ordinary
// flow of a goroutine when used with their respective levels.
//
// You must call Msg on the returned event in order to send the event.
func (l *Logger) WithLevel(level Level) *Event {
    switch level {
    case TraceLevel:
        return l.Trace()
    case DebugLevel:
        return l.Debug()
    case InfoLevel:
        return l.Info()
    case WarnLevel:
        return l.Warn()
    case ErrorLevel:
        return l.Error()
    case FatalLevel:
        return l.newEvent(FatalLevel, nil)
    case PanicLevel:
        return l.newEvent(PanicLevel, nil)
    case NoLevel:
        return l.Log()
    case Disabled:
        return nil
    default:
        panic("zerolog: WithLevel(): invalid level: " + strconv.Itoa(int(level)))
    }
}

// Log starts a new message with no level. Setting GlobalLevel to Disabled
// will still disable events produced by this method.
//
// You must call Msg on the returned event in order to send the event.
func (l *Logger) Log() *Event {
    return l.newEvent(NoLevel, nil)
}

// Print sends a log event using debug level and no extra field.
// Arguments are handled in the manner of fmt.Print.
func (l *Logger) Print(v ...interface{}) {
    if e := l.Debug(); e.Enabled() {
        e.Msg(fmt.Sprint(v...))
    }
}

// Printf sends a log event using debug level and no extra field.
// Arguments are handled in the manner of fmt.Printf.
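//
// For example, l.Printf("listening on %s", addr) emits a debug-level event
// whose message is the formatted string.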
func (l *Logger) Printf(format string, v ...interface{}) {
    if e := l.Debug(); e.Enabled() {
        e.Msg(fmt.Sprintf(format, v...))
    }
}

// Write implements the io.Writer interface. This is useful to set as a writer
// for the standard library log.
func (l Logger) Write(p []byte) (n int, err error) {
    n = len(p)
    if n > 0 && p[n-1] == '\n' {
        // Trim the trailing newline added by the standard library logger.
        p = p[0 : n-1]
    }
    l.Log().Msg(string(p))
    return
}

func (l *Logger) newEvent(level Level, done func(string)) *Event {
    enabled := l.should(level)
    if !enabled {
        return nil
    }
    e := newEvent(l.w, level)
    e.done = done
    e.ch = l.hooks
    if level != NoLevel {
        e.Str(LevelFieldName, LevelFieldMarshalFunc(level))
    }
    if l.context != nil && len(l.context) > 1 {
        e.buf = enc.AppendObjectData(e.buf, l.context)
    }
    return e
}

// should returns true if the log event should be logged.
func (l *Logger) should(lvl Level) bool {
    if lvl < l.level || lvl < GlobalLevel() {
        return false
    }
    if l.sampler != nil && !samplingDisabled() {
        return l.sampler.Sample(lvl)
    }
    return true
}
8,770
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/README.md
# Zero Allocation JSON Logger

[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/zerolog) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/zerolog/master/LICENSE) [![Build Status](https://travis-ci.org/rs/zerolog.svg?branch=master)](https://travis-ci.org/rs/zerolog) [![Coverage](http://gocover.io/_badge/github.com/rs/zerolog)](http://gocover.io/github.com/rs/zerolog)

The zerolog package provides a fast and simple logger dedicated to JSON output.

Zerolog's API is designed to provide both a great developer experience and stunning [performance](#benchmarks). Its unique chaining API allows zerolog to write JSON (or CBOR) log events by avoiding allocations and reflection.

Uber's [zap](https://godoc.org/go.uber.org/zap) library pioneered this approach. Zerolog is taking this concept to the next level with a simpler-to-use API and even better performance.

To keep the code base and the API simple, zerolog focuses on efficient structured logging only. Pretty logging on the console is made possible using the provided (but inefficient) [`zerolog.ConsoleWriter`](#pretty-logging).

![Pretty Logging Image](pretty.png)

## Who uses zerolog

Find out [who uses zerolog](https://github.com/rs/zerolog/wiki/Who-uses-zerolog) and add your company / project to the list.

## Features

* Blazing fast
* Low to zero allocation
* Level logging
* Sampling
* Hooks
* Contextual fields
* `context.Context` integration
* `net/http` helpers
* JSON and CBOR encoding formats
* Pretty logging for development

## Installation

```bash
go get -u github.com/rs/zerolog/log
```

## Getting Started

### Simple Logging Example

For simple logging, import the global logger package **github.com/rs/zerolog/log**.

```go
package main

import (
    "github.com/rs/zerolog"
    "github.com/rs/zerolog/log"
)

func main() {
    // UNIX Time is faster and smaller than most timestamps
    // If you set zerolog.TimeFieldFormat to an empty string,
    // logs will write with UNIX time
    zerolog.TimeFieldFormat = zerolog.TimeFormatUnix

    log.Print("hello world")
}

// Output: {"time":1516134303,"level":"debug","message":"hello world"}
```

> Note: By default log writes to `os.Stderr`
> Note: The default log level for `log.Print` is *debug*

### Contextual Logging

**zerolog** allows data to be added to log messages in the form of key:value pairs. The data added to the message adds "context" about the log event that can be critical for debugging as well as myriad other purposes. An example of this is below:

```go
package main

import (
    "github.com/rs/zerolog"
    "github.com/rs/zerolog/log"
)

func main() {
    zerolog.TimeFieldFormat = zerolog.TimeFormatUnix

    log.Debug().
        Str("Scale", "833 cents").
        Float64("Interval", 833.09).
        Msg("Fibonacci is everywhere")

    log.Debug().
        Str("Name", "Tom").
        Send()
}

// Output: {"level":"debug","Scale":"833 cents","Interval":833.09,"time":1562212768,"message":"Fibonacci is everywhere"}
// Output: {"level":"debug","Name":"Tom","time":1562212768}
```

> You'll note in the above example that when adding contextual fields, the fields are strongly typed.
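A few more of the strongly typed field methods, as a minimal sketch (the field names here are illustrative, not prescribed):

```go
log.Info().
    Int("attempts", 3).
    Bool("cached", false).
    Dur("elapsed", 125*time.Millisecond).
    Msg("request served")

// Output: {"level":"info","attempts":3,"cached":false,"elapsed":125,"time":1562212768,"message":"request served"}
```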
You can find the full list of supported fields [here](#standard-types).

### Leveled Logging

#### Simple Leveled Logging Example

```go
package main

import (
    "github.com/rs/zerolog"
    "github.com/rs/zerolog/log"
)

func main() {
    zerolog.TimeFieldFormat = zerolog.TimeFormatUnix

    log.Info().Msg("hello world")
}

// Output: {"time":1516134303,"level":"info","message":"hello world"}
```

> It is very important to note that when using the **zerolog** chaining API, as shown above (`log.Info().Msg("hello world")`), the chain must have either the `Msg` or `Msgf` method call. If you forget to add either of these, the log will not occur and there is no compile-time error to alert you of this.

**zerolog** allows for logging at the following levels (from highest to lowest):

* panic (`zerolog.PanicLevel`, 5)
* fatal (`zerolog.FatalLevel`, 4)
* error (`zerolog.ErrorLevel`, 3)
* warn (`zerolog.WarnLevel`, 2)
* info (`zerolog.InfoLevel`, 1)
* debug (`zerolog.DebugLevel`, 0)
* trace (`zerolog.TraceLevel`, -1)

You can set the Global logging level to any of these options using the `SetGlobalLevel` function in the zerolog package, passing in one of the given constants above, e.g. `zerolog.InfoLevel` would be the "info" level. Whichever level is chosen, all logs with a level greater than or equal to that level will be written. To turn off logging entirely, pass the `zerolog.Disabled` constant.

#### Setting Global Log Level

This example uses command-line flags to demonstrate various outputs depending on the chosen log level.

```go
package main

import (
    "flag"

    "github.com/rs/zerolog"
    "github.com/rs/zerolog/log"
)

func main() {
    zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
    debug := flag.Bool("debug", false, "sets log level to debug")

    flag.Parse()

    // Default level for this example is info, unless debug flag is present
    zerolog.SetGlobalLevel(zerolog.InfoLevel)
    if *debug {
        zerolog.SetGlobalLevel(zerolog.DebugLevel)
    }

    log.Debug().Msg("This message appears only when log level set to Debug")
    log.Info().Msg("This message appears when log level set to Debug or Info")

    if e := log.Debug(); e.Enabled() {
        // Compute log output only if enabled.
        value := "bar"
        e.Str("foo", value).Msg("some debug message")
    }
}
```

Info Output (no flag)

```bash
$ ./logLevelExample
{"time":1516387492,"level":"info","message":"This message appears when log level set to Debug or Info"}
```

Debug Output (debug flag set)

```bash
$ ./logLevelExample -debug
{"time":1516387573,"level":"debug","message":"This message appears only when log level set to Debug"}
{"time":1516387573,"level":"info","message":"This message appears when log level set to Debug or Info"}
{"time":1516387573,"level":"debug","foo":"bar","message":"some debug message"}
```

#### Logging without Level or Message

You may choose to log without a specific level by using the `Log` method. You may also write without a message by setting an empty string in the `msg string` parameter of the `Msg` method. Both are demonstrated in the example below.

```go
package main

import (
    "github.com/rs/zerolog"
    "github.com/rs/zerolog/log"
)

func main() {
    zerolog.TimeFieldFormat = zerolog.TimeFormatUnix

    log.Log().
        Str("foo", "bar").
        Msg("")
}

// Output: {"time":1494567715,"foo":"bar"}
```

#### Logging Fatal Messages

```go
package main

import (
    "errors"

    "github.com/rs/zerolog"
    "github.com/rs/zerolog/log"
)

func main() {
    err := errors.New("A repo man spends his life getting into tense situations")
    service := "myservice"

    zerolog.TimeFieldFormat = zerolog.TimeFormatUnix

    log.Fatal().
        Err(err).
        Str("service", service).
Msgf("Cannot start %s", service) } // Output: {"time":1516133263,"level":"fatal","error":"A repo man spends his life getting into tense situations","service":"myservice","message":"Cannot start myservice"} // exit status 1 ``` > NOTE: Using `Msgf` generates one allocation even when the logger is disabled. ### Create logger instance to manage different outputs ```go logger := zerolog.New(os.Stderr).With().Timestamp().Logger() logger.Info().Str("foo", "bar").Msg("hello world") // Output: {"level":"info","time":1494567715,"message":"hello world","foo":"bar"} ``` ### Sub-loggers let you chain loggers with additional context ```go sublogger := log.With(). Str("component", "foo"). Logger() sublogger.Info().Msg("hello world") // Output: {"level":"info","time":1494567715,"message":"hello world","component":"foo"} ``` ### Pretty logging To log a human-friendly, colorized output, use `zerolog.ConsoleWriter`: ```go log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr}) log.Info().Str("foo", "bar").Msg("Hello world") // Output: 3:04PM INF Hello World foo=bar ``` To customize the configuration and formatting: ```go output := zerolog.ConsoleWriter{Out: os.Stdout, TimeFormat: time.RFC3339} output.FormatLevel = func(i interface{}) string { return strings.ToUpper(fmt.Sprintf("| %-6s|", i)) } output.FormatMessage = func(i interface{}) string { return fmt.Sprintf("***%s****", i) } output.FormatFieldName = func(i interface{}) string { return fmt.Sprintf("%s:", i) } output.FormatFieldValue = func(i interface{}) string { return strings.ToUpper(fmt.Sprintf("%s", i)) } log := zerolog.New(output).With().Timestamp().Logger() log.Info().Str("foo", "bar").Msg("Hello World") // Output: 2006-01-02T15:04:05Z07:00 | INFO | ***Hello World**** foo:BAR ``` ### Sub dictionary ```go log.Info(). Str("foo", "bar"). Dict("dict", zerolog.Dict(). Str("bar", "baz"). Int("n", 1), ).Msg("hello world") // Output: {"level":"info","time":1494567715,"foo":"bar","dict":{"bar":"baz","n":1},"message":"hello world"} ``` ### Customize automatic field names ```go zerolog.TimestampFieldName = "t" zerolog.LevelFieldName = "l" zerolog.MessageFieldName = "m" log.Info().Msg("hello world") // Output: {"l":"info","t":1494567715,"m":"hello world"} ``` ### Add contextual fields to the global logger ```go log.Logger = log.With().Str("foo", "bar").Logger() ``` ### Add file and line number to log ```go log.Logger = log.With().Caller().Logger() log.Info().Msg("hello world") // Output: {"level": "info", "message": "hello world", "caller": "/go/src/your_project/some_file:21"} ``` ### Thread-safe, lock-free, non-blocking writer If your writer might be slow or not thread-safe and you need your log producers to never get slowed down by a slow writer, you can use a `diode.Writer` as follow: ```go wr := diode.NewWriter(os.Stdout, 1000, 10*time.Millisecond, func(missed int) { fmt.Printf("Logger Dropped %d messages", missed) }) log := zerolog.New(wr) log.Print("test") ``` You will need to install `code.cloudfoundry.org/go-diodes` to use this feature. ### Log Sampling ```go sampled := log.Sample(&zerolog.BasicSampler{N: 10}) sampled.Info().Msg("will be logged every 10 messages") // Output: {"time":1494567715,"level":"info","message":"will be logged every 10 messages"} ``` More advanced sampling: ```go // Will let 5 debug messages per period of 1 second. // Over 5 debug message, 1 every 100 debug messages are logged. // Other levels are not sampled. 
sampled := log.Sample(zerolog.LevelSampler{
    DebugSampler: &zerolog.BurstSampler{
        Burst: 5,
        Period: 1*time.Second,
        NextSampler: &zerolog.BasicSampler{N: 100},
    },
})
sampled.Debug().Msg("hello world")

// Output: {"time":1494567715,"level":"debug","message":"hello world"}
```

### Hooks

```go
type SeverityHook struct{}

func (h SeverityHook) Run(e *zerolog.Event, level zerolog.Level, msg string) {
    if level != zerolog.NoLevel {
        e.Str("severity", level.String())
    }
}

hooked := log.Hook(SeverityHook{})
hooked.Warn().Msg("")

// Output: {"level":"warn","severity":"warn"}
```

### Pass a sub-logger by context

```go
ctx := log.With().Str("component", "module").Logger().WithContext(ctx)

log.Ctx(ctx).Info().Msg("hello world")

// Output: {"component":"module","level":"info","message":"hello world"}
```

### Set as standard logger output

```go
log := zerolog.New(os.Stdout).With().
    Str("foo", "bar").
    Logger()

stdlog.SetFlags(0)
stdlog.SetOutput(log)

stdlog.Print("hello world")

// Output: {"foo":"bar","message":"hello world"}
```

### Integration with `net/http`

The `github.com/rs/zerolog/hlog` package provides some helpers to integrate zerolog with `http.Handler`.

In this example we use [alice](https://github.com/justinas/alice) to install the logger for better readability.

```go
log := zerolog.New(os.Stdout).With().
    Timestamp().
    Str("role", "my-service").
    Str("host", host).
    Logger()

c := alice.New()

// Install the logger handler with default output on the console
c = c.Append(hlog.NewHandler(log))

// Install some provided extra handlers to set request context fields.
// Thanks to those handlers, all our logs will come with prepopulated fields.
c = c.Append(hlog.AccessHandler(func(r *http.Request, status, size int, duration time.Duration) {
    hlog.FromRequest(r).Info().
        Str("method", r.Method).
        Stringer("url", r.URL).
        Int("status", status).
        Int("size", size).
        Dur("duration", duration).
        Msg("")
}))
c = c.Append(hlog.RemoteAddrHandler("ip"))
c = c.Append(hlog.UserAgentHandler("user_agent"))
c = c.Append(hlog.RefererHandler("referer"))
c = c.Append(hlog.RequestIDHandler("req_id", "Request-Id"))

// Here is your final handler
h := c.Then(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
    // Get the logger from the request's context. You can safely assume it
    // will always be there: if the handler is removed, hlog.FromRequest
    // will return a no-op logger.
    hlog.FromRequest(r).Info().
        Str("user", "current user").
        Str("status", "ok").
        Msg("Something happened")

    // Output: {"level":"info","time":"2001-02-03T04:05:06Z","role":"my-service","host":"local-hostname","req_id":"b4g0l5t6tfid6dtrapu0","user":"current user","status":"ok","message":"Something happened"}
}))
http.Handle("/", h)

if err := http.ListenAndServe(":8080", nil); err != nil {
    log.Fatal().Err(err).Msg("Startup failed")
}
```

## Multiple Log Output

`zerolog.MultiLevelWriter` may be used to send the log message to multiple outputs. In this example, we send the log message to both `os.Stdout` and the built-in ConsoleWriter.

```go
func main() {
    consoleWriter := zerolog.ConsoleWriter{Out: os.Stdout}

    multi := zerolog.MultiLevelWriter(consoleWriter, os.Stdout)

    logger := zerolog.New(multi).With().Timestamp().Logger()

    logger.Info().Msg("Hello World!")
}

// Output (Line 1: Console; Line 2: Stdout)
// 12:36PM INF Hello World!
// {"level":"info","time":"2019-11-07T12:36:38+03:00","message":"Hello World!"} ``` ## Global Settings Some settings can be changed and will by applied to all loggers: * `log.Logger`: You can set this value to customize the global logger (the one used by package level methods). * `zerolog.SetGlobalLevel`: Can raise the minimum level of all loggers. Call this with `zerolog.Disabled` to disable logging altogether (quiet mode). * `zerolog.DisableSampling`: If argument is `true`, all sampled loggers will stop sampling and issue 100% of their log events. * `zerolog.TimestampFieldName`: Can be set to customize `Timestamp` field name. * `zerolog.LevelFieldName`: Can be set to customize level field name. * `zerolog.MessageFieldName`: Can be set to customize message field name. * `zerolog.ErrorFieldName`: Can be set to customize `Err` field name. * `zerolog.TimeFieldFormat`: Can be set to customize `Time` field value formatting. If set with `zerolog.TimeFormatUnix`, `zerolog.TimeFormatUnixMs` or `zerolog.TimeFormatUnixMicro`, times are formated as UNIX timestamp. * `zerolog.DurationFieldUnit`: Can be set to customize the unit for time.Duration type fields added by `Dur` (default: `time.Millisecond`). * `zerolog.DurationFieldInteger`: If set to `true`, `Dur` fields are formatted as integers instead of floats (default: `false`). * `zerolog.ErrorHandler`: Called whenever zerolog fails to write an event on its output. If not set, an error is printed on the stderr. This handler must be thread safe and non-blocking. ## Field Types ### Standard Types * `Str` * `Bool` * `Int`, `Int8`, `Int16`, `Int32`, `Int64` * `Uint`, `Uint8`, `Uint16`, `Uint32`, `Uint64` * `Float32`, `Float64` ### Advanced Fields * `Err`: Takes an `error` and renders it as a string using the `zerolog.ErrorFieldName` field name. * `Timestamp`: Inserts a timestamp field with `zerolog.TimestampFieldName` field name, formatted using `zerolog.TimeFieldFormat`. * `Time`: Adds a field with time formatted with `zerolog.TimeFieldFormat`. * `Dur`: Adds a field with `time.Duration`. * `Dict`: Adds a sub-key/value as a field of the event. * `RawJSON`: Adds a field with an already encoded JSON (`[]byte`) * `Hex`: Adds a field with value formatted as a hexadecimal string (`[]byte`) * `Interface`: Uses reflection to marshal the type. Most fields are also available in the slice format (`Strs` for `[]string`, `Errs` for `[]error` etc.) ## Binary Encoding In addition to the default JSON encoding, `zerolog` can produce binary logs using [CBOR](http://cbor.io) encoding. The choice of encoding can be decided at compile time using the build tag `binary_log` as follows: ```bash go build -tags binary_log . ``` To Decode binary encoded log files you can use any CBOR decoder. One has been tested to work with zerolog library is [CSD](https://github.com/toravir/csd/). ## Related Projects * [grpc-zerolog](https://github.com/cheapRoc/grpc-zerolog): Implementation of `grpclog.LoggerV2` interface using `zerolog` ## Benchmarks See [logbench](http://hackemist.com/logbench/) for more comprehensive and up-to-date benchmarks. 
All operations are allocation free (those numbers *include* JSON encoding):

```text
BenchmarkLogEmpty-8        100000000    19.1 ns/op     0 B/op       0 allocs/op
BenchmarkDisabled-8        500000000    4.07 ns/op     0 B/op       0 allocs/op
BenchmarkInfo-8            30000000     42.5 ns/op     0 B/op       0 allocs/op
BenchmarkContextFields-8   30000000     44.9 ns/op     0 B/op       0 allocs/op
BenchmarkLogFields-8       10000000     184 ns/op      0 B/op       0 allocs/op
```

There are a few Go logging benchmarks and comparisons that include zerolog.

* [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench)
* [uber-common/zap](https://github.com/uber-go/zap#performance)

Using Uber's zap comparison benchmark:

Log a message and 10 fields:

| Library | Time | Bytes Allocated | Objects Allocated |
| :--- | :---: | :---: | :---: |
| zerolog | 767 ns/op | 552 B/op | 6 allocs/op |
| :zap: zap | 848 ns/op | 704 B/op | 2 allocs/op |
| :zap: zap (sugared) | 1363 ns/op | 1610 B/op | 20 allocs/op |
| go-kit | 3614 ns/op | 2895 B/op | 66 allocs/op |
| lion | 5392 ns/op | 5807 B/op | 63 allocs/op |
| logrus | 5661 ns/op | 6092 B/op | 78 allocs/op |
| apex/log | 15332 ns/op | 3832 B/op | 65 allocs/op |
| log15 | 20657 ns/op | 5632 B/op | 93 allocs/op |

Log a message with a logger that already has 10 fields of context:

| Library | Time | Bytes Allocated | Objects Allocated |
| :--- | :---: | :---: | :---: |
| zerolog | 52 ns/op | 0 B/op | 0 allocs/op |
| :zap: zap | 283 ns/op | 0 B/op | 0 allocs/op |
| :zap: zap (sugared) | 337 ns/op | 80 B/op | 2 allocs/op |
| lion | 2702 ns/op | 4074 B/op | 38 allocs/op |
| go-kit | 3378 ns/op | 3046 B/op | 52 allocs/op |
| logrus | 4309 ns/op | 4564 B/op | 63 allocs/op |
| apex/log | 13456 ns/op | 2898 B/op | 51 allocs/op |
| log15 | 14179 ns/op | 2642 B/op | 44 allocs/op |

Log a static string, without any context or `printf`-style templating:

| Library | Time | Bytes Allocated | Objects Allocated |
| :--- | :---: | :---: | :---: |
| zerolog | 50 ns/op | 0 B/op | 0 allocs/op |
| :zap: zap | 236 ns/op | 0 B/op | 0 allocs/op |
| standard library | 453 ns/op | 80 B/op | 2 allocs/op |
| :zap: zap (sugared) | 337 ns/op | 80 B/op | 2 allocs/op |
| go-kit | 508 ns/op | 656 B/op | 13 allocs/op |
| lion | 771 ns/op | 1224 B/op | 10 allocs/op |
| logrus | 1244 ns/op | 1505 B/op | 27 allocs/op |
| apex/log | 2751 ns/op | 584 B/op | 11 allocs/op |
| log15 | 5181 ns/op | 1592 B/op | 26 allocs/op |

## Caveats

Note that zerolog does not de-duplicate fields. Using the same key multiple times creates multiple keys in the final JSON:

```go
logger := zerolog.New(os.Stderr).With().Timestamp().Logger()
logger.Info().
       Timestamp().
       Msg("dup")
// Output: {"level":"info","time":1494567715,"time":1494567715,"message":"dup"}
```

In this case, many consumers will take the last value, but this is not guaranteed; check yours if in doubt.
8,771
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/encoder.go
package zerolog import ( "net" "time" ) type encoder interface { AppendArrayDelim(dst []byte) []byte AppendArrayEnd(dst []byte) []byte AppendArrayStart(dst []byte) []byte AppendBeginMarker(dst []byte) []byte AppendBool(dst []byte, val bool) []byte AppendBools(dst []byte, vals []bool) []byte AppendBytes(dst, s []byte) []byte AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte AppendEndMarker(dst []byte) []byte AppendFloat32(dst []byte, val float32) []byte AppendFloat64(dst []byte, val float64) []byte AppendFloats32(dst []byte, vals []float32) []byte AppendFloats64(dst []byte, vals []float64) []byte AppendHex(dst, s []byte) []byte AppendIPAddr(dst []byte, ip net.IP) []byte AppendIPPrefix(dst []byte, pfx net.IPNet) []byte AppendInt(dst []byte, val int) []byte AppendInt16(dst []byte, val int16) []byte AppendInt32(dst []byte, val int32) []byte AppendInt64(dst []byte, val int64) []byte AppendInt8(dst []byte, val int8) []byte AppendInterface(dst []byte, i interface{}) []byte AppendInts(dst []byte, vals []int) []byte AppendInts16(dst []byte, vals []int16) []byte AppendInts32(dst []byte, vals []int32) []byte AppendInts64(dst []byte, vals []int64) []byte AppendInts8(dst []byte, vals []int8) []byte AppendKey(dst []byte, key string) []byte AppendLineBreak(dst []byte) []byte AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte AppendNil(dst []byte) []byte AppendObjectData(dst []byte, o []byte) []byte AppendString(dst []byte, s string) []byte AppendStrings(dst []byte, vals []string) []byte AppendTime(dst []byte, t time.Time, format string) []byte AppendTimes(dst []byte, vals []time.Time, format string) []byte AppendUint(dst []byte, val uint) []byte AppendUint16(dst []byte, val uint16) []byte AppendUint32(dst []byte, val uint32) []byte AppendUint64(dst []byte, val uint64) []byte AppendUint8(dst []byte, val uint8) []byte AppendUints(dst []byte, vals []uint) []byte AppendUints16(dst []byte, vals []uint16) []byte AppendUints32(dst []byte, vals []uint32) []byte AppendUints64(dst []byte, vals []uint64) []byte AppendUints8(dst []byte, vals []uint8) []byte }
8,772
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/LICENSE
MIT License Copyright (c) 2017 Olivier Poitrey Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
8,773
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/hook.go
package zerolog // Hook defines an interface to a log hook. type Hook interface { // Run runs the hook with the event. Run(e *Event, level Level, message string) } // HookFunc is an adaptor to allow the use of an ordinary function // as a Hook. type HookFunc func(e *Event, level Level, message string) // Run implements the Hook interface. func (h HookFunc) Run(e *Event, level Level, message string) { h(e, level, message) } // LevelHook applies a different hook for each level. type LevelHook struct { NoLevelHook, TraceHook, DebugHook, InfoHook, WarnHook, ErrorHook, FatalHook, PanicHook Hook } // Run implements the Hook interface. func (h LevelHook) Run(e *Event, level Level, message string) { switch level { case TraceLevel: if h.TraceHook != nil { h.TraceHook.Run(e, level, message) } case DebugLevel: if h.DebugHook != nil { h.DebugHook.Run(e, level, message) } case InfoLevel: if h.InfoHook != nil { h.InfoHook.Run(e, level, message) } case WarnLevel: if h.WarnHook != nil { h.WarnHook.Run(e, level, message) } case ErrorLevel: if h.ErrorHook != nil { h.ErrorHook.Run(e, level, message) } case FatalLevel: if h.FatalHook != nil { h.FatalHook.Run(e, level, message) } case PanicLevel: if h.PanicHook != nil { h.PanicHook.Run(e, level, message) } case NoLevel: if h.NoLevelHook != nil { h.NoLevelHook.Run(e, level, message) } } } // NewLevelHook returns a new LevelHook. func NewLevelHook() LevelHook { return LevelHook{} }
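// Example (a sketch): HookFunc and LevelHook can be combined to run a hook
// only for error-level events.
//
//     lh := zerolog.NewLevelHook()
//     lh.ErrorHook = zerolog.HookFunc(func(e *zerolog.Event, level zerolog.Level, msg string) {
//         e.Bool("alert", true)
//     })
//     logger := zerolog.New(os.Stderr).Hook(lh)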
8,774
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/ctx.go
package zerolog

import (
    "context"
)

var disabledLogger *Logger

func init() {
    SetGlobalLevel(TraceLevel)
    l := Nop()
    disabledLogger = &l
}

type ctxKey struct{}

// WithContext returns a copy of ctx with l associated. If the same Logger
// instance is already associated with ctx, ctx is returned unchanged.
// Likewise, a disabled logger is not stored in a context that does not
// already carry a logger.
//
// For instance, to add a field to an existing logger in the context, use this
// notation:
//
//     ctx := r.Context()
//     l := zerolog.Ctx(ctx)
//     l.UpdateContext(func(c Context) Context {
//         return c.Str("bar", "baz")
//     })
func (l *Logger) WithContext(ctx context.Context) context.Context {
    if lp, ok := ctx.Value(ctxKey{}).(*Logger); ok {
        if lp == l {
            // Do not store same logger.
            return ctx
        }
    } else if l.level == Disabled {
        // Do not store disabled logger.
        return ctx
    }
    return context.WithValue(ctx, ctxKey{}, l)
}

// Ctx returns the Logger associated with the ctx. If no logger
// is associated, a disabled logger is returned.
func Ctx(ctx context.Context) *Logger {
    if l, ok := ctx.Value(ctxKey{}).(*Logger); ok {
        return l
    }
    return disabledLogger
}
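// Example (a sketch): associate a logger with a context and retrieve it later.
//
//     ctx := logger.WithContext(context.Background())
//     zerolog.Ctx(ctx).Info().Msg("hello from context")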
8,775
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/writer.go
package zerolog

import (
    "io"
    "sync"
)

// LevelWriter defines an interface a writer may implement in order
// to receive level information along with the payload.
type LevelWriter interface {
    io.Writer
    WriteLevel(level Level, p []byte) (n int, err error)
}

type levelWriterAdapter struct {
    io.Writer
}

func (lw levelWriterAdapter) WriteLevel(l Level, p []byte) (n int, err error) {
    return lw.Write(p)
}

type syncWriter struct {
    mu sync.Mutex
    lw LevelWriter
}

// SyncWriter wraps w so that each call to Write is synchronized with a mutex.
// This syncer can be used to wrap the call to the writer's Write method if it is
// not thread safe. Note that you do not need this wrapper for os.File Write
// operations on POSIX and Windows systems as they are already thread-safe.
func SyncWriter(w io.Writer) io.Writer {
    if lw, ok := w.(LevelWriter); ok {
        return &syncWriter{lw: lw}
    }
    return &syncWriter{lw: levelWriterAdapter{w}}
}

// Write implements the io.Writer interface.
func (s *syncWriter) Write(p []byte) (n int, err error) {
    s.mu.Lock()
    defer s.mu.Unlock()
    return s.lw.Write(p)
}

// WriteLevel implements the LevelWriter interface.
func (s *syncWriter) WriteLevel(l Level, p []byte) (n int, err error) {
    s.mu.Lock()
    defer s.mu.Unlock()
    return s.lw.WriteLevel(l, p)
}

type multiLevelWriter struct {
    writers []LevelWriter
}

func (t multiLevelWriter) Write(p []byte) (n int, err error) {
    for _, w := range t.writers {
        n, err = w.Write(p)
        if err != nil {
            return
        }
        if n != len(p) {
            err = io.ErrShortWrite
            return
        }
    }
    return len(p), nil
}

func (t multiLevelWriter) WriteLevel(l Level, p []byte) (n int, err error) {
    for _, w := range t.writers {
        n, err = w.WriteLevel(l, p)
        if err != nil {
            return
        }
        if n != len(p) {
            err = io.ErrShortWrite
            return
        }
    }
    return len(p), nil
}

// MultiLevelWriter creates a writer that duplicates its writes to all the
// provided writers, similar to the Unix tee(1) command. If some writers
// implement LevelWriter, their WriteLevel method will be used instead of Write.
func MultiLevelWriter(writers ...io.Writer) LevelWriter {
    lwriters := make([]LevelWriter, 0, len(writers))
    for _, w := range writers {
        if lw, ok := w.(LevelWriter); ok {
            lwriters = append(lwriters, lw)
        } else {
            lwriters = append(lwriters, levelWriterAdapter{w})
        }
    }
    return multiLevelWriter{lwriters}
}
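// Example (a sketch): wrap a writer that is not thread safe so concurrent
// loggers can share it, and tee output to several destinations.
//
//     w := zerolog.SyncWriter(someWriter) // someWriter is any io.Writer
//     multi := zerolog.MultiLevelWriter(w, os.Stdout)
//     logger := zerolog.New(multi)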
8,776
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/encoder_json.go
// +build !binary_log package zerolog // encoder_json.go file contains bindings to generate // JSON encoded byte stream. import ( "github.com/rs/zerolog/internal/json" ) var ( _ encoder = (*json.Encoder)(nil) enc = json.Encoder{} ) func appendJSON(dst []byte, j []byte) []byte { return append(dst, j...) } func decodeIfBinaryToString(in []byte) string { return string(in) } func decodeObjectToStr(in []byte) string { return string(in) } func decodeIfBinaryToBytes(in []byte) []byte { return in }
8,777
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/CNAME
zerolog.io
8,778
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/globals.go
package zerolog

import (
    "strconv"
    "sync/atomic"
    "time"
)

const (
    // TimeFormatUnix defines a time format that makes time fields be
    // serialized as Unix timestamp integers.
    TimeFormatUnix = ""

    // TimeFormatUnixMs defines a time format that makes time fields be
    // serialized as Unix timestamp integers in milliseconds.
    TimeFormatUnixMs = "UNIXMS"

    // TimeFormatUnixMicro defines a time format that makes time fields be
    // serialized as Unix timestamp integers in microseconds.
    TimeFormatUnixMicro = "UNIXMICRO"
)

var (
    // TimestampFieldName is the field name used for the timestamp field.
    TimestampFieldName = "time"

    // LevelFieldName is the field name used for the level field.
    LevelFieldName = "level"

    // LevelFieldMarshalFunc allows customization of global level field marshaling.
    LevelFieldMarshalFunc = func(l Level) string {
        return l.String()
    }

    // MessageFieldName is the field name used for the message field.
    MessageFieldName = "message"

    // ErrorFieldName is the field name used for error fields.
    ErrorFieldName = "error"

    // CallerFieldName is the field name used for the caller field.
    CallerFieldName = "caller"

    // CallerSkipFrameCount is the number of stack frames to skip to find the caller.
    CallerSkipFrameCount = 2

    // CallerMarshalFunc allows customization of global caller marshaling.
    CallerMarshalFunc = func(file string, line int) string {
        return file + ":" + strconv.Itoa(line)
    }

    // ErrorStackFieldName is the field name used for error stacks.
    ErrorStackFieldName = "stack"

    // ErrorStackMarshaler extracts the stack from err, if any.
    ErrorStackMarshaler func(err error) interface{}

    // ErrorMarshalFunc allows customization of global error marshaling.
    ErrorMarshalFunc = func(err error) interface{} {
        return err
    }

    // TimeFieldFormat defines the time format of the Time field type. If set to
    // TimeFormatUnix, TimeFormatUnixMs or TimeFormatUnixMicro, the time is
    // formatted as a UNIX timestamp integer.
    TimeFieldFormat = time.RFC3339

    // TimestampFunc defines the function called to generate a timestamp.
    TimestampFunc = time.Now

    // DurationFieldUnit defines the unit for time.Duration type fields added
    // using the Dur method.
    DurationFieldUnit = time.Millisecond

    // DurationFieldInteger renders Dur fields as integers instead of floats if
    // set to true.
    DurationFieldInteger = false

    // ErrorHandler is called whenever zerolog fails to write an event on its
    // output. If not set, an error is printed on stderr. This handler must
    // be thread safe and non-blocking.
    ErrorHandler func(err error)
)

var (
    gLevel          = new(int32)
    disableSampling = new(int32)
)

// SetGlobalLevel sets the global override for the log level. If this
// value is raised, all Loggers will use at least this value.
//
// To globally disable logs, set GlobalLevel to Disabled.
func SetGlobalLevel(l Level) {
    atomic.StoreInt32(gLevel, int32(l))
}

// GlobalLevel returns the current global log level.
func GlobalLevel() Level {
    return Level(atomic.LoadInt32(gLevel))
}

// DisableSampling will disable sampling in all Loggers if v is true.
func DisableSampling(v bool) {
    var i int32
    if v {
        i = 1
    }
    atomic.StoreInt32(disableSampling, i)
}

func samplingDisabled() bool {
    return atomic.LoadInt32(disableSampling) == 1
}
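// Example (a sketch): typical one-time global configuration at program startup.
//
//     zerolog.SetGlobalLevel(zerolog.InfoLevel)
//     zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
//     zerolog.DurationFieldInteger = true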
8,779
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/event.go
package zerolog

import (
    "fmt"
    "net"
    "os"
    "runtime"
    "sync"
    "time"
)

var eventPool = &sync.Pool{
    New: func() interface{} {
        return &Event{
            buf: make([]byte, 0, 500),
        }
    },
}

// Event represents a log event. It is instantiated by one of the level
// methods of Logger and finalized by the Msg or Msgf method.
type Event struct {
    buf   []byte
    w     LevelWriter
    level Level
    done  func(msg string)
    stack bool   // enable error stack trace
    ch    []Hook // hooks from context
}

func putEvent(e *Event) {
    // Proper usage of a sync.Pool requires each entry to have approximately
    // the same memory cost. To obtain this property when the stored type
    // contains a variably-sized buffer, we add a hard limit on the maximum buffer
    // to place back in the pool.
    //
    // See https://golang.org/issue/23199
    const maxSize = 1 << 16 // 64KiB
    if cap(e.buf) > maxSize {
        return
    }
    eventPool.Put(e)
}

// LogObjectMarshaler provides a strongly-typed and encoding-agnostic interface
// to be implemented by types used with Event/Context's Object methods.
type LogObjectMarshaler interface {
    MarshalZerologObject(e *Event)
}

// LogArrayMarshaler provides a strongly-typed and encoding-agnostic interface
// to be implemented by types used with Event/Context's Array methods.
type LogArrayMarshaler interface {
    MarshalZerologArray(a *Array)
}

func newEvent(w LevelWriter, level Level) *Event {
    e := eventPool.Get().(*Event)
    e.buf = e.buf[:0]
    e.ch = nil
    e.buf = enc.AppendBeginMarker(e.buf)
    e.w = w
    e.level = level
    e.stack = false
    return e
}

func (e *Event) write() (err error) {
    if e == nil {
        return nil
    }
    if e.level != Disabled {
        e.buf = enc.AppendEndMarker(e.buf)
        e.buf = enc.AppendLineBreak(e.buf)
        if e.w != nil {
            _, err = e.w.WriteLevel(e.level, e.buf)
        }
    }
    putEvent(e)
    return
}

// Enabled returns false if the *Event is going to be filtered out by
// log level or sampling.
func (e *Event) Enabled() bool {
    return e != nil && e.level != Disabled
}

// Discard disables the event so Msg(f) won't print it.
func (e *Event) Discard() *Event {
    if e == nil {
        return e
    }
    e.level = Disabled
    return nil
}

// Msg sends the *Event with msg added as the message field if not empty.
//
// NOTICE: once this method is called, the *Event should be disposed.
// Calling Msg twice can have unexpected results.
func (e *Event) Msg(msg string) {
    if e == nil {
        return
    }
    e.msg(msg)
}

// Send is equivalent to calling Msg("").
//
// NOTICE: once this method is called, the *Event should be disposed.
func (e *Event) Send() {
    if e == nil {
        return
    }
    e.msg("")
}

// Msgf sends the event with formatted msg added as the message field if not empty.
//
// NOTICE: once this method is called, the *Event should be disposed.
// Calling Msgf twice can have unexpected results.
func (e *Event) Msgf(format string, v ...interface{}) {
    if e == nil {
        return
    }
    e.msg(fmt.Sprintf(format, v...))
}

func (e *Event) msg(msg string) {
    for _, hook := range e.ch {
        hook.Run(e, e.level, msg)
    }
    if msg != "" {
        e.buf = enc.AppendString(enc.AppendKey(e.buf, MessageFieldName), msg)
    }
    if e.done != nil {
        defer e.done(msg)
    }
    if err := e.write(); err != nil {
        if ErrorHandler != nil {
            ErrorHandler(err)
        } else {
            fmt.Fprintf(os.Stderr, "zerolog: could not write event: %v\n", err)
        }
    }
}

// Fields is a helper function to use a map to set fields using type assertion.
func (e *Event) Fields(fields map[string]interface{}) *Event {
    if e == nil {
        return e
    }
    e.buf = appendFields(e.buf, fields)
    return e
}

// Dict adds the field key with a dict to the event context.
// Use zerolog.Dict() to create the dictionary.
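//
// For example (mirroring the README's sub-dictionary example):
//
//     log.Info().
//         Str("foo", "bar").
//         Dict("dict", zerolog.Dict().
//             Str("bar", "baz").
//             Int("n", 1),
//         ).Msg("hello world")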
func (e *Event) Dict(key string, dict *Event) *Event {
    if e == nil {
        return e
    }
    dict.buf = enc.AppendEndMarker(dict.buf)
    e.buf = append(enc.AppendKey(e.buf, key), dict.buf...)
    putEvent(dict)
    return e
}

// Dict creates an Event to be used with the *Event.Dict method.
// Call the usual field methods like Str, Int etc. to add fields to this
// event, and pass it as the argument to the *Event.Dict method.
func Dict() *Event {
    return newEvent(nil, 0)
}

// Array adds the field key with an array to the event context.
// Use zerolog.Arr() to create the array or pass a type that
// implements the LogArrayMarshaler interface.
func (e *Event) Array(key string, arr LogArrayMarshaler) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendKey(e.buf, key)
    var a *Array
    if aa, ok := arr.(*Array); ok {
        a = aa
    } else {
        a = Arr()
        arr.MarshalZerologArray(a)
    }
    e.buf = a.write(e.buf)
    return e
}

func (e *Event) appendObject(obj LogObjectMarshaler) {
    e.buf = enc.AppendBeginMarker(e.buf)
    obj.MarshalZerologObject(e)
    e.buf = enc.AppendEndMarker(e.buf)
}

// Object marshals an object that implements the LogObjectMarshaler interface.
func (e *Event) Object(key string, obj LogObjectMarshaler) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendKey(e.buf, key)
    e.appendObject(obj)
    return e
}

// EmbedObject marshals an object that implements the LogObjectMarshaler interface.
func (e *Event) EmbedObject(obj LogObjectMarshaler) *Event {
    if e == nil {
        return e
    }
    obj.MarshalZerologObject(e)
    return e
}

// Str adds the field key with val as a string to the *Event context.
func (e *Event) Str(key, val string) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendString(enc.AppendKey(e.buf, key), val)
    return e
}

// Strs adds the field key with vals as a []string to the *Event context.
func (e *Event) Strs(key string, vals []string) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendStrings(enc.AppendKey(e.buf, key), vals)
    return e
}

// Stringer adds the field key with val.String() (or null if val is nil) to the *Event context.
func (e *Event) Stringer(key string, val fmt.Stringer) *Event {
    if e == nil {
        return e
    }

    if val != nil {
        e.buf = enc.AppendString(enc.AppendKey(e.buf, key), val.String())
        return e
    }

    e.buf = enc.AppendInterface(enc.AppendKey(e.buf, key), nil)
    return e
}

// Bytes adds the field key with val as a string to the *Event context.
//
// Runes outside of normal ASCII ranges will be hex-encoded in the resulting
// JSON.
func (e *Event) Bytes(key string, val []byte) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendBytes(enc.AppendKey(e.buf, key), val)
    return e
}

// Hex adds the field key with val as a hex string to the *Event context.
func (e *Event) Hex(key string, val []byte) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendHex(enc.AppendKey(e.buf, key), val)
    return e
}

// RawJSON adds already encoded JSON to the log line under key.
//
// No sanity check is performed on b; it must not contain carriage returns and
// must be valid JSON.
func (e *Event) RawJSON(key string, b []byte) *Event {
    if e == nil {
        return e
    }
    e.buf = appendJSON(enc.AppendKey(e.buf, key), b)
    return e
}

// AnErr adds the field key with serialized err to the *Event context.
// If err is nil, no field is added.
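//
// For example (a sketch):
//
//     log.Warn().AnErr("cause", err).Msg("operation degraded")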
func (e *Event) AnErr(key string, err error) *Event {
    if e == nil {
        return e
    }
    switch m := ErrorMarshalFunc(err).(type) {
    case nil:
        return e
    case LogObjectMarshaler:
        return e.Object(key, m)
    case error:
        if m == nil || isNilValue(m) {
            return e
        } else {
            return e.Str(key, m.Error())
        }
    case string:
        return e.Str(key, m)
    default:
        return e.Interface(key, m)
    }
}

// Errs adds the field key with errs as an array of serialized errors to the
// *Event context.
func (e *Event) Errs(key string, errs []error) *Event {
    if e == nil {
        return e
    }
    arr := Arr()
    for _, err := range errs {
        switch m := ErrorMarshalFunc(err).(type) {
        case LogObjectMarshaler:
            arr = arr.Object(m)
        case error:
            arr = arr.Err(m)
        case string:
            arr = arr.Str(m)
        default:
            arr = arr.Interface(m)
        }
    }

    return e.Array(key, arr)
}

// Err adds the field "error" with serialized err to the *Event context.
// If err is nil, no field is added.
//
// To customize the key name, change zerolog.ErrorFieldName.
//
// If Stack() has been called before and zerolog.ErrorStackMarshaler is defined,
// the err is passed to ErrorStackMarshaler and the result is appended under
// zerolog.ErrorStackFieldName.
func (e *Event) Err(err error) *Event {
    if e == nil {
        return e
    }
    if e.stack && ErrorStackMarshaler != nil {
        switch m := ErrorStackMarshaler(err).(type) {
        case nil:
        case LogObjectMarshaler:
            e.Object(ErrorStackFieldName, m)
        case error:
            if m != nil && !isNilValue(m) {
                e.Str(ErrorStackFieldName, m.Error())
            }
        case string:
            e.Str(ErrorStackFieldName, m)
        default:
            e.Interface(ErrorStackFieldName, m)
        }
    }

    return e.AnErr(ErrorFieldName, err)
}

// Stack enables stack trace printing for the error passed to Err().
//
// ErrorStackMarshaler must be set for this method to do something.
func (e *Event) Stack() *Event {
    if e != nil {
        e.stack = true
    }
    return e
}

// Bool adds the field key with b as a bool to the *Event context.
func (e *Event) Bool(key string, b bool) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendBool(enc.AppendKey(e.buf, key), b)
    return e
}

// Bools adds the field key with b as a []bool to the *Event context.
func (e *Event) Bools(key string, b []bool) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendBools(enc.AppendKey(e.buf, key), b)
    return e
}

// Int adds the field key with i as an int to the *Event context.
func (e *Event) Int(key string, i int) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendInt(enc.AppendKey(e.buf, key), i)
    return e
}

// Ints adds the field key with i as a []int to the *Event context.
func (e *Event) Ints(key string, i []int) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendInts(enc.AppendKey(e.buf, key), i)
    return e
}

// Int8 adds the field key with i as an int8 to the *Event context.
func (e *Event) Int8(key string, i int8) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendInt8(enc.AppendKey(e.buf, key), i)
    return e
}

// Ints8 adds the field key with i as a []int8 to the *Event context.
func (e *Event) Ints8(key string, i []int8) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendInts8(enc.AppendKey(e.buf, key), i)
    return e
}

// Int16 adds the field key with i as an int16 to the *Event context.
func (e *Event) Int16(key string, i int16) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendInt16(enc.AppendKey(e.buf, key), i)
    return e
}

// Ints16 adds the field key with i as a []int16 to the *Event context.
func (e *Event) Ints16(key string, i []int16) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendInts16(enc.AppendKey(e.buf, key), i)
    return e
}

// Int32 adds the field key with i as an int32 to the *Event context.
func (e *Event) Int32(key string, i int32) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendInt32(enc.AppendKey(e.buf, key), i)
    return e
}

// Ints32 adds the field key with i as a []int32 to the *Event context.
func (e *Event) Ints32(key string, i []int32) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendInts32(enc.AppendKey(e.buf, key), i)
    return e
}

// Int64 adds the field key with i as an int64 to the *Event context.
func (e *Event) Int64(key string, i int64) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendInt64(enc.AppendKey(e.buf, key), i)
    return e
}

// Ints64 adds the field key with i as a []int64 to the *Event context.
func (e *Event) Ints64(key string, i []int64) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendInts64(enc.AppendKey(e.buf, key), i)
    return e
}

// Uint adds the field key with i as a uint to the *Event context.
func (e *Event) Uint(key string, i uint) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendUint(enc.AppendKey(e.buf, key), i)
    return e
}

// Uints adds the field key with i as a []uint to the *Event context.
func (e *Event) Uints(key string, i []uint) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendUints(enc.AppendKey(e.buf, key), i)
    return e
}

// Uint8 adds the field key with i as a uint8 to the *Event context.
func (e *Event) Uint8(key string, i uint8) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendUint8(enc.AppendKey(e.buf, key), i)
    return e
}

// Uints8 adds the field key with i as a []uint8 to the *Event context.
func (e *Event) Uints8(key string, i []uint8) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendUints8(enc.AppendKey(e.buf, key), i)
    return e
}

// Uint16 adds the field key with i as a uint16 to the *Event context.
func (e *Event) Uint16(key string, i uint16) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendUint16(enc.AppendKey(e.buf, key), i)
    return e
}

// Uints16 adds the field key with i as a []uint16 to the *Event context.
func (e *Event) Uints16(key string, i []uint16) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendUints16(enc.AppendKey(e.buf, key), i)
    return e
}

// Uint32 adds the field key with i as a uint32 to the *Event context.
func (e *Event) Uint32(key string, i uint32) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendUint32(enc.AppendKey(e.buf, key), i)
    return e
}

// Uints32 adds the field key with i as a []uint32 to the *Event context.
func (e *Event) Uints32(key string, i []uint32) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendUints32(enc.AppendKey(e.buf, key), i)
    return e
}

// Uint64 adds the field key with i as a uint64 to the *Event context.
func (e *Event) Uint64(key string, i uint64) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendUint64(enc.AppendKey(e.buf, key), i)
    return e
}

// Uints64 adds the field key with i as a []uint64 to the *Event context.
func (e *Event) Uints64(key string, i []uint64) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendUints64(enc.AppendKey(e.buf, key), i)
    return e
}

// Float32 adds the field key with f as a float32 to the *Event context.
func (e *Event) Float32(key string, f float32) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendFloat32(enc.AppendKey(e.buf, key), f)
    return e
}

// Floats32 adds the field key with f as a []float32 to the *Event context.
func (e *Event) Floats32(key string, f []float32) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendFloats32(enc.AppendKey(e.buf, key), f)
    return e
}

// Float64 adds the field key with f as a float64 to the *Event context.
func (e *Event) Float64(key string, f float64) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendFloat64(enc.AppendKey(e.buf, key), f)
    return e
}

// Floats64 adds the field key with f as a []float64 to the *Event context.
func (e *Event) Floats64(key string, f []float64) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendFloats64(enc.AppendKey(e.buf, key), f)
    return e
}

// Timestamp adds the current local time as UNIX timestamp to the *Event context with the "time" key.
// To customize the key name, change zerolog.TimestampFieldName.
//
// NOTE: It won't dedupe the "time" key if the *Event (or *Context) has one
// already.
func (e *Event) Timestamp() *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendTime(enc.AppendKey(e.buf, TimestampFieldName), TimestampFunc(), TimeFieldFormat)
    return e
}

// Time adds the field key with t formatted as a string using zerolog.TimeFieldFormat.
func (e *Event) Time(key string, t time.Time) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendTime(enc.AppendKey(e.buf, key), t, TimeFieldFormat)
    return e
}

// Times adds the field key with t formatted as a string using zerolog.TimeFieldFormat.
func (e *Event) Times(key string, t []time.Time) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendTimes(enc.AppendKey(e.buf, key), t, TimeFieldFormat)
    return e
}

// Dur adds the field key with duration d stored as zerolog.DurationFieldUnit.
// If zerolog.DurationFieldInteger is true, durations are rendered as integers
// instead of floats.
func (e *Event) Dur(key string, d time.Duration) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger)
    return e
}

// Durs adds the field key with durations d stored as zerolog.DurationFieldUnit.
// If zerolog.DurationFieldInteger is true, durations are rendered as integers
// instead of floats.
func (e *Event) Durs(key string, d []time.Duration) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendDurations(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger)
    return e
}

// TimeDiff adds the field key with positive duration between time t and start.
// If time t is not greater than start, duration will be 0.
// Duration format follows the same principle as Dur().
func (e *Event) TimeDiff(key string, t time.Time, start time.Time) *Event {
    if e == nil {
        return e
    }
    var d time.Duration
    if t.After(start) {
        d = t.Sub(start)
    }
    e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger)
    return e
}

// Interface adds the field key with i marshaled using reflection.
func (e *Event) Interface(key string, i interface{}) *Event {
    if e == nil {
        return e
    }
    if obj, ok := i.(LogObjectMarshaler); ok {
        return e.Object(key, obj)
    }
    e.buf = enc.AppendInterface(enc.AppendKey(e.buf, key), i)
    return e
}

// Caller adds the file:line of the caller with the zerolog.CallerFieldName key.
// The argument skip is the number of stack frames to ascend.
// If not passed, the global variable CallerSkipFrameCount is used.
func (e *Event) Caller(skip ...int) *Event {
    sk := CallerSkipFrameCount
    if len(skip) > 0 {
        sk = skip[0] + CallerSkipFrameCount
    }
    return e.caller(sk)
}

func (e *Event) caller(skip int) *Event {
    if e == nil {
        return e
    }
    _, file, line, ok := runtime.Caller(skip)
    if !ok {
        return e
    }
    e.buf = enc.AppendString(enc.AppendKey(e.buf, CallerFieldName), CallerMarshalFunc(file, line))
    return e
}

// IPAddr adds an IPv4 or IPv6 address to the event.
func (e *Event) IPAddr(key string, ip net.IP) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendIPAddr(enc.AppendKey(e.buf, key), ip)
    return e
}

// IPPrefix adds an IPv4 or IPv6 prefix (address and mask) to the event.
func (e *Event) IPPrefix(key string, pfx net.IPNet) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendIPPrefix(enc.AppendKey(e.buf, key), pfx)
    return e
}

// MACAddr adds a MAC address to the event.
func (e *Event) MACAddr(key string, ha net.HardwareAddr) *Event {
    if e == nil {
        return e
    }
    e.buf = enc.AppendMACAddr(enc.AppendKey(e.buf, key), ha)
    return e
}
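// Example (a sketch): network address fields.
//
//     log.Info().
//         IPAddr("client_ip", net.ParseIP("192.0.2.1")).
//         MACAddr("hw", hwAddr). // hwAddr is a net.HardwareAddr obtained elsewhere
//         Msg("connected")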
8,780
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/go.sum
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74 h1:4cFkmztxtMslUX2SctSl+blCyXfpzhGOy9LhKAqSMA4= golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
8,781
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/_config.yml
remote_theme: rs/gh-readme
8,782
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/.travis.yml
language: go go: - "1.7" - "1.8" - "1.9" - "1.10" - "1.11" - "1.12" - "master" matrix: allow_failures: - go: "master" script: - go test -v -race -cpu=1,2,4 -bench . -benchmem ./... - go test -v -tags binary_log -race -cpu=1,2,4 -bench . -benchmem ./...
8,783
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/fields.go
package zerolog import ( "net" "sort" "time" "unsafe" ) func isNilValue(i interface{}) bool { return (*[2]uintptr)(unsafe.Pointer(&i))[1] == 0 } func appendFields(dst []byte, fields map[string]interface{}) []byte { keys := make([]string, 0, len(fields)) for key := range fields { keys = append(keys, key) } sort.Strings(keys) for _, key := range keys { dst = enc.AppendKey(dst, key) val := fields[key] if val, ok := val.(LogObjectMarshaler); ok { e := newEvent(nil, 0) e.buf = e.buf[:0] e.appendObject(val) dst = append(dst, e.buf...) putEvent(e) continue } switch val := val.(type) { case string: dst = enc.AppendString(dst, val) case []byte: dst = enc.AppendBytes(dst, val) case error: switch m := ErrorMarshalFunc(val).(type) { case LogObjectMarshaler: e := newEvent(nil, 0) e.buf = e.buf[:0] e.appendObject(m) dst = append(dst, e.buf...) putEvent(e) case error: if m == nil || isNilValue(m) { dst = enc.AppendNil(dst) } else { dst = enc.AppendString(dst, m.Error()) } case string: dst = enc.AppendString(dst, m) default: dst = enc.AppendInterface(dst, m) } case []error: dst = enc.AppendArrayStart(dst) for i, err := range val { switch m := ErrorMarshalFunc(err).(type) { case LogObjectMarshaler: e := newEvent(nil, 0) e.buf = e.buf[:0] e.appendObject(m) dst = append(dst, e.buf...) putEvent(e) case error: if m == nil || isNilValue(m) { dst = enc.AppendNil(dst) } else { dst = enc.AppendString(dst, m.Error()) } case string: dst = enc.AppendString(dst, m) default: dst = enc.AppendInterface(dst, m) } if i < (len(val) - 1) { enc.AppendArrayDelim(dst) } } dst = enc.AppendArrayEnd(dst) case bool: dst = enc.AppendBool(dst, val) case int: dst = enc.AppendInt(dst, val) case int8: dst = enc.AppendInt8(dst, val) case int16: dst = enc.AppendInt16(dst, val) case int32: dst = enc.AppendInt32(dst, val) case int64: dst = enc.AppendInt64(dst, val) case uint: dst = enc.AppendUint(dst, val) case uint8: dst = enc.AppendUint8(dst, val) case uint16: dst = enc.AppendUint16(dst, val) case uint32: dst = enc.AppendUint32(dst, val) case uint64: dst = enc.AppendUint64(dst, val) case float32: dst = enc.AppendFloat32(dst, val) case float64: dst = enc.AppendFloat64(dst, val) case time.Time: dst = enc.AppendTime(dst, val, TimeFieldFormat) case time.Duration: dst = enc.AppendDuration(dst, val, DurationFieldUnit, DurationFieldInteger) case *string: if val != nil { dst = enc.AppendString(dst, *val) } else { dst = enc.AppendNil(dst) } case *bool: if val != nil { dst = enc.AppendBool(dst, *val) } else { dst = enc.AppendNil(dst) } case *int: if val != nil { dst = enc.AppendInt(dst, *val) } else { dst = enc.AppendNil(dst) } case *int8: if val != nil { dst = enc.AppendInt8(dst, *val) } else { dst = enc.AppendNil(dst) } case *int16: if val != nil { dst = enc.AppendInt16(dst, *val) } else { dst = enc.AppendNil(dst) } case *int32: if val != nil { dst = enc.AppendInt32(dst, *val) } else { dst = enc.AppendNil(dst) } case *int64: if val != nil { dst = enc.AppendInt64(dst, *val) } else { dst = enc.AppendNil(dst) } case *uint: if val != nil { dst = enc.AppendUint(dst, *val) } else { dst = enc.AppendNil(dst) } case *uint8: if val != nil { dst = enc.AppendUint8(dst, *val) } else { dst = enc.AppendNil(dst) } case *uint16: if val != nil { dst = enc.AppendUint16(dst, *val) } else { dst = enc.AppendNil(dst) } case *uint32: if val != nil { dst = enc.AppendUint32(dst, *val) } else { dst = enc.AppendNil(dst) } case *uint64: if val != nil { dst = enc.AppendUint64(dst, *val) } else { dst = enc.AppendNil(dst) } case *float32: if val != nil { dst = 
enc.AppendFloat32(dst, *val) } else { dst = enc.AppendNil(dst) } case *float64: if val != nil { dst = enc.AppendFloat64(dst, *val) } else { dst = enc.AppendNil(dst) } case *time.Time: if val != nil { dst = enc.AppendTime(dst, *val, TimeFieldFormat) } else { dst = enc.AppendNil(dst) } case *time.Duration: if val != nil { dst = enc.AppendDuration(dst, *val, DurationFieldUnit, DurationFieldInteger) } else { dst = enc.AppendNil(dst) } case []string: dst = enc.AppendStrings(dst, val) case []bool: dst = enc.AppendBools(dst, val) case []int: dst = enc.AppendInts(dst, val) case []int8: dst = enc.AppendInts8(dst, val) case []int16: dst = enc.AppendInts16(dst, val) case []int32: dst = enc.AppendInts32(dst, val) case []int64: dst = enc.AppendInts64(dst, val) case []uint: dst = enc.AppendUints(dst, val) // case []uint8: // dst = enc.AppendUints8(dst, val) case []uint16: dst = enc.AppendUints16(dst, val) case []uint32: dst = enc.AppendUints32(dst, val) case []uint64: dst = enc.AppendUints64(dst, val) case []float32: dst = enc.AppendFloats32(dst, val) case []float64: dst = enc.AppendFloats64(dst, val) case []time.Time: dst = enc.AppendTimes(dst, val, TimeFieldFormat) case []time.Duration: dst = enc.AppendDurations(dst, val, DurationFieldUnit, DurationFieldInteger) case nil: dst = enc.AppendNil(dst) case net.IP: dst = enc.AppendIPAddr(dst, val) case net.IPNet: dst = enc.AppendIPPrefix(dst, val) case net.HardwareAddr: dst = enc.AppendMACAddr(dst, val) default: dst = enc.AppendInterface(dst, val) } } return dst }
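appendFields sorts the map keys before dispatching on value type, so output order is deterministic regardless of Go's randomized map iteration order. A standalone sketch of that sort-then-dispatch pattern (plain Go, not zerolog's encoder API):

```go
package main

import (
	"fmt"
	"sort"
)

// appendSorted mirrors the pattern above: collect the keys, sort them,
// then emit each key/value pair in a deterministic order.
func appendSorted(dst []byte, fields map[string]interface{}) []byte {
	keys := make([]string, 0, len(fields))
	for k := range fields {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		dst = append(dst, fmt.Sprintf("%s=%v ", k, fields[k])...)
	}
	return dst
}

func main() {
	out := appendSorted(nil, map[string]interface{}{"b": 2, "a": 1})
	fmt.Println(string(out)) // a=1 b=2
}
```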
8,784
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/syslog.go
// +build !windows
// +build !binary_log

package zerolog

import (
	"io"
)

// SyslogWriter is an interface matching a syslog.Writer struct.
type SyslogWriter interface {
	io.Writer
	Debug(m string) error
	Info(m string) error
	Warning(m string) error
	Err(m string) error
	Emerg(m string) error
	Crit(m string) error
}

type syslogWriter struct {
	w SyslogWriter
}

// SyslogLevelWriter wraps a SyslogWriter and calls the syslog level
// method matching the zerolog level.
func SyslogLevelWriter(w SyslogWriter) LevelWriter {
	return syslogWriter{w}
}

func (sw syslogWriter) Write(p []byte) (n int, err error) {
	return sw.w.Write(p)
}

// WriteLevel implements the LevelWriter interface.
func (sw syslogWriter) WriteLevel(level Level, p []byte) (n int, err error) {
	switch level {
	case TraceLevel:
	case DebugLevel:
		err = sw.w.Debug(string(p))
	case InfoLevel:
		err = sw.w.Info(string(p))
	case WarnLevel:
		err = sw.w.Warning(string(p))
	case ErrorLevel:
		err = sw.w.Err(string(p))
	case FatalLevel:
		err = sw.w.Emerg(string(p))
	case PanicLevel:
		err = sw.w.Crit(string(p))
	case NoLevel:
		err = sw.w.Info(string(p))
	default:
		panic("invalid level")
	}
	n = len(p)
	return
}
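On Unix systems the standard library's *syslog.Writer already provides every method in the SyslogWriter interface, so hooking it up is a single wrap; a sketch (the tag and priority here are arbitrary):

```go
package main

import (
	"log/syslog"

	"github.com/rs/zerolog"
)

func main() {
	// *syslog.Writer implements Write, Debug, Info, Warning, Err, Emerg
	// and Crit, so it satisfies the SyslogWriter interface above.
	w, err := syslog.New(syslog.LOG_INFO|syslog.LOG_DAEMON, "myapp")
	if err != nil {
		panic(err)
	}
	logger := zerolog.New(zerolog.SyslogLevelWriter(w))
	logger.Warn().Msg("routed through syslog.Warning")
}
```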
8,785
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/sampler.go
package zerolog

import (
	"math/rand"
	"sync/atomic"
	"time"
)

var (
	// Often samples roughly one event out of every 10.
	Often = RandomSampler(10)
	// Sometimes samples roughly one event out of every 100.
	Sometimes = RandomSampler(100)
	// Rarely samples roughly one event out of every 1000.
	Rarely = RandomSampler(1000)
)

// Sampler defines an interface to a log sampler.
type Sampler interface {
	// Sample returns true if the event should be part of the sample, false if
	// the event should be dropped.
	Sample(lvl Level) bool
}

// RandomSampler uses a PRNG to randomly sample one event out of every N,
// regardless of level.
type RandomSampler uint32

// Sample implements the Sampler interface.
func (s RandomSampler) Sample(lvl Level) bool {
	if s <= 0 {
		return false
	}
	if rand.Intn(int(s)) != 0 {
		return false
	}
	return true
}

// BasicSampler is a sampler that will send every Nth event, regardless of
// its level.
type BasicSampler struct {
	N       uint32
	counter uint32
}

// Sample implements the Sampler interface.
func (s *BasicSampler) Sample(lvl Level) bool {
	n := s.N
	if n == 1 {
		return true
	}
	c := atomic.AddUint32(&s.counter, 1)
	return c%n == 1
}

// BurstSampler lets Burst events pass per Period, then hands the decision to
// NextSampler. If NextSampler is not set, all events beyond the burst are rejected.
type BurstSampler struct {
	// Burst is the maximum number of events per period allowed before calling
	// NextSampler.
	Burst uint32
	// Period defines the burst period. If 0, NextSampler is always called.
	Period time.Duration
	// NextSampler is the sampler used after the burst is reached. If nil,
	// events are always rejected after the burst.
	NextSampler Sampler

	counter uint32
	resetAt int64
}

// Sample implements the Sampler interface.
func (s *BurstSampler) Sample(lvl Level) bool {
	if s.Burst > 0 && s.Period > 0 {
		if s.inc() <= s.Burst {
			return true
		}
	}
	if s.NextSampler == nil {
		return false
	}
	return s.NextSampler.Sample(lvl)
}

func (s *BurstSampler) inc() uint32 {
	now := time.Now().UnixNano()
	resetAt := atomic.LoadInt64(&s.resetAt)
	var c uint32
	if now > resetAt {
		c = 1
		atomic.StoreUint32(&s.counter, c)
		newResetAt := now + s.Period.Nanoseconds()
		reset := atomic.CompareAndSwapInt64(&s.resetAt, resetAt, newResetAt)
		if !reset {
			// Lost the race with another goroutine trying to reset.
			c = atomic.AddUint32(&s.counter, 1)
		}
	} else {
		c = atomic.AddUint32(&s.counter, 1)
	}
	return c
}

// LevelSampler applies a different sampler for each level.
type LevelSampler struct {
	TraceSampler, DebugSampler, InfoSampler, WarnSampler, ErrorSampler Sampler
}

func (s LevelSampler) Sample(lvl Level) bool {
	switch lvl {
	case TraceLevel:
		if s.TraceSampler != nil {
			return s.TraceSampler.Sample(lvl)
		}
	case DebugLevel:
		if s.DebugSampler != nil {
			return s.DebugSampler.Sample(lvl)
		}
	case InfoLevel:
		if s.InfoSampler != nil {
			return s.InfoSampler.Sample(lvl)
		}
	case WarnLevel:
		if s.WarnSampler != nil {
			return s.WarnSampler.Sample(lvl)
		}
	case ErrorLevel:
		if s.ErrorSampler != nil {
			return s.ErrorSampler.Sample(lvl)
		}
	}
	return true
}
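A sketch of composing these samplers, with a burst allowance falling back to a 1-in-N sampler once the burst is spent; the numbers are arbitrary:

```go
package main

import (
	"os"
	"time"

	"github.com/rs/zerolog"
)

func main() {
	// Let 5 events through per second, then sample 1 in 100 after that.
	sampled := zerolog.New(os.Stdout).Sample(&zerolog.BurstSampler{
		Burst:       5,
		Period:      time.Second,
		NextSampler: &zerolog.BasicSampler{N: 100},
	})
	for i := 0; i < 1000; i++ {
		sampled.Info().Int("i", i).Msg("noisy event")
	}
}
```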
8,786
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/log/log.go
// Package log provides a global logger for zerolog. package log import ( "context" "io" "os" "github.com/rs/zerolog" ) // Logger is the global logger. var Logger = zerolog.New(os.Stderr).With().Timestamp().Logger() // Output duplicates the global logger and sets w as its output. func Output(w io.Writer) zerolog.Logger { return Logger.Output(w) } // With creates a child logger with the field added to its context. func With() zerolog.Context { return Logger.With() } // Level creates a child logger with the minimum accepted level set to level. func Level(level zerolog.Level) zerolog.Logger { return Logger.Level(level) } // Sample returns a logger with the s sampler. func Sample(s zerolog.Sampler) zerolog.Logger { return Logger.Sample(s) } // Hook returns a logger with the h Hook. func Hook(h zerolog.Hook) zerolog.Logger { return Logger.Hook(h) } // Err starts a new message with error level with err as a field if not nil or // with info level if err is nil. // // You must call Msg on the returned event in order to send the event. func Err(err error) *zerolog.Event { return Logger.Err(err) } // Trace starts a new message with trace level. // // You must call Msg on the returned event in order to send the event. func Trace() *zerolog.Event { return Logger.Trace() } // Debug starts a new message with debug level. // // You must call Msg on the returned event in order to send the event. func Debug() *zerolog.Event { return Logger.Debug() } // Info starts a new message with info level. // // You must call Msg on the returned event in order to send the event. func Info() *zerolog.Event { return Logger.Info() } // Warn starts a new message with warn level. // // You must call Msg on the returned event in order to send the event. func Warn() *zerolog.Event { return Logger.Warn() } // Error starts a new message with error level. // // You must call Msg on the returned event in order to send the event. func Error() *zerolog.Event { return Logger.Error() } // Fatal starts a new message with fatal level. The os.Exit(1) function // is called by the Msg method. // // You must call Msg on the returned event in order to send the event. func Fatal() *zerolog.Event { return Logger.Fatal() } // Panic starts a new message with panic level. The message is also sent // to the panic function. // // You must call Msg on the returned event in order to send the event. func Panic() *zerolog.Event { return Logger.Panic() } // WithLevel starts a new message with level. // // You must call Msg on the returned event in order to send the event. func WithLevel(level zerolog.Level) *zerolog.Event { return Logger.WithLevel(level) } // Log starts a new message with no level. Setting zerolog.GlobalLevel to // zerolog.Disabled will still disable events produced by this method. // // You must call Msg on the returned event in order to send the event. func Log() *zerolog.Event { return Logger.Log() } // Print sends a log event using debug level and no extra field. // Arguments are handled in the manner of fmt.Print. func Print(v ...interface{}) { Logger.Print(v...) } // Printf sends a log event using debug level and no extra field. // Arguments are handled in the manner of fmt.Printf. func Printf(format string, v ...interface{}) { Logger.Printf(format, v...) } // Ctx returns the Logger associated with the ctx. If no logger // is associated, a disabled logger is returned. func Ctx(ctx context.Context) *zerolog.Logger { return zerolog.Ctx(ctx) }
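A minimal sketch of the global logger in use; the Str helper used here is part of zerolog's event API, though it does not appear in this excerpt:

```go
package main

import (
	"context"
	"errors"

	"github.com/rs/zerolog/log"
)

func main() {
	log.Info().Str("component", "startup").Msg("ready")
	log.Err(errors.New("disk full")).Msg("write failed") // error level plus err field

	// With no logger attached to the context, a disabled logger comes back.
	log.Ctx(context.Background()).Info().Msg("dropped silently")
}
```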
8,787
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/internal
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/internal/cbor/README.md
## Reference:
   CBOR Encoding is described in [RFC7049](https://tools.ietf.org/html/rfc7049)

## Comparison of JSON vs CBOR

The two main areas of reduction are:

1. CPU usage to write a log msg
2. Size (in bytes) of log messages.

CPU usage savings are below:
```
name                                 JSON time/op  CBOR time/op  delta
Info-32                              15.3ns ± 1%   11.7ns ± 3%   -23.78%  (p=0.000 n=9+10)
ContextFields-32                     16.2ns ± 2%   12.3ns ± 3%   -23.97%  (p=0.000 n=9+9)
ContextAppend-32                     6.70ns ± 0%   6.20ns ± 0%   -7.44%   (p=0.000 n=9+9)
LogFields-32                         66.4ns ± 0%   24.6ns ± 2%   -62.89%  (p=0.000 n=10+9)
LogArrayObject-32                    911ns  ±11%   768ns  ± 6%   -15.64%  (p=0.000 n=10+10)
LogFieldType/Floats-32               70.3ns ± 2%   29.5ns ± 1%   -57.98%  (p=0.000 n=10+10)
LogFieldType/Err-32                  14.0ns ± 3%   12.1ns ± 8%   -13.20%  (p=0.000 n=8+10)
LogFieldType/Dur-32                  17.2ns ± 2%   13.1ns ± 1%   -24.27%  (p=0.000 n=10+9)
LogFieldType/Object-32               54.3ns ±11%   52.3ns ± 7%   ~        (p=0.239 n=10+10)
LogFieldType/Ints-32                 20.3ns ± 2%   15.1ns ± 2%   -25.50%  (p=0.000 n=9+10)
LogFieldType/Interfaces-32           642ns  ±11%   621ns  ± 9%   ~        (p=0.118 n=10+10)
LogFieldType/Interface(Objects)-32   635ns  ±13%   632ns  ± 9%   ~        (p=0.592 n=10+10)
LogFieldType/Times-32                294ns  ± 0%   27ns   ± 1%   -90.71%  (p=0.000 n=10+9)
LogFieldType/Durs-32                 121ns  ± 0%   33ns   ± 2%   -72.44%  (p=0.000 n=9+9)
LogFieldType/Interface(Object)-32    56.6ns ± 8%   52.3ns ± 8%   -7.54%   (p=0.007 n=10+10)
LogFieldType/Errs-32                 17.8ns ± 3%   16.1ns ± 2%   -9.71%   (p=0.000 n=10+9)
LogFieldType/Time-32                 40.5ns ± 1%   12.7ns ± 6%   -68.66%  (p=0.000 n=8+9)
LogFieldType/Bool-32                 12.0ns ± 5%   10.2ns ± 2%   -15.18%  (p=0.000 n=10+8)
LogFieldType/Bools-32                17.2ns ± 2%   12.6ns ± 4%   -26.63%  (p=0.000 n=10+10)
LogFieldType/Int-32                  12.3ns ± 2%   11.2ns ± 4%   -9.27%   (p=0.000 n=9+10)
LogFieldType/Float-32                16.7ns ± 1%   12.6ns ± 2%   -24.42%  (p=0.000 n=7+9)
LogFieldType/Str-32                  12.7ns ± 7%   11.3ns ± 7%   -10.88%  (p=0.000 n=10+9)
LogFieldType/Strs-32                 20.3ns ± 3%   18.2ns ± 3%   -10.25%  (p=0.000 n=9+10)
LogFieldType/Interface-32            183ns  ±12%   175ns  ± 9%   ~        (p=0.078 n=10+10)
```

Log message size savings are greatly dependent on the number and type of fields in the log message. Consider this log message (with an integer, a timestamp, and a string, in addition to the level):

`{"level":"error","Fault":41650,"time":"2018-04-01T15:18:19-07:00","message":"Some Message"}`

Two measurements were made of the log file sizes - one without any compression, and a second using [compress/zlib](https://golang.org/pkg/compress/zlib/).

Results for 10,000 log messages:

| Log Format | Plain File Size (in KB) | Compressed File Size (in KB) |
| :--- | :---: | :---: |
| JSON | 920 | 28 |
| CBOR | 550 | 28 |

The example used to calculate the above data is available in [Examples](examples).
8,788
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/internal
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/internal/cbor/types.go
package cbor

import (
	"encoding/json"
	"fmt"
	"math"
	"net"
)

// AppendNil inserts a 'Nil' object into the dst byte array.
func (Encoder) AppendNil(dst []byte) []byte {
	return append(dst, byte(majorTypeSimpleAndFloat|additionalTypeNull))
}

// AppendBeginMarker inserts a map start into the dst byte array.
func (Encoder) AppendBeginMarker(dst []byte) []byte {
	return append(dst, byte(majorTypeMap|additionalTypeInfiniteCount))
}

// AppendEndMarker inserts a map end into the dst byte array.
func (Encoder) AppendEndMarker(dst []byte) []byte {
	return append(dst, byte(majorTypeSimpleAndFloat|additionalTypeBreak))
}

// AppendObjectData takes an object in the form of a byte array and appends it to dst.
func (Encoder) AppendObjectData(dst []byte, o []byte) []byte {
	// BeginMarker is present in the dst, which
	// should not be copied when appending to existing data.
	return append(dst, o[1:]...)
}

// AppendArrayStart adds markers to indicate the start of an array.
func (Encoder) AppendArrayStart(dst []byte) []byte {
	return append(dst, byte(majorTypeArray|additionalTypeInfiniteCount))
}

// AppendArrayEnd adds markers to indicate the end of an array.
func (Encoder) AppendArrayEnd(dst []byte) []byte {
	return append(dst, byte(majorTypeSimpleAndFloat|additionalTypeBreak))
}

// AppendArrayDelim adds markers to indicate the end of a particular array element.
func (Encoder) AppendArrayDelim(dst []byte) []byte {
	// No delimiters are needed in CBOR.
	return dst
}

// AppendLineBreak is a noop that keeps API compatibility with the json encoder.
func (Encoder) AppendLineBreak(dst []byte) []byte {
	// No line breaks are needed in the binary format.
	return dst
}

// AppendBool encodes and inserts a boolean value into the dst byte array.
func (Encoder) AppendBool(dst []byte, val bool) []byte {
	b := additionalTypeBoolFalse
	if val {
		b = additionalTypeBoolTrue
	}
	return append(dst, byte(majorTypeSimpleAndFloat|b))
}

// AppendBools encodes and inserts an array of boolean values into the dst byte array.
func (e Encoder) AppendBools(dst []byte, vals []bool) []byte {
	major := majorTypeArray
	l := len(vals)
	if l == 0 {
		return e.AppendArrayEnd(e.AppendArrayStart(dst))
	}
	if l <= additionalMax {
		lb := byte(l)
		dst = append(dst, byte(major|lb))
	} else {
		dst = appendCborTypePrefix(dst, major, uint64(l))
	}
	for _, v := range vals {
		dst = e.AppendBool(dst, v)
	}
	return dst
}

// AppendInt encodes and inserts an integer value into the dst byte array.
func (Encoder) AppendInt(dst []byte, val int) []byte {
	major := majorTypeUnsignedInt
	contentVal := val
	if val < 0 {
		major = majorTypeNegativeInt
		contentVal = -val - 1
	}
	if contentVal <= additionalMax {
		lb := byte(contentVal)
		dst = append(dst, byte(major|lb))
	} else {
		dst = appendCborTypePrefix(dst, major, uint64(contentVal))
	}
	return dst
}

// AppendInts encodes and inserts an array of integer values into the dst byte array.
func (e Encoder) AppendInts(dst []byte, vals []int) []byte {
	major := majorTypeArray
	l := len(vals)
	if l == 0 {
		return e.AppendArrayEnd(e.AppendArrayStart(dst))
	}
	if l <= additionalMax {
		lb := byte(l)
		dst = append(dst, byte(major|lb))
	} else {
		dst = appendCborTypePrefix(dst, major, uint64(l))
	}
	for _, v := range vals {
		dst = e.AppendInt(dst, v)
	}
	return dst
}

// AppendInt8 encodes and inserts an int8 value into the dst byte array.
func (e Encoder) AppendInt8(dst []byte, val int8) []byte {
	return e.AppendInt(dst, int(val))
}

// AppendInts8 encodes and inserts an array of int8 values into the dst byte array.
func (e Encoder) AppendInts8(dst []byte, vals []int8) []byte {
	major := majorTypeArray
	l := len(vals)
	if l == 0 {
		return e.AppendArrayEnd(e.AppendArrayStart(dst))
	}
	if l <= additionalMax {
		lb := byte(l)
		dst = append(dst, byte(major|lb))
	} else {
		dst = appendCborTypePrefix(dst, major, uint64(l))
	}
	for _, v := range vals {
		dst = e.AppendInt(dst, int(v))
	}
	return dst
}

// AppendInt16 encodes and inserts an int16 value into the dst byte array.
func (e Encoder) AppendInt16(dst []byte, val int16) []byte {
	return e.AppendInt(dst, int(val))
}

// AppendInts16 encodes and inserts an array of int16 values into the dst byte array.
func (e Encoder) AppendInts16(dst []byte, vals []int16) []byte {
	major := majorTypeArray
	l := len(vals)
	if l == 0 {
		return e.AppendArrayEnd(e.AppendArrayStart(dst))
	}
	if l <= additionalMax {
		lb := byte(l)
		dst = append(dst, byte(major|lb))
	} else {
		dst = appendCborTypePrefix(dst, major, uint64(l))
	}
	for _, v := range vals {
		dst = e.AppendInt(dst, int(v))
	}
	return dst
}

// AppendInt32 encodes and inserts an int32 value into the dst byte array.
func (e Encoder) AppendInt32(dst []byte, val int32) []byte {
	return e.AppendInt(dst, int(val))
}

// AppendInts32 encodes and inserts an array of int32 values into the dst byte array.
func (e Encoder) AppendInts32(dst []byte, vals []int32) []byte {
	major := majorTypeArray
	l := len(vals)
	if l == 0 {
		return e.AppendArrayEnd(e.AppendArrayStart(dst))
	}
	if l <= additionalMax {
		lb := byte(l)
		dst = append(dst, byte(major|lb))
	} else {
		dst = appendCborTypePrefix(dst, major, uint64(l))
	}
	for _, v := range vals {
		dst = e.AppendInt(dst, int(v))
	}
	return dst
}

// AppendInt64 encodes and inserts an int64 value into the dst byte array.
func (Encoder) AppendInt64(dst []byte, val int64) []byte {
	major := majorTypeUnsignedInt
	contentVal := val
	if val < 0 {
		major = majorTypeNegativeInt
		contentVal = -val - 1
	}
	if contentVal <= additionalMax {
		lb := byte(contentVal)
		dst = append(dst, byte(major|lb))
	} else {
		dst = appendCborTypePrefix(dst, major, uint64(contentVal))
	}
	return dst
}

// AppendInts64 encodes and inserts an array of int64 values into the dst byte array.
func (e Encoder) AppendInts64(dst []byte, vals []int64) []byte {
	major := majorTypeArray
	l := len(vals)
	if l == 0 {
		return e.AppendArrayEnd(e.AppendArrayStart(dst))
	}
	if l <= additionalMax {
		lb := byte(l)
		dst = append(dst, byte(major|lb))
	} else {
		dst = appendCborTypePrefix(dst, major, uint64(l))
	}
	for _, v := range vals {
		dst = e.AppendInt64(dst, v)
	}
	return dst
}

// AppendUint encodes and inserts an unsigned integer value into the dst byte array.
func (e Encoder) AppendUint(dst []byte, val uint) []byte {
	return e.AppendInt64(dst, int64(val))
}

// AppendUints encodes and inserts an array of unsigned integer values into the dst byte array.
func (e Encoder) AppendUints(dst []byte, vals []uint) []byte {
	major := majorTypeArray
	l := len(vals)
	if l == 0 {
		return e.AppendArrayEnd(e.AppendArrayStart(dst))
	}
	if l <= additionalMax {
		lb := byte(l)
		dst = append(dst, byte(major|lb))
	} else {
		dst = appendCborTypePrefix(dst, major, uint64(l))
	}
	for _, v := range vals {
		dst = e.AppendUint(dst, v)
	}
	return dst
}

// AppendUint8 encodes and inserts an unsigned int8 value into the dst byte array.
func (e Encoder) AppendUint8(dst []byte, val uint8) []byte {
	return e.AppendUint(dst, uint(val))
}

// AppendUints8 encodes and inserts an array of uint8 values into the dst byte array.
func (e Encoder) AppendUints8(dst []byte, vals []uint8) []byte {
	major := majorTypeArray
	l := len(vals)
	if l == 0 {
		return e.AppendArrayEnd(e.AppendArrayStart(dst))
	}
	if l <= additionalMax {
		lb := byte(l)
		dst = append(dst, byte(major|lb))
	} else {
		dst = appendCborTypePrefix(dst, major, uint64(l))
	}
	for _, v := range vals {
		dst = e.AppendUint8(dst, v)
	}
	return dst
}

// AppendUint16 encodes and inserts a uint16 value into the dst byte array.
func (e Encoder) AppendUint16(dst []byte, val uint16) []byte {
	return e.AppendUint(dst, uint(val))
}

// AppendUints16 encodes and inserts an array of uint16 values into the dst byte array.
func (e Encoder) AppendUints16(dst []byte, vals []uint16) []byte {
	major := majorTypeArray
	l := len(vals)
	if l == 0 {
		return e.AppendArrayEnd(e.AppendArrayStart(dst))
	}
	if l <= additionalMax {
		lb := byte(l)
		dst = append(dst, byte(major|lb))
	} else {
		dst = appendCborTypePrefix(dst, major, uint64(l))
	}
	for _, v := range vals {
		dst = e.AppendUint16(dst, v)
	}
	return dst
}

// AppendUint32 encodes and inserts a uint32 value into the dst byte array.
func (e Encoder) AppendUint32(dst []byte, val uint32) []byte {
	return e.AppendUint(dst, uint(val))
}

// AppendUints32 encodes and inserts an array of uint32 values into the dst byte array.
func (e Encoder) AppendUints32(dst []byte, vals []uint32) []byte {
	major := majorTypeArray
	l := len(vals)
	if l == 0 {
		return e.AppendArrayEnd(e.AppendArrayStart(dst))
	}
	if l <= additionalMax {
		lb := byte(l)
		dst = append(dst, byte(major|lb))
	} else {
		dst = appendCborTypePrefix(dst, major, uint64(l))
	}
	for _, v := range vals {
		dst = e.AppendUint32(dst, v)
	}
	return dst
}

// AppendUint64 encodes and inserts a uint64 value into the dst byte array.
func (Encoder) AppendUint64(dst []byte, val uint64) []byte {
	major := majorTypeUnsignedInt
	contentVal := val
	if contentVal <= additionalMax {
		lb := byte(contentVal)
		dst = append(dst, byte(major|lb))
	} else {
		dst = appendCborTypePrefix(dst, major, contentVal)
	}
	return dst
}

// AppendUints64 encodes and inserts an array of uint64 values into the dst byte array.
func (e Encoder) AppendUints64(dst []byte, vals []uint64) []byte {
	major := majorTypeArray
	l := len(vals)
	if l == 0 {
		return e.AppendArrayEnd(e.AppendArrayStart(dst))
	}
	if l <= additionalMax {
		lb := byte(l)
		dst = append(dst, byte(major|lb))
	} else {
		dst = appendCborTypePrefix(dst, major, uint64(l))
	}
	for _, v := range vals {
		dst = e.AppendUint64(dst, v)
	}
	return dst
}

// AppendFloat32 encodes and inserts a single precision float value into the dst byte array.
func (Encoder) AppendFloat32(dst []byte, val float32) []byte {
	switch {
	case math.IsNaN(float64(val)):
		return append(dst, "\xfa\x7f\xc0\x00\x00"...)
	case math.IsInf(float64(val), 1):
		return append(dst, "\xfa\x7f\x80\x00\x00"...)
	case math.IsInf(float64(val), -1):
		return append(dst, "\xfa\xff\x80\x00\x00"...)
	}
	major := majorTypeSimpleAndFloat
	subType := additionalTypeFloat32
	n := math.Float32bits(val)
	var buf [4]byte
	for i := uint(0); i < 4; i++ {
		buf[i] = byte(n >> ((3 - i) * 8))
	}
	return append(append(dst, byte(major|subType)), buf[0], buf[1], buf[2], buf[3])
}

// AppendFloats32 encodes and inserts an array of single precision float values into the dst byte array.
func (e Encoder) AppendFloats32(dst []byte, vals []float32) []byte {
	major := majorTypeArray
	l := len(vals)
	if l == 0 {
		return e.AppendArrayEnd(e.AppendArrayStart(dst))
	}
	if l <= additionalMax {
		lb := byte(l)
		dst = append(dst, byte(major|lb))
	} else {
		dst = appendCborTypePrefix(dst, major, uint64(l))
	}
	for _, v := range vals {
		dst = e.AppendFloat32(dst, v)
	}
	return dst
}

// AppendFloat64 encodes and inserts a double precision float value into the dst byte array.
func (Encoder) AppendFloat64(dst []byte, val float64) []byte {
	switch {
	case math.IsNaN(val):
		return append(dst, "\xfb\x7f\xf8\x00\x00\x00\x00\x00\x00"...)
	case math.IsInf(val, 1):
		return append(dst, "\xfb\x7f\xf0\x00\x00\x00\x00\x00\x00"...)
	case math.IsInf(val, -1):
		return append(dst, "\xfb\xff\xf0\x00\x00\x00\x00\x00\x00"...)
	}
	major := majorTypeSimpleAndFloat
	subType := additionalTypeFloat64
	n := math.Float64bits(val)
	dst = append(dst, byte(major|subType))
	for i := uint(1); i <= 8; i++ {
		b := byte(n >> ((8 - i) * 8))
		dst = append(dst, b)
	}
	return dst
}

// AppendFloats64 encodes and inserts an array of double precision float values into the dst byte array.
func (e Encoder) AppendFloats64(dst []byte, vals []float64) []byte {
	major := majorTypeArray
	l := len(vals)
	if l == 0 {
		return e.AppendArrayEnd(e.AppendArrayStart(dst))
	}
	if l <= additionalMax {
		lb := byte(l)
		dst = append(dst, byte(major|lb))
	} else {
		dst = appendCborTypePrefix(dst, major, uint64(l))
	}
	for _, v := range vals {
		dst = e.AppendFloat64(dst, v)
	}
	return dst
}

// AppendInterface takes an arbitrary object, converts it to JSON, and embeds it in dst.
func (e Encoder) AppendInterface(dst []byte, i interface{}) []byte {
	marshaled, err := json.Marshal(i)
	if err != nil {
		return e.AppendString(dst, fmt.Sprintf("marshaling error: %v", err))
	}
	return AppendEmbeddedJSON(dst, marshaled)
}

// AppendIPAddr encodes and inserts an IP address (IPv4 or IPv6).
func (e Encoder) AppendIPAddr(dst []byte, ip net.IP) []byte {
	dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16))
	dst = append(dst, byte(additionalTypeTagNetworkAddr>>8))
	dst = append(dst, byte(additionalTypeTagNetworkAddr&0xff))
	return e.AppendBytes(dst, ip)
}

// AppendIPPrefix encodes and inserts an IP address prefix (address and mask length).
func (e Encoder) AppendIPPrefix(dst []byte, pfx net.IPNet) []byte {
	dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16))
	dst = append(dst, byte(additionalTypeTagNetworkPrefix>>8))
	dst = append(dst, byte(additionalTypeTagNetworkPrefix&0xff))

	// Prefix is a tuple (aka MAP of 1 pair of elements) -
	// first element is prefix, second is mask length.
	dst = append(dst, byte(majorTypeMap|0x1))
	dst = e.AppendBytes(dst, pfx.IP)
	maskLen, _ := pfx.Mask.Size()
	return e.AppendUint8(dst, uint8(maskLen))
}

// AppendMACAddr encodes and inserts a hardware (MAC) address.
func (e Encoder) AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte {
	dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16))
	dst = append(dst, byte(additionalTypeTagNetworkAddr>>8))
	dst = append(dst, byte(additionalTypeTagNetworkAddr&0xff))
	return e.AppendBytes(dst, ha)
}

// AppendHex adds a TAG and inserts hex bytes as a string.
func (e Encoder) AppendHex(dst []byte, val []byte) []byte {
	dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16))
	dst = append(dst, byte(additionalTypeTagHexString>>8))
	dst = append(dst, byte(additionalTypeTagHexString&0xff))
	return e.AppendBytes(dst, val)
}
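To make the tagged layouts concrete, here is a small in-package sketch (it assumes it sits inside this cbor package, e.g. in a test file) printing the bytes AppendIPAddr produces: tag 260, then a 4-byte CBOR byte string:

```go
package cbor

import (
	"fmt"
	"net"
)

func demoAppendIPAddr() {
	var e Encoder
	b := e.AppendIPAddr(nil, net.IP{192, 0, 2, 1})
	// d9 01 04 = tag(260), 44 = byte string of length 4, then the octets.
	fmt.Printf("% x\n", b) // d9 01 04 44 c0 00 02 01
}
```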
8,789
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/internal
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/internal/cbor/cbor.go
// Package cbor provides primitives for storing different data // in the CBOR (binary) format. CBOR is defined in RFC7049. package cbor import "time" const ( majorOffset = 5 additionalMax = 23 // Non Values. additionalTypeBoolFalse byte = 20 additionalTypeBoolTrue byte = 21 additionalTypeNull byte = 22 // Integer (+ve and -ve) Sub-types. additionalTypeIntUint8 byte = 24 additionalTypeIntUint16 byte = 25 additionalTypeIntUint32 byte = 26 additionalTypeIntUint64 byte = 27 // Float Sub-types. additionalTypeFloat16 byte = 25 additionalTypeFloat32 byte = 26 additionalTypeFloat64 byte = 27 additionalTypeBreak byte = 31 // Tag Sub-types. additionalTypeTimestamp byte = 01 // Extended Tags - from https://www.iana.org/assignments/cbor-tags/cbor-tags.xhtml additionalTypeTagNetworkAddr uint16 = 260 additionalTypeTagNetworkPrefix uint16 = 261 additionalTypeEmbeddedJSON uint16 = 262 additionalTypeTagHexString uint16 = 263 // Unspecified number of elements. additionalTypeInfiniteCount byte = 31 ) const ( majorTypeUnsignedInt byte = iota << majorOffset // Major type 0 majorTypeNegativeInt // Major type 1 majorTypeByteString // Major type 2 majorTypeUtf8String // Major type 3 majorTypeArray // Major type 4 majorTypeMap // Major type 5 majorTypeTags // Major type 6 majorTypeSimpleAndFloat // Major type 7 ) const ( maskOutAdditionalType byte = (7 << majorOffset) maskOutMajorType byte = 31 ) const ( float32Nan = "\xfa\x7f\xc0\x00\x00" float32PosInfinity = "\xfa\x7f\x80\x00\x00" float32NegInfinity = "\xfa\xff\x80\x00\x00" float64Nan = "\xfb\x7f\xf8\x00\x00\x00\x00\x00\x00" float64PosInfinity = "\xfb\x7f\xf0\x00\x00\x00\x00\x00\x00" float64NegInfinity = "\xfb\xff\xf0\x00\x00\x00\x00\x00\x00" ) // IntegerTimeFieldFormat indicates the format of timestamp decoded // from an integer (time in seconds). var IntegerTimeFieldFormat = time.RFC3339 // NanoTimeFieldFormat indicates the format of timestamp decoded // from a float value (time in seconds and nano seconds). var NanoTimeFieldFormat = time.RFC3339Nano func appendCborTypePrefix(dst []byte, major byte, number uint64) []byte { byteCount := 8 var minor byte switch { case number < 256: byteCount = 1 minor = additionalTypeIntUint8 case number < 65536: byteCount = 2 minor = additionalTypeIntUint16 case number < 4294967296: byteCount = 4 minor = additionalTypeIntUint32 default: byteCount = 8 minor = additionalTypeIntUint64 } dst = append(dst, byte(major|minor)) byteCount-- for ; byteCount >= 0; byteCount-- { dst = append(dst, byte(number>>(uint(byteCount)*8))) } return dst }
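A worked example of the width selection in appendCborTypePrefix; since the helper is unexported, this sketch assumes it lives inside the package. Encoding the unsigned integer 1000 selects the 2-byte (uint16) form:

```go
package cbor

import "fmt"

func demoPrefix() {
	// 1000 fits in 16 bits: one byte of (major | uint16 marker) followed
	// by the value in big-endian order.
	b := appendCborTypePrefix(nil, majorTypeUnsignedInt, 1000)
	fmt.Printf("% x\n", b) // 19 03 e8
}
```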
8,790
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/internal
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/internal/cbor/decode_stream.go
package cbor

// This file contains code to decode a stream of CBOR Data into JSON.

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"math"
	"net"
	"runtime"
	"strconv"
	"strings"
	"time"
	"unicode/utf8"
)

var decodeTimeZone *time.Location

const hexTable = "0123456789abcdef"

const isFloat32 = 4
const isFloat64 = 8

func readNBytes(src *bufio.Reader, n int) []byte {
	ret := make([]byte, n)
	for i := 0; i < n; i++ {
		ch, e := src.ReadByte()
		if e != nil {
			panic(fmt.Errorf("Tried to read %d bytes but hit end of file", n))
		}
		ret[i] = ch
	}
	return ret
}

func readByte(src *bufio.Reader) byte {
	b, e := src.ReadByte()
	if e != nil {
		panic(fmt.Errorf("Tried to read 1 byte but hit end of file"))
	}
	return b
}

func decodeIntAdditionalType(src *bufio.Reader, minor byte) int64 {
	val := int64(0)
	if minor <= 23 {
		val = int64(minor)
	} else {
		bytesToRead := 0
		switch minor {
		case additionalTypeIntUint8:
			bytesToRead = 1
		case additionalTypeIntUint16:
			bytesToRead = 2
		case additionalTypeIntUint32:
			bytesToRead = 4
		case additionalTypeIntUint64:
			bytesToRead = 8
		default:
			panic(fmt.Errorf("Invalid Additional Type: %d in decodeInteger (expected <28)", minor))
		}
		pb := readNBytes(src, bytesToRead)
		for i := 0; i < bytesToRead; i++ {
			val = val * 256
			val += int64(pb[i])
		}
	}
	return val
}

func decodeInteger(src *bufio.Reader) int64 {
	pb := readByte(src)
	major := pb & maskOutAdditionalType
	minor := pb & maskOutMajorType
	if major != majorTypeUnsignedInt && major != majorTypeNegativeInt {
		panic(fmt.Errorf("Major type is: %d in decodeInteger (expected 0 or 1)", major))
	}
	val := decodeIntAdditionalType(src, minor)
	if major == 0 {
		return val
	}
	return (-1 - val)
}

func decodeFloat(src *bufio.Reader) (float64, int) {
	pb := readByte(src)
	major := pb & maskOutAdditionalType
	minor := pb & maskOutMajorType
	if major != majorTypeSimpleAndFloat {
		panic(fmt.Errorf("Incorrect Major type is: %d in decodeFloat", major))
	}

	switch minor {
	case additionalTypeFloat16:
		panic(fmt.Errorf("float16 is not supported in decodeFloat"))

	case additionalTypeFloat32:
		pb := readNBytes(src, 4)
		switch string(pb) {
		case float32Nan:
			return math.NaN(), isFloat32
		case float32PosInfinity:
			return math.Inf(0), isFloat32
		case float32NegInfinity:
			return math.Inf(-1), isFloat32
		}
		n := uint32(0)
		for i := 0; i < 4; i++ {
			n = n * 256
			n += uint32(pb[i])
		}
		val := math.Float32frombits(n)
		return float64(val), isFloat32

	case additionalTypeFloat64:
		pb := readNBytes(src, 8)
		switch string(pb) {
		case float64Nan:
			return math.NaN(), isFloat64
		case float64PosInfinity:
			return math.Inf(0), isFloat64
		case float64NegInfinity:
			return math.Inf(-1), isFloat64
		}
		n := uint64(0)
		for i := 0; i < 8; i++ {
			n = n * 256
			n += uint64(pb[i])
		}
		val := math.Float64frombits(n)
		return val, isFloat64
	}
	panic(fmt.Errorf("Invalid Additional Type: %d in decodeFloat", minor))
}

func decodeStringComplex(dst []byte, s string, pos uint) []byte {
	i := int(pos)
	start := 0

	for i < len(s) {
		b := s[i]
		if b >= utf8.RuneSelf {
			r, size := utf8.DecodeRuneInString(s[i:])
			if r == utf8.RuneError && size == 1 {
				// In case of error, first append previous simple characters to
				// the byte slice if any and append a replacement character code
				// in place of the invalid sequence.
				if start < i {
					dst = append(dst, s[start:i]...)
				}
				dst = append(dst, `\ufffd`...)
				i += size
				start = i
				continue
			}
			i += size
			continue
		}
		if b >= 0x20 && b <= 0x7e && b != '\\' && b != '"' {
			i++
			continue
		}
		// We encountered a character that needs to be encoded.
		// Let's append the previous simple characters to the byte slice
		// and switch our operation to read and encode the remainder
		// characters byte-by-byte.
		if start < i {
			dst = append(dst, s[start:i]...)
		}
		switch b {
		case '"', '\\':
			dst = append(dst, '\\', b)
		case '\b':
			dst = append(dst, '\\', 'b')
		case '\f':
			dst = append(dst, '\\', 'f')
		case '\n':
			dst = append(dst, '\\', 'n')
		case '\r':
			dst = append(dst, '\\', 'r')
		case '\t':
			dst = append(dst, '\\', 't')
		default:
			dst = append(dst, '\\', 'u', '0', '0', hexTable[b>>4], hexTable[b&0xF])
		}
		i++
		start = i
	}
	if start < len(s) {
		dst = append(dst, s[start:]...)
	}
	return dst
}

func decodeString(src *bufio.Reader, noQuotes bool) []byte {
	pb := readByte(src)
	major := pb & maskOutAdditionalType
	minor := pb & maskOutMajorType
	if major != majorTypeByteString {
		panic(fmt.Errorf("Major type is: %d in decodeString", major))
	}
	result := []byte{}
	if !noQuotes {
		result = append(result, '"')
	}
	length := decodeIntAdditionalType(src, minor)
	len := int(length)
	pbs := readNBytes(src, len)
	result = append(result, pbs...)
	if noQuotes {
		return result
	}
	return append(result, '"')
}

func decodeUTF8String(src *bufio.Reader) []byte {
	pb := readByte(src)
	major := pb & maskOutAdditionalType
	minor := pb & maskOutMajorType
	if major != majorTypeUtf8String {
		panic(fmt.Errorf("Major type is: %d in decodeUTF8String", major))
	}
	result := []byte{'"'}
	length := decodeIntAdditionalType(src, minor)
	len := int(length)
	pbs := readNBytes(src, len)

	for i := 0; i < len; i++ {
		// Check if the character needs encoding. Control characters, slashes,
		// and the double quote need json encoding. Bytes above the ascii
		// boundary need utf8 encoding.
		if pbs[i] < 0x20 || pbs[i] > 0x7e || pbs[i] == '\\' || pbs[i] == '"' {
			// We encountered a character that needs to be encoded. Switch
			// to the complex version of the algorithm.
			dst := []byte{'"'}
			dst = decodeStringComplex(dst, string(pbs), uint(i))
			return append(dst, '"')
		}
	}
	// The string has no need for encoding and is therefore directly
	// appended to the byte slice.
	result = append(result, pbs...)
	return append(result, '"')
}

func array2Json(src *bufio.Reader, dst io.Writer) {
	dst.Write([]byte{'['})
	pb := readByte(src)
	major := pb & maskOutAdditionalType
	minor := pb & maskOutMajorType
	if major != majorTypeArray {
		panic(fmt.Errorf("Major type is: %d in array2Json", major))
	}
	len := 0
	unSpecifiedCount := false
	if minor == additionalTypeInfiniteCount {
		unSpecifiedCount = true
	} else {
		length := decodeIntAdditionalType(src, minor)
		len = int(length)
	}
	for i := 0; unSpecifiedCount || i < len; i++ {
		if unSpecifiedCount {
			pb, e := src.Peek(1)
			if e != nil {
				panic(e)
			}
			if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) {
				readByte(src)
				break
			}
		}
		cbor2JsonOneObject(src, dst)
		if unSpecifiedCount {
			pb, e := src.Peek(1)
			if e != nil {
				panic(e)
			}
			if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) {
				readByte(src)
				break
			}
			dst.Write([]byte{','})
		} else if i+1 < len {
			dst.Write([]byte{','})
		}
	}
	dst.Write([]byte{']'})
}

func map2Json(src *bufio.Reader, dst io.Writer) {
	pb := readByte(src)
	major := pb & maskOutAdditionalType
	minor := pb & maskOutMajorType
	if major != majorTypeMap {
		panic(fmt.Errorf("Major type is: %d in map2Json", major))
	}
	len := 0
	unSpecifiedCount := false
	if minor == additionalTypeInfiniteCount {
		unSpecifiedCount = true
	} else {
		length := decodeIntAdditionalType(src, minor)
		len = int(length)
	}
	dst.Write([]byte{'{'})
	for i := 0; unSpecifiedCount || i < len; i++ {
		if unSpecifiedCount {
			pb, e := src.Peek(1)
			if e != nil {
				panic(e)
			}
			if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) {
				readByte(src)
				break
			}
		}
		cbor2JsonOneObject(src, dst)
		if i%2 == 0 {
			// Even position values are keys.
			dst.Write([]byte{':'})
		} else {
			if unSpecifiedCount {
				pb, e := src.Peek(1)
				if e != nil {
					panic(e)
				}
				if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) {
					readByte(src)
					break
				}
				dst.Write([]byte{','})
			} else if i+1 < len {
				dst.Write([]byte{','})
			}
		}
	}
	dst.Write([]byte{'}'})
}

func decodeTagData(src *bufio.Reader) []byte {
	pb := readByte(src)
	major := pb & maskOutAdditionalType
	minor := pb & maskOutMajorType
	if major != majorTypeTags {
		panic(fmt.Errorf("Major type is: %d in decodeTagData", major))
	}
	switch minor {
	case additionalTypeTimestamp:
		return decodeTimeStamp(src)

	// Tag value is larger than 255 (so uint16).
	case additionalTypeIntUint16:
		val := decodeIntAdditionalType(src, minor)

		switch uint16(val) {
		case additionalTypeEmbeddedJSON:
			pb := readByte(src)
			dataMajor := pb & maskOutAdditionalType
			if dataMajor != majorTypeByteString {
				panic(fmt.Errorf("Unsupported embedded Type: %d in decodeEmbeddedJSON", dataMajor))
			}
			src.UnreadByte()
			return decodeString(src, true)

		case additionalTypeTagNetworkAddr:
			octets := decodeString(src, true)
			ss := []byte{'"'}
			switch len(octets) {
			case 6: // MAC address.
				ha := net.HardwareAddr(octets)
				ss = append(append(ss, ha.String()...), '"')
			case 4: // IPv4 address.
				fallthrough
			case 16: // IPv6 address.
				ip := net.IP(octets)
				ss = append(append(ss, ip.String()...), '"')
			default:
				panic(fmt.Errorf("Unexpected Network Address length: %d (expected 4,6,16)", len(octets)))
			}
			return ss

		case additionalTypeTagNetworkPrefix:
			pb := readByte(src)
			if pb != byte(majorTypeMap|0x1) {
				panic(fmt.Errorf("IP Prefix is not a map of 1 element as expected"))
			}
			octets := decodeString(src, true)
			val := decodeInteger(src)
			ip := net.IP(octets)
			var mask net.IPMask
			pfxLen := int(val)
			if len(octets) == 4 {
				mask = net.CIDRMask(pfxLen, 32)
			} else {
				mask = net.CIDRMask(pfxLen, 128)
			}
			ipPfx := net.IPNet{IP: ip, Mask: mask}
			ss := []byte{'"'}
			ss = append(append(ss, ipPfx.String()...), '"')
			return ss

		case additionalTypeTagHexString:
			octets := decodeString(src, true)
			ss := []byte{'"'}
			for _, v := range octets {
				ss = append(ss, hexTable[v>>4], hexTable[v&0x0f])
			}
			return append(ss, '"')

		default:
			panic(fmt.Errorf("Unsupported Additional Tag Type: %d in decodeTagData", val))
		}
	}
	panic(fmt.Errorf("Unsupported Additional Type: %d in decodeTagData", minor))
}

func decodeTimeStamp(src *bufio.Reader) []byte {
	pb := readByte(src)
	src.UnreadByte()
	tsMajor := pb & maskOutAdditionalType
	if tsMajor == majorTypeUnsignedInt || tsMajor == majorTypeNegativeInt {
		n := decodeInteger(src)
		t := time.Unix(n, 0)
		if decodeTimeZone != nil {
			t = t.In(decodeTimeZone)
		} else {
			t = t.In(time.UTC)
		}
		tsb := []byte{}
		tsb = append(tsb, '"')
		tsb = t.AppendFormat(tsb, IntegerTimeFieldFormat)
		tsb = append(tsb, '"')
		return tsb
	} else if tsMajor == majorTypeSimpleAndFloat {
		n, _ := decodeFloat(src)
		secs := int64(n)
		n -= float64(secs)
		n *= float64(1e9)
		t := time.Unix(secs, int64(n))
		if decodeTimeZone != nil {
			t = t.In(decodeTimeZone)
		} else {
			t = t.In(time.UTC)
		}
		tsb := []byte{}
		tsb = append(tsb, '"')
		tsb = t.AppendFormat(tsb, NanoTimeFieldFormat)
		tsb = append(tsb, '"')
		return tsb
	}
	panic(fmt.Errorf("TS format is neither int nor float: %d", tsMajor))
}

func decodeSimpleFloat(src *bufio.Reader) []byte {
	pb := readByte(src)
	major := pb & maskOutAdditionalType
	minor := pb & maskOutMajorType
	if major != majorTypeSimpleAndFloat {
		panic(fmt.Errorf("Major type is: %d in decodeSimpleFloat", major))
	}
	switch minor {
	case additionalTypeBoolTrue:
		return []byte("true")
	case additionalTypeBoolFalse:
		return []byte("false")
	case additionalTypeNull:
		return []byte("null")
	case additionalTypeFloat16:
		fallthrough
	case additionalTypeFloat32:
		fallthrough
	case additionalTypeFloat64:
		src.UnreadByte()
		v, bc := decodeFloat(src)
		ba := []byte{}
		switch {
		case math.IsNaN(v):
			return []byte("\"NaN\"")
		case math.IsInf(v, 1):
			return []byte("\"+Inf\"")
		case math.IsInf(v, -1):
			return []byte("\"-Inf\"")
		}
		if bc == isFloat32 {
			ba = strconv.AppendFloat(ba, v, 'f', -1, 32)
		} else if bc == isFloat64 {
			ba = strconv.AppendFloat(ba, v, 'f', -1, 64)
		} else {
			panic(fmt.Errorf("Invalid Float precision from decodeFloat: %d", bc))
		}
		return ba
	default:
		panic(fmt.Errorf("Invalid Additional Type: %d in decodeSimpleFloat", minor))
	}
}

func cbor2JsonOneObject(src *bufio.Reader, dst io.Writer) {
	pb, e := src.Peek(1)
	if e != nil {
		panic(e)
	}
	major := (pb[0] & maskOutAdditionalType)

	switch major {
	case majorTypeUnsignedInt:
		fallthrough
	case majorTypeNegativeInt:
		n := decodeInteger(src)
		dst.Write([]byte(strconv.Itoa(int(n))))

	case majorTypeByteString:
		s := decodeString(src, false)
		dst.Write(s)

	case majorTypeUtf8String:
		s := decodeUTF8String(src)
		dst.Write(s)

	case majorTypeArray:
		array2Json(src, dst)

	case majorTypeMap:
		map2Json(src, dst)

	case majorTypeTags:
		s := decodeTagData(src)
		dst.Write(s)

	case majorTypeSimpleAndFloat:
		s := decodeSimpleFloat(src)
		dst.Write(s)
	}
}

func moreBytesToRead(src *bufio.Reader) bool {
	_, e := src.ReadByte()
	if e == nil {
		src.UnreadByte()
		return true
	}
	return false
}

// Cbor2JsonManyObjects decodes all the CBOR objects read from the src
// reader. It keeps decoding until the reader returns EOF (an error when reading).
// The decoded string is written to dst, and at the end of every CBOR object
// a newline is written to the output stream.
//
// Returns the error (if any) that was encountered during decode.
// The child functions will generate a panic when an error is encountered and
// this function will recover non-runtime errors and return the reason as error.
func Cbor2JsonManyObjects(src io.Reader, dst io.Writer) (err error) {
	defer func() {
		if r := recover(); r != nil {
			if _, ok := r.(runtime.Error); ok {
				panic(r)
			}
			err = r.(error)
		}
	}()
	bufRdr := bufio.NewReader(src)
	for moreBytesToRead(bufRdr) {
		cbor2JsonOneObject(bufRdr, dst)
		dst.Write([]byte("\n"))
	}
	return nil
}

// binaryFmt detects whether the bytes to be printed are in binary format.
func binaryFmt(p []byte) bool {
	if len(p) > 0 && p[0] > 0x7F {
		return true
	}
	return false
}

func getReader(str string) *bufio.Reader {
	return bufio.NewReader(strings.NewReader(str))
}

// DecodeIfBinaryToString converts a binary formatted log msg to a
// JSON formatted string log message - suitable for printing to console/syslog.
func DecodeIfBinaryToString(in []byte) string {
	if binaryFmt(in) {
		var b bytes.Buffer
		Cbor2JsonManyObjects(strings.NewReader(string(in)), &b)
		return b.String()
	}
	return string(in)
}

// DecodeObjectToStr checks if the input is in binary format; if so,
// it decodes a single object and returns the decoded string.
func DecodeObjectToStr(in []byte) string {
	if binaryFmt(in) {
		var b bytes.Buffer
		cbor2JsonOneObject(getReader(string(in)), &b)
		return b.String()
	}
	return string(in)
}

// DecodeIfBinaryToBytes checks if the input is in binary format; if so,
// it decodes all objects and returns the decoded string as a byte array.
func DecodeIfBinaryToBytes(in []byte) []byte {
	if binaryFmt(in) {
		var b bytes.Buffer
		Cbor2JsonManyObjects(bytes.NewReader(in), &b)
		return b.Bytes()
	}
	return in
}
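An in-package, test-style sketch that round-trips one CBOR map through the stream decoder back to JSON (it assumes it compiles within this package, since the encoder and decoder it uses are unexported or internal):

```go
package cbor

import (
	"bytes"
	"fmt"
)

func demoCbor2Json() {
	var e Encoder
	b := e.AppendBeginMarker(nil) // indefinite-length map start
	b = e.AppendString(e.AppendKey(b, "level"), "info")
	b = e.AppendEndMarker(b) // break

	var out bytes.Buffer
	if err := Cbor2JsonManyObjects(bytes.NewReader(b), &out); err != nil {
		panic(err)
	}
	fmt.Print(out.String()) // {"level":"info"}
}
```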
8,791
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/internal
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/internal/cbor/time.go
package cbor import ( "time" ) func appendIntegerTimestamp(dst []byte, t time.Time) []byte { major := majorTypeTags minor := additionalTypeTimestamp dst = append(dst, byte(major|minor)) secs := t.Unix() var val uint64 if secs < 0 { major = majorTypeNegativeInt val = uint64(-secs - 1) } else { major = majorTypeUnsignedInt val = uint64(secs) } dst = appendCborTypePrefix(dst, major, uint64(val)) return dst } func (e Encoder) appendFloatTimestamp(dst []byte, t time.Time) []byte { major := majorTypeTags minor := additionalTypeTimestamp dst = append(dst, byte(major|minor)) secs := t.Unix() nanos := t.Nanosecond() var val float64 val = float64(secs)*1.0 + float64(nanos)*1E-9 return e.AppendFloat64(dst, val) } // AppendTime encodes and adds a timestamp to the dst byte array. func (e Encoder) AppendTime(dst []byte, t time.Time, unused string) []byte { utc := t.UTC() if utc.Nanosecond() == 0 { return appendIntegerTimestamp(dst, utc) } return e.appendFloatTimestamp(dst, utc) } // AppendTimes encodes and adds an array of timestamps to the dst byte array. func (e Encoder) AppendTimes(dst []byte, vals []time.Time, unused string) []byte { major := majorTypeArray l := len(vals) if l == 0 { return e.AppendArrayEnd(e.AppendArrayStart(dst)) } if l <= additionalMax { lb := byte(l) dst = append(dst, byte(major|lb)) } else { dst = appendCborTypePrefix(dst, major, uint64(l)) } for _, t := range vals { dst = e.AppendTime(dst, t, unused) } return dst } // AppendDuration encodes and adds a duration to the dst byte array. // useInt field indicates whether to store the duration as seconds (integer) or // as seconds+nanoseconds (float). func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte { if useInt { return e.AppendInt64(dst, int64(d/unit)) } return e.AppendFloat64(dst, float64(d)/float64(unit)) } // AppendDurations encodes and adds an array of durations to the dst byte array. // useInt field indicates whether to store the duration as seconds (integer) or // as seconds+nanoseconds (float). func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte { major := majorTypeArray l := len(vals) if l == 0 { return e.AppendArrayEnd(e.AppendArrayStart(dst)) } if l <= additionalMax { lb := byte(l) dst = append(dst, byte(major|lb)) } else { dst = appendCborTypePrefix(dst, major, uint64(l)) } for _, d := range vals { dst = e.AppendDuration(dst, d, unit, useInt) } return dst }
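The encoder takes the integer path for whole seconds and the float path otherwise; an in-package sketch showing the resulting size difference (the byte counts follow from the tag-plus-prefix layouts above):

```go
package cbor

import (
	"fmt"
	"time"
)

func demoTimestamps() {
	var e Encoder
	whole := e.AppendTime(nil, time.Unix(1700000000, 0), "")
	frac := e.AppendTime(nil, time.Unix(1700000000, 500000000), "")
	// Integer path: 1 tag byte + 5-byte uint32-prefixed value = 6 bytes.
	// Float path: 1 tag byte + 9-byte float64 encoding = 10 bytes.
	fmt.Println(len(whole), len(frac)) // 6 10
}
```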
8,792
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/internal
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/internal/cbor/string.go
package cbor // AppendStrings encodes and adds an array of strings to the dst byte array. func (e Encoder) AppendStrings(dst []byte, vals []string) []byte { major := majorTypeArray l := len(vals) if l <= additionalMax { lb := byte(l) dst = append(dst, byte(major|lb)) } else { dst = appendCborTypePrefix(dst, major, uint64(l)) } for _, v := range vals { dst = e.AppendString(dst, v) } return dst } // AppendString encodes and adds a string to the dst byte array. func (Encoder) AppendString(dst []byte, s string) []byte { major := majorTypeUtf8String l := len(s) if l <= additionalMax { lb := byte(l) dst = append(dst, byte(major|lb)) } else { dst = appendCborTypePrefix(dst, majorTypeUtf8String, uint64(l)) } return append(dst, s...) } // AppendBytes encodes and adds an array of bytes to the dst byte array. func (Encoder) AppendBytes(dst, s []byte) []byte { major := majorTypeByteString l := len(s) if l <= additionalMax { lb := byte(l) dst = append(dst, byte(major|lb)) } else { dst = appendCborTypePrefix(dst, major, uint64(l)) } return append(dst, s...) } // AppendEmbeddedJSON adds a tag and embeds input JSON as such. func AppendEmbeddedJSON(dst, s []byte) []byte { major := majorTypeTags minor := additionalTypeEmbeddedJSON // Append the TAG to indicate this is Embedded JSON. dst = append(dst, byte(major|additionalTypeIntUint16)) dst = append(dst, byte(minor>>8)) dst = append(dst, byte(minor&0xff)) // Append the JSON Object as Byte String. major = majorTypeByteString l := len(s) if l <= additionalMax { lb := byte(l) dst = append(dst, byte(major|lb)) } else { dst = appendCborTypePrefix(dst, major, uint64(l)) } return append(dst, s...) }
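A sketch of the layout AppendEmbeddedJSON produces, assuming an in-package caller: tag 262, then the JSON bytes as a CBOR byte string:

```go
package cbor

import "fmt"

func demoEmbeddedJSON() {
	b := AppendEmbeddedJSON(nil, []byte(`{"a":1}`))
	// d9 01 06 = tag(262), 47 = byte string of length 7, then the JSON.
	fmt.Printf("% x\n", b) // d9 01 06 47 7b 22 61 22 3a 31 7d
}
```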
8,793
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/internal
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/internal/cbor/base.go
package cbor type Encoder struct{} // AppendKey adds a key (string) to the binary encoded log message func (e Encoder) AppendKey(dst []byte, key string) []byte { if len(dst) < 1 { dst = e.AppendBeginMarker(dst) } return e.AppendString(dst, key) }
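Because AppendKey emits the begin marker when the buffer is empty, the first key of a message implicitly opens the map; a one-line in-package sketch:

```go
package cbor

import "fmt"

func demoAppendKey() {
	var e Encoder
	// bf = indefinite-length map start, 61 61 = 1-byte text string "a".
	fmt.Printf("% x\n", e.AppendKey(nil, "a")) // bf 61 61
}
```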
8,794
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/internal
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/internal/cbor/types_test_64.go
// +build !386 package cbor import ( "encoding/hex" "testing" ) var enc2 = Encoder{} var integerTestCases_64bit = []struct { val int binary string }{ // Value in 8 bytes. {0xabcd100000000, "\x1b\x00\x0a\xbc\xd1\x00\x00\x00\x00"}, {1000000000000, "\x1b\x00\x00\x00\xe8\xd4\xa5\x10\x00"}, // Value in 8 bytes. {-0xabcd100000001, "\x3b\x00\x0a\xbc\xd1\x00\x00\x00\x00"}, {-1000000000001, "\x3b\x00\x00\x00\xe8\xd4\xa5\x10\x00"}, } func TestAppendInt_64bit(t *testing.T) { for _, tc := range integerTestCases_64bit { s := enc2.AppendInt([]byte{}, tc.val) got := string(s) if got != tc.binary { t.Errorf("AppendInt(0x%x)=0x%s, want: 0x%s", tc.val, hex.EncodeToString(s), hex.EncodeToString([]byte(tc.binary))) } } }
8,795
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/internal
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/internal/json/bytes.go
package json import "unicode/utf8" // AppendBytes is a mirror of appendString with []byte arg func (Encoder) AppendBytes(dst, s []byte) []byte { dst = append(dst, '"') for i := 0; i < len(s); i++ { if !noEscapeTable[s[i]] { dst = appendBytesComplex(dst, s, i) return append(dst, '"') } } dst = append(dst, s...) return append(dst, '"') } // AppendHex encodes the input bytes to a hex string and appends // the encoded string to the input byte slice. // // The operation loops though each byte and encodes it as hex using // the hex lookup table. func (Encoder) AppendHex(dst, s []byte) []byte { dst = append(dst, '"') for _, v := range s { dst = append(dst, hex[v>>4], hex[v&0x0f]) } return append(dst, '"') } // appendBytesComplex is a mirror of the appendStringComplex // with []byte arg func appendBytesComplex(dst, s []byte, i int) []byte { start := 0 for i < len(s) { b := s[i] if b >= utf8.RuneSelf { r, size := utf8.DecodeRune(s[i:]) if r == utf8.RuneError && size == 1 { if start < i { dst = append(dst, s[start:i]...) } dst = append(dst, `\ufffd`...) i += size start = i continue } i += size continue } if noEscapeTable[b] { i++ continue } // We encountered a character that needs to be encoded. // Let's append the previous simple characters to the byte slice // and switch our operation to read and encode the remainder // characters byte-by-byte. if start < i { dst = append(dst, s[start:i]...) } switch b { case '"', '\\': dst = append(dst, '\\', b) case '\b': dst = append(dst, '\\', 'b') case '\f': dst = append(dst, '\\', 'f') case '\n': dst = append(dst, '\\', 'n') case '\r': dst = append(dst, '\\', 'r') case '\t': dst = append(dst, '\\', 't') default: dst = append(dst, '\\', 'u', '0', '0', hex[b>>4], hex[b&0xF]) } i++ start = i } if start < len(s) { dst = append(dst, s[start:]...) } return dst }
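A quick in-package sketch of AppendHex (the hex lookup table it uses is defined elsewhere in this package):

```go
package json

import "fmt"

func demoAppendHex() {
	var e Encoder
	fmt.Println(string(e.AppendHex(nil, []byte{0xde, 0xad}))) // "dead"
}
```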
8,796
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/internal
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/internal/json/types.go
package json import ( "encoding/json" "fmt" "math" "net" "strconv" ) // AppendNil inserts a 'Nil' object into the dst byte array. func (Encoder) AppendNil(dst []byte) []byte { return append(dst, "null"...) } // AppendBeginMarker inserts a map start into the dst byte array. func (Encoder) AppendBeginMarker(dst []byte) []byte { return append(dst, '{') } // AppendEndMarker inserts a map end into the dst byte array. func (Encoder) AppendEndMarker(dst []byte) []byte { return append(dst, '}') } // AppendLineBreak appends a line break. func (Encoder) AppendLineBreak(dst []byte) []byte { return append(dst, '\n') } // AppendArrayStart adds markers to indicate the start of an array. func (Encoder) AppendArrayStart(dst []byte) []byte { return append(dst, '[') } // AppendArrayEnd adds markers to indicate the end of an array. func (Encoder) AppendArrayEnd(dst []byte) []byte { return append(dst, ']') } // AppendArrayDelim adds markers to indicate end of a particular array element. func (Encoder) AppendArrayDelim(dst []byte) []byte { if len(dst) > 0 { return append(dst, ',') } return dst } // AppendBool converts the input bool to a string and // appends the encoded string to the input byte slice. func (Encoder) AppendBool(dst []byte, val bool) []byte { return strconv.AppendBool(dst, val) } // AppendBools encodes the input bools to json and // appends the encoded string list to the input byte slice. func (Encoder) AppendBools(dst []byte, vals []bool) []byte { if len(vals) == 0 { return append(dst, '[', ']') } dst = append(dst, '[') dst = strconv.AppendBool(dst, vals[0]) if len(vals) > 1 { for _, val := range vals[1:] { dst = strconv.AppendBool(append(dst, ','), val) } } dst = append(dst, ']') return dst } // AppendInt converts the input int to a string and // appends the encoded string to the input byte slice. func (Encoder) AppendInt(dst []byte, val int) []byte { return strconv.AppendInt(dst, int64(val), 10) } // AppendInts encodes the input ints to json and // appends the encoded string list to the input byte slice. func (Encoder) AppendInts(dst []byte, vals []int) []byte { if len(vals) == 0 { return append(dst, '[', ']') } dst = append(dst, '[') dst = strconv.AppendInt(dst, int64(vals[0]), 10) if len(vals) > 1 { for _, val := range vals[1:] { dst = strconv.AppendInt(append(dst, ','), int64(val), 10) } } dst = append(dst, ']') return dst } // AppendInt8 converts the input []int8 to a string and // appends the encoded string to the input byte slice. func (Encoder) AppendInt8(dst []byte, val int8) []byte { return strconv.AppendInt(dst, int64(val), 10) } // AppendInts8 encodes the input int8s to json and // appends the encoded string list to the input byte slice. func (Encoder) AppendInts8(dst []byte, vals []int8) []byte { if len(vals) == 0 { return append(dst, '[', ']') } dst = append(dst, '[') dst = strconv.AppendInt(dst, int64(vals[0]), 10) if len(vals) > 1 { for _, val := range vals[1:] { dst = strconv.AppendInt(append(dst, ','), int64(val), 10) } } dst = append(dst, ']') return dst } // AppendInt16 converts the input int16 to a string and // appends the encoded string to the input byte slice. func (Encoder) AppendInt16(dst []byte, val int16) []byte { return strconv.AppendInt(dst, int64(val), 10) } // AppendInts16 encodes the input int16s to json and // appends the encoded string list to the input byte slice. 
func (Encoder) AppendInts16(dst []byte, vals []int16) []byte {
	if len(vals) == 0 {
		return append(dst, '[', ']')
	}
	dst = append(dst, '[')
	dst = strconv.AppendInt(dst, int64(vals[0]), 10)
	if len(vals) > 1 {
		for _, val := range vals[1:] {
			dst = strconv.AppendInt(append(dst, ','), int64(val), 10)
		}
	}
	dst = append(dst, ']')
	return dst
}

// AppendInt32 converts the input int32 to a string and
// appends the encoded string to the input byte slice.
func (Encoder) AppendInt32(dst []byte, val int32) []byte {
	return strconv.AppendInt(dst, int64(val), 10)
}

// AppendInts32 encodes the input int32s to json and
// appends the encoded string list to the input byte slice.
func (Encoder) AppendInts32(dst []byte, vals []int32) []byte {
	if len(vals) == 0 {
		return append(dst, '[', ']')
	}
	dst = append(dst, '[')
	dst = strconv.AppendInt(dst, int64(vals[0]), 10)
	if len(vals) > 1 {
		for _, val := range vals[1:] {
			dst = strconv.AppendInt(append(dst, ','), int64(val), 10)
		}
	}
	dst = append(dst, ']')
	return dst
}

// AppendInt64 converts the input int64 to a string and
// appends the encoded string to the input byte slice.
func (Encoder) AppendInt64(dst []byte, val int64) []byte {
	return strconv.AppendInt(dst, val, 10)
}

// AppendInts64 encodes the input int64s to json and
// appends the encoded string list to the input byte slice.
func (Encoder) AppendInts64(dst []byte, vals []int64) []byte {
	if len(vals) == 0 {
		return append(dst, '[', ']')
	}
	dst = append(dst, '[')
	dst = strconv.AppendInt(dst, vals[0], 10)
	if len(vals) > 1 {
		for _, val := range vals[1:] {
			dst = strconv.AppendInt(append(dst, ','), val, 10)
		}
	}
	dst = append(dst, ']')
	return dst
}

// AppendUint converts the input uint to a string and
// appends the encoded string to the input byte slice.
func (Encoder) AppendUint(dst []byte, val uint) []byte {
	return strconv.AppendUint(dst, uint64(val), 10)
}

// AppendUints encodes the input uints to json and
// appends the encoded string list to the input byte slice.
func (Encoder) AppendUints(dst []byte, vals []uint) []byte {
	if len(vals) == 0 {
		return append(dst, '[', ']')
	}
	dst = append(dst, '[')
	dst = strconv.AppendUint(dst, uint64(vals[0]), 10)
	if len(vals) > 1 {
		for _, val := range vals[1:] {
			dst = strconv.AppendUint(append(dst, ','), uint64(val), 10)
		}
	}
	dst = append(dst, ']')
	return dst
}

// AppendUint8 converts the input uint8 to a string and
// appends the encoded string to the input byte slice.
func (Encoder) AppendUint8(dst []byte, val uint8) []byte {
	return strconv.AppendUint(dst, uint64(val), 10)
}

// AppendUints8 encodes the input uint8s to json and
// appends the encoded string list to the input byte slice.
func (Encoder) AppendUints8(dst []byte, vals []uint8) []byte {
	if len(vals) == 0 {
		return append(dst, '[', ']')
	}
	dst = append(dst, '[')
	dst = strconv.AppendUint(dst, uint64(vals[0]), 10)
	if len(vals) > 1 {
		for _, val := range vals[1:] {
			dst = strconv.AppendUint(append(dst, ','), uint64(val), 10)
		}
	}
	dst = append(dst, ']')
	return dst
}

// AppendUint16 converts the input uint16 to a string and
// appends the encoded string to the input byte slice.
func (Encoder) AppendUint16(dst []byte, val uint16) []byte {
	return strconv.AppendUint(dst, uint64(val), 10)
}

// AppendUints16 encodes the input uint16s to json and
// appends the encoded string list to the input byte slice.
func (Encoder) AppendUints16(dst []byte, vals []uint16) []byte {
	if len(vals) == 0 {
		return append(dst, '[', ']')
	}
	dst = append(dst, '[')
	dst = strconv.AppendUint(dst, uint64(vals[0]), 10)
	if len(vals) > 1 {
		for _, val := range vals[1:] {
			dst = strconv.AppendUint(append(dst, ','), uint64(val), 10)
		}
	}
	dst = append(dst, ']')
	return dst
}

// AppendUint32 converts the input uint32 to a string and
// appends the encoded string to the input byte slice.
func (Encoder) AppendUint32(dst []byte, val uint32) []byte {
	return strconv.AppendUint(dst, uint64(val), 10)
}

// AppendUints32 encodes the input uint32s to json and
// appends the encoded string list to the input byte slice.
func (Encoder) AppendUints32(dst []byte, vals []uint32) []byte {
	if len(vals) == 0 {
		return append(dst, '[', ']')
	}
	dst = append(dst, '[')
	dst = strconv.AppendUint(dst, uint64(vals[0]), 10)
	if len(vals) > 1 {
		for _, val := range vals[1:] {
			dst = strconv.AppendUint(append(dst, ','), uint64(val), 10)
		}
	}
	dst = append(dst, ']')
	return dst
}

// AppendUint64 converts the input uint64 to a string and
// appends the encoded string to the input byte slice.
func (Encoder) AppendUint64(dst []byte, val uint64) []byte {
	return strconv.AppendUint(dst, val, 10)
}

// AppendUints64 encodes the input uint64s to json and
// appends the encoded string list to the input byte slice.
func (Encoder) AppendUints64(dst []byte, vals []uint64) []byte {
	if len(vals) == 0 {
		return append(dst, '[', ']')
	}
	dst = append(dst, '[')
	dst = strconv.AppendUint(dst, vals[0], 10)
	if len(vals) > 1 {
		for _, val := range vals[1:] {
			dst = strconv.AppendUint(append(dst, ','), val, 10)
		}
	}
	dst = append(dst, ']')
	return dst
}

func appendFloat(dst []byte, val float64, bitSize int) []byte {
	// JSON does not permit NaN or Infinity. A typical JSON encoder would fail
	// with an error, but a logging library wants the data to get through, so
	// we make a tradeoff and store those values as strings.
	switch {
	case math.IsNaN(val):
		return append(dst, `"NaN"`...)
	case math.IsInf(val, 1):
		return append(dst, `"+Inf"`...)
	case math.IsInf(val, -1):
		return append(dst, `"-Inf"`...)
	}
	return strconv.AppendFloat(dst, val, 'f', -1, bitSize)
}

// AppendFloat32 converts the input float32 to a string and
// appends the encoded string to the input byte slice.
func (Encoder) AppendFloat32(dst []byte, val float32) []byte {
	return appendFloat(dst, float64(val), 32)
}

// AppendFloats32 encodes the input float32s to json and
// appends the encoded string list to the input byte slice.
func (Encoder) AppendFloats32(dst []byte, vals []float32) []byte {
	if len(vals) == 0 {
		return append(dst, '[', ']')
	}
	dst = append(dst, '[')
	dst = appendFloat(dst, float64(vals[0]), 32)
	if len(vals) > 1 {
		for _, val := range vals[1:] {
			dst = appendFloat(append(dst, ','), float64(val), 32)
		}
	}
	dst = append(dst, ']')
	return dst
}

// AppendFloat64 converts the input float64 to a string and
// appends the encoded string to the input byte slice.
func (Encoder) AppendFloat64(dst []byte, val float64) []byte {
	return appendFloat(dst, val, 64)
}

// AppendFloats64 encodes the input float64s to json and
// appends the encoded string list to the input byte slice.
func (Encoder) AppendFloats64(dst []byte, vals []float64) []byte {
	if len(vals) == 0 {
		return append(dst, '[', ']')
	}
	dst = append(dst, '[')
	// Encode the first element at 64-bit precision, matching the rest of the
	// slice and AppendFloat64.
	dst = appendFloat(dst, vals[0], 64)
	if len(vals) > 1 {
		for _, val := range vals[1:] {
			dst = appendFloat(append(dst, ','), val, 64)
		}
	}
	dst = append(dst, ']')
	return dst
}

// AppendInterface marshals the input interface to a string and
// appends the encoded string to the input byte slice.
func (e Encoder) AppendInterface(dst []byte, i interface{}) []byte {
	marshaled, err := json.Marshal(i)
	if err != nil {
		return e.AppendString(dst, fmt.Sprintf("marshaling error: %v", err))
	}
	return append(dst, marshaled...)
}

// AppendObjectData takes in an object that is already in a byte array
// and adds it to the dst.
func (Encoder) AppendObjectData(dst []byte, o []byte) []byte {
	// Two cases to handle here:
	// 1. the new content starts with '{': strip the brace and, if dst
	//    already holds fields beyond its opening '{', prepend a ','
	//    separator;
	// 2. the new content is a bare field: only the ',' separator is
	//    needed when dst already holds fields.
	if o[0] == '{' {
		if len(dst) > 1 {
			dst = append(dst, ',')
		}
		o = o[1:]
	} else if len(dst) > 1 {
		dst = append(dst, ',')
	}
	return append(dst, o...)
}

// AppendIPAddr adds an IPv4 or IPv6 address to dst.
func (e Encoder) AppendIPAddr(dst []byte, ip net.IP) []byte {
	return e.AppendString(dst, ip.String())
}

// AppendIPPrefix adds an IPv4 or IPv6 Prefix (address & mask) to dst.
func (e Encoder) AppendIPPrefix(dst []byte, pfx net.IPNet) []byte {
	return e.AppendString(dst, pfx.String())
}

// AppendMACAddr adds a MAC address to dst.
func (e Encoder) AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte {
	return e.AppendString(dst, ha.String())
}
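For orientation, a minimal sketch of how these helpers compose a JSON event by hand; buildEvent is hypothetical and not part of the vendored package:

func buildEvent() []byte {
	var e Encoder
	dst := e.AppendBeginMarker(nil) // {
	dst = append(dst, `"ok":`...)
	dst = e.AppendBool(dst, true)
	dst = append(dst, `,"codes":`...)
	dst = e.AppendInts(dst, []int{1, 2, 3})
	dst = e.AppendEndMarker(dst) // }
	dst = e.AppendLineBreak(dst)
	// dst now holds: {"ok":true,"codes":[1,2,3]}\n
	return dst
}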
8,797
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/internal
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/internal/json/time.go
package json

import (
	"strconv"
	"time"
)

const (
	// Keep in sync with the format constants in zerolog/global.go.
	timeFormatUnix      = ""
	timeFormatUnixMs    = "UNIXMS"
	timeFormatUnixMicro = "UNIXMICRO"
)

// AppendTime formats the input time with the given format
// and appends the encoded string to the input byte slice.
func (e Encoder) AppendTime(dst []byte, t time.Time, format string) []byte {
	switch format {
	case timeFormatUnix:
		return e.AppendInt64(dst, t.Unix())
	case timeFormatUnixMs:
		return e.AppendInt64(dst, t.UnixNano()/1000000)
	case timeFormatUnixMicro:
		return e.AppendInt64(dst, t.UnixNano()/1000)
	}
	return append(t.AppendFormat(append(dst, '"'), format), '"')
}

// AppendTimes converts the input times with the given format
// and appends the encoded string list to the input byte slice.
func (Encoder) AppendTimes(dst []byte, vals []time.Time, format string) []byte {
	switch format {
	case timeFormatUnix:
		return appendUnixTimes(dst, vals)
	case timeFormatUnixMs:
		return appendUnixMsTimes(dst, vals)
	case timeFormatUnixMicro:
		return appendUnixMicroTimes(dst, vals)
	}
	if len(vals) == 0 {
		return append(dst, '[', ']')
	}
	dst = append(dst, '[')
	dst = append(vals[0].AppendFormat(append(dst, '"'), format), '"')
	if len(vals) > 1 {
		for _, t := range vals[1:] {
			dst = append(t.AppendFormat(append(dst, ',', '"'), format), '"')
		}
	}
	dst = append(dst, ']')
	return dst
}

func appendUnixTimes(dst []byte, vals []time.Time) []byte {
	if len(vals) == 0 {
		return append(dst, '[', ']')
	}
	dst = append(dst, '[')
	dst = strconv.AppendInt(dst, vals[0].Unix(), 10)
	if len(vals) > 1 {
		for _, t := range vals[1:] {
			dst = strconv.AppendInt(append(dst, ','), t.Unix(), 10)
		}
	}
	dst = append(dst, ']')
	return dst
}

func appendUnixMsTimes(dst []byte, vals []time.Time) []byte {
	if len(vals) == 0 {
		return append(dst, '[', ']')
	}
	dst = append(dst, '[')
	dst = strconv.AppendInt(dst, vals[0].UnixNano()/1000000, 10)
	if len(vals) > 1 {
		for _, t := range vals[1:] {
			dst = strconv.AppendInt(append(dst, ','), t.UnixNano()/1000000, 10)
		}
	}
	dst = append(dst, ']')
	return dst
}

// appendUnixMicroTimes mirrors appendUnixMsTimes at microsecond resolution,
// so slice encoding matches AppendTime's handling of timeFormatUnixMicro.
func appendUnixMicroTimes(dst []byte, vals []time.Time) []byte {
	if len(vals) == 0 {
		return append(dst, '[', ']')
	}
	dst = append(dst, '[')
	dst = strconv.AppendInt(dst, vals[0].UnixNano()/1000, 10)
	if len(vals) > 1 {
		for _, t := range vals[1:] {
			dst = strconv.AppendInt(append(dst, ','), t.UnixNano()/1000, 10)
		}
	}
	dst = append(dst, ']')
	return dst
}

// AppendDuration formats the input duration with the given unit and appends
// the encoded value to the input byte slice, as an integer when useInt is
// true and as a float otherwise.
func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte {
	if useInt {
		return strconv.AppendInt(dst, int64(d/unit), 10)
	}
	return e.AppendFloat64(dst, float64(d)/float64(unit))
}

// AppendDurations formats the input durations with the given unit and
// appends the encoded list to the input byte slice.
func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte {
	if len(vals) == 0 {
		return append(dst, '[', ']')
	}
	dst = append(dst, '[')
	dst = e.AppendDuration(dst, vals[0], unit, useInt)
	if len(vals) > 1 {
		for _, d := range vals[1:] {
			dst = e.AppendDuration(append(dst, ','), d, unit, useInt)
		}
	}
	dst = append(dst, ']')
	return dst
}
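A minimal sketch, assuming the package context above; stampExamples is a hypothetical helper showing how the format argument selects integer Unix encodings versus time.Time layout strings:

func stampExamples(t time.Time) []byte {
	var e Encoder
	dst := e.AppendTime(nil, t, timeFormatUnixMs) // integer milliseconds, e.g. 1136214245999
	dst = append(dst, ',')
	dst = e.AppendTime(dst, t, time.RFC3339) // quoted layout, e.g. "2006-01-02T15:04:05Z"
	dst = append(dst, ',')
	dst = e.AppendDuration(dst, 1500*time.Millisecond, time.Second, false) // 1.5
	return dst
}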
8,798
0
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/internal
kubeflow_public_repos/fate-operator/vendor/github.com/rs/zerolog/internal/json/string.go
package json

import "unicode/utf8"

const hex = "0123456789abcdef"

var noEscapeTable = [256]bool{}

func init() {
	for i := 0; i <= 0x7e; i++ {
		noEscapeTable[i] = i >= 0x20 && i != '\\' && i != '"'
	}
}

// AppendStrings encodes the input strings to json and
// appends the encoded string list to the input byte slice.
func (e Encoder) AppendStrings(dst []byte, vals []string) []byte {
	if len(vals) == 0 {
		return append(dst, '[', ']')
	}
	dst = append(dst, '[')
	dst = e.AppendString(dst, vals[0])
	if len(vals) > 1 {
		for _, val := range vals[1:] {
			dst = e.AppendString(append(dst, ','), val)
		}
	}
	dst = append(dst, ']')
	return dst
}

// AppendString encodes the input string to json and appends
// the encoded string to the input byte slice.
//
// The operation loops through each byte in the string looking
// for characters that need json or utf8 encoding. If the string
// does not need encoding, then the string is appended in its
// entirety to the byte slice.
// If we encounter a byte that does need encoding, switch up
// the operation and perform a byte-by-byte read-encode-append.
func (Encoder) AppendString(dst []byte, s string) []byte {
	// Start with a double quote.
	dst = append(dst, '"')
	// Loop through each character in the string.
	for i := 0; i < len(s); i++ {
		// Check if the character needs encoding. Control characters,
		// slashes, and the double quote need json encoding. Bytes above
		// the ASCII boundary need utf8 encoding.
		if !noEscapeTable[s[i]] {
			// We encountered a character that needs to be encoded. Switch
			// to complex version of the algorithm.
			dst = appendStringComplex(dst, s, i)
			return append(dst, '"')
		}
	}
	// The string has no need for encoding and therefore is directly
	// appended to the byte slice.
	dst = append(dst, s...)
	// End with a double quote
	return append(dst, '"')
}

// appendStringComplex is used by AppendString to take over an in-progress
// JSON string encoding that encountered a character that needs
// to be encoded.
func appendStringComplex(dst []byte, s string, i int) []byte {
	start := 0
	for i < len(s) {
		b := s[i]
		if b >= utf8.RuneSelf {
			r, size := utf8.DecodeRuneInString(s[i:])
			if r == utf8.RuneError && size == 1 {
				// In case of error, first append previous simple characters
				// to the byte slice if any and append a replacement
				// character code in place of the invalid sequence.
				if start < i {
					dst = append(dst, s[start:i]...)
				}
				dst = append(dst, `\ufffd`...)
				i += size
				start = i
				continue
			}
			i += size
			continue
		}
		if noEscapeTable[b] {
			i++
			continue
		}
		// We encountered a character that needs to be encoded.
		// Let's append the previous simple characters to the byte slice
		// and switch our operation to read and encode the remainder
		// characters byte-by-byte.
		if start < i {
			dst = append(dst, s[start:i]...)
		}
		switch b {
		case '"', '\\':
			dst = append(dst, '\\', b)
		case '\b':
			dst = append(dst, '\\', 'b')
		case '\f':
			dst = append(dst, '\\', 'f')
		case '\n':
			dst = append(dst, '\\', 'n')
		case '\r':
			dst = append(dst, '\\', 'r')
		case '\t':
			dst = append(dst, '\\', 't')
		default:
			dst = append(dst, '\\', 'u', '0', '0', hex[b>>4], hex[b&0xF])
		}
		i++
		start = i
	}
	if start < len(s) {
		dst = append(dst, s[start:]...)
	}
	return dst
}
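As a rough illustration of the fast and slow paths, assuming the package context above (escapeExamples is a hypothetical helper, not part of zerolog):

func escapeExamples() [][]byte {
	var e Encoder
	return [][]byte{
		e.AppendString(nil, "plain"),              // fast path: "plain"
		e.AppendString(nil, "say \"hi\"\n"),       // slow path: "say \"hi\"\n"
		e.AppendString(nil, string([]byte{0x80})), // invalid UTF-8: "\ufffd"
	}
}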
8,799