index | repo_id | file_path | content | __index_level_0__
---|---|---|---|---|
0 | kubeflow_public_repos | kubeflow_public_repos/batch-predict/CONTRIBUTING.md | <!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)*
- [How to Contribute](#how-to-contribute)
- [Contributor License Agreement](#contributor-license-agreement)
- [Code reviews](#code-reviews)
- [Get involved](#get-involved)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
# How to Contribute
We'd love to accept your patches and contributions to this project. There are
just a few small guidelines you need to follow.
## Contributor License Agreement
Contributions to this project must be accompanied by a Contributor License
Agreement. You (or your employer) retain the copyright to your contribution;
this simply gives us permission to use and redistribute your contributions as
part of the project. Head over to <https://cla.developers.google.com/> to see
your current agreements on file or to sign a new one.
You generally only need to submit a CLA once, so if you've already submitted one
(even if it was for a different project), you probably don't need to do it
again.
## Code reviews
All submissions, including submissions by project members, require review. We
use GitHub pull requests for this purpose. Consult
[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
information on using pull requests.
| 8,500 |
0 | kubeflow_public_repos | kubeflow_public_repos/batch-predict/setup.py | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package Setup for the Kubeflow Batch Prediction.
"""
import os
from setuptools import find_packages
from setuptools import setup
def get_required_install_packages():
global_names = {}
# pylint: disable=exec-used
with open(os.path.normpath('kubeflow_batch_predict/version.py')) as f:
exec(f.read(), global_names)
return global_names['required_install_packages_with_batch_prediction']
def get_version():
global_names = {}
# pylint: disable=exec-used
with open(os.path.normpath('kubeflow_batch_predict/version.py')) as f:
exec(f.read(), global_names)
return global_names['__version__']
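# Both helpers above exec kubeflow_batch_predict/version.py into a scratch
# namespace so that setup.py can reuse the version string and dependency list
# without importing the package before it is installed.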
setup(
name='kubeflow_batch_predict',
version=get_version(),
author='Google',
author_email='[email protected]',
install_requires=get_required_install_packages(),
packages=find_packages(),
include_package_data=True,
description='Kubeflow Batch Predict Package',
requires=[])
| 8,501 |
0 | kubeflow_public_repos | kubeflow_public_repos/batch-predict/OWNERS | approvers:
- jlewi
- yixinshi
reviewers:
- lluunn
- kunmingg
| 8,502 |
0 | kubeflow_public_repos/batch-predict | kubeflow_public_repos/batch-predict/kubeflow_batch_predict/__init__.py | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Machine Learning Prediction SDK.
"""
| 8,503 |
0 | kubeflow_public_repos/batch-predict | kubeflow_public_repos/batch-predict/kubeflow_batch_predict/version.py | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Kubeflow Batch Predict version constants.
"""
# Google Cloud Machine Learning Prediction Engine Version info.
__version__ = '0.1-alpha'
required_install_packages = [
'oauth2client >= 2.2.0',
'six == 1.10.0',
'bs4 >= 0.0.1, < 1.0',
'numpy >= 1.10.4', # Don't pin numpy, as it requires a recompile.
'crcmod >= 1.7, < 2.0',
'nltk >= 3.2.1, < 4.0',
'pyyaml >= 3.11, < 4.0',
'protobuf >= 3.1.0, < 4.0',
'enum34 >= 1.1',
'tensorflow >= 1.0.0',
]
required_install_packages_with_batch_prediction = required_install_packages + [
'apache-beam[gcp] >= 2.5.0',
]
| 8,504 |
0 | kubeflow_public_repos/batch-predict/kubeflow_batch_predict | kubeflow_public_repos/batch-predict/kubeflow_batch_predict/dataflow/__init__.py | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 8,505 |
0 | kubeflow_public_repos/batch-predict/kubeflow_batch_predict | kubeflow_public_repos/batch-predict/kubeflow_batch_predict/dataflow/batch_prediction_main.py | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataflow pipeline for batch prediction in Kubeflow.
The output data is the prediction results, formatted as a list of JSON
objects. Each key of an object is the name of a tensor to fetch, and the value
is the value of that tensor.
This tool expects a file or file pattern as input. Currently it supports two
kinds of inputs.
1) Input file(s) in TFRecord format. Each record contains a string that can
be consumed by the graph.
2) Input file(s) in Text/JSON format. Each line contains either
a) numbers, or (possibly nested) lists of numbers, or
b) a string that can be consumed by the graph.
Note that the graph can accept one or more input tensor values. The tensor
names should be logical names specified when constructing the graph, where the
map of logical tensor names to physical tensor names is provided by user code.
One should also specify the model location where the model, exported in
SavedModel format, is saved. The location can be on local disk (when running
locally) or on GCS.
Another mandatory flag is the output location, which is a directory on local
disk or on GCS.
This program can be used to run the pipeline or to generate a Dataflow template
for later use.
"""
import logging
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
from tensorflow.python.saved_model import tag_constants
from kubeflow_batch_predict import prediction as mlprediction
from kubeflow_batch_predict.dataflow import _aggregators as aggregators
from kubeflow_batch_predict.dataflow import batch_prediction_pipeline
FILE_FORMAT_SUPPORTED = ["json", "tfrecord", "tfrecord_gzip"]
OUTPUT_FORMAT_SUPPORTED = ["json", "csv"]
FRAMEWORKS_SUPPORTED = [
mlprediction.TENSORFLOW_FRAMEWORK_NAME,
mlprediction.SCIKIT_LEARN_FRAMEWORK_NAME,
mlprediction.XGBOOST_FRAMEWORK_NAME
]
FILE_LIST_SEPARATOR = ","
# The default values for the pipeline options we want to customize.
DEFAULT_DATAFLOW_PIPELINE_OPTIONS = {"disk_size_gb": 250}
class BatchPredictionOptions(PipelineOptions):
"""Parse the command line arguments."""
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument(
"--input_file_format",
dest="input_file_format",
default="json",
choices=FILE_FORMAT_SUPPORTED,
help=("The input file format for batch prediction. "
"Supported formats: %s" % FILE_FORMAT_SUPPORTED))
parser.add_argument(
"--output_file_format",
dest="output_file_format",
default="json",
choices=OUTPUT_FORMAT_SUPPORTED,
help=("The output file format for batch prediction. "
"Supported formats: %s" % OUTPUT_FORMAT_SUPPORTED))
# TODO(user): consider using an "action=append"-style argparse flag.
parser.add_value_provider_argument(
"--input_file_patterns",
dest="input_file_patterns",
help=("The input data files or file patterns for batch prediction. Use "
"%s to separate multiple files/patterns" % FILE_LIST_SEPARATOR))
parser.add_value_provider_argument(
"--output_result_prefix",
dest="output_result_prefix",
help="Output path to save the prediction results.")
parser.add_value_provider_argument(
"--output_error_prefix",
dest="output_error_prefix",
help="Output path to save the prediction errors.")
parser.add_value_provider_argument(
"--tags",
dest="tags",
default=tag_constants.SERVING,
help="List of tags for serving graph, separated by commas.")
parser.add_value_provider_argument(
"--signature_name",
dest="signature_name",
default=None,
help="The key of the signature map for serving signature.")
parser.add_value_provider_argument(
"--model_dir",
dest="model_dir",
help=("The path to the model where the tensorflow meta graph "
"proto and checkpoint files are saved. Normally, it is "
"the exported directory by session_bundle library."))
parser.add_value_provider_argument(
"--batch_size",
dest="batch_size",
type=int,
default=64,
help=("Number of records in one batch in the input data. All items in "
"the same batch would be fed into tf session together so only "
"one Session.Run() is invoked for one batch. If the batch_size "
"has been embedded in the graph, the flag must match that value. "
"If the first dim of the input tensors is None, this means any "
"batch size value can be used. Thereby one can specify any int "
"value to this flag. If no batch size is specified in the graph, "
"the flag must take value of 1. Otherwise, the program will "
"issue an error that shapes doesn't match."))
parser.add_value_provider_argument(
"--user_project_id",
dest="user_project_id",
help=("User's CloudML project id. It is not the project id of the "
"Dataflow job. The logs are sent to user job project in "
"Stackdriver with job id as its label."))
parser.add_value_provider_argument(
"--user_job_id",
dest="user_job_id",
help=("User's CloudML job id. It is not the job id of the Dataflow job."
" The logs are sent to user job project in Stackdriver with job"
" id as its label."))
parser.add_value_provider_argument(
"--framework",
dest="framework",
default=mlprediction.TENSORFLOW_FRAMEWORK_NAME,
help=("The framework used to train the model against. Supported "
"frameworks: %s" % FRAMEWORKS_SUPPORTED))
if __name__ == "__main__":
logging.getLogger().setLevel(logging.INFO)
dataflow_pipeline_options = PipelineOptions(
**DEFAULT_DATAFLOW_PIPELINE_OPTIONS)
logging.info("Dataflow option: %s",
dataflow_pipeline_options.get_all_options())
# Create the pipeline
p = beam.Pipeline(options=dataflow_pipeline_options)
# Create a dict of aggregators.
aggregator_dict = aggregators.CreateAggregatorsDict()
# Actually start the pipeline
result = batch_prediction_pipeline.run(
p,
dataflow_pipeline_options.view_as(BatchPredictionOptions),
aggregator_dict)
# From Beam 2.1.0 on, the main thread exits without waiting for the pipeline.
# When using DirectRunner, we explicitly block until execution finishes.
if dataflow_pipeline_options.get_all_options().get("runner",
None) != "DataflowRunner":
result.wait_until_finish()
| 8,506 |
0 | kubeflow_public_repos/batch-predict/kubeflow_batch_predict | kubeflow_public_repos/batch-predict/kubeflow_batch_predict/dataflow/batch_prediction.py | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Kubeflow batch prediction dataflow transforms.
"""
# TODO(user): add a unittest to test logging futures.
import datetime
import json
import logging
import threading
import traceback
import apache_beam as beam
from apache_beam.options.value_provider import StaticValueProvider
from apache_beam.options.value_provider import ValueProvider
from apache_beam.transforms import window
from apache_beam.utils.windowed_value import WindowedValue
from kubeflow_batch_predict import prediction as mlprediction
from kubeflow_batch_predict.dataflow import _aggregators as aggregators
from kubeflow_batch_predict.dataflow import _error_filter as error_filter
from tensorflow.python.saved_model import tag_constants
DEFAULT_BATCH_SIZE = 1000 # 1K instances per batch when evaluating models.
LOG_SIZE_LIMIT = 1000 # 1K bytes for the input field in log entries.
LOG_NAME = "worker"
_METRICS_NAMESPACE = "cloud_ml_batch_predict"
class EmitAsBatchDoFn(beam.DoFn):
"""A DoFn that buffers the records and emits them batch by batch."""
def __init__(self, desired_batch_size):
"""Constructor of EmitAsBatchDoFn beam.DoFn class.
Args:
desired_batch_size: the desired number of records to buffer before
emitting a batch.
"""
if isinstance(desired_batch_size, int):
desired_batch_size = StaticValueProvider(int, desired_batch_size)
self._desired_batch_size = desired_batch_size
self._batch = []
# Metrics.
self._batch_size_distribution = beam.metrics.Metrics.distribution(
_METRICS_NAMESPACE, "batch_size")
self._num_instances = beam.metrics.Metrics.counter(_METRICS_NAMESPACE,
"num_instances")
def _flush_batch(self):
self._batch_size_distribution.update(len(self._batch))
self._num_instances.inc(len(self._batch))
result = self._batch
self._batch = []
return result
# TODO(user): Remove the context and try catch after sdk update
def process(self, element):
try:
element = element.element
except AttributeError:
pass
self._batch.append(element)
if len(self._batch) >= self._desired_batch_size.get():
yield self._flush_batch()
def finish_bundle(self, context=None):
if self._batch:
yield WindowedValue(self._flush_batch(), -1, [window.GlobalWindow()])
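# Minimal usage sketch (not part of the original module): EmitAsBatchDoFn
# groups individual records into lists of at most `desired_batch_size`
# elements, flushing any remainder when the bundle finishes.
#
#   batched = records | "Batch" >> beam.ParDo(EmitAsBatchDoFn(3))
#
# For the input ["a", "b", "c", "d"] processed in a single bundle, the output
# would contain the batches ["a", "b", "c"] and ["d"].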
class PredictionDoFn(beam.DoFn):
"""A DoFn class loading the model to create session and performing prediction.
The input PCollection consists of a list of strings from the input files.
The DoFn first loads model from a given path where meta graph data and
checkpoint data are exported to. Then if the there is only one string input
tensor or the model needs to preprocess the input, it directly passes the
data to prediction. Otherwise, it tries to load the data into JSON.
Then it batches the inputs of each instance into one feed_dict. After that, it
runs session and predicts the interesting values for all the instances.
Finally it emits the prediction result for each individual instance.
"""
class _ModelState(object):
"""Atomic representation of the in-memory state of the model."""
def __init__(self,
model_dir,
tags,
framework=mlprediction.TENSORFLOW_FRAMEWORK_NAME):
self.model_dir = model_dir
client = mlprediction.create_client(framework, model_dir, tags)
self.model = mlprediction.create_model(client, model_dir, framework)
_thread_local = threading.local()
def __init__(self,
aggregator_dict=None,
user_project_id="",
user_job_id="",
tags=tag_constants.SERVING,
signature_name="",
skip_preprocessing=False,
target="",
config=None,
framework=mlprediction.TENSORFLOW_FRAMEWORK_NAME):
"""Constructor of Prediction beam.DoFn class.
Args:
aggregator_dict: A dict of aggregators containing maps from counter name
to the aggregator.
user_project_id: A string. The project to which the logs will be sent.
user_job_id: A string. The job to which the logs will be sent.
tags: A comma-separated string that contains a list of tags for the
serving graph.
signature_name: A string to map into the signature map to get the serving
signature.
skip_preprocessing: bool whether to skip preprocessing even when
the metadata.yaml/metadata.json file exists.
target: The execution engine to connect to. See target in tf.Session(). In
most cases, users should not set the target.
config: A ConfigProto proto with configuration options. See config in
tf.Session()
framework: The framework used to train this model. Available frameworks:
"TENSORFLOW", "SCIKIT_LEARN", and "XGBOOST".
Side Inputs:
model_dir: The directory containing the model to load and the
checkpoint files to restore the session.
"""
self._target = target
self._user_project_id = user_project_id
self._user_job_id = user_job_id
self._tags = tags
self._signature_name = signature_name
self._skip_preprocessing = skip_preprocessing
self._config = config
self._aggregator_dict = aggregator_dict
self._model_state = None
self._tag_list = []
self._framework = framework
# Metrics.
self._model_load_seconds_distribution = beam.metrics.Metrics.distribution(
_METRICS_NAMESPACE, "model_load_seconds")
self._batch_process_ms_distribution = beam.metrics.Metrics.distribution(
_METRICS_NAMESPACE, "batch_process_milliseconds")
def start_bundle(self):
user_project_id = self._user_project_id.get()
user_job_id = self._user_job_id.get()
self._tag_list = self._tags.get().split(",")
self._signature_name = self._signature_name.get()
def process(self, element, model_dir):
try:
if isinstance(model_dir, ValueProvider):
model_dir = model_dir.get()
framework = self._framework.get()
if self._model_state is None:
if (getattr(self._thread_local, "model_state", None) is None or
self._thread_local.model_state.model_dir != model_dir):
start = datetime.datetime.now()
self._thread_local.model_state = self._ModelState(
model_dir, self._tag_list, framework)
self._model_load_seconds_distribution.update(
int((datetime.datetime.now() - start).total_seconds()))
self._model_state = self._thread_local.model_state
else:
assert self._model_state.model_dir == model_dir
# Measure the processing time.
start = datetime.datetime.now()
# Try to load it.
if framework == mlprediction.TENSORFLOW_FRAMEWORK_NAME:
# Even though predict() checks the signature in TensorFlowModel,
# we need to duplicate this check here to determine the single string
# input case.
self._signature_name, signature = self._model_state.model.get_signature(
self._signature_name)
if self._model_state.model.is_single_string_input(signature):
loaded_data = element
else:
loaded_data = [json.loads(d) for d in element]
else:
loaded_data = [json.loads(d) for d in element]
instances = mlprediction.decode_base64(loaded_data)
# Actual prediction occurs.
kwargs = {}
if self._signature_name:
kwargs = {"signature_name": self._signature_name}
inputs, predictions = self._model_state.model.predict(instances, **kwargs)
predictions = list(predictions)
if self._aggregator_dict:
self._aggregator_dict[aggregators.AggregatorName.ML_PREDICTIONS].inc(
len(predictions))
# For successful processing, record the time.
td = datetime.datetime.now() - start
time_delta_in_ms = int(
td.microseconds / 10**3 + (td.seconds + td.days * 24 * 3600) * 10**3)
self._batch_process_ms_distribution.update(time_delta_in_ms)
for i, p in zip(inputs, predictions):
yield i, p
except mlprediction.PredictionError as e:
logging.error("Got a known exception: [%s]\n%s", str(e),
traceback.format_exc())
clean_error_detail = error_filter.filter_tensorflow_error(e.error_detail)
# Track in the counter.
if self._aggregator_dict:
counter_name = aggregators.AggregatorName.ML_FAILED_PREDICTIONS
self._aggregator_dict[counter_name].inc(len(element))
# Re-raise a failure to load the model as a permanent exception to end the Dataflow job.
if e.error_code == mlprediction.PredictionError.FAILED_TO_LOAD_MODEL:
raise beam.utils.retry.PermanentException(clean_error_detail)
try:
yield beam.pvalue.TaggedOutput("errors", (clean_error_detail,
element))
except AttributeError:
yield beam.pvalue.SideOutputValue("errors", (clean_error_detail,
element))
except Exception as e: # pylint: disable=broad-except
logging.error("Got an unknown exception: [%s].", traceback.format_exc())
# Track in the counter.
if self._aggregator_dict:
counter_name = aggregators.AggregatorName.ML_FAILED_PREDICTIONS
self._aggregator_dict[counter_name].inc(len(element))
try:
yield beam.pvalue.TaggedOutput("errors", (str(e), element))
except AttributeError:
yield beam.pvalue.SideOutputValue("errors", (str(e), element))
class BatchPredict(beam.PTransform):
"""A transform to load tensorflow model and do prediction.
The transform first reads prediction instance from the input. Then it loads
the tensorflow model from disk and restores the session. For each input, it
performs prediction and emits the results.
"""
def __init__(self,
model_dir,
tags=tag_constants.SERVING,
signature_name="",
batch_size=DEFAULT_BATCH_SIZE,
aggregator_dict=None,
user_project_id="",
user_job_id="",
target="",
config=None,
return_input=False,
framework=mlprediction.TENSORFLOW_FRAMEWORK_NAME,
**kwargs):
"""Constructs the transform.
Args:
model_dir: a Pvalue singleton of model directory that contains model
graph and model parameter files.
tags: A comma-separated string that contains a list of tags for the
serving graph.
signature_name: A string to map into the signature map to get the serving
signature.
batch_size: the number of records in one batch, or a ValueProvider of
integer. All the instances in the same batch are fed
into the tf session together, so only one Session.run() is
invoked per batch.
aggregator_dict: A dict of aggregators containing maps from counter name
to the aggregator.
user_project_id: A string or a ValueProvider of string.
The project to which the logs will be sent.
user_job_id: A string or a ValueProvider of string. The job to which
the logs will be sent.
target: The execution engine to connect to. Optional. See target in
tf.Session()
config: A ConfigProto proto with configuration options. Optional. See
config in tf.Session()
return_input: if true, the transforms returns a tuple of [input, output]
otherwise only the output is returned.
framework: The framework used to train this model. Available frameworks:
"TENSORFLOW", "SCIKIT_LEARN", and "XGBOOST".
**kwargs: Other named arguments, e.g. label, passed to base PTransform.
"""
super(BatchPredict, self).__init__(**kwargs)
if not isinstance(batch_size, (int, ValueProvider)):
raise TypeError("%s: batch_size must be of type int"
" or ValueProvider; got %r instead"
% (self.__class__.__name__, batch_size))
self._batch_size = batch_size
if isinstance(batch_size, int):
self._batch_size = StaticValueProvider(int, batch_size)
self._framework = framework
if isinstance(framework, basestring):
self._framework = StaticValueProvider(str, framework)
# Tags
self._tags = tags
if isinstance(tags, basestring):
self._tags = StaticValueProvider(str, tags)
# Signature name
if not isinstance(signature_name, (basestring, ValueProvider)):
raise TypeError("%s: signature_name must be of type string"
" or ValueProvider; got %r instead"
% (self.__class__.__name__, signature_name))
self._signature_name = signature_name
if isinstance(signature_name, basestring):
self._signature_name = StaticValueProvider(str, signature_name)
# user_project_id
if not isinstance(user_project_id, (basestring, ValueProvider)):
raise TypeError("%s: user_project_id must be of type string"
" or ValueProvider; got %r instead"
% (self.__class__.__name__, user_project_id))
self._user_project_id = user_project_id
if isinstance(user_project_id, basestring):
self._user_project_id = StaticValueProvider(str, user_project_id)
# user_job_id
if not isinstance(user_job_id, (basestring, ValueProvider)):
raise TypeError("%s: user_job_id must be of type string"
" or ValueProvider; got %r instead"
% (self.__class__.__name__, user_job_id))
self._user_job_id = user_job_id
if isinstance(user_job_id, basestring):
self._user_job_id = StaticValueProvider(str, user_job_id)
# None-value-provider variable.
self._aggregator_dict = aggregator_dict
self._target = target
self._config = config
self._model_dir = model_dir
self._return_input = return_input
def apply(self, data):
return self.expand(data)
def expand(self, data):
"""Apply the transform.
Args:
data: A PCollection of records containing the data to predict.
Returns:
A PCollection of prediction records and errors
"""
result = (data | "Batch" >> beam.ParDo(EmitAsBatchDoFn(self._batch_size))
| "Prediction" >> beam.ParDo(
PredictionDoFn(
self._aggregator_dict,
self._user_project_id,
self._user_job_id,
self._tags,
self._signature_name,
skip_preprocessing=False,
target=self._target,
config=self._config,
framework=self._framework), self._model_dir).with_outputs(
"errors", main="main"))
input_output, errors = result.main, result.errors
if self._return_input:
output_data = input_output
else:
output_data = input_output | beam.Map(lambda (_, prediction): prediction)
return output_data, errors
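# Minimal usage sketch (assumption; it mirrors how batch_prediction_pipeline.run
# wires this transform; `instances` and `model_dir_pcoll` are hypothetical):
#
#   results, errors = (instances
#                      | "Predict" >> BatchPredict(
#                          beam.pvalue.AsSingleton(model_dir_pcoll),
#                          batch_size=64,
#                          framework=mlprediction.TENSORFLOW_FRAMEWORK_NAME))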
| 8,507 |
0 | kubeflow_public_repos/batch-predict/kubeflow_batch_predict | kubeflow_public_repos/batch-predict/kubeflow_batch_predict/dataflow/_error_filter.py | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for cleaning dataflow errors to be user friendly."""
TENSORFLOW_OP_MATCHER = "\n\nCaused by op"
def filter_tensorflow_error(error_string):
"""Removes information from a tensorflow error to hide Dataflow details.
TF appends the operation details if they exist, but the stacktrace
is not useful to the user, so we remove it if present.
Args:
error_string: PredictionError error detail, error caught during Session.run
Returns:
error_string with only base error message instead of full traceback.
"""
return error_string.split(TENSORFLOW_OP_MATCHER)[0]
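# Illustrative example (inputs are made up, not from the original module):
#
#   filter_tensorflow_error(
#       "Incompatible shapes: [2] vs. [3]\n\nCaused by op u'MatMul', ...")
#   # returns "Incompatible shapes: [2] vs. [3]"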
| 8,508 |
0 | kubeflow_public_repos/batch-predict/kubeflow_batch_predict | kubeflow_public_repos/batch-predict/kubeflow_batch_predict/dataflow/batch_prediction_pipeline.py | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataflow pipeline for batch prediction in Kubeflow."""
import cStringIO
import csv
import json
import apache_beam as beam
from apache_beam.io.textio import WriteToText
from apache_beam.transforms.combiners import Sample
import batch_prediction
from kubeflow_batch_predict.dataflow.io.multifiles_source import ReadFromMultiFilesText
from kubeflow_batch_predict.dataflow.io.multifiles_source import ReadFromMultiFilesTFRecord
from kubeflow_batch_predict.dataflow.io.multifiles_source import ReadFromMultiFilesTFRecordGZip
def keys_to_csv(keys):
output = cStringIO.StringIO()
csv_writer = csv.writer(output)
csv_writer.writerow(keys)
return output.getvalue()
def values_to_csv(entry, keys):
output = cStringIO.StringIO()
csv_writer = csv.DictWriter(output, keys)
csv_writer.writerow(entry)
return output.getvalue()
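# Illustrative example (not part of the original module; output assumes the
# csv module's default dialect):
#
#   keys = ["prediction", "score"]
#   keys_to_csv(keys)                                     # -> "prediction,score\r\n"
#   values_to_csv({"prediction": 1, "score": 0.9}, keys)  # -> "1,0.9\r\n"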
def run(p, args, aggregator_dict):
"""Run the pipeline with the args and dataflow pipeline option."""
# Create a PCollection for model directory.
model_dir = p | "Create Model Directory" >> beam.Create([args.model_dir])
input_file_format = args.input_file_format.lower()
input_file_patterns = args.input_file_patterns
# Setup reader.
if input_file_format == "json":
reader = p | "READ_TEXT_FILES" >> ReadFromMultiFilesText(
input_file_patterns)
elif input_file_format == "tfrecord":
reader = p | "READ_TF_FILES" >> ReadFromMultiFilesTFRecord(
input_file_patterns)
elif input_file_format == "tfrecord_gzip":
reader = p | "READ_TFGZIP_FILES" >> ReadFromMultiFilesTFRecordGZip(
input_file_patterns)
# Setup the whole pipeline.
results, errors = (reader
| "BATCH_PREDICTION" >> batch_prediction.BatchPredict(
beam.pvalue.AsSingleton(model_dir),
tags=args.tags,
signature_name=args.signature_name,
batch_size=args.batch_size,
aggregator_dict=aggregator_dict,
user_project_id=args.user_project_id,
user_job_id=args.user_job_id,
framework=args.framework))
output_file_format = args.output_file_format.lower()
# Convert predictions to target format and then write to output files.
if output_file_format == "json":
_ = (results
| "TO_JSON" >> beam.Map(json.dumps)
| "WRITE_PREDICTION_RESULTS" >> WriteToText(
args.output_result_prefix))
elif output_file_format == "csv":
fields = (results
| "SAMPLE_SINGLE_ELEMENT" >> Sample.FixedSizeGlobally(1)
| "GET_KEYS" >> beam.Map(
# entry could be None if no inputs were valid
lambda entry: entry[0].keys() if entry else []))
_ = (fields
| "KEYS_TO_CSV" >> beam.Map(keys_to_csv)
| "WRITE_KEYS" >> WriteToText(
args.output_result_prefix,
file_name_suffix="_header.csv",
shard_name_template=""))
_ = (results
| "VALUES_TO_CSV" >> beam.Map(values_to_csv,
beam.pvalue.AsSingleton(fields))
| "WRITE_PREDICTION_RESULTS" >> WriteToText(
args.output_result_prefix,
file_name_suffix=".csv",
append_trailing_newlines=False))
# Write prediction errors counts to output files.
_ = (errors
| "GROUP_BY_ERROR_TYPE" >> beam.combiners.Count.PerKey()
| "WRITE_ERRORS" >> WriteToText(
args.output_error_prefix))
return p.run()
| 8,509 |
0 | kubeflow_public_repos/batch-predict/kubeflow_batch_predict | kubeflow_public_repos/batch-predict/kubeflow_batch_predict/dataflow/_aggregators.py | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metric library for Kubeflow batch prediction dataflow transforms.
"""
# TODO(user): Get rid of this file and instead just use counters and other
# metrics directly in the body of the pertinent DoFns.
from apache_beam.metrics import Metrics
class AggregatorName(object):
"""Names of the metrics."""
ML_PREDICTIONS = "ml-predictions"
ML_FAILED_PREDICTIONS = "ml-failed-predictions"
# The aggregator config.
CONFIG_ = [AggregatorName.ML_PREDICTIONS, AggregatorName.ML_FAILED_PREDICTIONS]
def CreateAggregatorsDict(namespace="main"):
"""Creates metrics dict."""
return {name: Metrics.counter(namespace, name) for name in CONFIG_}
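# Illustrative usage (not part of the original module): the returned dict maps
# each configured name to a Beam counter metric.
#
#   aggregator_dict = CreateAggregatorsDict()
#   aggregator_dict[AggregatorName.ML_PREDICTIONS].inc(10)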
| 8,510 |
0 | kubeflow_public_repos/batch-predict/kubeflow_batch_predict/dataflow | kubeflow_public_repos/batch-predict/kubeflow_batch_predict/dataflow/io/__init__.py | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 8,511 |
0 | kubeflow_public_repos/batch-predict/kubeflow_batch_predict/dataflow | kubeflow_public_repos/batch-predict/kubeflow_batch_predict/dataflow/io/multifiles_source.py | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Multiple files/file patterns source.
Multiple File source, which reads the union of multiple files and/or file
patterns.
"""
from apache_beam import coders
from apache_beam.io import iobase
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.iobase import Read
from apache_beam.io.range_trackers import OffsetRangeTracker
from apache_beam.io.textio import _TextSource as TextSource
from apache_beam.io.tfrecordio import _TFRecordSource as TFRecordSource
from apache_beam.transforms import PTransform
from apache_beam.transforms.display import DisplayDataItem
# pylint: disable=g-import-not-at-top
try:
from apache_beam.options.value_provider import ValueProvider
from apache_beam.options.value_provider import StaticValueProvider
except ImportError:
from apache_beam.utils.value_provider import ValueProvider
from apache_beam.utils.value_provider import StaticValueProvider
# pylint: enable=g-import-not-at-top
FILE_LIST_SEPARATOR = ','
class MultiFilesSource(iobase.BoundedSource):
"""Base class for multiple files source.
Supports reading multiple files or file patterns separated by commas.
Subclasses should implement create_source() to create the underlying sources.
"""
def __init__(self, file_patterns, **kwargs):
# Handle the templated values.
if not isinstance(file_patterns, (basestring, ValueProvider)):
raise TypeError('%s: file_patterns must be of type string'
' or ValueProvider; got %r instead'
% (self.__class__.__name__, file_patterns))
if isinstance(file_patterns, basestring):
file_patterns = StaticValueProvider(str, file_patterns)
self._file_patterns = file_patterns
self._sources = []
self._kwargs = kwargs
def _populate_sources_lazily(self):
# We need to do it lazily because self._file_patterns can be a templated
# value and must be evaluated at runtime.
if not self._sources:
# dedup repeated files or file patterns.
for file_pattern in list(set(self._file_patterns.get().split(
FILE_LIST_SEPARATOR))):
self._sources.append(self.create_source(file_pattern.strip(),
**self._kwargs))
def estimate_size(self):
self._populate_sources_lazily()
return sum(s.estimate_size() for s in self._sources)
def get_range_tracker(self, start_position, stop_position):
self._populate_sources_lazily()
if start_position is None:
start_position = 0
if stop_position is None:
stop_position = len(self._sources)
return OffsetRangeTracker(start_position, stop_position)
def create_source(self, file_pattern, **kwargs):
raise NotImplementedError('MultiFilesSource cannot be used directly.')
def read(self, range_tracker):
self._populate_sources_lazily()
start_source = range_tracker.start_position()
stop_source = range_tracker.stop_position()
for source_ix in range(start_source, stop_source):
if not range_tracker.try_claim(source_ix):
break
sub_range_tracker = self._sources[source_ix].get_range_tracker(None, None)
for record in self._sources[source_ix].read(sub_range_tracker):
yield record
def split(self, desired_bundle_size, start_position=None,
stop_position=None):
self._populate_sources_lazily()
if start_position or stop_position:
raise ValueError(
'Multi-files initial splitting is not supported. Expected start and '
'stop positions to be None. Received %r and %r respectively.' %
(start_position, stop_position))
for source in self._sources:
for bundle in source.split(desired_bundle_size):
yield bundle
def display_data(self):
return {'file_patterns': DisplayDataItem(str(self._file_patterns),
label='File Patterns')}
class _MultiTextSource(MultiFilesSource):
"""Multiple files source for Text source."""
# TODO(user): Currently liquid sharding is performed on source boundaries.
# For text files, a more complicated RangeTracker can be implemented to
# support liquid sharding within sub-sources if needed. See ConcatRangeTracker
# in concat_source.py for reference.
def create_source(self, file_pattern, min_bundle_size=0,
compression_type=CompressionTypes.AUTO,
strip_trailing_newlines=True,
coder=coders.StrUtf8Coder(),
validate=True,
skip_header_lines=0):
return TextSource(file_pattern=file_pattern,
min_bundle_size=min_bundle_size,
compression_type=compression_type,
strip_trailing_newlines=strip_trailing_newlines,
coder=coder,
validate=validate,
skip_header_lines=skip_header_lines)
# TODO(user): currently compression_type is not a ValueProvider value in
# filebased_source, so we have to make separate classes for the non-compressed
# and compressed versions of TFRecord sources. Consider making compression_type
# a ValueProvider in filebased_source.
class _MultiTFRecordSource(MultiFilesSource):
"""Multiple files source for TFRecord source."""
def create_source(self, file_pattern):
return TFRecordSource(
file_pattern=file_pattern,
coder=coders.BytesCoder(),
compression_type=CompressionTypes.AUTO,
validate=True)
class _MultiTFRecordGZipSource(MultiFilesSource):
"""Multiple files source for TFRecord source gzipped."""
def create_source(self, file_pattern):
return TFRecordSource(
file_pattern=file_pattern,
coder=coders.BytesCoder(),
compression_type=CompressionTypes.GZIP,
validate=True)
class ReadFromMultiFilesText(PTransform):
"""A PTransform for reading text files or files patterns.
It is a wrapper of ReadFromText but supports multiple files or
files patterns.
"""
def __init__(
self,
file_patterns,
min_bundle_size=0,
compression_type=CompressionTypes.AUTO,
strip_trailing_newlines=True,
coder=coders.StrUtf8Coder(),
validate=True,
skip_header_lines=0,
**kwargs):
"""Initialize the ReadFromText transform.
Args:
file_patterns: The file paths/patterns to read from, as local file paths
or GCS files. Paths/patterns are separated by commas.
min_bundle_size: Minimum size of bundles that should be generated when
splitting this source into bundles. See ``FileBasedSource`` for more
details.
compression_type: Used to handle compressed input files. Typical value
is CompressionTypes.AUTO, in which case the underlying file_path's
extension will be used to detect the compression.
strip_trailing_newlines: Indicates whether this source should remove
the newline char in each line it reads before decoding that line.
coder: Coder used to decode each line.
validate: flag to verify that the files exist during the pipeline
creation time.
skip_header_lines: Number of header lines to skip. Same number is skipped
from each source file. Must be 0 or higher. Large number of skipped
lines might impact performance.
**kwargs: optional args dictionary.
"""
super(ReadFromMultiFilesText, self).__init__(**kwargs)
self._source = _MultiTextSource(
file_patterns,
min_bundle_size=min_bundle_size,
compression_type=compression_type,
strip_trailing_newlines=strip_trailing_newlines,
coder=coder,
validate=validate,
skip_header_lines=skip_header_lines)
def expand(self, pvalue):
return pvalue.pipeline | Read(self._source)
class ReadFromMultiFilesTFRecord(PTransform):
"""Transform for reading multiple TFRecord sources.
It is a wrapper around ReadFromTFRecord but supports multiple files or
file patterns.
"""
def __init__(self,
file_patterns,
**kwargs):
"""Initialize a ReadFromMultiFilesTFRecord transform.
Args:
file_patterns: file glob patterns to read TFRecords from.
**kwargs: optional args dictionary.
Returns:
A ReadFromTFRecord transform object.
"""
super(ReadFromMultiFilesTFRecord, self).__init__(**kwargs)
self._source = _MultiTFRecordSource(file_patterns)
def expand(self, pvalue):
return pvalue.pipeline | Read(self._source)
class ReadFromMultiFilesTFRecordGZip(PTransform):
"""Transform for reading multiple TFRecord Gzipped sources.
It is a wrapper around the gzipped ReadFromTFRecord but supports multiple
files or file patterns.
"""
def __init__(self,
file_patterns,
**kwargs):
"""Initialize a ReadFromMultiFilesTFRecordGzip transform.
Args:
file_patterns: file glob patterns to read TFRecords from.
**kwargs: optional args dictionary.
Returns:
A ReadFromTFRecord transform object.
"""
super(ReadFromMultiFilesTFRecordGZip, self).__init__(**kwargs)
self._source = _MultiTFRecordGZipSource(file_patterns)
def expand(self, pvalue):
return pvalue.pipeline | Read(self._source)
| 8,512 |
0 | kubeflow_public_repos/batch-predict/kubeflow_batch_predict | kubeflow_public_repos/batch-predict/kubeflow_batch_predict/prediction/_interfaces.py | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interfaces and other classes for providing custom code for prediction."""
class Model(object):
"""A Model performs predictions on a given list of instances.
The input instances are the raw values sent by the user. It is the
responsibility of a Model to translate these instances into
actual predictions.
The input instances and the output use python data types. The input
instances have been decoded prior to being passed to the predict
method. The output, which should use python data types, is
encoded after being returned from the predict method.
"""
def predict(self, instances, **kwargs):
"""Returns predictions for the provided instances.
Instances are the decoded values from the request. Clients need not worry
about JSON decoding or base64 decoding.
Args:
instances: list of instances, as described in the API.
**kwargs: additional keyword arguments, will be passed into the client's
predict method
Returns:
A two-element tuple (inputs, outputs). Both inputs and outputs are
lists. Each input/output is a dict mapping input/output alias to the
value for that input/output.
Raises:
PredictionError: if an error occurs during prediction.
"""
raise NotImplementedError()
@classmethod
def from_client(cls, client, model_path):
"""Creates a model using the given client and path.
Path is useful, e.g., to load files from the exported directory containing
the model.
Args:
client: An instance of PredictionClient for performing prediction.
model_path: The path to the stored model.
Returns:
An instance implementing this Model class.
"""
raise NotImplementedError()
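# Minimal sketch of a Model implementation (illustrative only; the class name
# and aliases below are hypothetical):
#
#   class EchoModel(Model):
#
#     def predict(self, instances, **kwargs):
#       inputs = [{"input": instance} for instance in instances]
#       outputs = [{"echo": instance} for instance in instances]
#       return inputs, outputs
#
#     @classmethod
#     def from_client(cls, client, model_path):
#       return cls()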
class PredictionClient(object):
"""A client for Prediction.
No assumptions are made about whether the prediction happens in process,
across processes, or even over the network.
The inputs, unlike Model.predict, have already been "columnarized", i.e.,
a dict mapping input names to values for a whole batch, much like
Session.run's feed_dict parameter. The return value is the same format.
"""
def __init__(self, *args, **kwargs):
pass
def predict(self, inputs, **kwargs):
"""Produces predictions for the given inputs.
Args:
inputs: a dict mapping input names to values
**kwargs: Additional keyword arguments for prediction
Returns:
A dict mapping output names to output values, similar to the input
dict.
"""
raise NotImplementedError()
class Processor(object):
"""Interface for constructing instance processors."""
@classmethod
def from_model_path(cls, model_path):
"""Creates a processor using the given model path.
Args:
model_path: The path to the stored model.
Returns:
An instance implementing this Processor class.
"""
raise NotImplementedError()
class Preprocessor(object):
"""Interface for processing a list of instances before prediction."""
def preprocess(self, instances, **kwargs):
"""The preprocessing function.
Args:
instances: a list of instances, as provided to the predict()
method.
**kwargs: Additional keyword arguments for preprocessing
Returns:
The processed instance to use in the predict() method.
"""
raise NotImplementedError()
class Postprocessor(object):
"""Interface for processing a list of instances after prediction."""
def postprocess(self, instances, **kwargs):
"""The postprocessing function.
Args:
instances: a list of instances, as provided to the predict()
method.
**kwargs: Additional keyword arguments for postprocessing
Returns:
The processed instance to return as the final prediction output.
"""
raise NotImplementedError()
| 8,513 |
0 | kubeflow_public_repos/batch-predict/kubeflow_batch_predict | kubeflow_public_repos/batch-predict/kubeflow_batch_predict/prediction/__init__.py | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=g-import-not-at-top
"""Classes and methods for predictions on a trained machine learning model.
"""
from ._interfaces import Model
from ._interfaces import PredictionClient
from .prediction_lib import BaseModel
from .prediction_lib import canonicalize_single_tensor_input
from .prediction_lib import columnarize
from .prediction_lib import COLUMNARIZE_TIME
from .prediction_lib import create_client
from .prediction_lib import create_model
from .prediction_lib import create_sklearn_model
from .prediction_lib import create_xgboost_model
from .prediction_lib import decode_base64
from .prediction_lib import encode_base64
from .prediction_lib import ENGINE
from .prediction_lib import FRAMEWORK
from .prediction_lib import INPUTS_KEY
from .prediction_lib import load_model
from .prediction_lib import local_predict
from .prediction_lib import OUTPUTS_KEY
from .prediction_lib import PredictionError
from .prediction_lib import rowify
from .prediction_lib import ROWIFY_TIME
from .prediction_lib import SCIKIT_LEARN_FRAMEWORK_NAME
from .prediction_lib import SESSION_RUN_ENGINE_NAME
from .prediction_lib import SESSION_RUN_TIME
from .prediction_lib import SessionClient
from .prediction_lib import SklearnModel
from .prediction_lib import Stats
from .prediction_lib import TENSORFLOW_FRAMEWORK_NAME
from .prediction_lib import TensorFlowClient
from .prediction_lib import TensorFlowModel
from .prediction_lib import Timer
from .prediction_lib import XGBOOST_FRAMEWORK_NAME
from .prediction_lib import XGBoostModel
| 8,514 |
0 | kubeflow_public_repos/batch-predict/kubeflow_batch_predict | kubeflow_public_repos/batch-predict/kubeflow_batch_predict/prediction/prediction_lib.py | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for running predictions.
"""
import base64
import collections
from contextlib import contextmanager
import inspect
import json
import logging
import os
import pickle
import pydoc # used for importing python classes from their FQN
import sys
import timeit
from ._interfaces import Model
from ._interfaces import PredictionClient
from enum import Enum
import numpy as np
import six
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import dtypes
from tensorflow.python.lib.io import file_io
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.util import compat
# --------------------------
# prediction.prediction_lib
# --------------------------
class UserClassType(Enum):
model_class = "model_class"
processor_class = "processor_class"
ENGINE = "Prediction-Engine"
ENGINE_RUN_TIME = "Prediction-Engine-Run-Time"
FRAMEWORK = "Framework"
SCIKIT_LEARN_FRAMEWORK_NAME = "scikit_learn"
XGBOOST_FRAMEWORK_NAME = "xgboost"
TENSORFLOW_FRAMEWORK_NAME = "tensorflow"
PREPROCESS_TIME = "Prediction-Preprocess-Time"
POSTPROCESS_TIME = "Prediction-Postprocess-Time"
# Keys for the name of the methods that the user provided `Processor`
# class should implement.
PREPROCESS_KEY = "preprocess"
POSTPROCESS_KEY = "postprocess"
FROM_MODEL_KEY = "from_model_path"
# Additional TF keyword arguments
INPUTS_KEY = "inputs"
OUTPUTS_KEY = "outputs"
SIGNATURE_KEY = "signature_name"
# Stats
COLUMNARIZE_TIME = "Prediction-Columnarize-Time"
UNALIAS_TIME = "Prediction-Unalias-Time"
ENCODE_TIME = "Prediction-Encode-Time"
SESSION_RUN_TIME = "Prediction-Session-Run-Time"
ALIAS_TIME = "Prediction-Alias-Time"
ROWIFY_TIME = "Prediction-Rowify-Time"
SESSION_RUN_ENGINE_NAME = "TF_SESSION_RUN"
# Scikit-learn and XGBoost related constants
MODEL_FILE_NAME_JOBLIB = "model.joblib"
MODEL_FILE_NAME_PICKLE = "model.pkl"
MODEL_FILE_NAME_BST = "model.bst"
PredictionErrorType = collections.namedtuple(
"PredictionErrorType", ("message", "code"))
class PredictionError(Exception):
"""Customer exception for known prediction exception."""
# The error code for prediction.
FAILED_TO_LOAD_MODEL = PredictionErrorType(
message="Failed to load model", code=0)
INVALID_INPUTS = PredictionErrorType("Invalid inputs", code=1)
FAILED_TO_RUN_MODEL = PredictionErrorType(
message="Failed to run the provided model", code=2)
INVALID_OUTPUTS = PredictionErrorType(
message="There was a problem processing the outputs", code=3)
INVALID_USER_CODE = PredictionErrorType(
message="There was a problem processing the user code", code=4)
# When adding a new exception, please update the ERROR_MESSAGE_ list as well
# as the unit tests.
def __init__(self, error_code, error_detail, *args):
super(PredictionError, self).__init__(error_code, error_detail, *args)
@property
def error_code(self):
return self.args[0].code
@property
def error_message(self):
return self.args[0].message
@property
def error_detail(self):
return self.args[1]
def __str__(self):
return ("%s: %s (Error code: %d)" % (self.error_message,
self.error_detail, self.error_code))
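# Illustrative usage (not part of the original module):
#
#   err = PredictionError(PredictionError.INVALID_INPUTS,
#                         "Expected tensor name: x, got: y.")
#   str(err)  # -> "Invalid inputs: Expected tensor name: x, got: y. (Error code: 1)"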
MICRO = 1000000
MILLI = 1000
class Timer(object):
"""Context manager for timing code blocks.
The object is intended to be used solely as a context manager and not
as a general purpose object.
The timer starts when __enter__ is invoked on the context manager
and stopped when __exit__ is invoked. After __exit__ is called,
the duration properties report the amount of time between
__enter__ and __exit__ and thus do not change. However, if any of the
duration properties are called between the call to __enter__ and __exit__,
then they will return the "live" value of the timer.
If the same Timer object is re-used in multiple with statements, the values
reported will reflect the latest call. Do not use the same Timer object in
nested with blocks with the same Timer context manager.
Example usage:
with Timer() as timer:
foo()
print(timer.duration_secs)
"""
def __init__(self, timer_fn=None):
self.start = None
self.end = None
self._get_time = timer_fn or timeit.default_timer
def __enter__(self):
self.end = None
self.start = self._get_time()
return self
def __exit__(self, exc_type, value, traceback):
self.end = self._get_time()
return False
@property
def seconds(self):
now = self._get_time()
return (self.end or now) - (self.start or now)
@property
def microseconds(self):
return int(MICRO * self.seconds)
@property
def milliseconds(self):
return int(MILLI * self.seconds)
class Stats(dict):
"""An object for tracking stats.
This class is dict-like, so stats are accessed/stored like so:
stats = Stats()
stats["count"] = 1
stats["foo"] = "bar"
This class also facilitates collecting timing information via the
context manager obtained using the "time" method. Reported timings
are in microseconds.
Example usage:
with stats.time("foo_time"):
foo()
print(stats["foo_time"])
"""
@contextmanager
def time(self, name, timer_fn=None):
with Timer(timer_fn) as timer:
yield timer
self[name] = timer.microseconds
def columnarize(instances):
"""Columnarize inputs.
Each line in the input is a dictionary of input names to the value
for that input (a single instance). For each input "column", this method
appends each of the input values to a list. The result is a dict mapping
input names to a batch of input data. This can be directly used as the
feed dict during prediction.
For example,
instances = [{"a": [1.0, 2.0], "b": "a"},
{"a": [3.0, 4.0], "b": "c"},
{"a": [5.0, 6.0], "b": "e"},]
batch = prediction_server_lib.columnarize(instances)
assert batch == {"a": [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
"b": ["a", "c", "e"]}
Arguments:
instances: (list of dict) where the dictionaries map input names
to the values for those inputs.
Returns:
A dictionary mapping input names to values, as described above.
"""
columns = collections.defaultdict(list)
for instance in instances:
for k, v in six.iteritems(instance):
columns[k].append(v)
return columns
def rowify(columns):
"""Converts columnar input to row data.
Consider the following code:
columns = {"prediction": np.array([1, # 1st instance
0, # 2nd
1]), # 3rd
"scores": np.array([[0.1, 0.9], # 1st instance
[0.7, 0.3], # 2nd
[0.4, 0.6]])} # 3rd
Then rowify will return the equivalent of:
[{"prediction": 1, "scores": [0.1, 0.9]},
{"prediction": 0, "scores": [0.7, 0.3]},
{"prediction": 1, "scores": [0.4, 0.6]}]
(each row is yielded; no list is actually created).
Arguments:
columns: (dict) mapping names to numpy arrays, where the arrays
contain a batch of data.
Raises:
PredictionError: if the outer dimension of each input isn't identical
for each element.
Yields:
A map with a single instance, as described above. Note: instances
is not a numpy array.
"""
sizes_set = {e.shape[0] for e in six.itervalues(columns)}
# All the elements in the length array should be identical. Otherwise,
# raise an exception.
if len(sizes_set) != 1:
sizes_dict = {name: e.shape[0] for name, e in six.iteritems(columns)}
raise PredictionError(
PredictionError.INVALID_OUTPUTS,
"Bad output from running tensorflow session: outputs had differing "
"sizes in the batch (outer) dimension. See the outputs and their "
"size: %s. Check your model for bugs that effect the size of the "
"outputs." % sizes_dict)
  # Pick an arbitrary value in the map to get its size.
num_instances = len(next(six.itervalues(columns)))
for row in six.moves.xrange(num_instances):
yield {
name: output[row, ...].tolist()
for name, output in six.iteritems(columns)
}
def canonicalize_single_tensor_input(instances, tensor_name):
"""Canonicalize single input tensor instances into list of dicts.
Instances that are single input tensors may or may not be provided with their
tensor name. The following are both valid instances:
1) instances = [{"x": "a"}, {"x": "b"}, {"x": "c"}]
2) instances = ["a", "b", "c"]
This function canonicalizes the input instances to be of type 1).
Arguments:
instances: single input tensor instances as supplied by the user to the
predict method.
tensor_name: the expected name of the single input tensor.
Raises:
PredictionError: if the wrong tensor name is supplied to instances.
Returns:
A list of dicts. Where each dict is a single instance, mapping the
tensor_name to the value (as supplied by the original instances).
"""
# Input is a single string tensor, the tensor name might or might not
# be given.
# There are 3 cases (assuming the tensor name is "t", tensor = "abc"):
# 1) {"t": "abc"}
# 2) "abc"
# 3) {"y": ...} --> wrong tensor name is given.
def parse_single_tensor(x, tensor_name):
if not isinstance(x, dict):
# case (2)
return {tensor_name: x}
elif len(x) == 1 and tensor_name == list(x.keys())[0]:
# case (1)
return x
else:
raise PredictionError(PredictionError.INVALID_INPUTS,
"Expected tensor name: %s, got tensor name: %s." %
(tensor_name, list(x.keys())))
if not isinstance(instances, list):
instances = [instances]
instances = [parse_single_tensor(x, tensor_name) for x in instances]
return instances
class BaseModel(Model):
"""The base definition of an internal Model interface.
"""
def __init__(self, client):
"""Constructs a BaseModel.
Args:
client: An instance of PredictionClient for performing prediction.
"""
self._client = client
self._user_processor = None
def preprocess(self, instances, stats=None, **kwargs):
"""Runs the preprocessing function on the instances.
Args:
instances: list of instances as provided to the predict() method.
stats: Stats object for recording timing information.
**kwargs: Additional keyword arguments for preprocessing.
Returns:
A new list of preprocessed instances. Each instance is as described
in the predict() method.
"""
pass
def postprocess(self, predicted_output, original_input=None, stats=None,
**kwargs):
"""Runs the postprocessing function on the instances.
Args:
predicted_output: list of instances returned by the predict() method on
preprocessed instances.
original_input: List of instances, before any pre-processing was applied.
stats: Stats object for recording timing information.
**kwargs: Additional keyword arguments for postprocessing.
Returns:
A new list of postprocessed instances.
"""
pass
def predict(self, instances, stats=None, **kwargs):
"""Runs preprocessing, predict, and postprocessing on the input."""
stats = stats or Stats()
self._validate_kwargs(kwargs)
with stats.time(PREPROCESS_TIME):
preprocessed = self.preprocess(instances, stats=stats, **kwargs)
with stats.time(ENGINE_RUN_TIME):
predicted_outputs = self._client.predict(
preprocessed, stats=stats, **kwargs)
with stats.time(POSTPROCESS_TIME):
postprocessed = self.postprocess(
predicted_outputs, original_input=instances, stats=stats, **kwargs)
return instances, postprocessed
def _validate_kwargs(self, kwargs):
"""Validates and sets defaults for extra predict keyword arguments.
    Modifies the keyword args dictionary in-place. Keyword args will be passed
    to pre/post-processing and the client predict method.
Can raise Exception to error out of request on bad keyword args.
If no additional args are required, pass.
Args:
kwargs: Dictionary (str->str) of keyword arguments to check.
"""
pass
# TODO: when we no longer load the model to get the signature
# consider making this a named constructor on SessionClient.
def load_model(
model_path,
tags=(tag_constants.SERVING,),
config=None):
"""Loads the model at the specified path.
Args:
model_path: the path to either session_bundle or SavedModel
tags: the tags that determines the model to load.
config: tf.ConfigProto containing session configuration options.
Returns:
A pair of (Session, map<string, SignatureDef>) objects.
Raises:
PredictionError: if the model could not be loaded.
"""
if loader.maybe_saved_model_directory(model_path):
try:
logging.info("Importing tensorflow.contrib in load_model")
# pylint: disable=redefined-outer-name,unused-variable,g-import-not-at-top
import tensorflow as tf
import tensorflow.contrib
from tensorflow.python.framework.ops import Graph
# pylint: enable=redefined-outer-name,unused-variable,g-import-not-at-top
if tf.__version__.startswith("1.0"):
session = tf_session.Session(target="", graph=None, config=config)
else:
session = tf_session.Session(target="", graph=Graph(), config=config)
meta_graph = loader.load(session, tags=list(tags), export_dir=model_path)
except Exception as e: # pylint: disable=broad-except
raise PredictionError(PredictionError.FAILED_TO_LOAD_MODEL,
"Failed to load the model due to bad model data."
" tags: %s\n%s" % (list(tags), str(e)))
else:
raise PredictionError(PredictionError.FAILED_TO_LOAD_MODEL,
"Cloud ML only supports TF 1.0 or above and models "
"saved in SavedModel format.")
if session is None:
raise PredictionError(PredictionError.FAILED_TO_LOAD_MODEL,
"Failed to create session when loading the model")
if not meta_graph.signature_def:
raise PredictionError(PredictionError.FAILED_TO_LOAD_MODEL,
"MetaGraph must have at least one signature_def.")
# Remove invalid signatures from the signature map.
invalid_signatures = []
for signature_name in meta_graph.signature_def:
try:
signature = meta_graph.signature_def[signature_name]
_update_dtypes(session.graph, signature.inputs)
_update_dtypes(session.graph, signature.outputs)
except ValueError as e:
logging.warn("Error updating signature %s: %s", signature_name, str(e))
invalid_signatures.append(signature_name)
for signature_name in invalid_signatures:
del meta_graph.signature_def[signature_name]
return session, meta_graph.signature_def
def _update_dtypes(graph, interface):
"""Adds dtype to TensorInfos in interface if necessary.
If already present, validates TensorInfo matches values in the graph.
TensorInfo is updated in place.
Args:
graph: the TensorFlow graph; used to lookup datatypes of tensors.
interface: map from alias to TensorInfo object.
Raises:
ValueError: if the data type in the TensorInfo does not match the type
found in graph.
"""
for alias, info in six.iteritems(interface):
# Postpone conversion to enum for better error messages.
dtype = graph.get_tensor_by_name(info.name).dtype
if not info.dtype:
info.dtype = dtype.as_datatype_enum
elif info.dtype != dtype.as_datatype_enum:
raise ValueError("Specified data types do not match for alias %s. "
"Graph has %d while TensorInfo reports %d." %
(alias, dtype, info.dtype))
# (TODO): Move this to a Tensorflow specific library.
class TensorFlowClient(PredictionClient):
"""A client for Prediction that uses Session.run."""
def __init__(self, signature_map, *args, **kwargs):
self._signature_map = signature_map
super(TensorFlowClient, self).__init__(*args, **kwargs)
@property
def signature_map(self):
return self._signature_map
def get_signature(self, signature_name=None):
"""Gets tensorflow signature for the given signature_name.
Args:
signature_name: string The signature name to use to choose the signature
from the signature map.
Returns:
a pair of signature_name and signature. The first element is the
signature name in string that is actually used. The second one is the
signature.
Raises:
PredictionError: when the signature is not found with the given signature
name or when there are more than one signatures in the signature map.
"""
# The way to find signature is:
# 1) if signature_name is specified, try to find it in the signature_map. If
# not found, raise an exception.
# 2) if signature_name is not specified, check if signature_map only
# contains one entry. If so, return the only signature.
# 3) Otherwise, use the default signature_name and do 1).
if not signature_name and len(self.signature_map) == 1:
return (list(self.signature_map.keys())[0],
list(self.signature_map.values())[0])
key = (signature_name or
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)
if key in self.signature_map:
return key, self.signature_map[key]
else:
raise PredictionError(
PredictionError.INVALID_INPUTS,
"No signature found for signature key %s." % signature_name)
# (TODO): Move this to a Tensorflow specific library.
class SessionClient(TensorFlowClient):
"""A client for Prediction that uses Session.run."""
def __init__(self, session, signature_map):
self._session = session
super(SessionClient, self).__init__(signature_map)
def predict(self, inputs, stats=None,
signature_name=None, **unused_kwargs):
"""Produces predictions for the given inputs.
Args:
inputs: a dict mapping input names to values
stats: Stats object for recording timing information.
signature_name: name of SignatureDef to use in this prediction
**unused_kwargs: placeholder, pre/postprocess may have additional args
Returns:
A dict mapping output names to output values, similar to the input
dict.
"""
stats = stats or Stats()
stats[ENGINE] = "SessionRun"
stats[FRAMEWORK] = TENSORFLOW_FRAMEWORK_NAME
with stats.time(UNALIAS_TIME):
_, signature = self.get_signature(signature_name)
fetches = [output.name for output in signature.outputs.values()]
try:
unaliased = {
signature.inputs[key].name: val
for key, val in six.iteritems(inputs)
}
except Exception as e:
raise PredictionError(PredictionError.INVALID_INPUTS,
"Input mismatch: " + str(e))
with stats.time(SESSION_RUN_TIME):
try:
# TODO(): measure the actual session.run() time, even in the
# case of ModelServer.
outputs = self._session.run(fetches=fetches, feed_dict=unaliased)
except Exception as e:
logging.error("Exception during running the graph: " + str(e))
raise PredictionError(PredictionError.FAILED_TO_RUN_MODEL,
"Exception during running the graph: " + str(e))
with stats.time(ALIAS_TIME):
return dict(zip(six.iterkeys(signature.outputs), outputs))
def load_model_class(client, model_path):
"""Loads in the user specified custom Model class.
Args:
client: An instance of ModelServerClient for performing prediction.
model_path: the path to either session_bundle or SavedModel
Returns:
An instance of a Model.
Returns None if the user didn't specify the name of the custom
python class to load in the create_version_request.
Raises:
PredictionError: for any of the following:
(1) the user provided python model class cannot be found
(2) if the loaded class does not implement the Model interface.
"""
model_class = load_custom_class(UserClassType.model_class)
if not model_class:
return None
model_instance = model_class.from_client(client, model_path)
_validate_model_class(model_instance)
return model_instance
def load_custom_class(class_type):
"""Loads in the user specified custom class.
Args:
class_type: An instance of UserClassType specifying what type of class to
load.
Returns:
An instance of a class specified by the user in the `create_version_request`
or None if no such class was specified.
Raises:
PredictionError: if the user provided python class cannot be found.
"""
create_version_json = os.environ.get("create_version_request")
if not create_version_json:
return None
create_version_request = json.loads(create_version_json)
if not create_version_request:
return None
version = create_version_request.get("version")
if not version:
return None
class_name = version.get(class_type.name)
if not class_name:
return None
custom_class = pydoc.locate(class_name)
# TODO(): right place to generate errors?
if not custom_class:
package_uris = [str(s) for s in version.get("package_uris")]
raise PredictionError(PredictionError.INVALID_USER_CODE,
"%s cannot be found. Please make sure "
"(1) %s is the fully qualified function "
"name, and (2) %s uses the correct package "
"name as provided by the package_uris: %s" %
(class_name, class_type.name, class_type.name,
package_uris))
return custom_class
def _validate_model_class(user_class):
"""Validates a user provided instance of a Model implementation.
Args:
user_class: An instance of a Model implementation.
Raises:
PredictionError: for any of the following:
(1) the user model class does not have the correct method signatures for
the predict method
"""
user_class_name = type(user_class).__name__
# Can't use isinstance() because the user doesn't have access to our Model
# class. We can only inspect the user_class to check if it conforms to the
# Model interface.
if not hasattr(user_class, "predict"):
raise PredictionError(PredictionError.INVALID_USER_CODE,
"The provided model class, %s, is missing the "
"required predict method." % user_class_name)
# Check the predict method has the correct number of arguments
user_signature = inspect.getargspec(user_class.predict)[0]
model_signature = inspect.getargspec(Model.predict)[0]
user_predict_num_args = len(user_signature)
predict_num_args = len(model_signature)
  if predict_num_args != user_predict_num_args:
raise PredictionError(PredictionError.INVALID_USER_CODE,
"The provided model class, %s, has a predict method "
"with an invalid signature. Expected signature: %s "
"User signature: %s" %
(user_class_name, model_signature, user_signature))
# TODO(user): Make this generic so it can load any Processor class, not just
# from the create_version_request.
def _new_processor_class(model_path=None):
user_processor_cls = load_custom_class(UserClassType.processor_class)
if user_processor_cls:
user_preprocess_fn = getattr(user_processor_cls, PREPROCESS_KEY, None)
user_postprocess_fn = getattr(user_processor_cls, POSTPROCESS_KEY, None)
user_from_model_path_fn = getattr(user_processor_cls, FROM_MODEL_KEY, None)
_validate_fn_signature(user_preprocess_fn,
["self", "instances"],
PREPROCESS_KEY, user_processor_cls.__name__)
_validate_fn_signature(user_postprocess_fn,
["self", "instances"],
POSTPROCESS_KEY, user_processor_cls.__name__)
_validate_fn_signature(user_from_model_path_fn, ["cls", "model_path"],
FROM_MODEL_KEY, user_processor_cls.__name__)
if user_from_model_path_fn:
return user_from_model_path_fn(model_path) # pylint: disable=not-callable
# Call the constructor if no `from_model_path` method provided.
return user_processor_cls()
def _validate_fn_signature(fn, required_arg_names, expected_fn_name, cls_name):
if not fn:
return
if not callable(fn):
raise PredictionError(
PredictionError.INVALID_USER_CODE,
"The provided %s function in the Processor class "
"%s is not callable." % (expected_fn_name, cls_name))
for arg in required_arg_names:
if arg not in inspect.getargspec(fn).args:
raise PredictionError(
PredictionError.INVALID_USER_CODE,
"The provided %s function in the Processor class "
"has an invalid signature. It should take %s as arguments but "
"takes %s" %
(fn.__name__, required_arg_names, inspect.getargspec(fn).args))
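# Illustrative sketch (not part of this module): a user-provided Processor
# class that passes the signature checks above. The class name and method
# bodies are made up for illustration; only the method names and the required
# arguments ("self"/"instances", "cls"/"model_path") come from this code.
#
#   class MyProcessor(object):
#
#     @classmethod
#     def from_model_path(cls, model_path):
#       # E.g. load vocabularies or scalers stored next to the model.
#       return cls()
#
#     def preprocess(self, instances, **kwargs):
#       # Transform raw instances before they are fed to the model.
#       return instances
#
#     def postprocess(self, instances, **kwargs):
#       # Transform model outputs before they are returned to the caller.
#       return instances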
# (TODO): Move this to a Tensorflow specific library.
class TensorFlowModel(BaseModel):
"""The default implementation of the Model interface that uses TensorFlow.
This implementation optionally performs preprocessing and postprocessing
using the provided functions. These functions accept a single instance
as input and produce a corresponding output to send to the prediction
client.
"""
def __init__(self, client):
"""Constructs a TensorFlowModel.
Args:
client: An instance of ModelServerClient or SessionClient.
"""
super(TensorFlowModel, self).__init__(client)
self._preprocess_fn = None
self._postprocess_fn = None
processor_cls = _new_processor_class()
if processor_cls:
self._preprocess_fn = getattr(processor_cls, PREPROCESS_KEY, None)
self._postprocess_fn = getattr(processor_cls, POSTPROCESS_KEY, None)
def _get_columns(self, instances, stats, signature):
"""Columnarize the instances, appending input_name, if necessary.
Instances are the same instances passed to the predict() method. Since
models with a single input can accept the raw input without the name,
we create a dict here with that name.
This list of instances is then converted into a column-oriented format:
The result is a dictionary mapping input name to a list of values for just
that input (one entry per row in the original instances list).
Args:
instances: the list of instances as provided to the predict() method.
stats: Stats object for recording timing information.
signature: SignatureDef for the current request.
Returns:
A dictionary mapping input names to their values.
Raises:
PredictionError: if an error occurs during prediction.
"""
with stats.time(COLUMNARIZE_TIME):
columns = columnarize(instances)
for k, v in six.iteritems(columns):
if k not in signature.inputs.keys():
raise PredictionError(
PredictionError.INVALID_INPUTS,
"Unexpected tensor name: %s" % k)
        # Detect whether the user omitted an input in one or more instances.
# TODO(): perform this check in columnarize?
if isinstance(v, list) and len(v) != len(instances):
raise PredictionError(
PredictionError.INVALID_INPUTS,
"Input %s was missing in at least one input instance." % k)
return columns
# TODO(): can this be removed?
def is_single_input(self, signature):
"""Returns True if the graph only has one input tensor."""
return len(signature.inputs) == 1
# TODO(): can this be removed?
def is_single_string_input(self, signature):
"""Returns True if the graph only has one string input tensor."""
if self.is_single_input(signature):
dtype = list(signature.inputs.values())[0].dtype
return dtype == dtypes.string.as_datatype_enum
return False
def get_signature(self, signature_name=None):
return self._client.get_signature(signature_name)
def preprocess(self, instances, stats=None, signature_name=None, **kwargs):
_, signature = self.get_signature(signature_name)
preprocessed = self._canonicalize_input(instances, signature)
if self._preprocess_fn:
try:
preprocessed = self._preprocess_fn(preprocessed, **kwargs)
except Exception as e:
logging.error("Exception during preprocessing: " + str(e))
raise PredictionError(PredictionError.INVALID_INPUTS,
"Exception during preprocessing: " + str(e))
return self._get_columns(preprocessed, stats, signature)
def _canonicalize_input(self, instances, signature):
"""Preprocess single-input instances to be dicts if they aren't already."""
# The instances should be already (b64-) decoded here.
if not self.is_single_input(signature):
return instances
tensor_name = list(signature.inputs.keys())[0]
return canonicalize_single_tensor_input(instances, tensor_name)
def postprocess(self, predicted_output, original_input=None, stats=None,
signature_name=None, **kwargs):
"""Performs the necessary transformations on the prediction results.
The transformations include rowifying the predicted results, and also
making sure that each input/output is a dict mapping input/output alias to
the value for that input/output.
Args:
predicted_output: list of instances returned by the predict() method on
preprocessed instances.
original_input: List of instances, before any pre-processing was applied.
stats: Stats object for recording timing information.
signature_name: the signature name to find out the signature.
**kwargs: Additional keyword arguments for postprocessing
Returns:
A list which is a dict mapping output alias to the output.
"""
_, signature = self.get_signature(signature_name)
with stats.time(ROWIFY_TIME):
      # When the returned element only contains one result (batch size == 1),
      # tensorflow's session.run() will return a scalar directly instead of
      # a list. So we need to listify that scalar.
# TODO(): verify this behavior is correct.
def listify(value):
if not hasattr(value, "shape"):
return np.asarray([value], dtype=np.object)
elif not value.shape:
# TODO(): pretty sure this is a bug that only exists because
# samples like iris have a bug where they use tf.squeeze which removes
# the batch dimension. The samples should be fixed.
return np.expand_dims(value, axis=0)
else:
return value
postprocessed_outputs = {
alias: listify(val)
for alias, val in six.iteritems(predicted_output)
}
postprocessed_outputs = rowify(postprocessed_outputs)
postprocessed_outputs = list(postprocessed_outputs)
if self._postprocess_fn:
try:
postprocessed_outputs = self._postprocess_fn(postprocessed_outputs,
**kwargs)
except Exception as e:
logging.error("Exception during postprocessing: %s", e)
raise PredictionError(PredictionError.INVALID_INPUTS,
"Exception during postprocessing: " + str(e))
with stats.time(ENCODE_TIME):
try:
postprocessed_outputs = encode_base64(
postprocessed_outputs, signature.outputs)
except PredictionError as e:
logging.error("Encode base64 failed: %s", e)
raise PredictionError(PredictionError.INVALID_OUTPUTS,
"Prediction failed during encoding instances: {0}"
.format(e.error_detail))
except ValueError as e:
logging.error("Encode base64 failed: %s", e)
raise PredictionError(PredictionError.INVALID_OUTPUTS,
"Prediction failed during encoding instances: {0}"
.format(e))
except Exception as e: # pylint: disable=broad-except
logging.error("Encode base64 failed: %s", e)
raise PredictionError(PredictionError.INVALID_OUTPUTS,
"Prediction failed during encoding instances")
return postprocessed_outputs
@classmethod
def from_client(cls, client, unused_model_path, **unused_kwargs):
"""Creates a TensorFlowModel from a SessionClient and model data files."""
return cls(client)
@property
def signature_map(self):
return self._client.signature_map
# This class is specific to Scikit-learn, and should be moved to a separate
# module. However due to gcloud's complicated copying mechanism we need to keep
# things in one file for now.
class SklearnClient(PredictionClient):
"""A loaded scikit-learn model to be used for prediction."""
def __init__(self, predictor):
self._predictor = predictor
def predict(self, inputs, stats=None, **kwargs):
stats = stats or Stats()
stats[FRAMEWORK] = SCIKIT_LEARN_FRAMEWORK_NAME
stats[ENGINE] = SCIKIT_LEARN_FRAMEWORK_NAME
try:
return self._predictor.predict(inputs, **kwargs)
except Exception as e:
logging.exception("Exception while predicting with sklearn model.")
raise PredictionError(PredictionError.FAILED_TO_RUN_MODEL,
"Exception during sklearn prediction: " + str(e))
# (TODO) This class is specific to Xgboost, and should be moved to a
# separate module. However due to gcloud's complicated copying mechanism we need
# to keep things in one file for now.
class XgboostClient(PredictionClient):
"""A loaded xgboost model to be used for prediction."""
def __init__(self, booster):
self._booster = booster
def predict(self, inputs, stats=None, **kwargs):
stats = stats or Stats()
stats[FRAMEWORK] = XGBOOST_FRAMEWORK_NAME
stats[ENGINE] = XGBOOST_FRAMEWORK_NAME
# TODO(): Move this to the top once b/64574886 is resolved.
# Before then, it would work in production since we install xgboost in
# the Dockerfile, but the problem is the unit test that will fail to build
# and run since xgboost can not be added as a dependency to this target.
import xgboost as xgb # pylint: disable=g-import-not-at-top
try:
inputs_dmatrix = xgb.DMatrix(inputs)
except Exception as e:
logging.exception("Could not initialize DMatrix from inputs: ")
raise PredictionError(
PredictionError.FAILED_TO_RUN_MODEL,
"Could not initialize DMatrix from inputs: " + str(e))
try:
return self._booster.predict(inputs_dmatrix, **kwargs)
except Exception as e:
logging.exception("Exception during predicting with xgboost model: ")
raise PredictionError(PredictionError.FAILED_TO_RUN_MODEL,
"Exception during xgboost prediction: " + str(e))
# (TODO) Move this to a separate Scikit-learn specific library.
class SklearnModel(BaseModel):
"""The implementation of Scikit-learn Model.
"""
def __init__(self, client):
super(SklearnModel, self).__init__(client)
self._user_processor = _new_processor_class()
if self._user_processor and hasattr(self._user_processor, PREPROCESS_KEY):
self._preprocess = self._user_processor.preprocess
else:
self._preprocess = self._null_processor
if self._user_processor and hasattr(self._user_processor, POSTPROCESS_KEY):
self._postprocess = self._user_processor.postprocess
else:
self._postprocess = self._null_processor
def predict(self, instances, stats=None, **kwargs):
"""Override the predict method to remove TF-specific args from kwargs."""
kwargs.pop(SIGNATURE_KEY, None)
return super(SklearnModel, self).predict(instances, stats, **kwargs)
def preprocess(self, instances, stats=None, **kwargs):
# TODO() Consider changing this to a more generic type.
return self._preprocess(np.array(instances), **kwargs)
def postprocess(self, predicted_outputs, original_input=None, stats=None,
**kwargs):
# TODO() Consider changing this to a more generic type.
post_processed = self._postprocess(predicted_outputs, **kwargs)
if isinstance(post_processed, np.ndarray):
return post_processed.tolist()
if isinstance(post_processed, list):
return post_processed
raise PredictionError(
PredictionError.INVALID_OUTPUTS,
"Bad output type returned after running %s"
"The post-processing function should return either "
"a numpy ndarray or a list."
% self._postprocess.__name__)
def _null_processor(self, instances, **unused_kwargs):
return instances
# (TODO)Move this to a XGboost specific library.
class XGBoostModel(SklearnModel):
"""The implementation of XGboost Model.
"""
def __init__(self, client):
super(XGBoostModel, self).__init__(client)
def create_sklearn_client(model_path, unused_tags):
"""Returns a prediction client for the corresponding sklearn model."""
logging.info("Loading the scikit-learn model file from %s", model_path)
sklearn_predictor = _load_joblib_or_pickle_model(model_path)
if not sklearn_predictor:
error_msg = "Could not find either {} or {} in {}".format(
MODEL_FILE_NAME_JOBLIB, MODEL_FILE_NAME_PICKLE, model_path)
logging.critical(error_msg)
raise PredictionError(PredictionError.FAILED_TO_LOAD_MODEL, error_msg)
# Check if the loaded python object is an sklearn model/pipeline.
# Ex. type(sklearn_predictor).__module__ -> 'sklearn.svm.classes'
# type(pipeline).__module__ -> 'sklearn.pipeline'
if "sklearn" not in type(sklearn_predictor).__module__:
error_msg = ("Invalid model type detected: {}.{}. Please make sure the "
"model file is an exported sklearn model or pipeline.").format(
type(sklearn_predictor).__module__,
type(sklearn_predictor).__name__)
logging.critical(error_msg)
raise PredictionError(PredictionError.FAILED_TO_LOAD_MODEL, error_msg)
return SklearnClient(sklearn_predictor)
def create_sklearn_model(model_path, unused_flags):
"""Returns a sklearn model from the given model_path."""
return SklearnModel(create_sklearn_client(model_path, None))
def create_xgboost_client(model_path, unused_tags):
"""Returns a prediction client for the corresponding xgboost model."""
logging.info("Loading the xgboost model from %s", model_path)
booster = _load_joblib_or_pickle_model(model_path) or _load_xgboost_model(
model_path)
if not booster:
error_msg = "Could not find {}, {}, or {} in {}".format(
MODEL_FILE_NAME_JOBLIB, MODEL_FILE_NAME_PICKLE, MODEL_FILE_NAME_BST,
model_path)
logging.critical(error_msg)
raise PredictionError(PredictionError.FAILED_TO_LOAD_MODEL, error_msg)
# Check if the loaded python object is an xgboost model.
# Expect type(booster).__module__ -> 'xgboost.core'
if "xgboost" not in type(booster).__module__:
error_msg = ("Invalid model type detected: {}.{}. Please make sure the "
"model file is an exported xgboost model.").format(
type(booster).__module__,
type(booster).__name__)
logging.critical(error_msg)
raise PredictionError(PredictionError.FAILED_TO_LOAD_MODEL, error_msg)
return XgboostClient(booster)
def _load_xgboost_model(model_path):
"""Loads an xgboost model from GCS or local.
Args:
model_path: path to the directory containing the xgboost model.bst file.
This path can be either a local path or a GCS path.
Returns:
A xgboost.Booster with the model at model_path loaded.
Raises:
PredictionError: If there is a problem while loading the file.
"""
# TODO(): Move this to the top once b/64574886 is resolved. Before
# then, it would work in production since we install xgboost in the
# Dockerfile, but the problem is the unit test that will fail to build and run
# since xgboost can not be added as a dependency to this target.
import xgboost as xgb # pylint: disable=g-import-not-at-top
model_file = os.path.join(model_path, MODEL_FILE_NAME_BST)
if not file_io.file_exists(model_file):
return None
try:
if model_file.startswith("gs://"):
with file_io.FileIO(model_file, mode="rb") as f:
# TODO(): Load model in memory twice. Use readinto if/when
# that becomes available in FileIO. Or copy model locally before
# loading.
model_buf = bytearray(f.read())
return xgb.Booster(model_file=model_buf)
else:
return xgb.Booster(model_file=model_file)
except xgb.core.XGBoostError as e:
error_msg = "Could not load the model: {}. {}.".format(
os.path.join(model_path, MODEL_FILE_NAME_BST), str(e))
logging.critical(error_msg)
raise PredictionError(PredictionError.FAILED_TO_LOAD_MODEL, error_msg)
def create_xgboost_model(model_path, unused_flags):
"""Returns a xgboost model from the given model_path."""
return XGBoostModel(create_xgboost_client(model_path, None))
# (TODO): Move this to a Tensorflow specific library.
def create_tf_session_client(model_dir, tags):
return SessionClient(*load_model(model_dir, tags))
def _load_joblib_or_pickle_model(model_path):
"""Loads either a .joblib or .pkl file from GCS or from local.
Loads one of MODEL_FILE_NAME_JOBLIB or MODEL_FILE_NAME_PICKLE files if they
exist. This is used for both sklearn and xgboost.
Arguments:
model_path: The path to the directory that contains the model file. This
path can be either a local path or a GCS path.
Raises:
PredictionError: If there is a problem while loading the file.
Returns:
A loaded scikit-learn or xgboost predictor object or None if neither
MODEL_FILE_NAME_JOBLIB nor MODEL_FILE_NAME_PICKLE files are found.
"""
try:
# If we put this at the top, we need to add a dependency to sklearn
# anywhere that prediction_lib is called.
from sklearn.externals import joblib # pylint: disable=g-import-not-at-top
except Exception as e:
error_msg = "Could not import sklearn module."
logging.critical(error_msg)
raise PredictionError(PredictionError.FAILED_TO_LOAD_MODEL, error_msg)
try:
if file_io.file_exists(os.path.join(model_path, MODEL_FILE_NAME_JOBLIB)):
model_file_name = os.path.join(model_path, MODEL_FILE_NAME_JOBLIB)
logging.info("Loading model %s using joblib.", model_file_name)
with file_io.FileIO(model_file_name, mode="rb") as f:
return joblib.load(f)
elif file_io.file_exists(os.path.join(model_path, MODEL_FILE_NAME_PICKLE)):
model_file_name = os.path.join(model_path, MODEL_FILE_NAME_PICKLE)
logging.info("Loading model %s using pickle.", model_file_name)
with file_io.FileIO(model_file_name, "rb") as f:
return pickle.loads(f.read())
return None
except Exception as e:
error_msg = (
"Could not load the model: {}. {}. Please make sure the model was "
"exported using python {}. Otherwise, please specify the correct "
"'python_version' parameter when deploying the model. Currently, "
"'python_version' accepts 2.7 and 3.5."
).format(model_file_name, str(e), sys.version_info[0])
logging.critical(error_msg)
raise PredictionError(PredictionError.FAILED_TO_LOAD_MODEL, error_msg)
_FRAMEWORK_TO_MODEL_MAP = {
TENSORFLOW_FRAMEWORK_NAME: (TensorFlowModel, create_tf_session_client),
SCIKIT_LEARN_FRAMEWORK_NAME: (SklearnModel, create_sklearn_client),
XGBOOST_FRAMEWORK_NAME: (XGBoostModel, create_xgboost_client)
}
def create_model(client,
model_path,
framework=TENSORFLOW_FRAMEWORK_NAME,
**unused_kwargs):
"""Creates and returns the appropriate model.
Creates and returns a Model if no user specified model is
provided. Otherwise, the user specified model is imported, created, and
returned.
Args:
client: An instance of PredictionClient for performing prediction.
model_path: The path to the exported model (e.g. session_bundle or
SavedModel)
framework: The framework used to train the model.
Returns:
An instance of the appropriate model class.
"""
  if framework == TENSORFLOW_FRAMEWORK_NAME:
logging.info("Importing tensorflow.contrib in create_model")
import tensorflow.contrib # pylint: disable=redefined-outer-name, unused-variable, g-import-not-at-top
model_cls = _FRAMEWORK_TO_MODEL_MAP[framework][0]
return (load_model_class(client, model_path) or
model_cls(client))
def create_client(framework, model_path, tags):
framework = framework or TENSORFLOW_FRAMEWORK_NAME
create_client_fn = _FRAMEWORK_TO_MODEL_MAP[framework][1]
return create_client_fn(model_path, tags)
def decode_base64(data):
if isinstance(data, list):
return [decode_base64(val) for val in data]
elif isinstance(data, dict):
if six.viewkeys(data) == {"b64"}:
return base64.b64decode(data["b64"])
else:
return {k: decode_base64(v) for k, v in six.iteritems(data)}
else:
return data
def encode_base64(instances, outputs_map):
"""Encodes binary data in a JSON-friendly way."""
if not isinstance(instances, list):
raise ValueError("only lists allowed in output; got %s" %
(type(instances),))
if not instances:
return instances
first_value = instances[0]
if not isinstance(first_value, dict):
if len(outputs_map) != 1:
return ValueError("The first instance was a string, but there are "
"more than one output tensor, so dict expected.")
# Only string tensors whose name ends in _bytes needs encoding.
tensor_name, tensor_info = outputs_map.items()[0]
tensor_type = tensor_info.dtype
if tensor_type == dtypes.string:
instances = _encode_str_tensor(instances, tensor_name)
return instances
encoded_data = []
for instance in instances:
encoded_instance = {}
for tensor_name, tensor_info in six.iteritems(outputs_map):
tensor_type = tensor_info.dtype
tensor_data = instance[tensor_name]
if tensor_type == dtypes.string:
tensor_data = _encode_str_tensor(tensor_data, tensor_name)
encoded_instance[tensor_name] = tensor_data
encoded_data.append(encoded_instance)
return encoded_data
def _encode_str_tensor(data, tensor_name):
"""Encodes tensor data of type string.
  Data is bytes in python 3 and a string in python 2. Base64 encode the data
  if the tensor name ends in '_bytes', otherwise convert the data to a string.
Args:
data: Data of the tensor, type bytes in python 3, string in python 2.
tensor_name: The corresponding name of the tensor.
Returns:
JSON-friendly encoded version of the data.
"""
if isinstance(data, list):
return [_encode_str_tensor(val, tensor_name) for val in data]
if tensor_name.endswith("_bytes"):
return {"b64": compat.as_text(base64.b64encode(data))}
else:
return compat.as_text(data)
def local_predict(
model_dir=None,
tags=(tag_constants.SERVING,),
signature_name=None,
instances=None,
framework=TENSORFLOW_FRAMEWORK_NAME):
"""Run a prediction locally."""
instances = decode_base64(instances)
client = create_client(framework, model_dir, tags)
model = create_model(client, model_dir, framework)
_, predictions = model.predict(instances, signature_name=signature_name)
return {"predictions": list(predictions)}
| 8,515 |
0 | kubeflow_public_repos | kubeflow_public_repos/fate-operator/go.mod | module github.com/kubeflow/fate-operator
go 1.15
require (
github.com/FederatedAI/KubeFATE/k8s-deploy v0.0.0-20210225010756-06ee1daad13c
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d // indirect
github.com/bitly/go-simplejson v0.5.0 // indirect
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect
github.com/bshuster-repo/logrus-logstash-hook v1.0.0 // indirect
github.com/bugsnag/bugsnag-go v2.1.0+incompatible // indirect
github.com/bugsnag/panicwrap v1.3.1 // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
github.com/garyburd/redigo v1.6.2 // indirect
github.com/go-logr/logr v0.4.0
github.com/gofrs/uuid v4.0.0+incompatible // indirect
github.com/gorilla/handlers v1.5.1 // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
github.com/onsi/ginkgo v1.15.1
github.com/onsi/gomega v1.11.0
github.com/yvasiyarov/go-metrics v0.0.0-20150112132944-c25f46c4b940 // indirect
github.com/yvasiyarov/gorelic v0.0.7 // indirect
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20160601141957-9c099fbc30e9 // indirect
helm.sh/helm/v3 v3.5.2 // indirect
k8s.io/api v0.20.4
k8s.io/apimachinery v0.20.4
k8s.io/client-go v0.20.4
sigs.k8s.io/controller-runtime v0.8.3
)
replace (
github.com/docker/distribution => github.com/docker/distribution v2.7.1+incompatible
github.com/docker/docker => github.com/moby/moby v0.7.3-0.20190826074503-38ab9da00309
github.com/googleapis/gnostic => github.com/googleapis/gnostic v0.5.4
)
| 8,516 |
0 | kubeflow_public_repos | kubeflow_public_repos/fate-operator/README.md | # fate-operator: Kubernetes Operator for FATE (Federated AI Technology Enabler)
## Overview
The FATE-Operator makes it easy to deploy and run federated machine learning (FML) jobs on Kubernetes. It is a early version, all suggestions and feature requests are valueables for us. Raising issues is appreciate.
## Background
Federated machine learning (FML) is a machine learning setting where many clients (e.g. mobile devices or organizations) collaboratively train a model under the coordination of a central server while keeping the training data decentralized. Only the encrypted mediate parameters are exchanged between clients with MPC or homomorphic encryption.

FML has received significant interest recently, because of its effectiveness to solve data silos and data privacy preserving problems. Companies participated in federated machine learning include 4Paradigm, Ant Financial, Data Republic, Google, Huawei, Intel, JD.com, Microsoft, Nvidia, OpenMind, Pingan Technology, Sharemind, Tencent, VMware, Webank etc.
Depending on the differences in features and sample data space, federated machine learning can be classified into _horizontal federated machine learning_, _vertical federated machine learning_ and _federated transfer learning_. Horizontal federated machine learning is also called sample-based federated machine learning, which means data sets share the same feature space but have different samples. With horizontal federated machine learning, we can gather the relatively small or partial datasets into a big one to increase the performance of trained models. Vertical federated machine learning is applicable to the cases where there are two datasets with different feature space but share same sample ID. With vertical federated machine learning we can train a model with attributes from different organizations for a full profile. Vertical federated machine learning is required to redesign most of machine learning algorithms. Federated transfer learning applies to scenarios where there are two datasets with different features space but also different samples.
[FATE (Federated AI Technology Enabler)](https://fate.fedai.org) is an open source project initialized by Webank, [now hosted at the Linux Foundation](https://fate.fedai.org/2019/09/18/first-digital-only-bank-in-china-joins-linux-foundation/). FATE is the only open source FML framework that supports both horizontal and vertical FML currently. The architecture design of FATE is focused on providing FML platform for enterprises. [KubeFATE](https://github.com/FederatedAI/KubeFATE) is an open source project to deploy FATE on Kubernetes and is a proven effective solution for FML use cases.
More technologies of Federated machine learning, please refer to [Reference section](#reference)
**The design proposal of fate-operator can be found in [kubeflow/community/fate-operator-proposal.md](https://github.com/kubeflow/community/blob/master/proposals/fate-operator-proposal.md).**
## Quick user guide
### Installation
The fate-operator can be installed with the following steps.
#### Install CRDs to Kubernetes
```bash
make install
```
#### Uninstall CRDs from Kubernetes
```bash
make uninstall
```
#### Building controller images
The Docker images are built and pushed to Dockerhub.
[fate-operator](https://hub.docker.com/r/federatedai/fate-controller)
Alternatively, we can build the images manually by commands,
```bash
make docker-build
```
#### Deploying controller
```bash
make deploy
```
### Deploying KubeFATE
KubeFATE is the infrastructure management service for multiple FATE clusters in one organization, and it only needs to be deployed once. To deploy a KubeFATE service, use a YAML manifest such as [app_v1beta1_kubefate.yaml](./config/samples/app_v1beta1_kubefate.yaml):
```bash
kubectl create -f ./config/samples/app_v1beta1_kubefate.yaml
```
### Deploying FATE
FATE is the cluster in which we run FML jobs. To deploy a FATE cluster, use a YAML manifest such as [app_v1beta1_fatecluster.yaml](./config/samples/app_v1beta1_fatecluster.yaml):
```bash
kubectl create -f ./config/samples/app_v1beta1_fatecluster.yaml
```
### Submitting an FML Job
Once the KubeFATE service and a FATE cluster are deployed, we can submit an FML job with a `FateJob` config file. In the current version, a FATE job is defined by two parts: the `DSL pipeline` and the `Module Config`. For details, refer to [app_v1beta1_fatejob.yaml](./config/samples/app_v1beta1_fatejob.yaml).
```bash
kubectl create -f ./config/samples/app_v1beta1_fatejob.yaml
```
In this example, only a `secure add` operation is processed; a sketch of such a manifest is shown below. For more examples, refer to [FATE Examples](https://github.com/FederatedAI/FATE/tree/master/examples/federatedml-1.x-examples). In each example, the file ending with `_dsl`, e.g. <https://github.com/FederatedAI/FATE/blob/master/examples/federatedml-1.x-examples/hetero_linear_regression/test_hetero_linr_cv_job_dsl.json>, is the job pipeline and is what goes in the `pipeline` field; the file ending with `_conf`, e.g. <https://github.com/FederatedAI/FATE/blob/master/examples/federatedml-1.x-examples/hetero_linear_regression/test_hetero_linr_cv_job_conf.json>, is the configuration of each component and is what goes in the `modulesConf` field.
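For orientation, the sketch below shows roughly what such a manifest looks like. The `apiVersion` and `kind` follow this project's CRD group (`app`, domain `kubefate.net`, version `v1beta1`); the `spec` field names and the embedded JSON are illustrative assumptions, so take the exact schema from [app_v1beta1_fatejob.yaml](./config/samples/app_v1beta1_fatejob.yaml).
```yaml
apiVersion: app.kubefate.net/v1beta1
kind: FateJob
metadata:
  name: fatejob-sample
spec:
  # Assumption: a reference to the FateCluster the job should run on.
  fateClusterRef: fatecluster-sample
  jobConf:
    # The job's DSL pipeline, as a JSON string.
    pipeline: |-
      {"components": {"secure_add_example_0": {"module": "SecureAddExample"}}}
    # The per-component configuration, as a JSON string.
    modulesConf: |-
      {"initiator": {"role": "guest", "party_id": 9999}}
```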
### Checking created resource status
The status of the created resources can be monitored with the `kubectl get` command:
```bash
kubectl get kubefate,fatecluster,fatejob -A -o yaml
```
## Reference
1. Qiang Yang, Yang Liu, Tianjian Chen, and Yongxin Tong. Federated machine learning: Concept and applications. CoRR, abs/1902.04885, 2019. URL <http://arxiv.org/abs/1902.04885>
2. Peter Kairouz et al. Advances and open problems in federated learning. arXiv preprint arXiv:1912.04977
| 8,517 |
0 | kubeflow_public_repos | kubeflow_public_repos/fate-operator/Makefile |
# Image URL to use all building/pushing image targets
IMG ?= federatedai/fate-controller:latest
# Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
CRD_OPTIONS ?= "crd:trivialVersions=true"
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
GOBIN=$(shell go env GOPATH)/bin
else
GOBIN=$(shell go env GOBIN)
endif
all: manager
# Run tests
test: generate fmt vet manifests
go test ./... -coverprofile cover.out
# Build manager binary
manager: generate fmt vet
go build -o bin/fate-operator main.go
# Run against the configured Kubernetes cluster in ~/.kube/config
run: generate fmt vet manifests
go run ./main.go
# Install CRDs into a cluster
install: manifests
kustomize build config/crd | kubectl apply -f -
# Uninstall CRDs from a cluster
uninstall: manifests
kustomize build config/crd | kubectl delete -f -
# Deploy controller in the configured Kubernetes cluster in ~/.kube/config
deploy: manifests
cd config/manager && kustomize edit set image controller=${IMG}
kustomize build config/default | kubectl apply -f -
# Generate manifests e.g. CRD, RBAC etc.
manifests: controller-gen
$(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases
# Run go fmt against code
fmt:
go fmt ./...
# Run go vet against code
vet:
go vet ./...
# Generate code
generate: controller-gen
$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
# Build the docker image
docker-build: test
docker build . -t ${IMG}
# Build from codes downloaded directly
docker-build-without-test:
docker build . -t ${IMG}
# Push the docker image
docker-push:
docker push ${IMG}
# find or download controller-gen
# download controller-gen if necessary
controller-gen:
ifeq (, $(shell which controller-gen))
@{ \
set -e ;\
CONTROLLER_GEN_TMP_DIR=$$(mktemp -d) ;\
cd $$CONTROLLER_GEN_TMP_DIR ;\
go mod init tmp ;\
go get sigs.k8s.io/controller-tools/cmd/[email protected] ;\
rm -rf $$CONTROLLER_GEN_TMP_DIR ;\
}
CONTROLLER_GEN=$(GOBIN)/controller-gen
else
CONTROLLER_GEN=$(shell which controller-gen)
endif
| 8,518 |
0 | kubeflow_public_repos | kubeflow_public_repos/fate-operator/LICENSE | Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 8,519 |
0 | kubeflow_public_repos | kubeflow_public_repos/fate-operator/main.go | /*
* Copyright 2019-2023 VMware, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* you may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"flag"
"os"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
appv1beta1 "github.com/kubeflow/fate-operator/api/v1beta1"
"github.com/kubeflow/fate-operator/controllers"
// +kubebuilder:scaffold:imports
)
var (
scheme = runtime.NewScheme()
setupLog = ctrl.Log.WithName("setup")
)
func init() {
_ = clientgoscheme.AddToScheme(scheme)
_ = appv1beta1.AddToScheme(scheme)
// +kubebuilder:scaffold:scheme
}
func main() {
var metricsAddr string
var enableLeaderElection bool
flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
"Enable leader election for controller manager. "+
"Enabling this will ensure there is only one active controller manager.")
flag.BoolVar(&controllers.FateOperatorTest, "fate-operator-test", false, "Use when you need to test a fate-operator.")
flag.Parse()
ctrl.SetLogger(zap.New(zap.UseDevMode(true)))
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
Scheme: scheme,
MetricsBindAddress: metricsAddr,
Port: 9443,
LeaderElection: enableLeaderElection,
LeaderElectionID: "8e76d23a.kubefate.net",
})
if err != nil {
setupLog.Error(err, "unable to start manager")
os.Exit(1)
}
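// Register one reconciler per custom resource managed by this operator:
// FateJob, FateCluster, and Kubefate.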
if err = (&controllers.FateJobReconciler{
Client: mgr.GetClient(),
Log: ctrl.Log.WithName("controllers").WithName("FateJob"),
Scheme: mgr.GetScheme(),
Recorder: mgr.GetEventRecorderFor("FateJob-controller"),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "FateJob")
os.Exit(1)
}
if err = (&controllers.FateClusterReconciler{
Client: mgr.GetClient(),
Log: ctrl.Log.WithName("controllers").WithName("FateCluster"),
Scheme: mgr.GetScheme(),
Recorder: mgr.GetEventRecorderFor("FateCluster-controller"),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "FateCluster")
os.Exit(1)
}
if err = (&controllers.KubefateReconciler{
Client: mgr.GetClient(),
Log: ctrl.Log.WithName("controllers").WithName("Kubefate"),
Scheme: mgr.GetScheme(),
Recorder: mgr.GetEventRecorderFor("Kubefate-controller"),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Kubefate")
os.Exit(1)
}
// +kubebuilder:scaffold:builder
setupLog.Info("starting manager")
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
setupLog.Error(err, "problem running manager")
os.Exit(1)
}
}
| 8,520 |
0 | kubeflow_public_repos | kubeflow_public_repos/fate-operator/OWNERS | approvers:
- gaocegege
- jiahaoc1993
- johnugeorge
- LaynePeng
| 8,521 |
0 | kubeflow_public_repos | kubeflow_public_repos/fate-operator/Dockerfile | # Build the manager binary
FROM golang:1.15 as builder
WORKDIR /workspace
# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum
# cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
RUN go mod download
# Copy the go source
COPY main.go main.go
COPY api/ api/
COPY controllers/ controllers/
# Build
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o fate-operator main.go
# Use distroless as a minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
FROM gcr.io/distroless/static:nonroot
WORKDIR /
COPY --from=builder /workspace/fate-operator .
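# Run as the distroless non-root user (uid:gid 65532:65532) so the container does not run as root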
USER 65532:65532
ENTRYPOINT ["/fate-operator"]
| 8,522 |
0 | kubeflow_public_repos | kubeflow_public_repos/fate-operator/PROJECT | domain: kubefate.net
repo: fate-operator
resources:
- group: app
kind: FateJob
version: v1beta1
- group: app
kind: FateCluster
version: v1beta1
- group: app
kind: Kubefate
version: v1beta1
version: "2"
| 8,523 |
0 | kubeflow_public_repos | kubeflow_public_repos/fate-operator/go.sum | bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0 h1:3ithwDMr7/3vpAMXiH+ZQnYbuIsh+OPhUPMFC9enmn0=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/FederatedAI/KubeFATE/k8s-deploy v0.0.0-20210225010756-06ee1daad13c h1:+igGiUmIdIRTg9OaIbg2ZCTdzvSi9I/AuK/2NKQKgIs=
github.com/FederatedAI/KubeFATE/k8s-deploy v0.0.0-20210225010756-06ee1daad13c/go.mod h1:+kQUDZjfQp/BCaGybFE/mcxrXjc+7QkIRG/R1374VUE=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd h1:sjQovDkwrZp8u+gxLtPgKGjk5hCxuy2hrRejBTA9xFU=
github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
github.com/Masterminds/sprig/v3 v3.1.0/go.mod h1:ONGMf7UfYGAbMXCZmQLy8x3lCDIPrEZE/rU8pmrbihA=
github.com/Masterminds/sprig/v3 v3.2.2 h1:17jRggJu518dr3QaafizSXOjKYp94wKfABxUmyxvxX8=
github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk=
github.com/Masterminds/squirrel v1.2.0/go.mod h1:yaPeOnPG5ZRwL9oKdTsO/prlkPbXWZlRVMQ/gGlzIuA=
github.com/Masterminds/squirrel v1.5.0 h1:JukIZisrUXadA9pl3rMkjhiamxiB0cXiu+HGp/Y8cY8=
github.com/Masterminds/squirrel v1.5.0/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10=
github.com/Masterminds/vcs v1.13.1/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA=
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk=
github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
github.com/Microsoft/hcsshim v0.8.14 h1:lbPVK25c1cu5xTLITwpUcxoA9vKrKErASPYygvouJns=
github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs=
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/appleboy/gin-jwt/v2 v2.6.3/go.mod h1:MfPYA4ogzvOcVkRwAxT7quHOtQmVKDpTwxyUrC2DNw0=
github.com/appleboy/gofight/v2 v2.1.2/go.mod h1:frW+U1QZEdDgixycTj4CygQ48yLTUhplt43+Wczp3rw=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY=
github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y=
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70=
github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
github.com/bugsnag/bugsnag-go v2.1.0+incompatible h1:SuqsBHDutts2rZh4swHEWTexxi0F/JZ/6j1rR9BFe7I=
github.com/bugsnag/bugsnag-go v2.1.0+incompatible/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
github.com/bugsnag/panicwrap v1.3.1 h1:pmuhHlhbUV4OOrGDvoiMjHSZzwRcL+I9cIzYKiW4lII=
github.com/bugsnag/panicwrap v1.3.1/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59 h1:qWj4qVYZ95vLWwqyNJCQg7rDsG5wPdze0UaPolH7DUk=
github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.4.3 h1:ijQT13JedHSHrQGWFcGEwzcNKrAGIiZ+jSD5QQG07SY=
github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY=
github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7 h1:6ejg6Lkk8dskcM7wQ28gONkukbQkM4qpj4RnYbpFzrI=
github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE=
github.com/deislabs/oras v0.8.1/go.mod h1:Mx0rMSbBNaNfY9hjpccEnxkOqJL6KGjtxNHPLC4G4As=
github.com/deislabs/oras v0.10.0 h1:Eufbi8zVaULb7vYj5HKM9qv9qw6fJ7P75JSjn//gR0E=
github.com/deislabs/oras v0.10.0/go.mod h1:N1UzE7rBa9qLyN4l8IlBTxc2PkrRcKgWQ3HTJvRnJRE=
github.com/denisenkom/go-mssqldb v0.0.0-20191001013358-cfbb681360f0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/docker/cli v0.0.0-20200130152716-5d0cf8839492/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v20.10.3+incompatible h1:WVEgoV/GpsTK5hruhHdYi79blQ+nmcm+7Ru/ZuiF+7E=
github.com/docker/cli v20.10.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ=
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM=
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ=
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
github.com/garyburd/redigo v1.6.2 h1:yE/pwKCrbLpLpQICzYTeZ7JsTA/C53wFTJHaEtRqniM=
github.com/garyburd/redigo v1.6.2/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gin-contrib/gzip v0.0.1/go.mod h1:fGBJBCdt6qCZuCAOwWuFhBB4OOq9EFqlo5dEaFhhu5w=
github.com/gin-contrib/logger v0.0.2/go.mod h1:Eca5g93bobBwWSNeuLdTqRvNK6btb3XSHdU9ePZ+toM=
github.com/gin-contrib/sse v0.0.0-20170109093832-22d885f9ecc7/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.3.0/go.mod h1:7cKuhb5qV2ggCFctp2fJQ+ErvciLZrIeoOSOm6mUr7Y=
github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM=
github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc=
github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/zapr v0.2.0 h1:v6Ji8yBW77pva6NkJKQdHLAJKrIJKRHz0RXwPqCHSR4=
github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU=
github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk=
github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU=
github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs=
github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk=
github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA=
github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64=
github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
github.com/go-openapi/spec v0.19.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY=
github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc=
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY=
github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
github.com/gobuffalo/envy v1.7.1 h1:OQl5ys5MBea7OGCdvPbBJWRgnhC/fGona6QKfvFeau8=
github.com/gobuffalo/envy v1.7.1/go.mod h1:FurDp9+EDPE4aIUS3ZLyD+7/9fpx7YRt/ukY6jIHf0w=
github.com/gobuffalo/logger v1.0.1 h1:ZEgyRGgAm4ZAhAO45YXMs5Fp+bzGLESFewzAVBMKuTg=
github.com/gobuffalo/logger v1.0.1/go.mod h1:2zbswyIUa45I+c+FLXuWl9zSWEiVuthsk8ze5s8JvPs=
github.com/gobuffalo/packd v0.3.0 h1:eMwymTkA1uXsqxS0Tpoop3Lc0u3kTfiMBE6nKtQU4g4=
github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b36chA3Q=
github.com/gobuffalo/packr/v2 v2.7.1 h1:n3CIW5T17T8v4GGK5sWXLVWJhCz7b5aNLSxW6gYim4o=
github.com/gobuffalo/packr/v2 v2.7.1/go.mod h1:qYEvAazPaVxy7Y7KR0W8qYEE+RymX74kETFqjFoFlOc=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godror/godror v0.13.3/go.mod h1:2ouUT4kdhUBk7TAkHWD4SN0CdI0pgEQbo8FVHhbSKWg=
github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gofrs/flock v0.8.0 h1:MSdYClljsF3PbENUUEx85nkWfJSGfzYI9yEBZOJz6CY=
github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho=
github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8=
github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gnostic v0.5.4 h1:ynbQIWjLw7iv6HAFdixb30U7Uvcmx+f4KlLJpmhkTK0=
github.com/googleapis/gnostic v0.5.4/go.mod h1:TRWw1s4gxBGjSe301Dai3c7wXJAZy57+/6tawkOvqHQ=
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4=
github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY=
github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huandu/xstrings v1.3.1 h1:4jgBlKK6tLKFvO8u5pmYjG91cqytmDCDvGh7ECVFfFs=
github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.1 h1:g39TucaRWyV3dwDO++eEc6qf8TVIQ/Da48WmqjZ3i7E=
github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA=
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw=
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o=
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk=
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw=
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.9.0 h1:L8nSXQQzAYByakOFMTwpjRoHsMJklur4Gi59b6VivR8=
github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM=
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-oci8 v0.0.7/go.mod h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.12.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.14.5/go.mod h1:WVKg1VTActs4Qso6iwGbiFih2UIHo0ENGwNd0Lj+XmI=
github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U=
github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ=
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/moby/moby v0.7.3-0.20190826074503-38ab9da00309 h1:cvy4lBOYN3gKfKj8Lzz5Q9TfviP+L7koMHY7SvkyTKs=
github.com/moby/moby v0.7.3-0.20190826074503-38ab9da00309/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc=
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd h1:aY7OQNf2XqY/JQ6qREWamhI/81os/agb2BAGpcx5yWI=
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks=
github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0=
github.com/naoina/toml v0.1.1 h1:PT/lllxVVN0gzzSqSlHEmP8MJB4MY2U7STGxiouV4X8=
github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E=
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.15.1 h1:DsXNrKujDlkMS9Rsxmd+Fg7S6Kc5lhE+qX8tY6laOxc=
github.com/onsi/ginkgo v1.15.1/go.mod h1:Dd6YFfwBW84ETqqtL0CPyPXillHgY6XhQH3uuCCTr/o=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.11.0 h1:+CqWgvj0OZycCaqclBD1pxKHAU+tOkHmQIWvDHq2aug=
github.com/onsi/gomega v1.11.0/go.mod h1:azGKhqFUon9Vuj0YmTfLSmx0FUwqXYSTl5re8lQLTUg=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y=
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 h1:JhzVVoYvbOACxoUmOs6V/G4D5nPVUW73rKvXxP4XUJc=
github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE=
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
github.com/prometheus/client_golang v1.7.0/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4=
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.4.0 h1:LUa41nrWTQNGhzdsZ5lTnkwbNjj6rXTdazA1cSdjkOY=
github.com/rogpeppe/go-internal v1.4.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/rs/zerolog v1.16.0/go.mod h1:9nvC1axdVrAHcu/s9taAVfBuIdTZLVQmKQyvrUjF5+I=
github.com/rs/zerolog v1.19.0 h1:hYz4ZVdUgjXTBUmrkrw55j1nHx68LfOKIQk5IYtyScg=
github.com/rs/zerolog v1.19.0/go.mod h1:IzD0RJ65iWH0w97OQQebJEvTZYvsCUm9WVLWBQrJRjo=
github.com/rubenv/sql-migrate v0.0.0-20200212082348-64f95ea68aa3/go.mod h1:rtQlpHw+eR6UrqaS3kX1VYeaCxzCVdimDS7g5Ln4pPc=
github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351 h1:HXr/qUllAWv9riaI4zh2eXWKmCSDqVS/XH1MRHLKRwk=
github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351/go.mod h1:DCgfY80j8GYL7MLEfvcpSFvjD0L5yZq/aZUJmhZklyg=
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4=
github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/swaggo/files v0.0.0-20190704085106-630677cd5c14/go.mod h1:gxQT6pBGRuIGunNf/+tSOB5OHvguWi8Tbt82WOkf35E=
github.com/swaggo/gin-swagger v1.3.0/go.mod h1:oy1BRA6WvgtCp848lhxce7BnWH4C8Bxa0m5SkWx+cS0=
github.com/swaggo/swag v1.5.1/go.mod h1:1Bl9F/ZBpVWh22nY0zmYyASPO1lI/zIwRDrpZU+tv8Y=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tidwall/gjson v1.3.5/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls=
github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go v1.1.13/go.mod h1:jxau1n+/wyTGLQoCkjok9r5zFa/FxT6eI5HiHKQszjc=
github.com/ugorji/go/codec v0.0.0-20181022190402-e5e69e061d4f/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/ugorji/go/codec v1.1.13/go.mod h1:oNVt3Dq+FO91WNQ/9JnHKQP2QJxTzoN7wCBFCq1OeuU=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yvasiyarov/go-metrics v0.0.0-20150112132944-c25f46c4b940 h1:p7OofyZ509h8DmPLh8Hn+EIIZm/xYhdZHJ9GnXHdr6U=
github.com/yvasiyarov/go-metrics v0.0.0-20150112132944-c25f46c4b940/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
github.com/yvasiyarov/gorelic v0.0.7 h1:4DTF1WOM2ZZS/xMOkTFBOcb6XiHu/PKn3rVo6dbewQE=
github.com/yvasiyarov/gorelic v0.0.7/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20160601141957-9c099fbc30e9 h1:AsFN8kXcCVkUFHyuzp1FtYbzp1nCO/H6+1uPSGEyPzM=
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20160601141957-9c099fbc30e9/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs=
github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM=
go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY=
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777 h1:003p0dJM77cxMSyCPFphvZf/Y5/NXf5fzg6ufd1/Oew=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181228144115-9a3f9b0469bb/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606050223-4d9ae51c2468/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190611222205-d73e1c7e250b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191004055002-72853e10c5a3/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e h1:4nW4NLDYnU28ojHaHO8OVxFHk/aQ33U01a9cjED+pzE=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.1.0 h1:Phva6wqu+xR//Njw6iorylFFgn/z547tw5Ne3HZPQ+k=
gomodules.xyz/jsonpatch/v2 v2.1.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU=
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a h1:pOwg4OoaRYScjmR4LlLgdtnyoHYTSAVhhqe5uPdpII8=
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y=
gopkg.in/gorp.v1 v1.7.2 h1:j3DWlAyGVv8whO7AcIWznQ2Yj7yJkn34B8s63GViAAw=
gopkg.in/gorp.v1 v1.7.2/go.mod h1:Wo3h+DBQZIxATwftsglhdD/62zRFPhGhTiu5jUJmCaw=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/driver/mysql v1.0.3 h1:+JKBYPfn1tygR1/of/Fh2T8iwuVwzt+PEJmKaXzMQXg=
gorm.io/driver/mysql v1.0.3/go.mod h1:twGxftLBlFgNVNakL7F+P/x9oYqoymG3YYT8cAfI9oI=
gorm.io/driver/sqlite v1.1.4 h1:PDzwYE+sI6De2+mxAneV9Xs11+ZyKV6oxD3wDGkaNvM=
gorm.io/driver/sqlite v1.1.4/go.mod h1:mJCeTFr7+crvS+TRnWc5Z3UvwxUN1BGBLMrf5LA9DYw=
gorm.io/gorm v1.20.4/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw=
gorm.io/gorm v1.20.7/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw=
gorm.io/gorm v1.20.8 h1:iToaOdZgjNvlc44NFkxfLa3U9q63qwaxt0FdNCiwOMs=
gorm.io/gorm v1.20.8/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
helm.sh/helm/v3 v3.2.4/go.mod h1:ZaXz/vzktgwjyGGFbUWtIQkscfE7WYoRGP2szqAFHR0=
helm.sh/helm/v3 v3.5.2 h1:Us7qDuUuPYDJhkCo5tVVjfZmC7JlNnEmiqCJHAZVEj0=
helm.sh/helm/v3 v3.5.2/go.mod h1:7+CqT745B1Sy/4dzhzbbY9U08pGnJfrJXBkoEEFj18c=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.18.0/go.mod h1:q2HRQkfDzHMBZL9l/y9rH63PkQl4vae0xRT+8prbrK8=
k8s.io/api v0.18.5/go.mod h1:tN+e/2nbdGKOAH55NMV8oGrMG+3uRlA9GaRfvnCCSNk=
k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
k8s.io/api v0.20.2/go.mod h1:d7n6Ehyzx+S+cE3VhTGfVNNqtGc/oL9DCdYYahlurV8=
k8s.io/api v0.20.4 h1:xZjKidCirayzX6tHONRQyTNDVIR55TYVqgATqo6ZULY=
k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
k8s.io/apiextensions-apiserver v0.18.0/go.mod h1:18Cwn1Xws4xnWQNC00FLq1E350b9lUF+aOdIWDOZxgo=
k8s.io/apiextensions-apiserver v0.20.1/go.mod h1:ntnrZV+6a3dB504qwC5PN/Yg9PBiDNt1EVqbW2kORVk=
k8s.io/apiextensions-apiserver v0.20.2 h1:rfrMWQ87lhd8EzQWRnbQ4gXrniL/yTRBgYH1x1+BLlo=
k8s.io/apiextensions-apiserver v0.20.2/go.mod h1:F6TXp389Xntt+LUq3vw6HFOLttPa0V8821ogLGwb6Zs=
k8s.io/apimachinery v0.18.0/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA=
k8s.io/apimachinery v0.18.5/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.20.2/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.20.4 h1:vhxQ0PPUUU2Ns1b9r4/UFp13UPs8cw2iOoTjnY9faa0=
k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apiserver v0.18.0/go.mod h1:3S2O6FeBBd6XTo0njUrLxiqk8GNy6wWOftjhJcXYnjw=
k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
k8s.io/apiserver v0.20.2 h1:lGno2t3gcZnLtzsKH4oG0xA9/4GTiBzMO1DGp+K+Bak=
k8s.io/apiserver v0.20.2/go.mod h1:2nKd93WyMhZx4Hp3RfgH2K5PhwyTrprrkWYnI7id7jA=
k8s.io/cli-runtime v0.18.0/go.mod h1:1eXfmBsIJosjn9LjEBUd2WVPoPAY9XGTqTFcPMIBsUQ=
k8s.io/cli-runtime v0.20.1/go.mod h1:6wkMM16ZXTi7Ow3JLYPe10bS+XBnIkL6V9dmEz0mbuY=
k8s.io/cli-runtime v0.20.2 h1:W0/FHdbApnl9oB7xdG643c/Zaf7TZT+43I+zKxwqvhU=
k8s.io/cli-runtime v0.20.2/go.mod h1:FjH6uIZZZP3XmwrXWeeYCbgxcrD6YXxoAykBaWH0VdM=
k8s.io/client-go v0.18.0/go.mod h1:uQSYDYs4WhVZ9i6AIoEZuwUggLVEF64HOD37boKAtF8=
k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
k8s.io/client-go v0.20.2/go.mod h1:kH5brqWqp7HDxUFKoEgiI4v8G1xzbe9giaCenUWJzgE=
k8s.io/client-go v0.20.4 h1:85crgh1IotNkLpKYKZHVNI1JT86nr/iDCvq2iWKsql4=
k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
k8s.io/code-generator v0.18.0/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc=
k8s.io/code-generator v0.20.1/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg=
k8s.io/code-generator v0.20.2/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg=
k8s.io/component-base v0.18.0/go.mod h1:u3BCg0z1uskkzrnAKFzulmYaEpZF7XC9Pf/uFyb1v2c=
k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
k8s.io/component-base v0.20.2 h1:LMmu5I0pLtwjpp5009KLuMGFqSc2S2isGw8t1hpYKLE=
k8s.io/component-base v0.20.2/go.mod h1:pzFtCiwe/ASD0iV7ySMu8SYVJjCapNM9bjvk7ptpKh0=
k8s.io/component-helpers v0.20.1/go.mod h1:Q8trCj1zyLNdeur6pD2QvsF8d/nWVfK71YjN5+qVXy4=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd h1:sOHNzJIkytDF6qadMNKhhDRpc6ODik8lVC6nOur7B2c=
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
k8s.io/kubectl v0.18.0/go.mod h1:LOkWx9Z5DXMEg5KtOjHhRiC1fqJPLyCr3KtQgEolCkU=
k8s.io/kubectl v0.20.1 h1:7h1vSrL/B3hLrhlCJhbTADElPKDbx+oVUt3+QDSXxBo=
k8s.io/kubectl v0.20.1/go.mod h1:2bE0JLYTRDVKDiTREFsjLAx4R2GvUtL/mGYFXfFFMzY=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/metrics v0.18.0/go.mod h1:8aYTW18koXqjLVKL7Ds05RPMX9ipJZI3mywYvBOxXd4=
k8s.io/metrics v0.20.1/go.mod h1:JhpBE/fad3yRGsgEpiZz5FQQM5wJ18OTLkD7Tv40c0s=
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210111153108-fddb29f9d009 h1:0T5IaWHO3sJTEmCP6mUlBvMukxPKUQWqiI/YuiBNMiQ=
k8s.io/utils v0.0.0-20210111153108-fddb29f9d009/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/letsencrypt v0.0.3 h1:H7xDfhkaFFSYEJlKeq38RwX2jYcnTeHuDQyT+mMNMwM=
rsc.io/letsencrypt v0.0.3/go.mod h1:buyQKZ6IXrRnB7TdkHP0RyEybLx18HHyOSoTyoOLqNY=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/controller-runtime v0.8.3 h1:GMHvzjTmaWHQB8HadW+dIvBoJuLvZObYJ5YoZruPRao=
sigs.k8s.io/controller-runtime v0.8.3/go.mod h1:U/l+DUopBc1ecfRZ5aviA9JDmGFQKvLf5YkZNx2e0sU=
sigs.k8s.io/kustomize v2.0.3+incompatible h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0=
sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI=
| 8,524 |
0 | kubeflow_public_repos/fate-operator | kubeflow_public_repos/fate-operator/vendor/modules.txt | # cloud.google.com/go v0.54.0
cloud.google.com/go/compute/metadata
# github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78
github.com/Azure/go-ansiterm
github.com/Azure/go-ansiterm/winterm
# github.com/BurntSushi/toml v0.3.1
github.com/BurntSushi/toml
# github.com/FederatedAI/KubeFATE/k8s-deploy v0.0.0-20210225010756-06ee1daad13c
## explicit
github.com/FederatedAI/KubeFATE/k8s-deploy/pkg/modules
github.com/FederatedAI/KubeFATE/k8s-deploy/pkg/orm
github.com/FederatedAI/KubeFATE/k8s-deploy/pkg/service
github.com/FederatedAI/KubeFATE/k8s-deploy/pkg/service/kube
github.com/FederatedAI/KubeFATE/k8s-deploy/pkg/utils
# github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd
github.com/MakeNowJust/heredoc
# github.com/Masterminds/goutils v1.1.1
github.com/Masterminds/goutils
# github.com/Masterminds/semver/v3 v3.1.1
github.com/Masterminds/semver/v3
# github.com/Masterminds/sprig/v3 v3.2.2
github.com/Masterminds/sprig/v3
# github.com/Masterminds/squirrel v1.5.0
github.com/Masterminds/squirrel
# github.com/Microsoft/go-winio v0.4.16
github.com/Microsoft/go-winio
github.com/Microsoft/go-winio/pkg/guid
github.com/Microsoft/go-winio/pkg/security
github.com/Microsoft/go-winio/vhd
# github.com/Microsoft/hcsshim v0.8.14
github.com/Microsoft/hcsshim
github.com/Microsoft/hcsshim/computestorage
github.com/Microsoft/hcsshim/internal/cow
github.com/Microsoft/hcsshim/internal/hcs
github.com/Microsoft/hcsshim/internal/hcserror
github.com/Microsoft/hcsshim/internal/hns
github.com/Microsoft/hcsshim/internal/interop
github.com/Microsoft/hcsshim/internal/log
github.com/Microsoft/hcsshim/internal/logfields
github.com/Microsoft/hcsshim/internal/longpath
github.com/Microsoft/hcsshim/internal/mergemaps
github.com/Microsoft/hcsshim/internal/oc
github.com/Microsoft/hcsshim/internal/safefile
github.com/Microsoft/hcsshim/internal/schema1
github.com/Microsoft/hcsshim/internal/schema2
github.com/Microsoft/hcsshim/internal/timeout
github.com/Microsoft/hcsshim/internal/vmcompute
github.com/Microsoft/hcsshim/internal/wclayer
github.com/Microsoft/hcsshim/internal/winapi
github.com/Microsoft/hcsshim/osversion
# github.com/PuerkitoBio/purell v1.1.1
github.com/PuerkitoBio/purell
# github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578
github.com/PuerkitoBio/urlesc
# github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d
## explicit
# github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535
github.com/asaskevich/govalidator
# github.com/beorn7/perks v1.0.1
github.com/beorn7/perks/quantile
# github.com/bitly/go-simplejson v0.5.0
## explicit
# github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869
## explicit
# github.com/bshuster-repo/logrus-logstash-hook v1.0.0
## explicit
# github.com/bugsnag/bugsnag-go v2.1.0+incompatible
## explicit
# github.com/bugsnag/panicwrap v1.3.1
## explicit
# github.com/cespare/xxhash/v2 v2.1.1
github.com/cespare/xxhash/v2
# github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59
github.com/containerd/cgroups/stats/v1
# github.com/containerd/containerd v1.4.3
github.com/containerd/containerd/archive/compression
github.com/containerd/containerd/content
github.com/containerd/containerd/content/local
github.com/containerd/containerd/errdefs
github.com/containerd/containerd/filters
github.com/containerd/containerd/images
github.com/containerd/containerd/labels
github.com/containerd/containerd/log
github.com/containerd/containerd/platforms
github.com/containerd/containerd/reference
github.com/containerd/containerd/remotes
github.com/containerd/containerd/remotes/docker
github.com/containerd/containerd/remotes/docker/schema1
github.com/containerd/containerd/sys
github.com/containerd/containerd/version
# github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7
github.com/containerd/continuity/pathdriver
# github.com/cyphar/filepath-securejoin v0.2.2
github.com/cyphar/filepath-securejoin
# github.com/davecgh/go-spew v1.1.1
github.com/davecgh/go-spew/spew
# github.com/deislabs/oras v0.10.0
github.com/deislabs/oras/pkg/artifact
github.com/deislabs/oras/pkg/auth
github.com/deislabs/oras/pkg/auth/docker
github.com/deislabs/oras/pkg/content
github.com/deislabs/oras/pkg/context
github.com/deislabs/oras/pkg/oras
# github.com/docker/cli v20.10.3+incompatible
github.com/docker/cli/cli/config
github.com/docker/cli/cli/config/configfile
github.com/docker/cli/cli/config/credentials
github.com/docker/cli/cli/config/types
# github.com/docker/distribution v2.7.1+incompatible => github.com/docker/distribution v2.7.1+incompatible
github.com/docker/distribution
github.com/docker/distribution/digestset
github.com/docker/distribution/metrics
github.com/docker/distribution/reference
github.com/docker/distribution/registry/api/errcode
github.com/docker/distribution/registry/api/v2
github.com/docker/distribution/registry/client
github.com/docker/distribution/registry/client/auth
github.com/docker/distribution/registry/client/auth/challenge
github.com/docker/distribution/registry/client/transport
github.com/docker/distribution/registry/storage/cache
github.com/docker/distribution/registry/storage/cache/memory
# github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce => github.com/moby/moby v0.7.3-0.20190826074503-38ab9da00309
github.com/docker/docker/api/types
github.com/docker/docker/api/types/blkiodev
github.com/docker/docker/api/types/container
github.com/docker/docker/api/types/filters
github.com/docker/docker/api/types/mount
github.com/docker/docker/api/types/network
github.com/docker/docker/api/types/registry
github.com/docker/docker/api/types/strslice
github.com/docker/docker/api/types/swarm
github.com/docker/docker/api/types/swarm/runtime
github.com/docker/docker/api/types/versions
github.com/docker/docker/errdefs
github.com/docker/docker/pkg/homedir
github.com/docker/docker/pkg/idtools
github.com/docker/docker/pkg/ioutils
github.com/docker/docker/pkg/jsonmessage
github.com/docker/docker/pkg/longpath
github.com/docker/docker/pkg/mount
github.com/docker/docker/pkg/stringid
github.com/docker/docker/pkg/system
github.com/docker/docker/pkg/tarsum
github.com/docker/docker/pkg/term
github.com/docker/docker/pkg/term/windows
github.com/docker/docker/registry
github.com/docker/docker/registry/resumable
# github.com/docker/docker-credential-helpers v0.6.3
github.com/docker/docker-credential-helpers/client
github.com/docker/docker-credential-helpers/credentials
# github.com/docker/go-connections v0.4.0
github.com/docker/go-connections/nat
github.com/docker/go-connections/sockets
github.com/docker/go-connections/tlsconfig
# github.com/docker/go-metrics v0.0.1
## explicit
github.com/docker/go-metrics
# github.com/docker/go-units v0.4.0
github.com/docker/go-units
# github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7
## explicit
# github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96
github.com/docker/spdystream
github.com/docker/spdystream/spdy
# github.com/emicklei/go-restful v2.9.5+incompatible
github.com/emicklei/go-restful
github.com/emicklei/go-restful/log
# github.com/evanphx/json-patch v4.9.0+incompatible
github.com/evanphx/json-patch
# github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d
github.com/exponent-io/jsonpath
# github.com/fatih/color v1.7.0
github.com/fatih/color
# github.com/fsnotify/fsnotify v1.4.9
github.com/fsnotify/fsnotify
# github.com/garyburd/redigo v1.6.2
## explicit
# github.com/ghodss/yaml v1.0.0
github.com/ghodss/yaml
# github.com/go-logr/logr v0.4.0
## explicit
github.com/go-logr/logr
# github.com/go-logr/zapr v0.2.0
github.com/go-logr/zapr
# github.com/go-openapi/jsonpointer v0.19.3
github.com/go-openapi/jsonpointer
# github.com/go-openapi/jsonreference v0.19.3
github.com/go-openapi/jsonreference
# github.com/go-openapi/spec v0.19.3
github.com/go-openapi/spec
# github.com/go-openapi/swag v0.19.5
github.com/go-openapi/swag
# github.com/go-sql-driver/mysql v1.5.0
github.com/go-sql-driver/mysql
# github.com/gobwas/glob v0.2.3
github.com/gobwas/glob
github.com/gobwas/glob/compiler
github.com/gobwas/glob/match
github.com/gobwas/glob/syntax
github.com/gobwas/glob/syntax/ast
github.com/gobwas/glob/syntax/lexer
github.com/gobwas/glob/util/runes
github.com/gobwas/glob/util/strings
# github.com/gofrs/flock v0.8.0
github.com/gofrs/flock
# github.com/gofrs/uuid v4.0.0+incompatible
## explicit
# github.com/gogo/protobuf v1.3.1
github.com/gogo/protobuf/gogoproto
github.com/gogo/protobuf/proto
github.com/gogo/protobuf/protoc-gen-gogo/descriptor
github.com/gogo/protobuf/sortkeys
# github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e
github.com/golang/groupcache/lru
# github.com/golang/protobuf v1.4.3
github.com/golang/protobuf/proto
github.com/golang/protobuf/ptypes
github.com/golang/protobuf/ptypes/any
github.com/golang/protobuf/ptypes/duration
github.com/golang/protobuf/ptypes/timestamp
# github.com/google/btree v1.0.0
github.com/google/btree
# github.com/google/go-cmp v0.5.2
github.com/google/go-cmp/cmp
github.com/google/go-cmp/cmp/internal/diff
github.com/google/go-cmp/cmp/internal/flags
github.com/google/go-cmp/cmp/internal/function
github.com/google/go-cmp/cmp/internal/value
# github.com/google/gofuzz v1.1.0
github.com/google/gofuzz
# github.com/google/uuid v1.1.2
github.com/google/uuid
# github.com/googleapis/gnostic v0.5.1 => github.com/googleapis/gnostic v0.5.4
github.com/googleapis/gnostic/compiler
github.com/googleapis/gnostic/extensions
github.com/googleapis/gnostic/jsonschema
github.com/googleapis/gnostic/openapiv2
# github.com/gorilla/handlers v1.5.1
## explicit
# github.com/gorilla/mux v1.8.0
## explicit
github.com/gorilla/mux
# github.com/gosuri/uitable v0.0.4
github.com/gosuri/uitable
github.com/gosuri/uitable/util/strutil
github.com/gosuri/uitable/util/wordwrap
# github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7
github.com/gregjones/httpcache
github.com/gregjones/httpcache/diskcache
# github.com/hashicorp/golang-lru v0.5.4
github.com/hashicorp/golang-lru
github.com/hashicorp/golang-lru/simplelru
# github.com/hashicorp/hcl v1.0.0
github.com/hashicorp/hcl
github.com/hashicorp/hcl/hcl/ast
github.com/hashicorp/hcl/hcl/parser
github.com/hashicorp/hcl/hcl/printer
github.com/hashicorp/hcl/hcl/scanner
github.com/hashicorp/hcl/hcl/strconv
github.com/hashicorp/hcl/hcl/token
github.com/hashicorp/hcl/json/parser
github.com/hashicorp/hcl/json/scanner
github.com/hashicorp/hcl/json/token
# github.com/huandu/xstrings v1.3.1
github.com/huandu/xstrings
# github.com/imdario/mergo v0.3.11
github.com/imdario/mergo
# github.com/inconshreveable/mousetrap v1.0.0
github.com/inconshreveable/mousetrap
# github.com/jinzhu/inflection v1.0.0
github.com/jinzhu/inflection
# github.com/jinzhu/now v1.1.1
github.com/jinzhu/now
# github.com/jmoiron/sqlx v1.2.0
github.com/jmoiron/sqlx
github.com/jmoiron/sqlx/reflectx
# github.com/json-iterator/go v1.1.10
github.com/json-iterator/go
# github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0
## explicit
# github.com/lann/builder v0.0.0-20180802200727-47ae307949d0
github.com/lann/builder
# github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0
github.com/lann/ps
# github.com/lib/pq v1.9.0
github.com/lib/pq
github.com/lib/pq/oid
github.com/lib/pq/scram
# github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de
github.com/liggitt/tabwriter
# github.com/magiconair/properties v1.8.1
github.com/magiconair/properties
# github.com/mailru/easyjson v0.7.0
github.com/mailru/easyjson/buffer
github.com/mailru/easyjson/jlexer
github.com/mailru/easyjson/jwriter
# github.com/mattn/go-colorable v0.0.9
github.com/mattn/go-colorable
# github.com/mattn/go-isatty v0.0.12
github.com/mattn/go-isatty
# github.com/mattn/go-runewidth v0.0.4
github.com/mattn/go-runewidth
# github.com/mattn/go-sqlite3 v2.0.3+incompatible
github.com/mattn/go-sqlite3
# github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369
github.com/matttproud/golang_protobuf_extensions/pbutil
# github.com/mitchellh/copystructure v1.0.0
github.com/mitchellh/copystructure
# github.com/mitchellh/go-wordwrap v1.0.0
github.com/mitchellh/go-wordwrap
# github.com/mitchellh/mapstructure v1.1.2
github.com/mitchellh/mapstructure
# github.com/mitchellh/reflectwalk v1.0.0
github.com/mitchellh/reflectwalk
# github.com/moby/term v0.0.0-20200312100748-672ec06f55cd
github.com/moby/term
github.com/moby/term/windows
# github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd
github.com/modern-go/concurrent
# github.com/modern-go/reflect2 v1.0.1
github.com/modern-go/reflect2
# github.com/morikuni/aec v1.0.0
github.com/morikuni/aec
# github.com/naoina/go-stringutil v0.1.0
github.com/naoina/go-stringutil
# github.com/naoina/toml v0.1.1
github.com/naoina/toml
github.com/naoina/toml/ast
# github.com/nxadm/tail v1.4.8
github.com/nxadm/tail
github.com/nxadm/tail/ratelimiter
github.com/nxadm/tail/util
github.com/nxadm/tail/watch
github.com/nxadm/tail/winfile
# github.com/onsi/ginkgo v1.15.1
## explicit
github.com/onsi/ginkgo
github.com/onsi/ginkgo/config
github.com/onsi/ginkgo/internal/codelocation
github.com/onsi/ginkgo/internal/containernode
github.com/onsi/ginkgo/internal/failer
github.com/onsi/ginkgo/internal/global
github.com/onsi/ginkgo/internal/leafnodes
github.com/onsi/ginkgo/internal/remote
github.com/onsi/ginkgo/internal/spec
github.com/onsi/ginkgo/internal/spec_iterator
github.com/onsi/ginkgo/internal/specrunner
github.com/onsi/ginkgo/internal/suite
github.com/onsi/ginkgo/internal/testingtproxy
github.com/onsi/ginkgo/internal/writer
github.com/onsi/ginkgo/reporters
github.com/onsi/ginkgo/reporters/stenographer
github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable
github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty
github.com/onsi/ginkgo/types
# github.com/onsi/gomega v1.11.0
## explicit
github.com/onsi/gomega
github.com/onsi/gomega/format
github.com/onsi/gomega/gbytes
github.com/onsi/gomega/gexec
github.com/onsi/gomega/internal/assertion
github.com/onsi/gomega/internal/asyncassertion
github.com/onsi/gomega/internal/oraclematcher
github.com/onsi/gomega/internal/testingtsupport
github.com/onsi/gomega/matchers
github.com/onsi/gomega/matchers/support/goraph/bipartitegraph
github.com/onsi/gomega/matchers/support/goraph/edge
github.com/onsi/gomega/matchers/support/goraph/node
github.com/onsi/gomega/matchers/support/goraph/util
github.com/onsi/gomega/types
# github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/go-digest
# github.com/opencontainers/image-spec v1.0.1
github.com/opencontainers/image-spec/specs-go
github.com/opencontainers/image-spec/specs-go/v1
# github.com/opencontainers/runc v0.1.1
github.com/opencontainers/runc/libcontainer/user
# github.com/pelletier/go-toml v1.2.0
github.com/pelletier/go-toml
# github.com/peterbourgon/diskv v2.0.1+incompatible
github.com/peterbourgon/diskv
# github.com/pkg/errors v0.9.1
github.com/pkg/errors
# github.com/prometheus/client_golang v1.7.1
github.com/prometheus/client_golang/prometheus
github.com/prometheus/client_golang/prometheus/internal
github.com/prometheus/client_golang/prometheus/promhttp
# github.com/prometheus/client_model v0.2.0
github.com/prometheus/client_model/go
# github.com/prometheus/common v0.10.0
github.com/prometheus/common/expfmt
github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
github.com/prometheus/common/model
# github.com/prometheus/procfs v0.2.0
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util
# github.com/rs/zerolog v1.19.0
github.com/rs/zerolog
github.com/rs/zerolog/internal/cbor
github.com/rs/zerolog/internal/json
github.com/rs/zerolog/log
# github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351
github.com/rubenv/sql-migrate
github.com/rubenv/sql-migrate/sqlparse
# github.com/russross/blackfriday v1.5.2
github.com/russross/blackfriday
# github.com/satori/go.uuid v1.2.0
github.com/satori/go.uuid
# github.com/shopspring/decimal v1.2.0
github.com/shopspring/decimal
# github.com/sirupsen/logrus v1.7.0
github.com/sirupsen/logrus
# github.com/spf13/afero v1.2.2
github.com/spf13/afero
github.com/spf13/afero/mem
# github.com/spf13/cast v1.3.1
github.com/spf13/cast
# github.com/spf13/cobra v1.1.1
github.com/spf13/cobra
# github.com/spf13/jwalterweatherman v1.0.0
github.com/spf13/jwalterweatherman
# github.com/spf13/pflag v1.0.5
github.com/spf13/pflag
# github.com/spf13/viper v1.7.0
github.com/spf13/viper
# github.com/subosito/gotenv v1.2.0
github.com/subosito/gotenv
# github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f
github.com/xeipuuv/gojsonpointer
# github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415
github.com/xeipuuv/gojsonreference
# github.com/xeipuuv/gojsonschema v1.2.0
github.com/xeipuuv/gojsonschema
# github.com/yvasiyarov/go-metrics v0.0.0-20150112132944-c25f46c4b940
## explicit
# github.com/yvasiyarov/gorelic v0.0.7
## explicit
# github.com/yvasiyarov/newrelic_platform_go v0.0.0-20160601141957-9c099fbc30e9
## explicit
# go.opencensus.io v0.22.3
go.opencensus.io
go.opencensus.io/internal
go.opencensus.io/trace
go.opencensus.io/trace/internal
go.opencensus.io/trace/tracestate
# go.uber.org/atomic v1.6.0
go.uber.org/atomic
# go.uber.org/multierr v1.5.0
go.uber.org/multierr
# go.uber.org/zap v1.15.0
go.uber.org/zap
go.uber.org/zap/buffer
go.uber.org/zap/internal/bufferpool
go.uber.org/zap/internal/color
go.uber.org/zap/internal/exit
go.uber.org/zap/zapcore
# golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad
golang.org/x/crypto/bcrypt
golang.org/x/crypto/blowfish
golang.org/x/crypto/cast5
golang.org/x/crypto/openpgp
golang.org/x/crypto/openpgp/armor
golang.org/x/crypto/openpgp/clearsign
golang.org/x/crypto/openpgp/elgamal
golang.org/x/crypto/openpgp/errors
golang.org/x/crypto/openpgp/packet
golang.org/x/crypto/openpgp/s2k
golang.org/x/crypto/pbkdf2
golang.org/x/crypto/scrypt
golang.org/x/crypto/ssh/terminal
# golang.org/x/net v0.0.0-20210119194325-5f4716e94777
golang.org/x/net/context
golang.org/x/net/context/ctxhttp
golang.org/x/net/html
golang.org/x/net/html/atom
golang.org/x/net/html/charset
golang.org/x/net/http/httpguts
golang.org/x/net/http2
golang.org/x/net/http2/hpack
golang.org/x/net/idna
golang.org/x/net/internal/socks
golang.org/x/net/proxy
golang.org/x/net/websocket
# golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
golang.org/x/oauth2
golang.org/x/oauth2/google
golang.org/x/oauth2/internal
golang.org/x/oauth2/jws
golang.org/x/oauth2/jwt
# golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
# golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
golang.org/x/sys/execabs
golang.org/x/sys/internal/unsafeheader
golang.org/x/sys/plan9
golang.org/x/sys/unix
golang.org/x/sys/windows
# golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1
golang.org/x/term
# golang.org/x/text v0.3.4
golang.org/x/text/encoding
golang.org/x/text/encoding/charmap
golang.org/x/text/encoding/htmlindex
golang.org/x/text/encoding/internal
golang.org/x/text/encoding/internal/identifier
golang.org/x/text/encoding/japanese
golang.org/x/text/encoding/korean
golang.org/x/text/encoding/simplifiedchinese
golang.org/x/text/encoding/traditionalchinese
golang.org/x/text/encoding/unicode
golang.org/x/text/internal/language
golang.org/x/text/internal/language/compact
golang.org/x/text/internal/tag
golang.org/x/text/internal/utf8internal
golang.org/x/text/language
golang.org/x/text/runes
golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
golang.org/x/text/width
# golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e
golang.org/x/time/rate
# gomodules.xyz/jsonpatch/v2 v2.1.0
gomodules.xyz/jsonpatch/v2
# google.golang.org/appengine v1.6.6
google.golang.org/appengine
google.golang.org/appengine/internal
google.golang.org/appengine/internal/app_identity
google.golang.org/appengine/internal/base
google.golang.org/appengine/internal/datastore
google.golang.org/appengine/internal/log
google.golang.org/appengine/internal/modules
google.golang.org/appengine/internal/remote_api
google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/urlfetch
# google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a
google.golang.org/genproto/googleapis/rpc/status
# google.golang.org/grpc v1.27.1
google.golang.org/grpc/codes
google.golang.org/grpc/connectivity
google.golang.org/grpc/grpclog
google.golang.org/grpc/internal
google.golang.org/grpc/status
# google.golang.org/protobuf v1.25.0
google.golang.org/protobuf/encoding/prototext
google.golang.org/protobuf/encoding/protowire
google.golang.org/protobuf/internal/descfmt
google.golang.org/protobuf/internal/descopts
google.golang.org/protobuf/internal/detrand
google.golang.org/protobuf/internal/encoding/defval
google.golang.org/protobuf/internal/encoding/messageset
google.golang.org/protobuf/internal/encoding/tag
google.golang.org/protobuf/internal/encoding/text
google.golang.org/protobuf/internal/errors
google.golang.org/protobuf/internal/fieldsort
google.golang.org/protobuf/internal/filedesc
google.golang.org/protobuf/internal/filetype
google.golang.org/protobuf/internal/flags
google.golang.org/protobuf/internal/genid
google.golang.org/protobuf/internal/impl
google.golang.org/protobuf/internal/mapsort
google.golang.org/protobuf/internal/pragma
google.golang.org/protobuf/internal/set
google.golang.org/protobuf/internal/strs
google.golang.org/protobuf/internal/version
google.golang.org/protobuf/proto
google.golang.org/protobuf/reflect/protoreflect
google.golang.org/protobuf/reflect/protoregistry
google.golang.org/protobuf/runtime/protoiface
google.golang.org/protobuf/runtime/protoimpl
google.golang.org/protobuf/types/known/anypb
google.golang.org/protobuf/types/known/durationpb
google.golang.org/protobuf/types/known/timestamppb
# gopkg.in/gorp.v1 v1.7.2
gopkg.in/gorp.v1
# gopkg.in/inf.v0 v0.9.1
gopkg.in/inf.v0
# gopkg.in/ini.v1 v1.51.0
gopkg.in/ini.v1
# gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7
gopkg.in/tomb.v1
# gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v2
# gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776
gopkg.in/yaml.v3
# gorm.io/driver/mysql v1.0.3
gorm.io/driver/mysql
# gorm.io/driver/sqlite v1.1.4
gorm.io/driver/sqlite
# gorm.io/gorm v1.20.8
gorm.io/gorm
gorm.io/gorm/callbacks
gorm.io/gorm/clause
gorm.io/gorm/logger
gorm.io/gorm/migrator
gorm.io/gorm/schema
gorm.io/gorm/utils
# helm.sh/helm/v3 v3.5.2
## explicit
helm.sh/helm/v3/internal/experimental/registry
helm.sh/helm/v3/internal/fileutil
helm.sh/helm/v3/internal/ignore
helm.sh/helm/v3/internal/resolver
helm.sh/helm/v3/internal/sympath
helm.sh/helm/v3/internal/third_party/dep/fs
helm.sh/helm/v3/internal/third_party/k8s.io/kubernetes/deployment/util
helm.sh/helm/v3/internal/tlsutil
helm.sh/helm/v3/internal/urlutil
helm.sh/helm/v3/internal/version
helm.sh/helm/v3/pkg/action
helm.sh/helm/v3/pkg/chart
helm.sh/helm/v3/pkg/chart/loader
helm.sh/helm/v3/pkg/chartutil
helm.sh/helm/v3/pkg/cli
helm.sh/helm/v3/pkg/downloader
helm.sh/helm/v3/pkg/engine
helm.sh/helm/v3/pkg/gates
helm.sh/helm/v3/pkg/getter
helm.sh/helm/v3/pkg/helmpath
helm.sh/helm/v3/pkg/helmpath/xdg
helm.sh/helm/v3/pkg/kube
helm.sh/helm/v3/pkg/kube/fake
helm.sh/helm/v3/pkg/lint
helm.sh/helm/v3/pkg/lint/rules
helm.sh/helm/v3/pkg/lint/support
helm.sh/helm/v3/pkg/plugin
helm.sh/helm/v3/pkg/postrender
helm.sh/helm/v3/pkg/provenance
helm.sh/helm/v3/pkg/release
helm.sh/helm/v3/pkg/releaseutil
helm.sh/helm/v3/pkg/repo
helm.sh/helm/v3/pkg/storage
helm.sh/helm/v3/pkg/storage/driver
helm.sh/helm/v3/pkg/time
# k8s.io/api v0.20.4
## explicit
k8s.io/api/admission/v1
k8s.io/api/admission/v1beta1
k8s.io/api/admissionregistration/v1
k8s.io/api/admissionregistration/v1beta1
k8s.io/api/apiserverinternal/v1alpha1
k8s.io/api/apps/v1
k8s.io/api/apps/v1beta1
k8s.io/api/apps/v1beta2
k8s.io/api/authentication/v1
k8s.io/api/authentication/v1beta1
k8s.io/api/authorization/v1
k8s.io/api/authorization/v1beta1
k8s.io/api/autoscaling/v1
k8s.io/api/autoscaling/v2beta1
k8s.io/api/autoscaling/v2beta2
k8s.io/api/batch/v1
k8s.io/api/batch/v1beta1
k8s.io/api/batch/v2alpha1
k8s.io/api/certificates/v1
k8s.io/api/certificates/v1beta1
k8s.io/api/coordination/v1
k8s.io/api/coordination/v1beta1
k8s.io/api/core/v1
k8s.io/api/discovery/v1alpha1
k8s.io/api/discovery/v1beta1
k8s.io/api/events/v1
k8s.io/api/events/v1beta1
k8s.io/api/extensions/v1beta1
k8s.io/api/flowcontrol/v1alpha1
k8s.io/api/flowcontrol/v1beta1
k8s.io/api/imagepolicy/v1alpha1
k8s.io/api/networking/v1
k8s.io/api/networking/v1beta1
k8s.io/api/node/v1
k8s.io/api/node/v1alpha1
k8s.io/api/node/v1beta1
k8s.io/api/policy/v1beta1
k8s.io/api/rbac/v1
k8s.io/api/rbac/v1alpha1
k8s.io/api/rbac/v1beta1
k8s.io/api/scheduling/v1
k8s.io/api/scheduling/v1alpha1
k8s.io/api/scheduling/v1beta1
k8s.io/api/storage/v1
k8s.io/api/storage/v1alpha1
k8s.io/api/storage/v1beta1
# k8s.io/apiextensions-apiserver v0.20.2
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1
k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset
k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme
k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1
k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1
# k8s.io/apimachinery v0.20.4
## explicit
k8s.io/apimachinery/pkg/api/equality
k8s.io/apimachinery/pkg/api/errors
k8s.io/apimachinery/pkg/api/meta
k8s.io/apimachinery/pkg/api/resource
k8s.io/apimachinery/pkg/api/validation
k8s.io/apimachinery/pkg/apis/meta/internalversion
k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme
k8s.io/apimachinery/pkg/apis/meta/v1
k8s.io/apimachinery/pkg/apis/meta/v1/unstructured
k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme
k8s.io/apimachinery/pkg/apis/meta/v1/validation
k8s.io/apimachinery/pkg/apis/meta/v1beta1
k8s.io/apimachinery/pkg/conversion
k8s.io/apimachinery/pkg/conversion/queryparams
k8s.io/apimachinery/pkg/fields
k8s.io/apimachinery/pkg/labels
k8s.io/apimachinery/pkg/runtime
k8s.io/apimachinery/pkg/runtime/schema
k8s.io/apimachinery/pkg/runtime/serializer
k8s.io/apimachinery/pkg/runtime/serializer/json
k8s.io/apimachinery/pkg/runtime/serializer/protobuf
k8s.io/apimachinery/pkg/runtime/serializer/recognizer
k8s.io/apimachinery/pkg/runtime/serializer/streaming
k8s.io/apimachinery/pkg/runtime/serializer/versioning
k8s.io/apimachinery/pkg/selection
k8s.io/apimachinery/pkg/types
k8s.io/apimachinery/pkg/util/cache
k8s.io/apimachinery/pkg/util/clock
k8s.io/apimachinery/pkg/util/diff
k8s.io/apimachinery/pkg/util/duration
k8s.io/apimachinery/pkg/util/errors
k8s.io/apimachinery/pkg/util/framer
k8s.io/apimachinery/pkg/util/httpstream
k8s.io/apimachinery/pkg/util/httpstream/spdy
k8s.io/apimachinery/pkg/util/intstr
k8s.io/apimachinery/pkg/util/json
k8s.io/apimachinery/pkg/util/mergepatch
k8s.io/apimachinery/pkg/util/naming
k8s.io/apimachinery/pkg/util/net
k8s.io/apimachinery/pkg/util/rand
k8s.io/apimachinery/pkg/util/remotecommand
k8s.io/apimachinery/pkg/util/runtime
k8s.io/apimachinery/pkg/util/sets
k8s.io/apimachinery/pkg/util/strategicpatch
k8s.io/apimachinery/pkg/util/uuid
k8s.io/apimachinery/pkg/util/validation
k8s.io/apimachinery/pkg/util/validation/field
k8s.io/apimachinery/pkg/util/wait
k8s.io/apimachinery/pkg/util/yaml
k8s.io/apimachinery/pkg/version
k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/third_party/forked/golang/json
k8s.io/apimachinery/third_party/forked/golang/netutil
k8s.io/apimachinery/third_party/forked/golang/reflect
# k8s.io/apiserver v0.20.2
k8s.io/apiserver/pkg/endpoints/deprecation
# k8s.io/cli-runtime v0.20.2
k8s.io/cli-runtime/pkg/genericclioptions
k8s.io/cli-runtime/pkg/kustomize
k8s.io/cli-runtime/pkg/kustomize/k8sdeps
k8s.io/cli-runtime/pkg/kustomize/k8sdeps/configmapandsecret
k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kunstruct
k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kv
k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer
k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/hash
k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch
k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator
k8s.io/cli-runtime/pkg/printers
k8s.io/cli-runtime/pkg/resource
# k8s.io/client-go v0.20.4
## explicit
k8s.io/client-go/discovery
k8s.io/client-go/discovery/cached/disk
k8s.io/client-go/dynamic
k8s.io/client-go/kubernetes
k8s.io/client-go/kubernetes/scheme
k8s.io/client-go/kubernetes/typed/admissionregistration/v1
k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1
k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1
k8s.io/client-go/kubernetes/typed/apps/v1
k8s.io/client-go/kubernetes/typed/apps/v1beta1
k8s.io/client-go/kubernetes/typed/apps/v1beta2
k8s.io/client-go/kubernetes/typed/authentication/v1
k8s.io/client-go/kubernetes/typed/authentication/v1beta1
k8s.io/client-go/kubernetes/typed/authorization/v1
k8s.io/client-go/kubernetes/typed/authorization/v1beta1
k8s.io/client-go/kubernetes/typed/autoscaling/v1
k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1
k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2
k8s.io/client-go/kubernetes/typed/batch/v1
k8s.io/client-go/kubernetes/typed/batch/v1beta1
k8s.io/client-go/kubernetes/typed/batch/v2alpha1
k8s.io/client-go/kubernetes/typed/certificates/v1
k8s.io/client-go/kubernetes/typed/certificates/v1beta1
k8s.io/client-go/kubernetes/typed/coordination/v1
k8s.io/client-go/kubernetes/typed/coordination/v1beta1
k8s.io/client-go/kubernetes/typed/core/v1
k8s.io/client-go/kubernetes/typed/discovery/v1alpha1
k8s.io/client-go/kubernetes/typed/discovery/v1beta1
k8s.io/client-go/kubernetes/typed/events/v1
k8s.io/client-go/kubernetes/typed/events/v1beta1
k8s.io/client-go/kubernetes/typed/extensions/v1beta1
k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1
k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1
k8s.io/client-go/kubernetes/typed/networking/v1
k8s.io/client-go/kubernetes/typed/networking/v1beta1
k8s.io/client-go/kubernetes/typed/node/v1
k8s.io/client-go/kubernetes/typed/node/v1alpha1
k8s.io/client-go/kubernetes/typed/node/v1beta1
k8s.io/client-go/kubernetes/typed/policy/v1beta1
k8s.io/client-go/kubernetes/typed/rbac/v1
k8s.io/client-go/kubernetes/typed/rbac/v1alpha1
k8s.io/client-go/kubernetes/typed/rbac/v1beta1
k8s.io/client-go/kubernetes/typed/scheduling/v1
k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1
k8s.io/client-go/kubernetes/typed/scheduling/v1beta1
k8s.io/client-go/kubernetes/typed/storage/v1
k8s.io/client-go/kubernetes/typed/storage/v1alpha1
k8s.io/client-go/kubernetes/typed/storage/v1beta1
k8s.io/client-go/metadata
k8s.io/client-go/pkg/apis/clientauthentication
k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1
k8s.io/client-go/pkg/apis/clientauthentication/v1beta1
k8s.io/client-go/pkg/version
k8s.io/client-go/plugin/pkg/client/auth/exec
k8s.io/client-go/plugin/pkg/client/auth/gcp
k8s.io/client-go/rest
k8s.io/client-go/rest/watch
k8s.io/client-go/restmapper
k8s.io/client-go/scale
k8s.io/client-go/scale/scheme
k8s.io/client-go/scale/scheme/appsint
k8s.io/client-go/scale/scheme/appsv1beta1
k8s.io/client-go/scale/scheme/appsv1beta2
k8s.io/client-go/scale/scheme/autoscalingv1
k8s.io/client-go/scale/scheme/extensionsint
k8s.io/client-go/scale/scheme/extensionsv1beta1
k8s.io/client-go/third_party/forked/golang/template
k8s.io/client-go/tools/auth
k8s.io/client-go/tools/cache
k8s.io/client-go/tools/clientcmd
k8s.io/client-go/tools/clientcmd/api
k8s.io/client-go/tools/clientcmd/api/latest
k8s.io/client-go/tools/clientcmd/api/v1
k8s.io/client-go/tools/leaderelection
k8s.io/client-go/tools/leaderelection/resourcelock
k8s.io/client-go/tools/metrics
k8s.io/client-go/tools/pager
k8s.io/client-go/tools/record
k8s.io/client-go/tools/record/util
k8s.io/client-go/tools/reference
k8s.io/client-go/tools/remotecommand
k8s.io/client-go/tools/watch
k8s.io/client-go/transport
k8s.io/client-go/transport/spdy
k8s.io/client-go/util/cert
k8s.io/client-go/util/connrotation
k8s.io/client-go/util/exec
k8s.io/client-go/util/flowcontrol
k8s.io/client-go/util/homedir
k8s.io/client-go/util/jsonpath
k8s.io/client-go/util/keyutil
k8s.io/client-go/util/workqueue
# k8s.io/component-base v0.20.2
k8s.io/component-base/config
k8s.io/component-base/config/v1alpha1
k8s.io/component-base/version
# k8s.io/klog/v2 v2.4.0
k8s.io/klog/v2
# k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd
k8s.io/kube-openapi/pkg/common
k8s.io/kube-openapi/pkg/util/proto
k8s.io/kube-openapi/pkg/util/proto/validation
# k8s.io/kubectl v0.20.1
k8s.io/kubectl/pkg/cmd/util
k8s.io/kubectl/pkg/scheme
k8s.io/kubectl/pkg/util/interrupt
k8s.io/kubectl/pkg/util/openapi
k8s.io/kubectl/pkg/util/openapi/validation
k8s.io/kubectl/pkg/util/templates
k8s.io/kubectl/pkg/util/term
k8s.io/kubectl/pkg/validation
# k8s.io/utils v0.0.0-20210111153108-fddb29f9d009
k8s.io/utils/buffer
k8s.io/utils/exec
k8s.io/utils/integer
k8s.io/utils/pointer
k8s.io/utils/trace
# sigs.k8s.io/controller-runtime v0.8.3
## explicit
sigs.k8s.io/controller-runtime
sigs.k8s.io/controller-runtime/pkg/builder
sigs.k8s.io/controller-runtime/pkg/cache
sigs.k8s.io/controller-runtime/pkg/cache/internal
sigs.k8s.io/controller-runtime/pkg/client
sigs.k8s.io/controller-runtime/pkg/client/apiutil
sigs.k8s.io/controller-runtime/pkg/client/config
sigs.k8s.io/controller-runtime/pkg/cluster
sigs.k8s.io/controller-runtime/pkg/config
sigs.k8s.io/controller-runtime/pkg/config/v1alpha1
sigs.k8s.io/controller-runtime/pkg/controller
sigs.k8s.io/controller-runtime/pkg/controller/controllerutil
sigs.k8s.io/controller-runtime/pkg/conversion
sigs.k8s.io/controller-runtime/pkg/envtest
sigs.k8s.io/controller-runtime/pkg/envtest/printer
sigs.k8s.io/controller-runtime/pkg/event
sigs.k8s.io/controller-runtime/pkg/handler
sigs.k8s.io/controller-runtime/pkg/healthz
sigs.k8s.io/controller-runtime/pkg/internal/controller
sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics
sigs.k8s.io/controller-runtime/pkg/internal/log
sigs.k8s.io/controller-runtime/pkg/internal/recorder
sigs.k8s.io/controller-runtime/pkg/internal/testing/integration
sigs.k8s.io/controller-runtime/pkg/internal/testing/integration/addr
sigs.k8s.io/controller-runtime/pkg/internal/testing/integration/internal
sigs.k8s.io/controller-runtime/pkg/leaderelection
sigs.k8s.io/controller-runtime/pkg/log
sigs.k8s.io/controller-runtime/pkg/log/zap
sigs.k8s.io/controller-runtime/pkg/manager
sigs.k8s.io/controller-runtime/pkg/manager/signals
sigs.k8s.io/controller-runtime/pkg/metrics
sigs.k8s.io/controller-runtime/pkg/predicate
sigs.k8s.io/controller-runtime/pkg/ratelimiter
sigs.k8s.io/controller-runtime/pkg/reconcile
sigs.k8s.io/controller-runtime/pkg/recorder
sigs.k8s.io/controller-runtime/pkg/runtime/inject
sigs.k8s.io/controller-runtime/pkg/scheme
sigs.k8s.io/controller-runtime/pkg/source
sigs.k8s.io/controller-runtime/pkg/source/internal
sigs.k8s.io/controller-runtime/pkg/webhook
sigs.k8s.io/controller-runtime/pkg/webhook/admission
sigs.k8s.io/controller-runtime/pkg/webhook/conversion
sigs.k8s.io/controller-runtime/pkg/webhook/internal/certwatcher
sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics
# sigs.k8s.io/kustomize v2.0.3+incompatible
sigs.k8s.io/kustomize/pkg/commands/build
sigs.k8s.io/kustomize/pkg/constants
sigs.k8s.io/kustomize/pkg/expansion
sigs.k8s.io/kustomize/pkg/factory
sigs.k8s.io/kustomize/pkg/fs
sigs.k8s.io/kustomize/pkg/git
sigs.k8s.io/kustomize/pkg/gvk
sigs.k8s.io/kustomize/pkg/ifc
sigs.k8s.io/kustomize/pkg/ifc/transformer
sigs.k8s.io/kustomize/pkg/image
sigs.k8s.io/kustomize/pkg/internal/error
sigs.k8s.io/kustomize/pkg/loader
sigs.k8s.io/kustomize/pkg/patch
sigs.k8s.io/kustomize/pkg/patch/transformer
sigs.k8s.io/kustomize/pkg/resid
sigs.k8s.io/kustomize/pkg/resmap
sigs.k8s.io/kustomize/pkg/resource
sigs.k8s.io/kustomize/pkg/target
sigs.k8s.io/kustomize/pkg/transformers
sigs.k8s.io/kustomize/pkg/transformers/config
sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig
sigs.k8s.io/kustomize/pkg/types
# sigs.k8s.io/structured-merge-diff/v4 v4.0.2
sigs.k8s.io/structured-merge-diff/v4/value
# sigs.k8s.io/yaml v1.2.0
sigs.k8s.io/yaml
# github.com/docker/distribution => github.com/docker/distribution v2.7.1+incompatible
# github.com/docker/docker => github.com/moby/moby v0.7.3-0.20190826074503-38ab9da00309
# github.com/googleapis/gnostic => github.com/googleapis/gnostic v0.5.4
| 8,525 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/naoina | kubeflow_public_repos/fate-operator/vendor/github.com/naoina/toml/config.go | package toml
import (
"fmt"
"io"
"reflect"
"strings"
stringutil "github.com/naoina/go-stringutil"
"github.com/naoina/toml/ast"
)
// Config contains options for encoding and decoding.
type Config struct {
// NormFieldName is used to match TOML keys to struct fields. The function runs for
// both input keys and struct field names and should return a string that makes the
// two match. You must set this field to use the decoder.
//
// Example: The function in the default config removes _ and lowercases all keys. This
// allows a key called 'api_key' to match the struct field 'APIKey' because both are
// normalized to 'apikey'.
//
// Note that NormFieldName is not used for fields which define a TOML
// key through the struct tag.
NormFieldName func(typ reflect.Type, keyOrField string) string
// FieldToKey determines the TOML key of a struct field when encoding.
// You must set this field to use the encoder.
//
// Note that FieldToKey is not used for fields which define a TOML
// key through the struct tag.
FieldToKey func(typ reflect.Type, field string) string
// MissingField, if non-nil, is called when the decoder encounters a key for which no
// matching struct field exists. The default behavior is to return an error.
MissingField func(typ reflect.Type, key string) error
}
// DefaultConfig contains the default options for encoding and decoding.
// Snake case (i.e. 'foo_bar') is used for key names.
var DefaultConfig = Config{
NormFieldName: defaultNormFieldName,
FieldToKey: snakeCase,
}
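// Editorial sketch (not part of the original library): a custom Config whose
// decoder matches keys purely case-insensitively while keeping underscores
// significant. Any function with the NormFieldName signature can be used.
//
//	var caseInsensitiveConfig = Config{
//		NormFieldName: func(typ reflect.Type, keyOrField string) string {
//			return strings.ToLower(keyOrField)
//		},
//		FieldToKey: snakeCase,
//	}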
func defaultNormFieldName(typ reflect.Type, s string) string {
return strings.Replace(strings.ToLower(s), "_", "", -1)
}
func snakeCase(typ reflect.Type, s string) string {
return stringutil.ToSnakeCase(s)
}
func defaultMissingField(typ reflect.Type, key string) error {
return fmt.Errorf("field corresponding to `%s' is not defined in %v", key, typ)
}
// NewEncoder returns a new Encoder that writes to w.
// It is shorthand for DefaultConfig.NewEncoder(w).
func NewEncoder(w io.Writer) *Encoder {
return DefaultConfig.NewEncoder(w)
}
// Marshal returns the TOML encoding of v.
// It is shorthand for DefaultConfig.Marshal(v).
func Marshal(v interface{}) ([]byte, error) {
return DefaultConfig.Marshal(v)
}
// Unmarshal parses the TOML data and stores the result in the value pointed to by v.
// It is shorthand for DefaultConfig.Unmarshal(data, v).
func Unmarshal(data []byte, v interface{}) error {
return DefaultConfig.Unmarshal(data, v)
}
// UnmarshalTable applies the contents of an ast.Table to the value pointed at by v.
// It is shorthand for DefaultConfig.UnmarshalTable(t, v).
func UnmarshalTable(t *ast.Table, v interface{}) error {
return DefaultConfig.UnmarshalTable(t, v)
}
// NewDecoder returns a new Decoder that reads from r.
// It is shorthand for DefaultConfig.NewDecoder(r).
func NewDecoder(r io.Reader) *Decoder {
return DefaultConfig.NewDecoder(r)
}
| 8,526 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/naoina | kubeflow_public_repos/fate-operator/vendor/github.com/naoina/toml/encode.go | package toml
import (
"bytes"
"encoding"
"fmt"
"io"
"reflect"
"sort"
"strconv"
"time"
"github.com/naoina/toml/ast"
)
const (
tagOmitempty = "omitempty"
tagSkip = "-"
)
// Marshal returns the TOML encoding of v.
//
// Struct values encode as TOML. Each exported struct field becomes a field of
// the TOML structure unless
// - the field's tag is "-", or
// - the field is empty and its tag specifies the "omitempty" option.
//
// The "toml" key in the struct field's tag value is the key name, followed by
// an optional comma and options. Examples:
//
// // Field is ignored by this package.
// Field int `toml:"-"`
//
// // Field appears in TOML as key "myName".
// Field int `toml:"myName"`
//
// // Field appears in TOML as key "myName" and the field is omitted from the
// // result of encoding if its value is empty.
// Field int `toml:"myName,omitempty"`
//
// // Field appears in TOML as key "field", but the field is skipped if
// // empty. Note the leading comma.
// Field int `toml:",omitempty"`
func (cfg *Config) Marshal(v interface{}) ([]byte, error) {
buf := new(bytes.Buffer)
err := cfg.NewEncoder(buf).Encode(v)
return buf.Bytes(), err
}
// An Encoder writes TOML to an output stream.
type Encoder struct {
w io.Writer
cfg *Config
}
// NewEncoder returns a new Encoder that writes to w.
func (cfg *Config) NewEncoder(w io.Writer) *Encoder {
return &Encoder{w, cfg}
}
// Encode writes the TOML of v to the stream.
// See the documentation for Marshal for details about the conversion of Go values to TOML.
func (e *Encoder) Encode(v interface{}) error {
rv := reflect.ValueOf(v)
for rv.Kind() == reflect.Ptr {
if rv.IsNil() {
return &marshalNilError{rv.Type()}
}
rv = rv.Elem()
}
buf := &tableBuf{typ: ast.TableTypeNormal}
var err error
switch rv.Kind() {
case reflect.Struct:
err = buf.structFields(e.cfg, rv)
case reflect.Map:
err = buf.mapFields(e.cfg, rv)
default:
err = &marshalTableError{rv.Type()}
}
if err != nil {
return err
}
return buf.writeTo(e.w, "")
}
// Marshaler can be implemented to override the encoding of TOML values. The returned text
// must be a simple TOML value (i.e. not a table) and is inserted into marshaler output.
//
// This interface exists for backwards-compatibility reasons. You probably want to
// implement encoding.TextMarshaler or MarshalerRec instead.
type Marshaler interface {
MarshalTOML() ([]byte, error)
}
// MarshalerRec can be implemented to override the TOML encoding of a type.
// The returned value is marshaled in place of the receiver.
type MarshalerRec interface {
MarshalTOML() (interface{}, error)
}
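// Editorial sketch (not part of the original library): a MarshalerRec
// implementation returns a plain Go value that the encoder already knows how
// to handle; the hypothetical point type below would be encoded as a TOML array.
//
//	func (p point) MarshalTOML() (interface{}, error) {
//		return []int{p.x, p.y}, nil
//	}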
type tableBuf struct {
name string // already escaped / quoted
body []byte
children []*tableBuf
typ ast.TableType
arrayDepth int
}
func (b *tableBuf) writeTo(w io.Writer, prefix string) error {
key := b.name // TODO: escape dots
if prefix != "" {
key = prefix + "." + key
}
if b.name != "" {
head := "[" + key + "]"
if b.typ == ast.TableTypeArray {
head = "[" + head + "]"
}
head += "\n"
if _, err := io.WriteString(w, head); err != nil {
return err
}
}
if _, err := w.Write(b.body); err != nil {
return err
}
for i, child := range b.children {
if len(b.body) > 0 || i > 0 {
if _, err := w.Write([]byte("\n")); err != nil {
return err
}
}
if err := child.writeTo(w, key); err != nil {
return err
}
}
return nil
}
func (b *tableBuf) newChild(name string) *tableBuf {
child := &tableBuf{name: quoteName(name), typ: ast.TableTypeNormal}
if b.arrayDepth > 0 {
child.typ = ast.TableTypeArray
}
return child
}
func (b *tableBuf) addChild(child *tableBuf) {
// Empty table elision: we can avoid writing a table that doesn't have any keys on its
// own. Array tables can't be elided because they define array elements (which would
// be missing if elided).
if len(child.body) == 0 && child.typ == ast.TableTypeNormal {
for _, gchild := range child.children {
gchild.name = child.name + "." + gchild.name
b.addChild(gchild)
}
return
}
b.children = append(b.children, child)
}
func (b *tableBuf) structFields(cfg *Config, rv reflect.Value) error {
rt := rv.Type()
for i := 0; i < rv.NumField(); i++ {
ft := rt.Field(i)
if ft.PkgPath != "" && !ft.Anonymous { // not exported
continue
}
name, rest := extractTag(ft.Tag.Get(fieldTagName))
if name == tagSkip {
continue
}
fv := rv.Field(i)
if rest == tagOmitempty && isEmptyValue(fv) {
continue
}
if name == "" {
name = cfg.FieldToKey(rt, ft.Name)
}
if err := b.field(cfg, name, fv); err != nil {
return err
}
}
return nil
}
type mapKeyList []struct {
key string
value reflect.Value
}
func (l mapKeyList) Len() int { return len(l) }
func (l mapKeyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
func (l mapKeyList) Less(i, j int) bool { return l[i].key < l[j].key }
func (b *tableBuf) mapFields(cfg *Config, rv reflect.Value) error {
keys := rv.MapKeys()
keylist := make(mapKeyList, len(keys))
for i, key := range keys {
var err error
keylist[i].key, err = encodeMapKey(key)
if err != nil {
return err
}
keylist[i].value = rv.MapIndex(key)
}
sort.Sort(keylist)
for _, kv := range keylist {
if err := b.field(cfg, kv.key, kv.value); err != nil {
return err
}
}
return nil
}
func (b *tableBuf) field(cfg *Config, name string, rv reflect.Value) error {
off := len(b.body)
b.body = append(b.body, quoteName(name)...)
b.body = append(b.body, " = "...)
isTable, err := b.value(cfg, rv, name)
if isTable {
b.body = b.body[:off] // rub out "key ="
} else {
b.body = append(b.body, '\n')
}
return err
}
func (b *tableBuf) value(cfg *Config, rv reflect.Value, name string) (bool, error) {
isMarshaler, isTable, err := b.marshaler(cfg, rv, name)
if isMarshaler {
return isTable, err
}
switch rv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
b.body = strconv.AppendInt(b.body, rv.Int(), 10)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
b.body = strconv.AppendUint(b.body, rv.Uint(), 10)
case reflect.Float32, reflect.Float64:
b.body = strconv.AppendFloat(b.body, rv.Float(), 'e', -1, 64)
case reflect.Bool:
b.body = strconv.AppendBool(b.body, rv.Bool())
case reflect.String:
b.body = strconv.AppendQuote(b.body, rv.String())
case reflect.Ptr, reflect.Interface:
if rv.IsNil() {
return false, &marshalNilError{rv.Type()}
}
return b.value(cfg, rv.Elem(), name)
case reflect.Slice, reflect.Array:
rvlen := rv.Len()
if rvlen == 0 {
b.body = append(b.body, '[', ']')
return false, nil
}
b.arrayDepth++
wroteElem := false
b.body = append(b.body, '[')
for i := 0; i < rvlen; i++ {
isTable, err := b.value(cfg, rv.Index(i), name)
if err != nil {
return isTable, err
}
wroteElem = wroteElem || !isTable
if wroteElem {
if i < rvlen-1 {
b.body = append(b.body, ',', ' ')
} else {
b.body = append(b.body, ']')
}
}
}
if !wroteElem {
b.body = b.body[:len(b.body)-1] // rub out '['
}
b.arrayDepth--
return !wroteElem, nil
case reflect.Struct:
child := b.newChild(name)
err := child.structFields(cfg, rv)
b.addChild(child)
return true, err
case reflect.Map:
child := b.newChild(name)
err := child.mapFields(cfg, rv)
b.addChild(child)
return true, err
default:
return false, fmt.Errorf("toml: marshal: unsupported type %v", rv.Kind())
}
return false, nil
}
func (b *tableBuf) marshaler(cfg *Config, rv reflect.Value, name string) (handled, isTable bool, err error) {
switch t := rv.Interface().(type) {
case encoding.TextMarshaler:
enc, err := t.MarshalText()
if err != nil {
return true, false, err
}
b.body = encodeTextMarshaler(b.body, string(enc))
return true, false, nil
case MarshalerRec:
newval, err := t.MarshalTOML()
if err != nil {
return true, false, err
}
isTable, err = b.value(cfg, reflect.ValueOf(newval), name)
return true, isTable, err
case Marshaler:
enc, err := t.MarshalTOML()
if err != nil {
return true, false, err
}
b.body = append(b.body, enc...)
return true, false, nil
}
return false, false, nil
}
func encodeTextMarshaler(buf []byte, v string) []byte {
// Emit the value without quotes if possible.
if v == "true" || v == "false" {
return append(buf, v...)
} else if _, err := time.Parse(time.RFC3339Nano, v); err == nil {
return append(buf, v...)
} else if _, err := strconv.ParseInt(v, 10, 64); err == nil {
return append(buf, v...)
} else if _, err := strconv.ParseUint(v, 10, 64); err == nil {
return append(buf, v...)
} else if _, err := strconv.ParseFloat(v, 64); err == nil {
return append(buf, v...)
}
return strconv.AppendQuote(buf, v)
}
func encodeMapKey(rv reflect.Value) (string, error) {
if rv.Kind() == reflect.String {
return rv.String(), nil
}
if tm, ok := rv.Interface().(encoding.TextMarshaler); ok {
b, err := tm.MarshalText()
return string(b), err
}
switch rv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return strconv.FormatInt(rv.Int(), 10), nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return strconv.FormatUint(rv.Uint(), 10), nil
}
return "", fmt.Errorf("toml: invalid map key type %v", rv.Type())
}
func isEmptyValue(v reflect.Value) bool {
switch v.Kind() {
case reflect.Array:
// encoding/json treats all arrays with non-zero length as non-empty. We check the
// array content here because zero-length arrays are almost never used.
len := v.Len()
for i := 0; i < len; i++ {
if !isEmptyValue(v.Index(i)) {
return false
}
}
return true
case reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
}
return false
}
func quoteName(s string) string {
if len(s) == 0 {
return strconv.Quote(s)
}
for _, r := range s {
if r >= '0' && r <= '9' || r >= 'A' && r <= 'Z' || r >= 'a' && r <= 'z' || r == '-' || r == '_' {
continue
}
return strconv.Quote(s)
}
return s
}
| 8,527 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/naoina | kubeflow_public_repos/fate-operator/vendor/github.com/naoina/toml/decode.go | // Package toml encodes and decodes the TOML configuration format using reflection.
//
// This library is compatible with TOML version v0.4.0.
package toml
import (
"encoding"
"fmt"
"io"
"io/ioutil"
"reflect"
"strconv"
"strings"
"time"
"github.com/naoina/toml/ast"
)
const (
tableSeparator = '.'
)
var (
	escapeReplacer = strings.NewReplacer(
		"\b", "\\b",
		"\f", "\\f",
		"\n", "\\n",
		"\r", "\\r",
		"\t", "\\t",
	)
underscoreReplacer = strings.NewReplacer(
"_", "",
)
)
var timeType = reflect.TypeOf(time.Time{})
// Unmarshal parses the TOML data and stores the result in the value pointed to by v.
//
// Unmarshal maps the TOML data onto v according to the following rules:
//
// TOML strings to string
// TOML integers to any int type
// TOML floats to float32 or float64
// TOML booleans to bool
// TOML datetimes to time.Time
// TOML arrays to any type of slice
// TOML tables to struct or map
// TOML array tables to slice of struct or map
func (cfg *Config) Unmarshal(data []byte, v interface{}) error {
table, err := Parse(data)
if err != nil {
return err
}
if err := cfg.UnmarshalTable(table, v); err != nil {
return err
}
return nil
}
// A Decoder reads and decodes TOML from an input stream.
type Decoder struct {
r io.Reader
cfg *Config
}
// NewDecoder returns a new Decoder that reads from r.
// Note that it reads everything from r before parsing.
func (cfg *Config) NewDecoder(r io.Reader) *Decoder {
return &Decoder{r, cfg}
}
// Decode parses the TOML data from its input and stores it in the value pointed to by v.
// See the documentation for Unmarshal for details about the conversion of TOML into a Go value.
func (d *Decoder) Decode(v interface{}) error {
b, err := ioutil.ReadAll(d.r)
if err != nil {
return err
}
return d.cfg.Unmarshal(b, v)
}
// UnmarshalerRec may be implemented by types to customize their behavior when being
// unmarshaled from TOML. You can use it to implement custom validation or to set
// unexported fields.
//
// UnmarshalTOML receives a function that can be called to unmarshal the original TOML
// value into a field or variable. It is safe to call the function more than once if
// necessary.
type UnmarshalerRec interface {
UnmarshalTOML(fn func(interface{}) error) error
}
// Unmarshaler can be used to capture and process raw TOML source of a table or value.
// UnmarshalTOML must copy the input if it wishes to retain it after returning.
//
// Note: this interface is retained for backwards compatibility. You probably want
// to implement encoding.TextUnmarshaler or UnmarshalerRec instead.
type Unmarshaler interface {
UnmarshalTOML(input []byte) error
}
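// Editorial sketch (not part of the original library): an UnmarshalerRec
// implementation typically decodes into an auxiliary value through fn,
// validates it, and then assigns to (possibly unexported) fields. The
// duration type below is hypothetical and wraps a time.Duration field named d.
//
//	func (d *duration) UnmarshalTOML(fn func(interface{}) error) error {
//		var s string
//		if err := fn(&s); err != nil {
//			return err
//		}
//		v, err := time.ParseDuration(s)
//		if err != nil {
//			return err
//		}
//		d.d = v
//		return nil
//	}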
// UnmarshalTable applies the contents of an ast.Table to the value pointed at by v.
//
// UnmarshalTable maps the table contents onto v according to the following rules:
//
// TOML strings to string
// TOML integers to any int type
// TOML floats to float32 or float64
// TOML booleans to bool
// TOML datetimes to time.Time
// TOML arrays to any type of slice
// TOML tables to struct or map
// TOML array tables to slice of struct or map
func (cfg *Config) UnmarshalTable(t *ast.Table, v interface{}) error {
rv := reflect.ValueOf(v)
toplevelMap := rv.Kind() == reflect.Map
if (!toplevelMap && rv.Kind() != reflect.Ptr) || rv.IsNil() {
return &invalidUnmarshalError{reflect.TypeOf(v)}
}
return unmarshalTable(cfg, rv, t, toplevelMap)
}
// used for UnmarshalerRec.
func unmarshalTableOrValue(cfg *Config, rv reflect.Value, av interface{}) error {
if (rv.Kind() != reflect.Ptr && rv.Kind() != reflect.Map) || rv.IsNil() {
return &invalidUnmarshalError{rv.Type()}
}
rv = indirect(rv)
switch av.(type) {
case *ast.KeyValue, *ast.Table, []*ast.Table:
if err := unmarshalField(cfg, rv, av); err != nil {
return lineError(fieldLineNumber(av), err)
}
return nil
case ast.Value:
return setValue(cfg, rv, av.(ast.Value))
default:
panic(fmt.Sprintf("BUG: unhandled AST node type %T", av))
}
}
// unmarshalTable unmarshals the fields of a table into a struct or map.
//
// toplevelMap is true when rv is an (unaddressable) map given to UnmarshalTable. In this
// (special) case, the map is used as-is instead of creating a new map.
func unmarshalTable(cfg *Config, rv reflect.Value, t *ast.Table, toplevelMap bool) error {
rv = indirect(rv)
if err, ok := setUnmarshaler(cfg, rv, t); ok {
return lineError(t.Line, err)
}
switch {
case rv.Kind() == reflect.Struct:
fc := makeFieldCache(cfg, rv.Type())
for key, fieldAst := range t.Fields {
fv, fieldName, err := fc.findField(cfg, rv, key)
if err != nil {
return lineError(fieldLineNumber(fieldAst), err)
}
if fv.IsValid() {
if err := unmarshalField(cfg, fv, fieldAst); err != nil {
return lineErrorField(fieldLineNumber(fieldAst), rv.Type().String()+"."+fieldName, err)
}
}
}
case rv.Kind() == reflect.Map || isEface(rv):
m := rv
if !toplevelMap {
if rv.Kind() == reflect.Interface {
m = reflect.ValueOf(make(map[string]interface{}))
} else {
m = reflect.MakeMap(rv.Type())
}
}
elemtyp := m.Type().Elem()
for key, fieldAst := range t.Fields {
kv, err := unmarshalMapKey(m.Type().Key(), key)
if err != nil {
return lineError(fieldLineNumber(fieldAst), err)
}
fv := reflect.New(elemtyp).Elem()
if err := unmarshalField(cfg, fv, fieldAst); err != nil {
return lineError(fieldLineNumber(fieldAst), err)
}
m.SetMapIndex(kv, fv)
}
if !toplevelMap {
rv.Set(m)
}
default:
return lineError(t.Line, &unmarshalTypeError{"table", "struct or map", rv.Type()})
}
return nil
}
func fieldLineNumber(fieldAst interface{}) int {
switch av := fieldAst.(type) {
case *ast.KeyValue:
return av.Line
case *ast.Table:
return av.Line
case []*ast.Table:
return av[0].Line
default:
panic(fmt.Sprintf("BUG: unhandled node type %T", fieldAst))
}
}
func unmarshalField(cfg *Config, rv reflect.Value, fieldAst interface{}) error {
switch av := fieldAst.(type) {
case *ast.KeyValue:
return setValue(cfg, rv, av.Value)
case *ast.Table:
return unmarshalTable(cfg, rv, av, false)
case []*ast.Table:
rv = indirect(rv)
if err, ok := setUnmarshaler(cfg, rv, fieldAst); ok {
return err
}
var slice reflect.Value
switch {
case rv.Kind() == reflect.Slice:
slice = reflect.MakeSlice(rv.Type(), len(av), len(av))
case isEface(rv):
slice = reflect.ValueOf(make([]interface{}, len(av)))
default:
return &unmarshalTypeError{"array table", "slice", rv.Type()}
}
for i, tbl := range av {
vv := reflect.New(slice.Type().Elem()).Elem()
if err := unmarshalTable(cfg, vv, tbl, false); err != nil {
return err
}
slice.Index(i).Set(vv)
}
rv.Set(slice)
default:
panic(fmt.Sprintf("BUG: unhandled AST node type %T", av))
}
return nil
}
func unmarshalMapKey(typ reflect.Type, key string) (reflect.Value, error) {
rv := reflect.New(typ).Elem()
if u, ok := rv.Addr().Interface().(encoding.TextUnmarshaler); ok {
return rv, u.UnmarshalText([]byte(key))
}
switch typ.Kind() {
case reflect.String:
rv.SetString(key)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
i, err := strconv.ParseInt(key, 10, int(typ.Size()*8))
if err != nil {
return rv, convertNumError(typ.Kind(), err)
}
rv.SetInt(i)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
i, err := strconv.ParseUint(key, 10, int(typ.Size()*8))
if err != nil {
return rv, convertNumError(typ.Kind(), err)
}
rv.SetUint(i)
default:
return rv, fmt.Errorf("invalid map key type %s", typ)
}
return rv, nil
}
func setValue(cfg *Config, lhs reflect.Value, val ast.Value) error {
lhs = indirect(lhs)
if err, ok := setUnmarshaler(cfg, lhs, val); ok {
return err
}
if err, ok := setTextUnmarshaler(lhs, val); ok {
return err
}
switch v := val.(type) {
case *ast.Integer:
return setInt(lhs, v)
case *ast.Float:
return setFloat(lhs, v)
case *ast.String:
return setString(lhs, v)
case *ast.Boolean:
return setBoolean(lhs, v)
case *ast.Datetime:
return setDatetime(lhs, v)
case *ast.Array:
return setArray(cfg, lhs, v)
default:
panic(fmt.Sprintf("BUG: unhandled node type %T", v))
}
}
func indirect(rv reflect.Value) reflect.Value {
for rv.Kind() == reflect.Ptr {
if rv.IsNil() {
rv.Set(reflect.New(rv.Type().Elem()))
}
rv = rv.Elem()
}
return rv
}
func setUnmarshaler(cfg *Config, lhs reflect.Value, av interface{}) (error, bool) {
if lhs.CanAddr() {
if u, ok := lhs.Addr().Interface().(UnmarshalerRec); ok {
err := u.UnmarshalTOML(func(v interface{}) error {
return unmarshalTableOrValue(cfg, reflect.ValueOf(v), av)
})
return err, true
}
if u, ok := lhs.Addr().Interface().(Unmarshaler); ok {
return u.UnmarshalTOML(unmarshalerSource(av)), true
}
}
return nil, false
}
func unmarshalerSource(av interface{}) []byte {
var source []byte
switch av := av.(type) {
case []*ast.Table:
for i, tab := range av {
source = append(source, tab.Source()...)
if i != len(av)-1 {
source = append(source, '\n')
}
}
case ast.Value:
source = []byte(av.Source())
default:
panic(fmt.Sprintf("BUG: unhandled node type %T", av))
}
return source
}
func setTextUnmarshaler(lhs reflect.Value, val ast.Value) (error, bool) {
if !lhs.CanAddr() {
return nil, false
}
u, ok := lhs.Addr().Interface().(encoding.TextUnmarshaler)
if !ok || lhs.Type() == timeType {
return nil, false
}
var data string
switch val := val.(type) {
case *ast.Array:
return &unmarshalTypeError{"array", "", lhs.Type()}, true
case *ast.String:
data = val.Value
default:
data = val.Source()
}
return u.UnmarshalText([]byte(data)), true
}
func setInt(fv reflect.Value, v *ast.Integer) error {
k := fv.Kind()
switch {
case k >= reflect.Int && k <= reflect.Int64:
i, err := strconv.ParseInt(v.Value, 10, int(fv.Type().Size()*8))
if err != nil {
return convertNumError(fv.Kind(), err)
}
fv.SetInt(i)
case k >= reflect.Uint && k <= reflect.Uintptr:
i, err := strconv.ParseUint(v.Value, 10, int(fv.Type().Size()*8))
if err != nil {
return convertNumError(fv.Kind(), err)
}
fv.SetUint(i)
case isEface(fv):
i, err := strconv.ParseInt(v.Value, 10, 64)
if err != nil {
return convertNumError(reflect.Int64, err)
}
fv.Set(reflect.ValueOf(i))
default:
return &unmarshalTypeError{"integer", "", fv.Type()}
}
return nil
}
func setFloat(fv reflect.Value, v *ast.Float) error {
f, err := v.Float()
if err != nil {
return err
}
switch {
case fv.Kind() == reflect.Float32 || fv.Kind() == reflect.Float64:
if fv.OverflowFloat(f) {
return &overflowError{fv.Kind(), v.Value}
}
fv.SetFloat(f)
case isEface(fv):
fv.Set(reflect.ValueOf(f))
default:
return &unmarshalTypeError{"float", "", fv.Type()}
}
return nil
}
func setString(fv reflect.Value, v *ast.String) error {
switch {
case fv.Kind() == reflect.String:
fv.SetString(v.Value)
case isEface(fv):
fv.Set(reflect.ValueOf(v.Value))
default:
return &unmarshalTypeError{"string", "", fv.Type()}
}
return nil
}
func setBoolean(fv reflect.Value, v *ast.Boolean) error {
b, _ := v.Boolean()
switch {
case fv.Kind() == reflect.Bool:
fv.SetBool(b)
case isEface(fv):
fv.Set(reflect.ValueOf(b))
default:
return &unmarshalTypeError{"boolean", "", fv.Type()}
}
return nil
}
func setDatetime(rv reflect.Value, v *ast.Datetime) error {
t, err := v.Time()
if err != nil {
return err
}
if !timeType.AssignableTo(rv.Type()) {
return &unmarshalTypeError{"datetime", "", rv.Type()}
}
rv.Set(reflect.ValueOf(t))
return nil
}
func setArray(cfg *Config, rv reflect.Value, v *ast.Array) error {
var slicetyp reflect.Type
switch {
case rv.Kind() == reflect.Slice:
slicetyp = rv.Type()
case isEface(rv):
slicetyp = reflect.SliceOf(rv.Type())
default:
return &unmarshalTypeError{"array", "slice", rv.Type()}
}
if len(v.Value) == 0 {
// Ensure defined slices are always set to a non-nil value.
rv.Set(reflect.MakeSlice(slicetyp, 0, 0))
return nil
}
tomltyp := reflect.TypeOf(v.Value[0])
slice := reflect.MakeSlice(slicetyp, len(v.Value), len(v.Value))
typ := slicetyp.Elem()
for i, vv := range v.Value {
if i > 0 && tomltyp != reflect.TypeOf(vv) {
return errArrayMultiType
}
tmp := reflect.New(typ).Elem()
if err := setValue(cfg, tmp, vv); err != nil {
return err
}
slice.Index(i).Set(tmp)
}
rv.Set(slice)
return nil
}
func isEface(rv reflect.Value) bool {
return rv.Kind() == reflect.Interface && rv.Type().NumMethod() == 0
}
| 8,528 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/naoina | kubeflow_public_repos/fate-operator/vendor/github.com/naoina/toml/error.go | package toml
import (
"errors"
"fmt"
"reflect"
"strconv"
)
var (
errArrayMultiType = errors.New("array can't contain multiple types")
)
// LineError is returned by Unmarshal, UnmarshalTable and Parse
// if the error is local to a line.
type LineError struct {
Line int
StructField string
Err error
}
func (err *LineError) Error() string {
field := ""
if err.StructField != "" {
field = "(" + err.StructField + ") "
}
return fmt.Sprintf("line %d: %s%v", err.Line, field, err.Err)
}
func lineError(line int, err error) error {
if err == nil {
return nil
}
if _, ok := err.(*LineError); ok {
return err
}
return &LineError{Line: line, Err: err}
}
func lineErrorField(line int, field string, err error) error {
if lerr, ok := err.(*LineError); ok {
return lerr
} else if err != nil {
err = &LineError{Line: line, StructField: field, Err: err}
}
return err
}
type overflowError struct {
kind reflect.Kind
v string
}
func (err *overflowError) Error() string {
return fmt.Sprintf("value %s is out of range for %v", err.v, err.kind)
}
func convertNumError(kind reflect.Kind, err error) error {
if numerr, ok := err.(*strconv.NumError); ok && numerr.Err == strconv.ErrRange {
return &overflowError{kind, numerr.Num}
}
return err
}
type invalidUnmarshalError struct {
typ reflect.Type
}
func (err *invalidUnmarshalError) Error() string {
if err.typ == nil {
return "toml: Unmarshal(nil)"
}
if err.typ.Kind() != reflect.Ptr {
return "toml: Unmarshal(non-pointer " + err.typ.String() + ")"
}
return "toml: Unmarshal(nil " + err.typ.String() + ")"
}
type unmarshalTypeError struct {
what string
want string
typ reflect.Type
}
func (err *unmarshalTypeError) Error() string {
msg := fmt.Sprintf("cannot unmarshal TOML %s into %s", err.what, err.typ)
if err.want != "" {
msg += " (need " + err.want + ")"
}
return msg
}
type marshalNilError struct {
typ reflect.Type
}
func (err *marshalNilError) Error() string {
return fmt.Sprintf("toml: cannot marshal nil %s", err.typ)
}
type marshalTableError struct {
typ reflect.Type
}
func (err *marshalTableError) Error() string {
return fmt.Sprintf("toml: cannot marshal %s as table, want struct or map type", err.typ)
}
| 8,529 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/naoina | kubeflow_public_repos/fate-operator/vendor/github.com/naoina/toml/README.md | # TOML parser and encoder library for Golang [](https://travis-ci.org/naoina/toml)
[TOML](https://github.com/toml-lang/toml) parser and encoder library for [Golang](http://golang.org/).
This library is compatible with TOML version [v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md).
## Installation
go get -u github.com/naoina/toml
## Usage
Save the following TOML as `example.toml`.
```toml
# This is a TOML document. Boom.
title = "TOML Example"
[owner]
name = "Lance Uppercut"
dob = 1979-05-27T07:32:00-08:00 # First class dates? Why not?
[database]
server = "192.168.1.1"
ports = [ 8001, 8001, 8002 ]
connection_max = 5000
enabled = true
[servers]
# You can indent as you please. Tabs or spaces. TOML don't care.
[servers.alpha]
ip = "10.0.0.1"
dc = "eqdc10"
[servers.beta]
ip = "10.0.0.2"
dc = "eqdc10"
[clients]
data = [ ["gamma", "delta"], [1, 2] ]
# Line breaks are OK when inside arrays
hosts = [
"alpha",
"omega"
]
```
The above TOML is then mapped onto the `tomlConfig` struct using `toml.Unmarshal` (here via `toml.NewDecoder`):
```go
package main
import (
	"fmt"
	"net"
	"os"
	"time"

	"github.com/naoina/toml"
)
type tomlConfig struct {
Title string
Owner struct {
Name string
Dob time.Time
}
Database struct {
Server string
Ports []int
ConnectionMax uint
Enabled bool
}
Servers map[string]ServerInfo
Clients struct {
Data [][]interface{}
Hosts []string
}
}
type ServerInfo struct {
IP net.IP
DC string
}
func main() {
f, err := os.Open("example.toml")
if err != nil {
panic(err)
}
defer f.Close()
	var config tomlConfig
if err := toml.NewDecoder(f).Decode(&config); err != nil {
panic(err)
}
// then to use the unmarshaled config...
fmt.Println("IP of server 'alpha':", config.Servers["alpha"].IP)
}
```
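Encoding works the same way in the other direction through `toml.Marshal` or
`toml.NewEncoder`. A minimal sketch, reusing the `config` value decoded above
(the output file name is illustrative):

```go
out, err := os.Create("example_copy.toml")
if err != nil {
	panic(err)
}
defer out.Close()

if err := toml.NewEncoder(out).Encode(&config); err != nil {
	panic(err)
}
```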
## Mappings
Each TOML key and value maps to the corresponding struct field.
Struct fields must be exported to be mapped.
Keys are matched to field names by the following rules:
#### Exact matching
```toml
timeout_seconds = 256
```
```go
type Config struct {
Timeout_seconds int
}
```
#### Camelcase matching
```toml
server_name = "srv1"
```
```go
type Config struct {
ServerName string
}
```
#### Uppercase matching
```toml
ip = "10.0.0.1"
```
```go
type Config struct {
IP string
}
```
See the following examples for the value mappings.
### String
```toml
val = "string"
```
```go
type Config struct {
Val string
}
```
### Integer
```toml
val = 100
```
```go
type Config struct {
Val int
}
```
The following integer types can be used:
* int8 (from `-128` to `127`)
* int16 (from `-32768` to `32767`)
* int32 (from `-2147483648` to `2147483647`)
* int64 (from `-9223372036854775808` to `9223372036854775807`)
* int (same as `int32` on 32bit environment, or `int64` on 64bit environment)
* uint8 (from `0` to `255`)
* uint16 (from `0` to `65535`)
* uint32 (from `0` to `4294967295`)
* uint64 (from `0` to `18446744073709551615`)
* uint (same as `uint32` on 32bit environment, or `uint64` on 64bit environment)
### Float
```toml
val = 3.1415
```
```go
type Config struct {
Val float32
}
```
The following float types can be used:
* float32
* float64
### Boolean
```toml
val = true
```
```go
type Config struct {
Val bool
}
```
### Datetime
```toml
val = 2014-09-28T21:27:39Z
```
```go
type Config struct {
Val time.Time
}
```
### Array
```toml
val = ["a", "b", "c"]
```
```go
type Config struct {
Val []string
}
```
The following examples can also all be mapped:
```toml
val1 = [1, 2, 3]
val2 = [["a", "b"], ["c", "d"]]
val3 = [[1, 2, 3], ["a", "b", "c"]]
val4 = [[1, 2, 3], [["a", "b"], [true, false]]]
```
```go
type Config struct {
Val1 []int
Val2 [][]string
Val3 [][]interface{}
Val4 [][]interface{}
}
```
### Table
```toml
[server]
type = "app"
[server.development]
ip = "10.0.0.1"
[server.production]
ip = "10.0.0.2"
```
```go
type Config struct {
Server map[string]Server
}
type Server struct {
IP string
}
```
You can also use the following struct instead of a map of structs.
```go
type Config struct {
Server struct {
Development Server
Production Server
}
}
type Server struct {
IP string
}
```
### Array of Tables
```toml
[[fruit]]
name = "apple"
[fruit.physical]
color = "red"
shape = "round"
[[fruit.variety]]
name = "red delicious"
[[fruit.variety]]
name = "granny smith"
[[fruit]]
name = "banana"
[[fruit.variety]]
name = "plantain"
```
```go
type Config struct {
Fruit []struct {
Name string
Physical struct {
Color string
Shape string
}
Variety []struct {
Name string
}
}
}
```
### Using the `encoding.TextUnmarshaler` interface
Package toml supports `encoding.TextUnmarshaler` (and `encoding.TextMarshaler`). You can
use it to apply custom marshaling rules for certain types. The `UnmarshalText` method is
called with the value text found in the TOML input. TOML strings are passed unquoted.
```toml
duration = "10s"
```
```go
import "time"
type Duration time.Duration
// UnmarshalText implements encoding.TextUnmarshaler
func (d *Duration) UnmarshalText(data []byte) error {
duration, err := time.ParseDuration(string(data))
if err == nil {
*d = Duration(duration)
}
return err
}
// MarshalText implements encoding.TextMarshaler
func (d Duration) MarshalText() ([]byte, error) {
return []byte(time.Duration(d).String()), nil
}
type ConfigWithDuration struct {
Duration Duration
}
```
### Using the `toml.UnmarshalerRec` interface
You can also override marshaling rules specifically for TOML using the `UnmarshalerRec`
and `MarshalerRec` interfaces. These are useful if you want to control how structs or
arrays are handled. You can apply additional validation or set unexported struct fields.
Note: `encoding.TextUnmarshaler` and `encoding.TextMarshaler` should be preferred for
simple (scalar) values because they're also compatible with other formats like JSON or
YAML.
[See the UnmarshalerRec example](https://godoc.org/github.com/naoina/toml/#example_UnmarshalerRec).
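A condensed sketch of the same pattern (the `Backend` type and its validation
rule are illustrative, not part of the library):

```go
import "errors"

// Backend keeps its field unexported; it is populated through UnmarshalTOML.
type Backend struct {
	addr string
}

// UnmarshalTOML implements toml.UnmarshalerRec. The callback decodes the
// original TOML into an auxiliary struct, which is then validated and copied.
func (b *Backend) UnmarshalTOML(decode func(interface{}) error) error {
	var raw struct{ Addr string }
	if err := decode(&raw); err != nil {
		return err
	}
	if raw.Addr == "" {
		return errors.New("backend: addr is required")
	}
	b.addr = raw.Addr
	return nil
}

// MarshalTOML implements toml.MarshalerRec; the returned value is marshaled
// in place of Backend.
func (b Backend) MarshalTOML() (interface{}, error) {
	return struct{ Addr string }{b.addr}, nil
}
```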
### Using the `toml.Unmarshaler` interface
If you want to deal with raw TOML syntax, use the `Unmarshaler` and `Marshaler`
interfaces. Their input and output is raw TOML syntax. As such, these interfaces are
useful if you want to handle TOML at the syntax level.
[See the Unmarshaler example](https://godoc.org/github.com/naoina/toml/#example_Unmarshaler).
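For example, a type that simply captures the raw TOML source of the value or
table it is mapped to might look like this (illustrative, not part of the
library):

```go
// RawTOML records the raw TOML source of a value or table.
type RawTOML []byte

// UnmarshalTOML implements toml.Unmarshaler. The input is raw TOML source and
// must be copied if it is retained after returning.
func (r *RawTOML) UnmarshalTOML(input []byte) error {
	*r = append((*r)[:0], input...)
	return nil
}
```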
## API documentation
See [Godoc](http://godoc.org/github.com/naoina/toml).
## License
MIT
| 8,530 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/naoina | kubeflow_public_repos/fate-operator/vendor/github.com/naoina/toml/parse.go | package toml
import (
"errors"
"fmt"
"strconv"
"strings"
"github.com/naoina/toml/ast"
)
// The parser is generated by github.com/pointlander/peg. To regenerate it, do:
//
// go get -u github.com/pointlander/peg
// go generate .
//go:generate peg -switch -inline parse.peg
var errParse = errors.New("invalid TOML syntax")
// Parse returns an AST representation of TOML.
// The toplevel is represented by a table.
func Parse(data []byte) (*ast.Table, error) {
d := &parseState{p: &tomlParser{Buffer: string(data)}}
d.init()
if err := d.parse(); err != nil {
return nil, err
}
return d.p.toml.table, nil
}
type parseState struct {
p *tomlParser
}
func (d *parseState) init() {
d.p.Init()
d.p.toml.init(d.p.buffer)
}
func (d *parseState) parse() error {
if err := d.p.Parse(); err != nil {
if err, ok := err.(*parseError); ok {
return lineError(err.Line(), errParse)
}
return err
}
return d.execute()
}
func (d *parseState) execute() (err error) {
defer func() {
if e := recover(); e != nil {
lerr, ok := e.(*LineError)
if !ok {
panic(e)
}
err = lerr
}
}()
d.p.Execute()
return nil
}
func (e *parseError) Line() int {
tokens := []token32{e.max}
positions, p := make([]int, 2*len(tokens)), 0
for _, token := range tokens {
positions[p], p = int(token.begin), p+1
positions[p], p = int(token.end), p+1
}
for _, t := range translatePositions(e.p.buffer, positions) {
if e.p.line < t.line {
e.p.line = t.line
}
}
return e.p.line
}
type stack struct {
key string
table *ast.Table
}
type array struct {
parent *array
child *array
current *ast.Array
line int
}
type toml struct {
table *ast.Table
line int
currentTable *ast.Table
s string
key string
tableKeys []string
val ast.Value
arr *array
stack []*stack
skip bool
}
func (p *toml) init(data []rune) {
p.line = 1
p.table = p.newTable(ast.TableTypeNormal, "")
p.table.Position.End = len(data) - 1
p.table.Data = data[:len(data)-1] // truncate the end_symbol added by PEG parse generator.
p.currentTable = p.table
}
func (p *toml) Error(err error) {
panic(lineError(p.line, err))
}
func (p *tomlParser) SetTime(begin, end int) {
p.val = &ast.Datetime{
Position: ast.Position{Begin: begin, End: end},
Data: p.buffer[begin:end],
Value: string(p.buffer[begin:end]),
}
}
func (p *tomlParser) SetFloat64(begin, end int) {
p.val = &ast.Float{
Position: ast.Position{Begin: begin, End: end},
Data: p.buffer[begin:end],
Value: underscoreReplacer.Replace(string(p.buffer[begin:end])),
}
}
func (p *tomlParser) SetInt64(begin, end int) {
p.val = &ast.Integer{
Position: ast.Position{Begin: begin, End: end},
Data: p.buffer[begin:end],
Value: underscoreReplacer.Replace(string(p.buffer[begin:end])),
}
}
func (p *tomlParser) SetString(begin, end int) {
p.val = &ast.String{
Position: ast.Position{Begin: begin, End: end},
Data: p.buffer[begin:end],
Value: p.s,
}
p.s = ""
}
func (p *tomlParser) SetBool(begin, end int) {
p.val = &ast.Boolean{
Position: ast.Position{Begin: begin, End: end},
Data: p.buffer[begin:end],
Value: string(p.buffer[begin:end]),
}
}
func (p *tomlParser) StartArray() {
if p.arr == nil {
p.arr = &array{line: p.line, current: &ast.Array{}}
return
}
p.arr.child = &array{parent: p.arr, line: p.line, current: &ast.Array{}}
p.arr = p.arr.child
}
func (p *tomlParser) AddArrayVal() {
if p.arr.current == nil {
p.arr.current = &ast.Array{}
}
p.arr.current.Value = append(p.arr.current.Value, p.val)
}
func (p *tomlParser) SetArray(begin, end int) {
p.arr.current.Position = ast.Position{Begin: begin, End: end}
p.arr.current.Data = p.buffer[begin:end]
p.val = p.arr.current
p.arr = p.arr.parent
}
func (p *toml) SetTable(buf []rune, begin, end int) {
rawName := string(buf[begin:end])
p.setTable(p.table, rawName, p.tableKeys)
p.tableKeys = nil
}
func (p *toml) setTable(parent *ast.Table, name string, names []string) {
parent, err := p.lookupTable(parent, names[:len(names)-1])
if err != nil {
p.Error(err)
}
last := names[len(names)-1]
tbl := p.newTable(ast.TableTypeNormal, last)
switch v := parent.Fields[last].(type) {
case nil:
parent.Fields[last] = tbl
case []*ast.Table:
p.Error(fmt.Errorf("table `%s' is in conflict with array table in line %d", name, v[0].Line))
case *ast.Table:
if (v.Position == ast.Position{}) {
// This table was created as an implicit parent.
// Replace it with the real defined table.
tbl.Fields = v.Fields
parent.Fields[last] = tbl
} else {
p.Error(fmt.Errorf("table `%s' is in conflict with table in line %d", name, v.Line))
}
case *ast.KeyValue:
p.Error(fmt.Errorf("table `%s' is in conflict with line %d", name, v.Line))
default:
p.Error(fmt.Errorf("BUG: table `%s' is in conflict but it's unknown type `%T'", last, v))
}
p.currentTable = tbl
}
func (p *toml) newTable(typ ast.TableType, name string) *ast.Table {
return &ast.Table{
Line: p.line,
Name: name,
Type: typ,
Fields: make(map[string]interface{}),
}
}
func (p *tomlParser) SetTableString(begin, end int) {
p.currentTable.Data = p.buffer[begin:end]
p.currentTable.Position.Begin = begin
p.currentTable.Position.End = end
}
func (p *toml) SetArrayTable(buf []rune, begin, end int) {
rawName := string(buf[begin:end])
p.setArrayTable(p.table, rawName, p.tableKeys)
p.tableKeys = nil
}
func (p *toml) setArrayTable(parent *ast.Table, name string, names []string) {
parent, err := p.lookupTable(parent, names[:len(names)-1])
if err != nil {
p.Error(err)
}
last := names[len(names)-1]
tbl := p.newTable(ast.TableTypeArray, last)
switch v := parent.Fields[last].(type) {
case nil:
parent.Fields[last] = []*ast.Table{tbl}
case []*ast.Table:
parent.Fields[last] = append(v, tbl)
case *ast.Table:
p.Error(fmt.Errorf("array table `%s' is in conflict with table in line %d", name, v.Line))
case *ast.KeyValue:
p.Error(fmt.Errorf("array table `%s' is in conflict with line %d", name, v.Line))
default:
p.Error(fmt.Errorf("BUG: array table `%s' is in conflict but it's unknown type `%T'", name, v))
}
p.currentTable = tbl
}
func (p *toml) StartInlineTable() {
p.skip = false
p.stack = append(p.stack, &stack{p.key, p.currentTable})
names := []string{p.key}
if p.arr == nil {
p.setTable(p.currentTable, names[0], names)
} else {
p.setArrayTable(p.currentTable, names[0], names)
}
}
func (p *toml) EndInlineTable() {
st := p.stack[len(p.stack)-1]
p.key, p.currentTable = st.key, st.table
p.stack[len(p.stack)-1] = nil
p.stack = p.stack[:len(p.stack)-1]
p.skip = true
}
func (p *toml) AddLineCount(i int) {
p.line += i
}
func (p *toml) SetKey(buf []rune, begin, end int) {
p.key = string(buf[begin:end])
if len(p.key) > 0 && p.key[0] == '"' {
p.key = p.unquote(p.key)
}
}
func (p *toml) AddTableKey() {
p.tableKeys = append(p.tableKeys, p.key)
}
func (p *toml) AddKeyValue() {
if p.skip {
p.skip = false
return
}
if val, exists := p.currentTable.Fields[p.key]; exists {
switch v := val.(type) {
case *ast.Table:
p.Error(fmt.Errorf("key `%s' is in conflict with table in line %d", p.key, v.Line))
case *ast.KeyValue:
			p.Error(fmt.Errorf("key `%s' is in conflict with line %d", p.key, v.Line))
default:
p.Error(fmt.Errorf("BUG: key `%s' is in conflict but it's unknown type `%T'", p.key, v))
}
}
p.currentTable.Fields[p.key] = &ast.KeyValue{Key: p.key, Value: p.val, Line: p.line}
}
func (p *toml) SetBasicString(buf []rune, begin, end int) {
p.s = p.unquote(string(buf[begin:end]))
}
func (p *toml) SetMultilineString() {
p.s = p.unquote(`"` + escapeReplacer.Replace(strings.TrimLeft(p.s, "\r\n")) + `"`)
}
func (p *toml) AddMultilineBasicBody(buf []rune, begin, end int) {
p.s += string(buf[begin:end])
}
func (p *toml) SetLiteralString(buf []rune, begin, end int) {
p.s = string(buf[begin:end])
}
func (p *toml) SetMultilineLiteralString(buf []rune, begin, end int) {
p.s = strings.TrimLeft(string(buf[begin:end]), "\r\n")
}
func (p *toml) unquote(s string) string {
s, err := strconv.Unquote(s)
if err != nil {
p.Error(err)
}
return s
}
func (p *toml) lookupTable(t *ast.Table, keys []string) (*ast.Table, error) {
for _, s := range keys {
val, exists := t.Fields[s]
if !exists {
tbl := p.newTable(ast.TableTypeNormal, s)
t.Fields[s] = tbl
t = tbl
continue
}
switch v := val.(type) {
case *ast.Table:
t = v
case []*ast.Table:
t = v[len(v)-1]
case *ast.KeyValue:
return nil, fmt.Errorf("key `%s' is in conflict with line %d", s, v.Line)
default:
return nil, fmt.Errorf("BUG: key `%s' is in conflict but it's unknown type `%T'", s, v)
}
}
return t, nil
}
| 8,531 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/naoina | kubeflow_public_repos/fate-operator/vendor/github.com/naoina/toml/util.go | package toml
import (
"fmt"
"reflect"
"strings"
)
const fieldTagName = "toml"
// fieldCache maps normalized field names to their position in a struct.
type fieldCache struct {
named map[string]fieldInfo // fields with an explicit name in tag
auto map[string]fieldInfo // fields with auto-assigned normalized names
}
type fieldInfo struct {
index []int
name string
ignored bool
}
func makeFieldCache(cfg *Config, rt reflect.Type) fieldCache {
named, auto := make(map[string]fieldInfo), make(map[string]fieldInfo)
for i := 0; i < rt.NumField(); i++ {
ft := rt.Field(i)
// skip unexported fields
if ft.PkgPath != "" && !ft.Anonymous {
continue
}
col, _ := extractTag(ft.Tag.Get(fieldTagName))
info := fieldInfo{index: ft.Index, name: ft.Name, ignored: col == "-"}
if col == "" || col == "-" {
auto[cfg.NormFieldName(rt, ft.Name)] = info
} else {
named[col] = info
}
}
return fieldCache{named, auto}
}
func (fc fieldCache) findField(cfg *Config, rv reflect.Value, name string) (reflect.Value, string, error) {
info, found := fc.named[name]
if !found {
info, found = fc.auto[cfg.NormFieldName(rv.Type(), name)]
}
if !found {
if cfg.MissingField == nil {
return reflect.Value{}, "", fmt.Errorf("field corresponding to `%s' is not defined in %v", name, rv.Type())
} else {
return reflect.Value{}, "", cfg.MissingField(rv.Type(), name)
}
} else if info.ignored {
return reflect.Value{}, "", fmt.Errorf("field corresponding to `%s' in %v cannot be set through TOML", name, rv.Type())
}
return rv.FieldByIndex(info.index), info.name, nil
}
func extractTag(tag string) (col, rest string) {
tags := strings.SplitN(tag, ",", 2)
if len(tags) == 2 {
return strings.TrimSpace(tags[0]), strings.TrimSpace(tags[1])
}
return strings.TrimSpace(tags[0]), ""
}
| 8,532 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/naoina | kubeflow_public_repos/fate-operator/vendor/github.com/naoina/toml/LICENSE | Copyright (c) 2014 Naoya Inada <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
| 8,533 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/naoina | kubeflow_public_repos/fate-operator/vendor/github.com/naoina/toml/parse.peg.go | package toml
import (
"fmt"
"math"
"sort"
"strconv"
)
const endSymbol rune = 1114112
/* The rule types inferred from the grammar are below. */
type pegRule uint8
const (
ruleUnknown pegRule = iota
ruleTOML
ruleExpression
rulenewline
rulews
rulewsnl
rulecomment
rulekeyval
rulekey
rulebareKey
rulequotedKey
ruleval
ruletable
rulestdTable
rulearrayTable
ruleinlineTable
ruleinlineTableKeyValues
ruletableKey
ruletableKeyComp
ruletableKeySep
ruleinlineTableValSep
ruleinteger
ruleint
rulefloat
rulefrac
ruleexp
rulestring
rulebasicString
rulebasicChar
ruleescaped
rulebasicUnescaped
ruleescape
rulemlBasicString
rulemlBasicBody
ruleliteralString
ruleliteralChar
rulemlLiteralString
rulemlLiteralBody
rulemlLiteralChar
rulehexdigit
rulehexQuad
ruleboolean
ruledateFullYear
ruledateMonth
ruledateMDay
ruletimeHour
ruletimeMinute
ruletimeSecond
ruletimeSecfrac
ruletimeNumoffset
ruletimeOffset
rulepartialTime
rulefullDate
rulefullTime
ruledatetime
ruledigit
ruledigitDual
ruledigitQuad
rulearray
rulearrayValues
rulearraySep
ruleAction0
rulePegText
ruleAction1
ruleAction2
ruleAction3
ruleAction4
ruleAction5
ruleAction6
ruleAction7
ruleAction8
ruleAction9
ruleAction10
ruleAction11
ruleAction12
ruleAction13
ruleAction14
ruleAction15
ruleAction16
ruleAction17
ruleAction18
ruleAction19
ruleAction20
ruleAction21
ruleAction22
ruleAction23
ruleAction24
ruleAction25
)
var rul3s = [...]string{
"Unknown",
"TOML",
"Expression",
"newline",
"ws",
"wsnl",
"comment",
"keyval",
"key",
"bareKey",
"quotedKey",
"val",
"table",
"stdTable",
"arrayTable",
"inlineTable",
"inlineTableKeyValues",
"tableKey",
"tableKeyComp",
"tableKeySep",
"inlineTableValSep",
"integer",
"int",
"float",
"frac",
"exp",
"string",
"basicString",
"basicChar",
"escaped",
"basicUnescaped",
"escape",
"mlBasicString",
"mlBasicBody",
"literalString",
"literalChar",
"mlLiteralString",
"mlLiteralBody",
"mlLiteralChar",
"hexdigit",
"hexQuad",
"boolean",
"dateFullYear",
"dateMonth",
"dateMDay",
"timeHour",
"timeMinute",
"timeSecond",
"timeSecfrac",
"timeNumoffset",
"timeOffset",
"partialTime",
"fullDate",
"fullTime",
"datetime",
"digit",
"digitDual",
"digitQuad",
"array",
"arrayValues",
"arraySep",
"Action0",
"PegText",
"Action1",
"Action2",
"Action3",
"Action4",
"Action5",
"Action6",
"Action7",
"Action8",
"Action9",
"Action10",
"Action11",
"Action12",
"Action13",
"Action14",
"Action15",
"Action16",
"Action17",
"Action18",
"Action19",
"Action20",
"Action21",
"Action22",
"Action23",
"Action24",
"Action25",
}
type token32 struct {
pegRule
begin, end uint32
}
func (t *token32) String() string {
return fmt.Sprintf("\x1B[34m%v\x1B[m %v %v", rul3s[t.pegRule], t.begin, t.end)
}
type node32 struct {
token32
up, next *node32
}
func (node *node32) print(pretty bool, buffer string) {
var print func(node *node32, depth int)
print = func(node *node32, depth int) {
for node != nil {
for c := 0; c < depth; c++ {
fmt.Printf(" ")
}
rule := rul3s[node.pegRule]
quote := strconv.Quote(string(([]rune(buffer)[node.begin:node.end])))
if !pretty {
fmt.Printf("%v %v\n", rule, quote)
} else {
fmt.Printf("\x1B[34m%v\x1B[m %v\n", rule, quote)
}
if node.up != nil {
print(node.up, depth+1)
}
node = node.next
}
}
print(node, 0)
}
func (node *node32) Print(buffer string) {
node.print(false, buffer)
}
func (node *node32) PrettyPrint(buffer string) {
node.print(true, buffer)
}
type tokens32 struct {
tree []token32
}
func (t *tokens32) Trim(length uint32) {
t.tree = t.tree[:length]
}
func (t *tokens32) Print() {
for _, token := range t.tree {
fmt.Println(token.String())
}
}
func (t *tokens32) AST() *node32 {
type element struct {
node *node32
down *element
}
tokens := t.Tokens()
var stack *element
for _, token := range tokens {
if token.begin == token.end {
continue
}
node := &node32{token32: token}
for stack != nil && stack.node.begin >= token.begin && stack.node.end <= token.end {
stack.node.next = node.up
node.up = stack.node
stack = stack.down
}
stack = &element{node: node, down: stack}
}
if stack != nil {
return stack.node
}
return nil
}
func (t *tokens32) PrintSyntaxTree(buffer string) {
t.AST().Print(buffer)
}
func (t *tokens32) PrettyPrintSyntaxTree(buffer string) {
t.AST().PrettyPrint(buffer)
}
func (t *tokens32) Add(rule pegRule, begin, end, index uint32) {
if tree := t.tree; int(index) >= len(tree) {
expanded := make([]token32, 2*len(tree))
copy(expanded, tree)
t.tree = expanded
}
t.tree[index] = token32{
pegRule: rule,
begin: begin,
end: end,
}
}
func (t *tokens32) Tokens() []token32 {
return t.tree
}
type tomlParser struct {
toml
Buffer string
buffer []rune
rules [88]func() bool
parse func(rule ...int) error
reset func()
Pretty bool
tokens32
}
func (p *tomlParser) Parse(rule ...int) error {
return p.parse(rule...)
}
func (p *tomlParser) Reset() {
p.reset()
}
type textPosition struct {
line, symbol int
}
type textPositionMap map[int]textPosition
func translatePositions(buffer []rune, positions []int) textPositionMap {
length, translations, j, line, symbol := len(positions), make(textPositionMap, len(positions)), 0, 1, 0
sort.Ints(positions)
search:
for i, c := range buffer {
if c == '\n' {
line, symbol = line+1, 0
} else {
symbol++
}
if i == positions[j] {
translations[positions[j]] = textPosition{line, symbol}
for j++; j < length; j++ {
if i != positions[j] {
continue search
}
}
break search
}
}
return translations
}
type parseError struct {
p *tomlParser
max token32
}
func (e *parseError) Error() string {
tokens, error := []token32{e.max}, "\n"
positions, p := make([]int, 2*len(tokens)), 0
for _, token := range tokens {
positions[p], p = int(token.begin), p+1
positions[p], p = int(token.end), p+1
}
translations := translatePositions(e.p.buffer, positions)
format := "parse error near %v (line %v symbol %v - line %v symbol %v):\n%v\n"
if e.p.Pretty {
format = "parse error near \x1B[34m%v\x1B[m (line %v symbol %v - line %v symbol %v):\n%v\n"
}
for _, token := range tokens {
begin, end := int(token.begin), int(token.end)
error += fmt.Sprintf(format,
rul3s[token.pegRule],
translations[begin].line, translations[begin].symbol,
translations[end].line, translations[end].symbol,
strconv.Quote(string(e.p.buffer[begin:end])))
}
return error
}
func (p *tomlParser) PrintSyntaxTree() {
if p.Pretty {
p.tokens32.PrettyPrintSyntaxTree(p.Buffer)
} else {
p.tokens32.PrintSyntaxTree(p.Buffer)
}
}
func (p *tomlParser) Execute() {
buffer, _buffer, text, begin, end := p.Buffer, p.buffer, "", 0, 0
for _, token := range p.Tokens() {
switch token.pegRule {
case rulePegText:
begin, end = int(token.begin), int(token.end)
text = string(_buffer[begin:end])
case ruleAction0:
_ = buffer
case ruleAction1:
p.SetTableString(begin, end)
case ruleAction2:
p.AddLineCount(end - begin)
case ruleAction3:
p.AddLineCount(end - begin)
case ruleAction4:
p.AddKeyValue()
case ruleAction5:
p.SetKey(p.buffer, begin, end)
case ruleAction6:
p.SetKey(p.buffer, begin, end)
case ruleAction7:
p.SetTime(begin, end)
case ruleAction8:
p.SetFloat64(begin, end)
case ruleAction9:
p.SetInt64(begin, end)
case ruleAction10:
p.SetString(begin, end)
case ruleAction11:
p.SetBool(begin, end)
case ruleAction12:
p.SetArray(begin, end)
case ruleAction13:
p.SetTable(p.buffer, begin, end)
case ruleAction14:
p.SetArrayTable(p.buffer, begin, end)
case ruleAction15:
p.StartInlineTable()
case ruleAction16:
p.EndInlineTable()
case ruleAction17:
p.AddTableKey()
case ruleAction18:
p.SetBasicString(p.buffer, begin, end)
case ruleAction19:
p.SetMultilineString()
case ruleAction20:
p.AddMultilineBasicBody(p.buffer, begin, end)
case ruleAction21:
p.SetLiteralString(p.buffer, begin, end)
case ruleAction22:
p.SetMultilineLiteralString(p.buffer, begin, end)
case ruleAction23:
p.StartArray()
case ruleAction24:
p.AddArrayVal()
case ruleAction25:
p.AddArrayVal()
}
}
_, _, _, _, _ = buffer, _buffer, text, begin, end
}
func (p *tomlParser) Init() {
var (
max token32
position, tokenIndex uint32
buffer []rune
)
p.reset = func() {
max = token32{}
position, tokenIndex = 0, 0
p.buffer = []rune(p.Buffer)
if len(p.buffer) == 0 || p.buffer[len(p.buffer)-1] != endSymbol {
p.buffer = append(p.buffer, endSymbol)
}
buffer = p.buffer
}
p.reset()
_rules := p.rules
tree := tokens32{tree: make([]token32, math.MaxInt16)}
p.parse = func(rule ...int) error {
r := 1
if len(rule) > 0 {
r = rule[0]
}
matches := p.rules[r]()
p.tokens32 = tree
if matches {
p.Trim(tokenIndex)
return nil
}
return &parseError{p, max}
}
add := func(rule pegRule, begin uint32) {
tree.Add(rule, begin, position, tokenIndex)
tokenIndex++
if begin != position && position > max.end {
max = token32{rule, begin, position}
}
}
matchDot := func() bool {
if buffer[position] != endSymbol {
position++
return true
}
return false
}
/*matchChar := func(c byte) bool {
if buffer[position] == c {
position++
return true
}
return false
}*/
/*matchRange := func(lower byte, upper byte) bool {
if c := buffer[position]; c >= lower && c <= upper {
position++
return true
}
return false
}*/
_rules = [...]func() bool{
nil,
/* 0 TOML <- <(Expression (newline Expression)* newline? !. Action0)> */
func() bool {
position0, tokenIndex0 := position, tokenIndex
{
position1 := position
if !_rules[ruleExpression]() {
goto l0
}
l2:
{
position3, tokenIndex3 := position, tokenIndex
if !_rules[rulenewline]() {
goto l3
}
if !_rules[ruleExpression]() {
goto l3
}
goto l2
l3:
position, tokenIndex = position3, tokenIndex3
}
{
position4, tokenIndex4 := position, tokenIndex
if !_rules[rulenewline]() {
goto l4
}
goto l5
l4:
position, tokenIndex = position4, tokenIndex4
}
l5:
{
position6, tokenIndex6 := position, tokenIndex
if !matchDot() {
goto l6
}
goto l0
l6:
position, tokenIndex = position6, tokenIndex6
}
{
add(ruleAction0, position)
}
add(ruleTOML, position1)
}
return true
l0:
position, tokenIndex = position0, tokenIndex0
return false
},
/* 1 Expression <- <((<(ws table ws comment? (wsnl keyval ws comment?)*)> Action1) / (ws keyval ws comment?) / (ws comment?) / ws)> */
func() bool {
position8, tokenIndex8 := position, tokenIndex
{
position9 := position
{
position10, tokenIndex10 := position, tokenIndex
{
position12 := position
if !_rules[rulews]() {
goto l11
}
{
position13 := position
{
position14, tokenIndex14 := position, tokenIndex
{
position16 := position
if buffer[position] != rune('[') {
goto l15
}
position++
if !_rules[rulews]() {
goto l15
}
{
position17 := position
if !_rules[ruletableKey]() {
goto l15
}
add(rulePegText, position17)
}
if !_rules[rulews]() {
goto l15
}
if buffer[position] != rune(']') {
goto l15
}
position++
{
add(ruleAction13, position)
}
add(rulestdTable, position16)
}
goto l14
l15:
position, tokenIndex = position14, tokenIndex14
{
position19 := position
if buffer[position] != rune('[') {
goto l11
}
position++
if buffer[position] != rune('[') {
goto l11
}
position++
if !_rules[rulews]() {
goto l11
}
{
position20 := position
if !_rules[ruletableKey]() {
goto l11
}
add(rulePegText, position20)
}
if !_rules[rulews]() {
goto l11
}
if buffer[position] != rune(']') {
goto l11
}
position++
if buffer[position] != rune(']') {
goto l11
}
position++
{
add(ruleAction14, position)
}
add(rulearrayTable, position19)
}
}
l14:
add(ruletable, position13)
}
if !_rules[rulews]() {
goto l11
}
{
position22, tokenIndex22 := position, tokenIndex
if !_rules[rulecomment]() {
goto l22
}
goto l23
l22:
position, tokenIndex = position22, tokenIndex22
}
l23:
l24:
{
position25, tokenIndex25 := position, tokenIndex
if !_rules[rulewsnl]() {
goto l25
}
if !_rules[rulekeyval]() {
goto l25
}
if !_rules[rulews]() {
goto l25
}
{
position26, tokenIndex26 := position, tokenIndex
if !_rules[rulecomment]() {
goto l26
}
goto l27
l26:
position, tokenIndex = position26, tokenIndex26
}
l27:
goto l24
l25:
position, tokenIndex = position25, tokenIndex25
}
add(rulePegText, position12)
}
{
add(ruleAction1, position)
}
goto l10
l11:
position, tokenIndex = position10, tokenIndex10
if !_rules[rulews]() {
goto l29
}
if !_rules[rulekeyval]() {
goto l29
}
if !_rules[rulews]() {
goto l29
}
{
position30, tokenIndex30 := position, tokenIndex
if !_rules[rulecomment]() {
goto l30
}
goto l31
l30:
position, tokenIndex = position30, tokenIndex30
}
l31:
goto l10
l29:
position, tokenIndex = position10, tokenIndex10
if !_rules[rulews]() {
goto l32
}
{
position33, tokenIndex33 := position, tokenIndex
if !_rules[rulecomment]() {
goto l33
}
goto l34
l33:
position, tokenIndex = position33, tokenIndex33
}
l34:
goto l10
l32:
position, tokenIndex = position10, tokenIndex10
if !_rules[rulews]() {
goto l8
}
}
l10:
add(ruleExpression, position9)
}
return true
l8:
position, tokenIndex = position8, tokenIndex8
return false
},
/* 2 newline <- <(<('\r' / '\n')+> Action2)> */
func() bool {
position35, tokenIndex35 := position, tokenIndex
{
position36 := position
{
position37 := position
{
position40, tokenIndex40 := position, tokenIndex
if buffer[position] != rune('\r') {
goto l41
}
position++
goto l40
l41:
position, tokenIndex = position40, tokenIndex40
if buffer[position] != rune('\n') {
goto l35
}
position++
}
l40:
l38:
{
position39, tokenIndex39 := position, tokenIndex
{
position42, tokenIndex42 := position, tokenIndex
if buffer[position] != rune('\r') {
goto l43
}
position++
goto l42
l43:
position, tokenIndex = position42, tokenIndex42
if buffer[position] != rune('\n') {
goto l39
}
position++
}
l42:
goto l38
l39:
position, tokenIndex = position39, tokenIndex39
}
add(rulePegText, position37)
}
{
add(ruleAction2, position)
}
add(rulenewline, position36)
}
return true
l35:
position, tokenIndex = position35, tokenIndex35
return false
},
/* 3 ws <- <(' ' / '\t')*> */
func() bool {
{
position46 := position
l47:
{
position48, tokenIndex48 := position, tokenIndex
{
position49, tokenIndex49 := position, tokenIndex
if buffer[position] != rune(' ') {
goto l50
}
position++
goto l49
l50:
position, tokenIndex = position49, tokenIndex49
if buffer[position] != rune('\t') {
goto l48
}
position++
}
l49:
goto l47
l48:
position, tokenIndex = position48, tokenIndex48
}
add(rulews, position46)
}
return true
},
/* 4 wsnl <- <((&('\t') '\t') | (&(' ') ' ') | (&('\n' | '\r') (<('\r' / '\n')> Action3)))*> */
func() bool {
{
position52 := position
l53:
{
position54, tokenIndex54 := position, tokenIndex
{
switch buffer[position] {
case '\t':
if buffer[position] != rune('\t') {
goto l54
}
position++
break
case ' ':
if buffer[position] != rune(' ') {
goto l54
}
position++
break
default:
{
position56 := position
{
position57, tokenIndex57 := position, tokenIndex
if buffer[position] != rune('\r') {
goto l58
}
position++
goto l57
l58:
position, tokenIndex = position57, tokenIndex57
if buffer[position] != rune('\n') {
goto l54
}
position++
}
l57:
add(rulePegText, position56)
}
{
add(ruleAction3, position)
}
break
}
}
goto l53
l54:
position, tokenIndex = position54, tokenIndex54
}
add(rulewsnl, position52)
}
return true
},
/* 5 comment <- <('#' <('\t' / [ -\U0010ffff])*>)> */
func() bool {
position60, tokenIndex60 := position, tokenIndex
{
position61 := position
if buffer[position] != rune('#') {
goto l60
}
position++
{
position62 := position
l63:
{
position64, tokenIndex64 := position, tokenIndex
{
position65, tokenIndex65 := position, tokenIndex
if buffer[position] != rune('\t') {
goto l66
}
position++
goto l65
l66:
position, tokenIndex = position65, tokenIndex65
if c := buffer[position]; c < rune(' ') || c > rune('\U0010ffff') {
goto l64
}
position++
}
l65:
goto l63
l64:
position, tokenIndex = position64, tokenIndex64
}
add(rulePegText, position62)
}
add(rulecomment, position61)
}
return true
l60:
position, tokenIndex = position60, tokenIndex60
return false
},
/* 6 keyval <- <(key ws '=' ws val Action4)> */
func() bool {
position67, tokenIndex67 := position, tokenIndex
{
position68 := position
if !_rules[rulekey]() {
goto l67
}
if !_rules[rulews]() {
goto l67
}
if buffer[position] != rune('=') {
goto l67
}
position++
if !_rules[rulews]() {
goto l67
}
if !_rules[ruleval]() {
goto l67
}
{
add(ruleAction4, position)
}
add(rulekeyval, position68)
}
return true
l67:
position, tokenIndex = position67, tokenIndex67
return false
},
/* 7 key <- <(bareKey / quotedKey)> */
func() bool {
position70, tokenIndex70 := position, tokenIndex
{
position71 := position
{
position72, tokenIndex72 := position, tokenIndex
{
position74 := position
{
position75 := position
{
switch buffer[position] {
case '_':
if buffer[position] != rune('_') {
goto l73
}
position++
break
case '-':
if buffer[position] != rune('-') {
goto l73
}
position++
break
case 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z':
if c := buffer[position]; c < rune('a') || c > rune('z') {
goto l73
}
position++
break
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
if c := buffer[position]; c < rune('0') || c > rune('9') {
goto l73
}
position++
break
default:
if c := buffer[position]; c < rune('A') || c > rune('Z') {
goto l73
}
position++
break
}
}
l76:
{
position77, tokenIndex77 := position, tokenIndex
{
switch buffer[position] {
case '_':
if buffer[position] != rune('_') {
goto l77
}
position++
break
case '-':
if buffer[position] != rune('-') {
goto l77
}
position++
break
case 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z':
if c := buffer[position]; c < rune('a') || c > rune('z') {
goto l77
}
position++
break
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
if c := buffer[position]; c < rune('0') || c > rune('9') {
goto l77
}
position++
break
default:
if c := buffer[position]; c < rune('A') || c > rune('Z') {
goto l77
}
position++
break
}
}
goto l76
l77:
position, tokenIndex = position77, tokenIndex77
}
add(rulePegText, position75)
}
{
add(ruleAction5, position)
}
add(rulebareKey, position74)
}
goto l72
l73:
position, tokenIndex = position72, tokenIndex72
{
position81 := position
{
position82 := position
if buffer[position] != rune('"') {
goto l70
}
position++
l83:
{
position84, tokenIndex84 := position, tokenIndex
if !_rules[rulebasicChar]() {
goto l84
}
goto l83
l84:
position, tokenIndex = position84, tokenIndex84
}
if buffer[position] != rune('"') {
goto l70
}
position++
add(rulePegText, position82)
}
{
add(ruleAction6, position)
}
add(rulequotedKey, position81)
}
}
l72:
add(rulekey, position71)
}
return true
l70:
position, tokenIndex = position70, tokenIndex70
return false
},
/* 8 bareKey <- <(<((&('_') '_') | (&('-') '-') | (&('a' | 'b' | 'c' | 'd' | 'e' | 'f' | 'g' | 'h' | 'i' | 'j' | 'k' | 'l' | 'm' | 'n' | 'o' | 'p' | 'q' | 'r' | 's' | 't' | 'u' | 'v' | 'w' | 'x' | 'y' | 'z') [a-z]) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') [0-9]) | (&('A' | 'B' | 'C' | 'D' | 'E' | 'F' | 'G' | 'H' | 'I' | 'J' | 'K' | 'L' | 'M' | 'N' | 'O' | 'P' | 'Q' | 'R' | 'S' | 'T' | 'U' | 'V' | 'W' | 'X' | 'Y' | 'Z') [A-Z]))+> Action5)> */
nil,
/* 9 quotedKey <- <(<('"' basicChar* '"')> Action6)> */
nil,
/* 10 val <- <((<datetime> Action7) / (<float> Action8) / ((&('{') inlineTable) | (&('[') (<array> Action12)) | (&('f' | 't') (<boolean> Action11)) | (&('"' | '\'') (<string> Action10)) | (&('+' | '-' | '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') (<integer> Action9))))> */
func() bool {
position88, tokenIndex88 := position, tokenIndex
{
position89 := position
{
position90, tokenIndex90 := position, tokenIndex
{
position92 := position
{
position93 := position
{
position94, tokenIndex94 := position, tokenIndex
{
position96 := position
{
position97 := position
{
position98 := position
if !_rules[ruledigitDual]() {
goto l95
}
if !_rules[ruledigitDual]() {
goto l95
}
add(ruledigitQuad, position98)
}
add(ruledateFullYear, position97)
}
if buffer[position] != rune('-') {
goto l95
}
position++
{
position99 := position
if !_rules[ruledigitDual]() {
goto l95
}
add(ruledateMonth, position99)
}
if buffer[position] != rune('-') {
goto l95
}
position++
{
position100 := position
if !_rules[ruledigitDual]() {
goto l95
}
add(ruledateMDay, position100)
}
add(rulefullDate, position96)
}
{
position101, tokenIndex101 := position, tokenIndex
if buffer[position] != rune('T') {
goto l101
}
position++
{
position103 := position
if !_rules[rulepartialTime]() {
goto l101
}
{
position104 := position
{
position105, tokenIndex105 := position, tokenIndex
if buffer[position] != rune('Z') {
goto l106
}
position++
goto l105
l106:
position, tokenIndex = position105, tokenIndex105
{
position107 := position
{
position108, tokenIndex108 := position, tokenIndex
if buffer[position] != rune('-') {
goto l109
}
position++
goto l108
l109:
position, tokenIndex = position108, tokenIndex108
if buffer[position] != rune('+') {
goto l101
}
position++
}
l108:
if !_rules[ruletimeHour]() {
goto l101
}
if buffer[position] != rune(':') {
goto l101
}
position++
if !_rules[ruletimeMinute]() {
goto l101
}
add(ruletimeNumoffset, position107)
}
}
l105:
add(ruletimeOffset, position104)
}
add(rulefullTime, position103)
}
goto l102
l101:
position, tokenIndex = position101, tokenIndex101
}
l102:
goto l94
l95:
position, tokenIndex = position94, tokenIndex94
if !_rules[rulepartialTime]() {
goto l91
}
}
l94:
add(ruledatetime, position93)
}
add(rulePegText, position92)
}
{
add(ruleAction7, position)
}
goto l90
l91:
position, tokenIndex = position90, tokenIndex90
{
position112 := position
{
position113 := position
if !_rules[ruleinteger]() {
goto l111
}
{
position114, tokenIndex114 := position, tokenIndex
if !_rules[rulefrac]() {
goto l115
}
{
position116, tokenIndex116 := position, tokenIndex
if !_rules[ruleexp]() {
goto l116
}
goto l117
l116:
position, tokenIndex = position116, tokenIndex116
}
l117:
goto l114
l115:
position, tokenIndex = position114, tokenIndex114
{
position118, tokenIndex118 := position, tokenIndex
if !_rules[rulefrac]() {
goto l118
}
goto l119
l118:
position, tokenIndex = position118, tokenIndex118
}
l119:
if !_rules[ruleexp]() {
goto l111
}
}
l114:
add(rulefloat, position113)
}
add(rulePegText, position112)
}
{
add(ruleAction8, position)
}
goto l90
l111:
position, tokenIndex = position90, tokenIndex90
{
switch buffer[position] {
case '{':
{
position122 := position
if buffer[position] != rune('{') {
goto l88
}
position++
{
add(ruleAction15, position)
}
if !_rules[rulews]() {
goto l88
}
{
position124 := position
l125:
{
position126, tokenIndex126 := position, tokenIndex
if !_rules[rulekeyval]() {
goto l126
}
{
position127, tokenIndex127 := position, tokenIndex
{
position129 := position
if !_rules[rulews]() {
goto l127
}
if buffer[position] != rune(',') {
goto l127
}
position++
if !_rules[rulews]() {
goto l127
}
add(ruleinlineTableValSep, position129)
}
goto l128
l127:
position, tokenIndex = position127, tokenIndex127
}
l128:
goto l125
l126:
position, tokenIndex = position126, tokenIndex126
}
add(ruleinlineTableKeyValues, position124)
}
if !_rules[rulews]() {
goto l88
}
if buffer[position] != rune('}') {
goto l88
}
position++
{
add(ruleAction16, position)
}
add(ruleinlineTable, position122)
}
break
case '[':
{
position131 := position
{
position132 := position
if buffer[position] != rune('[') {
goto l88
}
position++
{
add(ruleAction23, position)
}
if !_rules[rulewsnl]() {
goto l88
}
{
position134, tokenIndex134 := position, tokenIndex
{
position136 := position
if !_rules[ruleval]() {
goto l134
}
{
add(ruleAction24, position)
}
l138:
{
position139, tokenIndex139 := position, tokenIndex
if !_rules[rulewsnl]() {
goto l139
}
{
position140, tokenIndex140 := position, tokenIndex
if !_rules[rulecomment]() {
goto l140
}
goto l141
l140:
position, tokenIndex = position140, tokenIndex140
}
l141:
if !_rules[rulewsnl]() {
goto l139
}
if !_rules[rulearraySep]() {
goto l139
}
if !_rules[rulewsnl]() {
goto l139
}
{
position142, tokenIndex142 := position, tokenIndex
if !_rules[rulecomment]() {
goto l142
}
goto l143
l142:
position, tokenIndex = position142, tokenIndex142
}
l143:
if !_rules[rulewsnl]() {
goto l139
}
if !_rules[ruleval]() {
goto l139
}
{
add(ruleAction25, position)
}
goto l138
l139:
position, tokenIndex = position139, tokenIndex139
}
if !_rules[rulewsnl]() {
goto l134
}
{
position145, tokenIndex145 := position, tokenIndex
if !_rules[rulearraySep]() {
goto l145
}
goto l146
l145:
position, tokenIndex = position145, tokenIndex145
}
l146:
if !_rules[rulewsnl]() {
goto l134
}
{
position147, tokenIndex147 := position, tokenIndex
if !_rules[rulecomment]() {
goto l147
}
goto l148
l147:
position, tokenIndex = position147, tokenIndex147
}
l148:
add(rulearrayValues, position136)
}
goto l135
l134:
position, tokenIndex = position134, tokenIndex134
}
l135:
if !_rules[rulewsnl]() {
goto l88
}
if buffer[position] != rune(']') {
goto l88
}
position++
add(rulearray, position132)
}
add(rulePegText, position131)
}
{
add(ruleAction12, position)
}
break
case 'f', 't':
{
position150 := position
{
position151 := position
{
position152, tokenIndex152 := position, tokenIndex
if buffer[position] != rune('t') {
goto l153
}
position++
if buffer[position] != rune('r') {
goto l153
}
position++
if buffer[position] != rune('u') {
goto l153
}
position++
if buffer[position] != rune('e') {
goto l153
}
position++
goto l152
l153:
position, tokenIndex = position152, tokenIndex152
if buffer[position] != rune('f') {
goto l88
}
position++
if buffer[position] != rune('a') {
goto l88
}
position++
if buffer[position] != rune('l') {
goto l88
}
position++
if buffer[position] != rune('s') {
goto l88
}
position++
if buffer[position] != rune('e') {
goto l88
}
position++
}
l152:
add(ruleboolean, position151)
}
add(rulePegText, position150)
}
{
add(ruleAction11, position)
}
break
case '"', '\'':
{
position155 := position
{
position156 := position
{
position157, tokenIndex157 := position, tokenIndex
{
position159 := position
if buffer[position] != rune('\'') {
goto l158
}
position++
if buffer[position] != rune('\'') {
goto l158
}
position++
if buffer[position] != rune('\'') {
goto l158
}
position++
{
position160 := position
{
position161 := position
l162:
{
position163, tokenIndex163 := position, tokenIndex
{
position164, tokenIndex164 := position, tokenIndex
if buffer[position] != rune('\'') {
goto l164
}
position++
if buffer[position] != rune('\'') {
goto l164
}
position++
if buffer[position] != rune('\'') {
goto l164
}
position++
goto l163
l164:
position, tokenIndex = position164, tokenIndex164
}
{
position165, tokenIndex165 := position, tokenIndex
{
position167 := position
{
position168, tokenIndex168 := position, tokenIndex
if buffer[position] != rune('\t') {
goto l169
}
position++
goto l168
l169:
position, tokenIndex = position168, tokenIndex168
if c := buffer[position]; c < rune(' ') || c > rune('\U0010ffff') {
goto l166
}
position++
}
l168:
add(rulemlLiteralChar, position167)
}
goto l165
l166:
position, tokenIndex = position165, tokenIndex165
if !_rules[rulenewline]() {
goto l163
}
}
l165:
goto l162
l163:
position, tokenIndex = position163, tokenIndex163
}
add(rulemlLiteralBody, position161)
}
add(rulePegText, position160)
}
if buffer[position] != rune('\'') {
goto l158
}
position++
if buffer[position] != rune('\'') {
goto l158
}
position++
if buffer[position] != rune('\'') {
goto l158
}
position++
{
add(ruleAction22, position)
}
add(rulemlLiteralString, position159)
}
goto l157
l158:
position, tokenIndex = position157, tokenIndex157
{
position172 := position
if buffer[position] != rune('\'') {
goto l171
}
position++
{
position173 := position
l174:
{
position175, tokenIndex175 := position, tokenIndex
{
position176 := position
{
switch buffer[position] {
case '\t':
if buffer[position] != rune('\t') {
goto l175
}
position++
break
case ' ', '!', '"', '#', '$', '%', '&':
if c := buffer[position]; c < rune(' ') || c > rune('&') {
goto l175
}
position++
break
default:
if c := buffer[position]; c < rune('(') || c > rune('\U0010ffff') {
goto l175
}
position++
break
}
}
add(ruleliteralChar, position176)
}
goto l174
l175:
position, tokenIndex = position175, tokenIndex175
}
add(rulePegText, position173)
}
if buffer[position] != rune('\'') {
goto l171
}
position++
{
add(ruleAction21, position)
}
add(ruleliteralString, position172)
}
goto l157
l171:
position, tokenIndex = position157, tokenIndex157
{
position180 := position
if buffer[position] != rune('"') {
goto l179
}
position++
if buffer[position] != rune('"') {
goto l179
}
position++
if buffer[position] != rune('"') {
goto l179
}
position++
{
position181 := position
l182:
{
position183, tokenIndex183 := position, tokenIndex
{
position184, tokenIndex184 := position, tokenIndex
{
position186 := position
{
position187, tokenIndex187 := position, tokenIndex
if !_rules[rulebasicChar]() {
goto l188
}
goto l187
l188:
position, tokenIndex = position187, tokenIndex187
if !_rules[rulenewline]() {
goto l185
}
}
l187:
add(rulePegText, position186)
}
{
add(ruleAction20, position)
}
goto l184
l185:
position, tokenIndex = position184, tokenIndex184
if !_rules[ruleescape]() {
goto l183
}
if !_rules[rulenewline]() {
goto l183
}
if !_rules[rulewsnl]() {
goto l183
}
}
l184:
goto l182
l183:
position, tokenIndex = position183, tokenIndex183
}
add(rulemlBasicBody, position181)
}
if buffer[position] != rune('"') {
goto l179
}
position++
if buffer[position] != rune('"') {
goto l179
}
position++
if buffer[position] != rune('"') {
goto l179
}
position++
{
add(ruleAction19, position)
}
add(rulemlBasicString, position180)
}
goto l157
l179:
position, tokenIndex = position157, tokenIndex157
{
position191 := position
{
position192 := position
if buffer[position] != rune('"') {
goto l88
}
position++
l193:
{
position194, tokenIndex194 := position, tokenIndex
if !_rules[rulebasicChar]() {
goto l194
}
goto l193
l194:
position, tokenIndex = position194, tokenIndex194
}
if buffer[position] != rune('"') {
goto l88
}
position++
add(rulePegText, position192)
}
{
add(ruleAction18, position)
}
add(rulebasicString, position191)
}
}
l157:
add(rulestring, position156)
}
add(rulePegText, position155)
}
{
add(ruleAction10, position)
}
break
default:
{
position197 := position
if !_rules[ruleinteger]() {
goto l88
}
add(rulePegText, position197)
}
{
add(ruleAction9, position)
}
break
}
}
}
l90:
add(ruleval, position89)
}
return true
l88:
position, tokenIndex = position88, tokenIndex88
return false
},
/* 11 table <- <(stdTable / arrayTable)> */
nil,
/* 12 stdTable <- <('[' ws <tableKey> ws ']' Action13)> */
nil,
/* 13 arrayTable <- <('[' '[' ws <tableKey> ws (']' ']') Action14)> */
nil,
/* 14 inlineTable <- <('{' Action15 ws inlineTableKeyValues ws '}' Action16)> */
nil,
/* 15 inlineTableKeyValues <- <(keyval inlineTableValSep?)*> */
nil,
/* 16 tableKey <- <(tableKeyComp (tableKeySep tableKeyComp)*)> */
func() bool {
position204, tokenIndex204 := position, tokenIndex
{
position205 := position
if !_rules[ruletableKeyComp]() {
goto l204
}
l206:
{
position207, tokenIndex207 := position, tokenIndex
{
position208 := position
if !_rules[rulews]() {
goto l207
}
if buffer[position] != rune('.') {
goto l207
}
position++
if !_rules[rulews]() {
goto l207
}
add(ruletableKeySep, position208)
}
if !_rules[ruletableKeyComp]() {
goto l207
}
goto l206
l207:
position, tokenIndex = position207, tokenIndex207
}
add(ruletableKey, position205)
}
return true
l204:
position, tokenIndex = position204, tokenIndex204
return false
},
/* 17 tableKeyComp <- <(key Action17)> */
func() bool {
position209, tokenIndex209 := position, tokenIndex
{
position210 := position
if !_rules[rulekey]() {
goto l209
}
{
add(ruleAction17, position)
}
add(ruletableKeyComp, position210)
}
return true
l209:
position, tokenIndex = position209, tokenIndex209
return false
},
/* 18 tableKeySep <- <(ws '.' ws)> */
nil,
/* 19 inlineTableValSep <- <(ws ',' ws)> */
nil,
/* 20 integer <- <(('-' / '+')? int)> */
func() bool {
position214, tokenIndex214 := position, tokenIndex
{
position215 := position
{
position216, tokenIndex216 := position, tokenIndex
{
position218, tokenIndex218 := position, tokenIndex
if buffer[position] != rune('-') {
goto l219
}
position++
goto l218
l219:
position, tokenIndex = position218, tokenIndex218
if buffer[position] != rune('+') {
goto l216
}
position++
}
l218:
goto l217
l216:
position, tokenIndex = position216, tokenIndex216
}
l217:
{
position220 := position
{
position221, tokenIndex221 := position, tokenIndex
if c := buffer[position]; c < rune('1') || c > rune('9') {
goto l222
}
position++
{
position225, tokenIndex225 := position, tokenIndex
if !_rules[ruledigit]() {
goto l226
}
goto l225
l226:
position, tokenIndex = position225, tokenIndex225
if buffer[position] != rune('_') {
goto l222
}
position++
if !_rules[ruledigit]() {
goto l222
}
}
l225:
l223:
{
position224, tokenIndex224 := position, tokenIndex
{
position227, tokenIndex227 := position, tokenIndex
if !_rules[ruledigit]() {
goto l228
}
goto l227
l228:
position, tokenIndex = position227, tokenIndex227
if buffer[position] != rune('_') {
goto l224
}
position++
if !_rules[ruledigit]() {
goto l224
}
}
l227:
goto l223
l224:
position, tokenIndex = position224, tokenIndex224
}
goto l221
l222:
position, tokenIndex = position221, tokenIndex221
if !_rules[ruledigit]() {
goto l214
}
}
l221:
add(ruleint, position220)
}
add(ruleinteger, position215)
}
return true
l214:
position, tokenIndex = position214, tokenIndex214
return false
},
/* 21 int <- <(([1-9] (digit / ('_' digit))+) / digit)> */
nil,
/* 22 float <- <(integer ((frac exp?) / (frac? exp)))> */
nil,
/* 23 frac <- <('.' digit (digit / ('_' digit))*)> */
func() bool {
position231, tokenIndex231 := position, tokenIndex
{
position232 := position
if buffer[position] != rune('.') {
goto l231
}
position++
if !_rules[ruledigit]() {
goto l231
}
l233:
{
position234, tokenIndex234 := position, tokenIndex
{
position235, tokenIndex235 := position, tokenIndex
if !_rules[ruledigit]() {
goto l236
}
goto l235
l236:
position, tokenIndex = position235, tokenIndex235
if buffer[position] != rune('_') {
goto l234
}
position++
if !_rules[ruledigit]() {
goto l234
}
}
l235:
goto l233
l234:
position, tokenIndex = position234, tokenIndex234
}
add(rulefrac, position232)
}
return true
l231:
position, tokenIndex = position231, tokenIndex231
return false
},
/* 24 exp <- <(('e' / 'E') ('-' / '+')? digit (digit / ('_' digit))*)> */
func() bool {
position237, tokenIndex237 := position, tokenIndex
{
position238 := position
{
position239, tokenIndex239 := position, tokenIndex
if buffer[position] != rune('e') {
goto l240
}
position++
goto l239
l240:
position, tokenIndex = position239, tokenIndex239
if buffer[position] != rune('E') {
goto l237
}
position++
}
l239:
{
position241, tokenIndex241 := position, tokenIndex
{
position243, tokenIndex243 := position, tokenIndex
if buffer[position] != rune('-') {
goto l244
}
position++
goto l243
l244:
position, tokenIndex = position243, tokenIndex243
if buffer[position] != rune('+') {
goto l241
}
position++
}
l243:
goto l242
l241:
position, tokenIndex = position241, tokenIndex241
}
l242:
if !_rules[ruledigit]() {
goto l237
}
l245:
{
position246, tokenIndex246 := position, tokenIndex
{
position247, tokenIndex247 := position, tokenIndex
if !_rules[ruledigit]() {
goto l248
}
goto l247
l248:
position, tokenIndex = position247, tokenIndex247
if buffer[position] != rune('_') {
goto l246
}
position++
if !_rules[ruledigit]() {
goto l246
}
}
l247:
goto l245
l246:
position, tokenIndex = position246, tokenIndex246
}
add(ruleexp, position238)
}
return true
l237:
position, tokenIndex = position237, tokenIndex237
return false
},
/* 25 string <- <(mlLiteralString / literalString / mlBasicString / basicString)> */
nil,
/* 26 basicString <- <(<('"' basicChar* '"')> Action18)> */
nil,
/* 27 basicChar <- <(basicUnescaped / escaped)> */
func() bool {
position251, tokenIndex251 := position, tokenIndex
{
position252 := position
{
position253, tokenIndex253 := position, tokenIndex
{
position255 := position
{
switch buffer[position] {
case ' ', '!':
if c := buffer[position]; c < rune(' ') || c > rune('!') {
goto l254
}
position++
break
case '#', '$', '%', '&', '\'', '(', ')', '*', '+', ',', '-', '.', '/', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?', '@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[':
if c := buffer[position]; c < rune('#') || c > rune('[') {
goto l254
}
position++
break
default:
if c := buffer[position]; c < rune(']') || c > rune('\U0010ffff') {
goto l254
}
position++
break
}
}
add(rulebasicUnescaped, position255)
}
goto l253
l254:
position, tokenIndex = position253, tokenIndex253
{
position257 := position
if !_rules[ruleescape]() {
goto l251
}
{
switch buffer[position] {
case 'U':
if buffer[position] != rune('U') {
goto l251
}
position++
if !_rules[rulehexQuad]() {
goto l251
}
if !_rules[rulehexQuad]() {
goto l251
}
break
case 'u':
if buffer[position] != rune('u') {
goto l251
}
position++
if !_rules[rulehexQuad]() {
goto l251
}
break
case '\\':
if buffer[position] != rune('\\') {
goto l251
}
position++
break
case '/':
if buffer[position] != rune('/') {
goto l251
}
position++
break
case '"':
if buffer[position] != rune('"') {
goto l251
}
position++
break
case 'r':
if buffer[position] != rune('r') {
goto l251
}
position++
break
case 'f':
if buffer[position] != rune('f') {
goto l251
}
position++
break
case 'n':
if buffer[position] != rune('n') {
goto l251
}
position++
break
case 't':
if buffer[position] != rune('t') {
goto l251
}
position++
break
default:
if buffer[position] != rune('b') {
goto l251
}
position++
break
}
}
add(ruleescaped, position257)
}
}
l253:
add(rulebasicChar, position252)
}
return true
l251:
position, tokenIndex = position251, tokenIndex251
return false
},
/* 28 escaped <- <(escape ((&('U') ('U' hexQuad hexQuad)) | (&('u') ('u' hexQuad)) | (&('\\') '\\') | (&('/') '/') | (&('"') '"') | (&('r') 'r') | (&('f') 'f') | (&('n') 'n') | (&('t') 't') | (&('b') 'b')))> */
nil,
/* 29 basicUnescaped <- <((&(' ' | '!') [ -!]) | (&('#' | '$' | '%' | '&' | '\'' | '(' | ')' | '*' | '+' | ',' | '-' | '.' | '/' | '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | ':' | ';' | '<' | '=' | '>' | '?' | '@' | 'A' | 'B' | 'C' | 'D' | 'E' | 'F' | 'G' | 'H' | 'I' | 'J' | 'K' | 'L' | 'M' | 'N' | 'O' | 'P' | 'Q' | 'R' | 'S' | 'T' | 'U' | 'V' | 'W' | 'X' | 'Y' | 'Z' | '[') [#-[]) | (&(']' | '^' | '_' | '`' | 'a' | 'b' | 'c' | 'd' | 'e' | 'f' | 'g' | 'h' | 'i' | 'j' | 'k' | 'l' | 'm' | 'n' | 'o' | 'p' | 'q' | 'r' | 's' | 't' | 'u' | 'v' | 'w' | 'x' | 'y' | 'z' | '{' | '|' | '}' | '~' | '\u007f' | '\u0080' | '\u0081' | '\u0082' | '\u0083' | '\u0084' | '\u0085' | '\u0086' | '\u0087' | '\u0088' | '\u0089' | '\u008a' | '\u008b' | '\u008c' | '\u008d' | '\u008e' | '\u008f' | '\u0090' | '\u0091' | '\u0092' | '\u0093' | '\u0094' | '\u0095' | '\u0096' | '\u0097' | '\u0098' | '\u0099' | '\u009a' | '\u009b' | '\u009c' | '\u009d' | '\u009e' | '\u009f' | '\u00a0' | '¡' | '¢' | '£' | '¤' | '¥' | '¦' | '§' | '¨' | '©' | 'ª' | '«' | '¬' | '\u00ad' | '®' | '¯' | '°' | '±' | '²' | '³' | '´' | 'µ' | '¶' | '·' | '¸' | '¹' | 'º' | '»' | '¼' | '½' | '¾' | '¿' | 'À' | 'Á' | 'Â' | 'Ã' | 'Ä' | 'Å' | 'Æ' | 'Ç' | 'È' | 'É' | 'Ê' | 'Ë' | 'Ì' | 'Í' | 'Î' | 'Ï' | 'Ð' | 'Ñ' | 'Ò' | 'Ó' | 'Ô' | 'Õ' | 'Ö' | '×' | 'Ø' | 'Ù' | 'Ú' | 'Û' | 'Ü' | 'Ý' | 'Þ' | 'ß' | 'à' | 'á' | 'â' | 'ã' | 'ä' | 'å' | 'æ' | 'ç' | 'è' | 'é' | 'ê' | 'ë' | 'ì' | 'í' | 'î' | 'ï' | 'ð' | 'ñ' | 'ò' | 'ó' | 'ô' | 'õ' | 'ö' | '÷' | 'ø' | 'ù' | 'ú' | 'û' | 'ü' | 'ý' | 'þ' | 'ÿ') []-\U0010ffff]))> */
nil,
/* 30 escape <- <'\\'> */
func() bool {
position261, tokenIndex261 := position, tokenIndex
{
position262 := position
if buffer[position] != rune('\\') {
goto l261
}
position++
add(ruleescape, position262)
}
return true
l261:
position, tokenIndex = position261, tokenIndex261
return false
},
/* 31 mlBasicString <- <('"' '"' '"' mlBasicBody ('"' '"' '"') Action19)> */
nil,
/* 32 mlBasicBody <- <((<(basicChar / newline)> Action20) / (escape newline wsnl))*> */
nil,
/* 33 literalString <- <('\'' <literalChar*> '\'' Action21)> */
nil,
/* 34 literalChar <- <((&('\t') '\t') | (&(' ' | '!' | '"' | '#' | '$' | '%' | '&') [ -&]) | (&('(' | ')' | '*' | '+' | ',' | '-' | '.' | '/' | '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | ':' | ';' | '<' | '=' | '>' | '?' | '@' | 'A' | 'B' | 'C' | 'D' | 'E' | 'F' | 'G' | 'H' | 'I' | 'J' | 'K' | 'L' | 'M' | 'N' | 'O' | 'P' | 'Q' | 'R' | 'S' | 'T' | 'U' | 'V' | 'W' | 'X' | 'Y' | 'Z' | '[' | '\\' | ']' | '^' | '_' | '`' | 'a' | 'b' | 'c' | 'd' | 'e' | 'f' | 'g' | 'h' | 'i' | 'j' | 'k' | 'l' | 'm' | 'n' | 'o' | 'p' | 'q' | 'r' | 's' | 't' | 'u' | 'v' | 'w' | 'x' | 'y' | 'z' | '{' | '|' | '}' | '~' | '\u007f' | '\u0080' | '\u0081' | '\u0082' | '\u0083' | '\u0084' | '\u0085' | '\u0086' | '\u0087' | '\u0088' | '\u0089' | '\u008a' | '\u008b' | '\u008c' | '\u008d' | '\u008e' | '\u008f' | '\u0090' | '\u0091' | '\u0092' | '\u0093' | '\u0094' | '\u0095' | '\u0096' | '\u0097' | '\u0098' | '\u0099' | '\u009a' | '\u009b' | '\u009c' | '\u009d' | '\u009e' | '\u009f' | '\u00a0' | '¡' | '¢' | '£' | '¤' | '¥' | '¦' | '§' | '¨' | '©' | 'ª' | '«' | '¬' | '\u00ad' | '®' | '¯' | '°' | '±' | '²' | '³' | '´' | 'µ' | '¶' | '·' | '¸' | '¹' | 'º' | '»' | '¼' | '½' | '¾' | '¿' | 'À' | 'Á' | 'Â' | 'Ã' | 'Ä' | 'Å' | 'Æ' | 'Ç' | 'È' | 'É' | 'Ê' | 'Ë' | 'Ì' | 'Í' | 'Î' | 'Ï' | 'Ð' | 'Ñ' | 'Ò' | 'Ó' | 'Ô' | 'Õ' | 'Ö' | '×' | 'Ø' | 'Ù' | 'Ú' | 'Û' | 'Ü' | 'Ý' | 'Þ' | 'ß' | 'à' | 'á' | 'â' | 'ã' | 'ä' | 'å' | 'æ' | 'ç' | 'è' | 'é' | 'ê' | 'ë' | 'ì' | 'í' | 'î' | 'ï' | 'ð' | 'ñ' | 'ò' | 'ó' | 'ô' | 'õ' | 'ö' | '÷' | 'ø' | 'ù' | 'ú' | 'û' | 'ü' | 'ý' | 'þ' | 'ÿ') [(-\U0010ffff]))> */
nil,
/* 35 mlLiteralString <- <('\'' '\'' '\'' <mlLiteralBody> ('\'' '\'' '\'') Action22)> */
nil,
/* 36 mlLiteralBody <- <(!('\'' '\'' '\'') (mlLiteralChar / newline))*> */
nil,
/* 37 mlLiteralChar <- <('\t' / [ -\U0010ffff])> */
nil,
/* 38 hexdigit <- <((&('a' | 'b' | 'c' | 'd' | 'e' | 'f') [a-f]) | (&('A' | 'B' | 'C' | 'D' | 'E' | 'F') [A-F]) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') [0-9]))> */
func() bool {
position270, tokenIndex270 := position, tokenIndex
{
position271 := position
{
switch buffer[position] {
case 'a', 'b', 'c', 'd', 'e', 'f':
if c := buffer[position]; c < rune('a') || c > rune('f') {
goto l270
}
position++
break
case 'A', 'B', 'C', 'D', 'E', 'F':
if c := buffer[position]; c < rune('A') || c > rune('F') {
goto l270
}
position++
break
default:
if c := buffer[position]; c < rune('0') || c > rune('9') {
goto l270
}
position++
break
}
}
add(rulehexdigit, position271)
}
return true
l270:
position, tokenIndex = position270, tokenIndex270
return false
},
/* 39 hexQuad <- <(hexdigit hexdigit hexdigit hexdigit)> */
func() bool {
position273, tokenIndex273 := position, tokenIndex
{
position274 := position
if !_rules[rulehexdigit]() {
goto l273
}
if !_rules[rulehexdigit]() {
goto l273
}
if !_rules[rulehexdigit]() {
goto l273
}
if !_rules[rulehexdigit]() {
goto l273
}
add(rulehexQuad, position274)
}
return true
l273:
position, tokenIndex = position273, tokenIndex273
return false
},
/* 40 boolean <- <(('t' 'r' 'u' 'e') / ('f' 'a' 'l' 's' 'e'))> */
nil,
/* 41 dateFullYear <- <digitQuad> */
nil,
/* 42 dateMonth <- <digitDual> */
nil,
/* 43 dateMDay <- <digitDual> */
nil,
/* 44 timeHour <- <digitDual> */
func() bool {
position279, tokenIndex279 := position, tokenIndex
{
position280 := position
if !_rules[ruledigitDual]() {
goto l279
}
add(ruletimeHour, position280)
}
return true
l279:
position, tokenIndex = position279, tokenIndex279
return false
},
/* 45 timeMinute <- <digitDual> */
func() bool {
position281, tokenIndex281 := position, tokenIndex
{
position282 := position
if !_rules[ruledigitDual]() {
goto l281
}
add(ruletimeMinute, position282)
}
return true
l281:
position, tokenIndex = position281, tokenIndex281
return false
},
/* 46 timeSecond <- <digitDual> */
nil,
/* 47 timeSecfrac <- <('.' digit+)> */
nil,
/* 48 timeNumoffset <- <(('-' / '+') timeHour ':' timeMinute)> */
nil,
/* 49 timeOffset <- <('Z' / timeNumoffset)> */
nil,
/* 50 partialTime <- <(timeHour ':' timeMinute ':' timeSecond timeSecfrac?)> */
func() bool {
position287, tokenIndex287 := position, tokenIndex
{
position288 := position
if !_rules[ruletimeHour]() {
goto l287
}
if buffer[position] != rune(':') {
goto l287
}
position++
if !_rules[ruletimeMinute]() {
goto l287
}
if buffer[position] != rune(':') {
goto l287
}
position++
{
position289 := position
if !_rules[ruledigitDual]() {
goto l287
}
add(ruletimeSecond, position289)
}
{
position290, tokenIndex290 := position, tokenIndex
{
position292 := position
if buffer[position] != rune('.') {
goto l290
}
position++
if !_rules[ruledigit]() {
goto l290
}
l293:
{
position294, tokenIndex294 := position, tokenIndex
if !_rules[ruledigit]() {
goto l294
}
goto l293
l294:
position, tokenIndex = position294, tokenIndex294
}
add(ruletimeSecfrac, position292)
}
goto l291
l290:
position, tokenIndex = position290, tokenIndex290
}
l291:
add(rulepartialTime, position288)
}
return true
l287:
position, tokenIndex = position287, tokenIndex287
return false
},
/* 51 fullDate <- <(dateFullYear '-' dateMonth '-' dateMDay)> */
nil,
/* 52 fullTime <- <(partialTime timeOffset)> */
nil,
/* 53 datetime <- <((fullDate ('T' fullTime)?) / partialTime)> */
nil,
/* 54 digit <- <[0-9]> */
func() bool {
position298, tokenIndex298 := position, tokenIndex
{
position299 := position
if c := buffer[position]; c < rune('0') || c > rune('9') {
goto l298
}
position++
add(ruledigit, position299)
}
return true
l298:
position, tokenIndex = position298, tokenIndex298
return false
},
/* 55 digitDual <- <(digit digit)> */
func() bool {
position300, tokenIndex300 := position, tokenIndex
{
position301 := position
if !_rules[ruledigit]() {
goto l300
}
if !_rules[ruledigit]() {
goto l300
}
add(ruledigitDual, position301)
}
return true
l300:
position, tokenIndex = position300, tokenIndex300
return false
},
/* 56 digitQuad <- <(digitDual digitDual)> */
nil,
/* 57 array <- <('[' Action23 wsnl arrayValues? wsnl ']')> */
nil,
/* 58 arrayValues <- <(val Action24 (wsnl comment? wsnl arraySep wsnl comment? wsnl val Action25)* wsnl arraySep? wsnl comment?)> */
nil,
/* 59 arraySep <- <','> */
func() bool {
position305, tokenIndex305 := position, tokenIndex
{
position306 := position
if buffer[position] != rune(',') {
goto l305
}
position++
add(rulearraySep, position306)
}
return true
l305:
position, tokenIndex = position305, tokenIndex305
return false
},
/* 61 Action0 <- <{ _ = buffer }> */
nil,
nil,
/* 63 Action1 <- <{ p.SetTableString(begin, end) }> */
nil,
/* 64 Action2 <- <{ p.AddLineCount(end - begin) }> */
nil,
/* 65 Action3 <- <{ p.AddLineCount(end - begin) }> */
nil,
/* 66 Action4 <- <{ p.AddKeyValue() }> */
nil,
/* 67 Action5 <- <{ p.SetKey(p.buffer, begin, end) }> */
nil,
/* 68 Action6 <- <{ p.SetKey(p.buffer, begin, end) }> */
nil,
/* 69 Action7 <- <{ p.SetTime(begin, end) }> */
nil,
/* 70 Action8 <- <{ p.SetFloat64(begin, end) }> */
nil,
/* 71 Action9 <- <{ p.SetInt64(begin, end) }> */
nil,
/* 72 Action10 <- <{ p.SetString(begin, end) }> */
nil,
/* 73 Action11 <- <{ p.SetBool(begin, end) }> */
nil,
/* 74 Action12 <- <{ p.SetArray(begin, end) }> */
nil,
/* 75 Action13 <- <{ p.SetTable(p.buffer, begin, end) }> */
nil,
/* 76 Action14 <- <{ p.SetArrayTable(p.buffer, begin, end) }> */
nil,
/* 77 Action15 <- <{ p.StartInlineTable() }> */
nil,
/* 78 Action16 <- <{ p.EndInlineTable() }> */
nil,
/* 79 Action17 <- <{ p.AddTableKey() }> */
nil,
/* 80 Action18 <- <{ p.SetBasicString(p.buffer, begin, end) }> */
nil,
/* 81 Action19 <- <{ p.SetMultilineString() }> */
nil,
/* 82 Action20 <- <{ p.AddMultilineBasicBody(p.buffer, begin, end) }> */
nil,
/* 83 Action21 <- <{ p.SetLiteralString(p.buffer, begin, end) }> */
nil,
/* 84 Action22 <- <{ p.SetMultilineLiteralString(p.buffer, begin, end) }> */
nil,
/* 85 Action23 <- <{ p.StartArray() }> */
nil,
/* 86 Action24 <- <{ p.AddArrayVal() }> */
nil,
/* 87 Action25 <- <{ p.AddArrayVal() }> */
nil,
}
p.rules = _rules
}
| 8,534 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/naoina | kubeflow_public_repos/fate-operator/vendor/github.com/naoina/toml/parse.peg | package toml
type tomlParser Peg {
toml
}
TOML <- Expression (newline Expression)* newline? !. { _ = buffer }
Expression <- (
<ws table ws comment? (wsnl keyval ws comment?)*> { p.SetTableString(begin, end) }
/ ws keyval ws comment?
/ ws comment?
/ ws
)
newline <- <[\r\n]+> { p.AddLineCount(end - begin) }
ws <- [ \t]*
wsnl <- (
[ \t]
/ <[\r\n]> { p.AddLineCount(end - begin) }
)*
comment <- '#' <[\t -\0x10FFFF]*>
keyval <- key ws '=' ws val { p.AddKeyValue() }
key <- bareKey / quotedKey
bareKey <- <[0-9A-Za-z\-_]+> { p.SetKey(p.buffer, begin, end) }
quotedKey <- < '"' basicChar* '"' > { p.SetKey(p.buffer, begin, end) }
val <- (
<datetime> { p.SetTime(begin, end) }
/ <float> { p.SetFloat64(begin, end) }
/ <integer> { p.SetInt64(begin, end) }
/ <string> { p.SetString(begin, end) }
/ <boolean> { p.SetBool(begin, end) }
/ <array> { p.SetArray(begin, end) }
/ inlineTable
)
table <- stdTable / arrayTable
stdTable <- '[' ws <tableKey> ws ']' { p.SetTable(p.buffer, begin, end) }
arrayTable <- '[[' ws <tableKey> ws ']]' { p.SetArrayTable(p.buffer, begin, end) }
inlineTable <- (
'{' { p.StartInlineTable() }
ws inlineTableKeyValues ws
'}' { p.EndInlineTable() }
)
inlineTableKeyValues <- (keyval inlineTableValSep?)*
tableKey <- tableKeyComp (tableKeySep tableKeyComp)*
tableKeyComp <- key { p.AddTableKey() }
tableKeySep <- ws '.' ws
inlineTableValSep <- ws ',' ws
integer <- [\-+]? int
int <- [1-9] (digit / '_' digit)+ / digit
float <- integer (frac exp? / frac? exp)
frac <- '.' digit (digit / '_' digit)*
exp <- [eE] [\-+]? digit (digit / '_' digit)*
string <- (
mlLiteralString
/ literalString
/ mlBasicString
/ basicString
)
basicString <- <'"' basicChar* '"'> { p.SetBasicString(p.buffer, begin, end) }
basicChar <- basicUnescaped / escaped
escaped <- escape ([btnfr"/\\] / 'u' hexQuad / 'U' hexQuad hexQuad)
basicUnescaped <- [ -!#-\[\]-\0x10FFFF]
escape <- '\\'
mlBasicString <- '"""' mlBasicBody '"""' { p.SetMultilineString() }
mlBasicBody <- (
<basicChar / newline> { p.AddMultilineBasicBody(p.buffer, begin, end) }
/ escape newline wsnl
)*
literalString <- "'" <literalChar*> "'" { p.SetLiteralString(p.buffer, begin, end) }
literalChar <- [\t -&(-\0x10FFFF]
mlLiteralString <- "'''" <mlLiteralBody> "'''" { p.SetMultilineLiteralString(p.buffer, begin, end) }
mlLiteralBody <- (!"'''" (mlLiteralChar / newline))*
mlLiteralChar <- [\t -\0x10FFFF]
hexdigit <- [0-9A-Fa-f]
hexQuad <- hexdigit hexdigit hexdigit hexdigit
boolean <- 'true' / 'false'
dateFullYear <- digitQuad
dateMonth <- digitDual
dateMDay <- digitDual
timeHour <- digitDual
timeMinute <- digitDual
timeSecond <- digitDual
timeSecfrac <- '.' digit+
timeNumoffset <- [\-+] timeHour ':' timeMinute
timeOffset <- 'Z' / timeNumoffset
partialTime <- timeHour ':' timeMinute ':' timeSecond timeSecfrac?
fullDate <- dateFullYear '-' dateMonth '-' dateMDay
fullTime <- partialTime timeOffset
datetime <- (fullDate ('T' fullTime)?) / partialTime
digit <- [0-9]
digitDual <- digit digit
digitQuad <- digitDual digitDual
array <- (
'[' { p.StartArray() }
wsnl arrayValues? wsnl
']'
)
arrayValues <- (
val { p.AddArrayVal() }
(
wsnl comment?
wsnl arraySep
wsnl comment?
wsnl val { p.AddArrayVal() }
)*
wsnl arraySep?
wsnl comment?
)
arraySep <- ','
| 8,535 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/naoina | kubeflow_public_repos/fate-operator/vendor/github.com/naoina/toml/.travis.yml | language: go
go:
- 1.3
- 1.x
install:
- go get -t -v ./...
script:
- go test ./...
| 8,536 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/naoina/toml | kubeflow_public_repos/fate-operator/vendor/github.com/naoina/toml/ast/ast.go | package ast
import (
"strconv"
"strings"
"time"
)
type Position struct {
Begin int
End int
}
type Value interface {
Pos() int
End() int
Source() string
}
type String struct {
Position Position
Value string
Data []rune
}
func (s *String) Pos() int {
return s.Position.Begin
}
func (s *String) End() int {
return s.Position.End
}
func (s *String) Source() string {
return string(s.Data)
}
type Integer struct {
Position Position
Value string
Data []rune
}
func (i *Integer) Pos() int {
return i.Position.Begin
}
func (i *Integer) End() int {
return i.Position.End
}
func (i *Integer) Source() string {
return string(i.Data)
}
func (i *Integer) Int() (int64, error) {
return strconv.ParseInt(i.Value, 10, 64)
}
type Float struct {
Position Position
Value string
Data []rune
}
func (f *Float) Pos() int {
return f.Position.Begin
}
func (f *Float) End() int {
return f.Position.End
}
func (f *Float) Source() string {
return string(f.Data)
}
func (f *Float) Float() (float64, error) {
return strconv.ParseFloat(f.Value, 64)
}
type Boolean struct {
Position Position
Value string
Data []rune
}
func (b *Boolean) Pos() int {
return b.Position.Begin
}
func (b *Boolean) End() int {
return b.Position.End
}
func (b *Boolean) Source() string {
return string(b.Data)
}
func (b *Boolean) Boolean() (bool, error) {
return strconv.ParseBool(b.Value)
}
type Datetime struct {
Position Position
Value string
Data []rune
}
func (d *Datetime) Pos() int {
return d.Position.Begin
}
func (d *Datetime) End() int {
return d.Position.End
}
func (d *Datetime) Source() string {
return string(d.Data)
}
func (d *Datetime) Time() (time.Time, error) {
	switch {
	case !strings.Contains(d.Value, ":"):
		// date only, e.g. 1979-05-27
		return time.Parse("2006-01-02", d.Value)
	case !strings.Contains(d.Value, "-"):
		// local time only, e.g. 07:32:00.999999
		return time.Parse("15:04:05.999999999", d.Value)
	default:
		// full RFC 3339 datetime, e.g. 1979-05-27T07:32:00Z
		return time.Parse(time.RFC3339Nano, d.Value)
	}
}
type Array struct {
Position Position
Value []Value
Data []rune
}
func (a *Array) Pos() int {
return a.Position.Begin
}
func (a *Array) End() int {
return a.Position.End
}
func (a *Array) Source() string {
return string(a.Data)
}
type TableType uint8
const (
TableTypeNormal TableType = iota
TableTypeArray
)
var tableTypes = [...]string{
"normal",
"array",
}
func (t TableType) String() string {
return tableTypes[t]
}
type Table struct {
Position Position
Line int
Name string
Fields map[string]interface{}
Type TableType
Data []rune
}
func (t *Table) Pos() int {
return t.Position.Begin
}
func (t *Table) End() int {
return t.Position.End
}
func (t *Table) Source() string {
return string(t.Data)
}
type KeyValue struct {
Key string
Value Value
Line int
}
| 8,537 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/naoina | kubeflow_public_repos/fate-operator/vendor/github.com/naoina/go-stringutil/README.md | # stringutil [](https://travis-ci.org/naoina/go-stringutil)
## Installation
go get -u github.com/naoina/go-stringutil
## Documentation
See https://godoc.org/github.com/naoina/go-stringutil
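A minimal usage sketch (assuming the package is imported under its package name `stringutil`; see the GoDoc above for the full API):
    package main
    import (
        "fmt"
        stringutil "github.com/naoina/go-stringutil"
    )
    func main() {
        fmt.Println(stringutil.ToUpperCamelCase("user_id")) // UserID
        fmt.Println(stringutil.ToSnakeCase("UserID"))       // user_id
    }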
## License
MIT
| 8,538 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/naoina | kubeflow_public_repos/fate-operator/vendor/github.com/naoina/go-stringutil/strings.go | package stringutil
import (
"sync"
"unicode"
"unicode/utf8"
)
var (
mu sync.Mutex
// Based on https://github.com/golang/lint/blob/32a87160691b3c96046c0c678fe57c5bef761456/lint.go#L702
commonInitialismMap = map[string]struct{}{
"API": struct{}{},
"ASCII": struct{}{},
"CPU": struct{}{},
"CSRF": struct{}{},
"CSS": struct{}{},
"DNS": struct{}{},
"EOF": struct{}{},
"GUID": struct{}{},
"HTML": struct{}{},
"HTTP": struct{}{},
"HTTPS": struct{}{},
"ID": struct{}{},
"IP": struct{}{},
"JSON": struct{}{},
"LHS": struct{}{},
"QPS": struct{}{},
"RAM": struct{}{},
"RHS": struct{}{},
"RPC": struct{}{},
"SLA": struct{}{},
"SMTP": struct{}{},
"SQL": struct{}{},
"SSH": struct{}{},
"TCP": struct{}{},
"TLS": struct{}{},
"TTL": struct{}{},
"UDP": struct{}{},
"UI": struct{}{},
"UID": struct{}{},
"UUID": struct{}{},
"URI": struct{}{},
"URL": struct{}{},
"UTF8": struct{}{},
"VM": struct{}{},
"XML": struct{}{},
"XSRF": struct{}{},
"XSS": struct{}{},
}
commonInitialisms = keys(commonInitialismMap)
commonInitialism = mustDoubleArray(newDoubleArray(commonInitialisms))
longestLen = longestLength(commonInitialisms)
shortestLen = shortestLength(commonInitialisms, longestLen)
)
// ToUpperCamelCase returns a copy of the string s with all Unicode letters mapped to their camel case.
// It converts to upper case the first letter and each letter that follows a '_',
// and removes the '_' characters.
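// For example, ToUpperCamelCase("user_id") returns "UserID"; common initialisms
// such as "id" are upper-cased as a unit.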
func ToUpperCamelCase(s string) string {
if s == "" {
return ""
}
upper := true
start := 0
result := make([]byte, 0, len(s))
var runeBuf [utf8.UTFMax]byte
var initialism []byte
for _, c := range s {
if c == '_' {
upper = true
candidate := string(result[start:])
initialism = initialism[:0]
for _, r := range candidate {
if r < utf8.RuneSelf {
initialism = append(initialism, toUpperASCII(byte(r)))
} else {
n := utf8.EncodeRune(runeBuf[:], unicode.ToUpper(r))
initialism = append(initialism, runeBuf[:n]...)
}
}
if length := commonInitialism.LookupByBytes(initialism); length > 0 {
result = append(result[:start], initialism...)
}
start = len(result)
continue
}
if upper {
if c < utf8.RuneSelf {
result = append(result, toUpperASCII(byte(c)))
} else {
n := utf8.EncodeRune(runeBuf[:], unicode.ToUpper(c))
result = append(result, runeBuf[:n]...)
}
upper = false
continue
}
if c < utf8.RuneSelf {
result = append(result, byte(c))
} else {
n := utf8.EncodeRune(runeBuf[:], c)
result = append(result, runeBuf[:n]...)
}
}
candidate := string(result[start:])
initialism = initialism[:0]
for _, r := range candidate {
if r < utf8.RuneSelf {
initialism = append(initialism, toUpperASCII(byte(r)))
} else {
n := utf8.EncodeRune(runeBuf[:], unicode.ToUpper(r))
initialism = append(initialism, runeBuf[:n]...)
}
}
if length := commonInitialism.LookupByBytes(initialism); length > 0 {
result = append(result[:start], initialism...)
}
return string(result)
}
// ToUpperCamelCaseASCII is similar to ToUpperCamelCase, but optimized for
// ASCII-only strings.
// ToUpperCamelCaseASCII is faster than ToUpperCamelCase, but doesn't work
// correctly if s contains non-ASCII characters.
func ToUpperCamelCaseASCII(s string) string {
if s == "" {
return ""
}
upper := true
start := 0
result := make([]byte, 0, len(s))
var initialism []byte
for i := 0; i < len(s); i++ {
c := s[i]
if c == '_' {
upper = true
candidate := result[start:]
initialism = initialism[:0]
for _, b := range candidate {
initialism = append(initialism, toUpperASCII(b))
}
if length := commonInitialism.LookupByBytes(initialism); length > 0 {
result = append(result[:start], initialism...)
}
start = len(result)
continue
}
if upper {
result = append(result, toUpperASCII(c))
upper = false
continue
}
result = append(result, c)
}
candidate := result[start:]
initialism = initialism[:0]
for _, b := range candidate {
initialism = append(initialism, toUpperASCII(b))
}
if length := commonInitialism.LookupByBytes(initialism); length > 0 {
result = append(result[:start], initialism...)
}
return string(result)
}
// ToSnakeCase returns a copy of the string s with all Unicode letters mapped to their snake case.
// It inserts a '_' before each upper-case letter (except the first) and converts
// all letters to lower case.
// ToSnakeCase does not insert a '_' inside a common initialism word such as ID, URL and so on.
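// For example, ToSnakeCase("UserID") returns "user_id".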
func ToSnakeCase(s string) string {
if s == "" {
return ""
}
result := make([]byte, 0, len(s))
var runeBuf [utf8.UTFMax]byte
var j, skipCount int
for i, c := range s {
if i < skipCount {
continue
}
if unicode.IsUpper(c) {
if i != 0 {
result = append(result, '_')
}
next := nextIndex(j, len(s))
if length := commonInitialism.Lookup(s[j:next]); length > 0 {
for _, r := range s[j : j+length] {
if r < utf8.RuneSelf {
result = append(result, toLowerASCII(byte(r)))
} else {
n := utf8.EncodeRune(runeBuf[:], unicode.ToLower(r))
result = append(result, runeBuf[:n]...)
}
}
j += length - 1
skipCount = i + length
continue
}
}
if c < utf8.RuneSelf {
result = append(result, toLowerASCII(byte(c)))
} else {
n := utf8.EncodeRune(runeBuf[:], unicode.ToLower(c))
result = append(result, runeBuf[:n]...)
}
j++
}
return string(result)
}
// ToSnakeCaseASCII is similar to ToSnakeCase, but optimized for ASCII-only
// strings.
// ToSnakeCaseASCII is faster than ToSnakeCase, but doesn't work correctly if
// s contains non-ASCII characters.
func ToSnakeCaseASCII(s string) string {
if s == "" {
return ""
}
result := make([]byte, 0, len(s))
for i := 0; i < len(s); i++ {
c := s[i]
if isUpperASCII(c) {
if i != 0 {
result = append(result, '_')
}
if k := i + shortestLen - 1; k < len(s) && isUpperASCII(s[k]) {
if length := commonInitialism.Lookup(s[i:nextIndex(i, len(s))]); length > 0 {
for j, buf := 0, s[i:i+length]; j < len(buf); j++ {
result = append(result, toLowerASCII(buf[j]))
}
i += length - 1
continue
}
}
}
result = append(result, toLowerASCII(c))
}
return string(result)
}
// AddCommonInitialism adds ss to the list of common initialisms.
func AddCommonInitialism(ss ...string) {
mu.Lock()
defer mu.Unlock()
for _, s := range ss {
commonInitialismMap[s] = struct{}{}
}
commonInitialisms = keys(commonInitialismMap)
commonInitialism = mustDoubleArray(newDoubleArray(commonInitialisms))
longestLen = longestLength(commonInitialisms)
shortestLen = shortestLength(commonInitialisms, longestLen)
}
// DelCommonInitialism deletes ss from the list of common initialisms.
func DelCommonInitialism(ss ...string) {
mu.Lock()
defer mu.Unlock()
for _, s := range ss {
delete(commonInitialismMap, s)
}
commonInitialisms = keys(commonInitialismMap)
commonInitialism = mustDoubleArray(newDoubleArray(commonInitialisms))
longestLen = longestLength(commonInitialisms)
shortestLen = shortestLength(commonInitialisms, longestLen)
}
func isUpperASCII(c byte) bool {
return 'A' <= c && c <= 'Z'
}
func isLowerASCII(c byte) bool {
return 'a' <= c && c <= 'z'
}
func toUpperASCII(c byte) byte {
if isLowerASCII(c) {
return c - ('a' - 'A')
}
return c
}
func toLowerASCII(c byte) byte {
if isUpperASCII(c) {
return c + 'a' - 'A'
}
return c
}
func nextIndex(i, maxlen int) int {
if n := i + longestLen; n < maxlen {
return n
}
return maxlen
}
func keys(m map[string]struct{}) []string {
result := make([]string, 0, len(m))
for k := range m {
result = append(result, k)
}
return result
}
func shortestLength(strs []string, shortest int) int {
for _, s := range strs {
if candidate := utf8.RuneCountInString(s); candidate < shortest {
shortest = candidate
}
}
return shortest
}
func longestLength(strs []string) (longest int) {
for _, s := range strs {
if candidate := utf8.RuneCountInString(s); candidate > longest {
longest = candidate
}
}
return longest
}
| 8,539 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/naoina | kubeflow_public_repos/fate-operator/vendor/github.com/naoina/go-stringutil/LICENSE | Copyright (c) 2015 Naoya Inada <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
| 8,540 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/naoina | kubeflow_public_repos/fate-operator/vendor/github.com/naoina/go-stringutil/da.go | package stringutil
import (
"fmt"
"sort"
"unicode/utf8"
)
const (
terminationCharacter = '#'
)
func mustDoubleArray(da *doubleArray, err error) *doubleArray {
if err != nil {
panic(err)
}
return da
}
func (da *doubleArray) Build(keys []string) error {
records := makeRecords(keys)
if err := da.build(records, 1, 0, make(map[int]struct{})); err != nil {
return err
}
return nil
}
type doubleArray struct {
bc []baseCheck
node []int
}
func newDoubleArray(keys []string) (*doubleArray, error) {
da := &doubleArray{
bc: []baseCheck{0},
		node: []int{-1}, // The start index is adjusted to 1 because 0 is used as a mark of a non-existent node.
}
if err := da.Build(keys); err != nil {
return nil, err
}
return da, nil
}
// baseCheck contains BASE, CHECK and Extra flags.
// From the top, 22 bits of BASE, 2 bits of Extra flags and 8 bits of CHECK.
//
// BASE (22bit) | Extra flags (2bit) | CHECK (8bit)
// |----------------------|--|--------|
// 32 10 8 0
type baseCheck uint32
func (bc baseCheck) Base() int {
return int(bc >> 10)
}
func (bc *baseCheck) SetBase(base int) {
*bc |= baseCheck(base) << 10
}
func (bc baseCheck) Check() byte {
return byte(bc)
}
func (bc *baseCheck) SetCheck(check byte) {
*bc |= baseCheck(check)
}
func (bc baseCheck) IsEmpty() bool {
return bc&0xfffffcff == 0
}
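// Lookup follows path through the trie and, if the walk stops exactly at the
// end of a registered key, returns that key's length in runes; otherwise it
// returns -1.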
func (da *doubleArray) Lookup(path string) (length int) {
idx := 1
tmpIdx := idx
for i := 0; i < len(path); i++ {
c := path[i]
tmpIdx = da.nextIndex(da.bc[tmpIdx].Base(), c)
if tmpIdx >= len(da.bc) || da.bc[tmpIdx].Check() != c {
break
}
idx = tmpIdx
}
if next := da.nextIndex(da.bc[idx].Base(), terminationCharacter); next < len(da.bc) && da.bc[next].Check() == terminationCharacter {
return da.node[da.bc[next].Base()]
}
return -1
}
func (da *doubleArray) LookupByBytes(path []byte) (length int) {
idx := 1
tmpIdx := idx
for i := 0; i < len(path); i++ {
c := path[i]
tmpIdx = da.nextIndex(da.bc[tmpIdx].Base(), c)
if tmpIdx >= len(da.bc) || da.bc[tmpIdx].Check() != c {
break
}
idx = tmpIdx
}
if next := da.nextIndex(da.bc[idx].Base(), terminationCharacter); next < len(da.bc) && da.bc[next].Check() == terminationCharacter {
return da.node[da.bc[next].Base()]
}
return -1
}
func (da *doubleArray) build(srcs []record, idx, depth int, usedBase map[int]struct{}) error {
sort.Stable(recordSlice(srcs))
base, siblings, leaf, err := da.arrange(srcs, idx, depth, usedBase)
if err != nil {
return err
}
if leaf != nil {
da.bc[idx].SetBase(len(da.node))
da.node = append(da.node, leaf.value)
}
for _, sib := range siblings {
da.setCheck(da.nextIndex(base, sib.c), sib.c)
}
for _, sib := range siblings {
if err := da.build(srcs[sib.start:sib.end], da.nextIndex(base, sib.c), depth+1, usedBase); err != nil {
return err
}
}
return nil
}
func (da *doubleArray) setBase(i, base int) {
da.bc[i].SetBase(base)
}
func (da *doubleArray) setCheck(i int, check byte) {
da.bc[i].SetCheck(check)
}
func (da *doubleArray) findEmptyIndex(start int) int {
i := start
for ; i < len(da.bc); i++ {
if da.bc[i].IsEmpty() {
break
}
}
return i
}
// findBase returns a BASE value that is usable by all of the given siblings.
func (da *doubleArray) findBase(siblings []sibling, start int, usedBase map[int]struct{}) (base int) {
for idx, firstChar := start+1, siblings[0].c; ; idx = da.findEmptyIndex(idx + 1) {
base = da.nextIndex(idx, firstChar)
if _, used := usedBase[base]; used {
continue
}
i := 0
for ; i < len(siblings); i++ {
next := da.nextIndex(base, siblings[i].c)
if len(da.bc) <= next {
da.bc = append(da.bc, make([]baseCheck, next-len(da.bc)+1)...)
}
if !da.bc[next].IsEmpty() {
break
}
}
if i == len(siblings) {
break
}
}
usedBase[base] = struct{}{}
return base
}
func (da *doubleArray) arrange(records []record, idx, depth int, usedBase map[int]struct{}) (base int, siblings []sibling, leaf *record, err error) {
siblings, leaf, err = makeSiblings(records, depth)
if err != nil {
return -1, nil, nil, err
}
if len(siblings) < 1 {
return -1, nil, leaf, nil
}
base = da.findBase(siblings, idx, usedBase)
da.setBase(idx, base)
return base, siblings, leaf, err
}
type sibling struct {
start int
end int
c byte
}
func (da *doubleArray) nextIndex(base int, c byte) int {
return base ^ int(c)
}
func makeSiblings(records []record, depth int) (sib []sibling, leaf *record, err error) {
var (
pc byte
n int
)
for i, r := range records {
if len(r.key) <= depth {
leaf = &r
continue
}
c := r.key[depth]
switch {
case pc < c:
sib = append(sib, sibling{start: i, c: c})
case pc == c:
continue
default:
			return nil, nil, fmt.Errorf("stringutil: BUG: records haven't been sorted")
}
if n > 0 {
sib[n-1].end = i
}
pc = c
n++
}
if n == 0 {
return nil, leaf, nil
}
sib[n-1].end = len(records)
return sib, leaf, nil
}
type record struct {
key string
value int
}
func makeRecords(srcs []string) (records []record) {
termChar := string(terminationCharacter)
for _, s := range srcs {
records = append(records, record{
key: string(s + termChar),
value: utf8.RuneCountInString(s),
})
}
return records
}
type recordSlice []record
func (rs recordSlice) Len() int {
return len(rs)
}
func (rs recordSlice) Less(i, j int) bool {
return rs[i].key < rs[j].key
}
func (rs recordSlice) Swap(i, j int) {
rs[i], rs[j] = rs[j], rs[i]
}
| 8,541 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/naoina | kubeflow_public_repos/fate-operator/vendor/github.com/naoina/go-stringutil/.travis.yml | language: go
go:
- 1.4
- 1.5
- tip
install:
- go get -v github.com/naoina/go-stringutil
script:
- go test -v -bench . -benchmem ./...
| 8,542 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/cobra/go.mod | module github.com/spf13/cobra
go 1.12
require (
github.com/cpuguy83/go-md2man/v2 v2.0.0
github.com/inconshreveable/mousetrap v1.0.0
github.com/mitchellh/go-homedir v1.1.0
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.7.0
gopkg.in/yaml.v2 v2.2.8
)
| 8,543 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/cobra/shell_completions.go | package cobra
import (
"github.com/spf13/pflag"
)
// MarkFlagRequired instructs the various shell completion implementations to
// prioritize the named flag when performing completion,
// and causes your command to report an error if invoked without the flag.
func (c *Command) MarkFlagRequired(name string) error {
return MarkFlagRequired(c.Flags(), name)
}
// MarkPersistentFlagRequired instructs the various shell completion implementations to
// prioritize the named persistent flag when performing completion,
// and causes your command to report an error if invoked without the flag.
func (c *Command) MarkPersistentFlagRequired(name string) error {
return MarkFlagRequired(c.PersistentFlags(), name)
}
// MarkFlagRequired instructs the various shell completion implementations to
// prioritize the named flag when performing completion,
// and causes your command to report an error if invoked without the flag.
func MarkFlagRequired(flags *pflag.FlagSet, name string) error {
return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"})
}
// MarkFlagFilename instructs the various shell completion implementations to
// limit completions for the named flag to the specified file extensions.
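//
// A usage sketch (the flag name and extensions are illustrative only):
//
//	cmd.MarkFlagFilename("config", "yaml", "yml")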
func (c *Command) MarkFlagFilename(name string, extensions ...string) error {
return MarkFlagFilename(c.Flags(), name, extensions...)
}
// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists.
// The bash completion script will call the bash function f for the flag.
//
// This will only work for bash completion.
// It is recommended to instead use c.RegisterFlagCompletionFunc(...) which allows
// to register a Go function which will work across all shells.
func (c *Command) MarkFlagCustom(name string, f string) error {
return MarkFlagCustom(c.Flags(), name, f)
}
// MarkPersistentFlagFilename instructs the various shell completion
// implementations to limit completions for the named persistent flag to the
// specified file extensions.
func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error {
return MarkFlagFilename(c.PersistentFlags(), name, extensions...)
}
// MarkFlagFilename instructs the various shell completion implementations to
// limit completions for the named flag to the specified file extensions.
func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error {
return flags.SetAnnotation(name, BashCompFilenameExt, extensions)
}
// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists.
// The bash completion script will call the bash function f for the flag.
//
// This will only work for bash completion.
// It is recommended to instead use c.RegisterFlagCompletionFunc(...) which allows
// to register a Go function which will work across all shells.
func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error {
return flags.SetAnnotation(name, BashCompCustom, []string{f})
}
// MarkFlagDirname instructs the various shell completion implementations to
// limit completions for the named flag to directory names.
func (c *Command) MarkFlagDirname(name string) error {
return MarkFlagDirname(c.Flags(), name)
}
// MarkPersistentFlagDirname instructs the various shell completion
// implementations to limit completions for the named persistent flag to
// directory names.
func (c *Command) MarkPersistentFlagDirname(name string) error {
return MarkFlagDirname(c.PersistentFlags(), name)
}
// MarkFlagDirname instructs the various shell completion implementations to
// limit completions for the named flag to directory names.
func MarkFlagDirname(flags *pflag.FlagSet, name string) error {
return flags.SetAnnotation(name, BashCompSubdirsInDir, []string{})
}
| 8,544 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/cobra/fish_completions.md | ## Generating Fish Completions For Your cobra.Command
Please refer to [Shell Completions](shell_completions.md) for details.
| 8,545 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/cobra/README.md | 
Cobra is both a library for creating powerful modern CLI applications and a program to generate applications and command files.
Cobra is used in many Go projects such as [Kubernetes](http://kubernetes.io/),
[Hugo](https://gohugo.io), and [Github CLI](https://github.com/cli/cli) to
name a few. [This list](./projects_using_cobra.md) contains a more extensive list of projects using Cobra.
[](https://travis-ci.org/spf13/cobra)
[](https://godoc.org/github.com/spf13/cobra)
[](https://goreportcard.com/report/github.com/spf13/cobra)
[](https://gophers.slack.com/archives/CD3LP1199)
# Table of Contents
- [Overview](#overview)
- [Concepts](#concepts)
* [Commands](#commands)
* [Flags](#flags)
- [Installing](#installing)
- [Getting Started](#getting-started)
* [Using the Cobra Generator](#using-the-cobra-generator)
* [Using the Cobra Library](#using-the-cobra-library)
* [Working with Flags](#working-with-flags)
* [Positional and Custom Arguments](#positional-and-custom-arguments)
* [Example](#example)
* [Help Command](#help-command)
* [Usage Message](#usage-message)
* [PreRun and PostRun Hooks](#prerun-and-postrun-hooks)
* [Suggestions when "unknown command" happens](#suggestions-when-unknown-command-happens)
* [Generating documentation for your command](#generating-documentation-for-your-command)
* [Generating shell completions](#generating-shell-completions)
- [Contributing](CONTRIBUTING.md)
- [License](#license)
# Overview
Cobra is a library providing a simple interface to create powerful modern CLI
interfaces similar to git & go tools.
Cobra is also an application that will generate your application scaffolding to rapidly
develop a Cobra-based application.
Cobra provides:
* Easy subcommand-based CLIs: `app server`, `app fetch`, etc.
* Fully POSIX-compliant flags (including short & long versions)
* Nested subcommands
* Global, local and cascading flags
* Easy generation of applications & commands with `cobra init appname` & `cobra add cmdname`
* Intelligent suggestions (`app srver`... did you mean `app server`?)
* Automatic help generation for commands and flags
* Automatic help flag recognition of `-h`, `--help`, etc.
* Automatically generated shell autocomplete for your application (bash, zsh, fish, powershell)
* Automatically generated man pages for your application
* Command aliases so you can change things without breaking them
* The flexibility to define your own help, usage, etc.
* Optional tight integration with [viper](http://github.com/spf13/viper) for 12-factor apps
# Concepts
Cobra is built on a structure of commands, arguments & flags.
**Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions.
The best applications will read like sentences when used. Users will know how
to use the application because they will natively understand how to use it.
The pattern to follow is
`APPNAME VERB NOUN --ADJECTIVE.`
or
`APPNAME COMMAND ARG --FLAG`
A few good real world examples may better illustrate this point.
In the following example, 'server' is a command, and 'port' is a flag:
hugo server --port=1313
In this command we are telling Git to clone the URL bare.
git clone URL --bare
## Commands
Command is the central point of the application. Each interaction that
the application supports will be contained in a Command. A command can
have children commands and optionally run an action.
In the example above, 'server' is the command.
[More about cobra.Command](https://godoc.org/github.com/spf13/cobra#Command)
## Flags
A flag is a way to modify the behavior of a command. Cobra supports
fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/).
A Cobra command can define flags that persist through to children commands
and flags that are only available to that command.
In the example above, 'port' is the flag.
Flag functionality is provided by the [pflag
library](https://github.com/spf13/pflag), a fork of the flag standard library
which maintains the same interface while adding POSIX compliance.
# Installing
Using Cobra is easy. First, use `go get` to install the latest version
of the library. This command will install the `cobra` generator executable
along with the library and its dependencies:
go get -u github.com/spf13/cobra
Next, include Cobra in your application:
```go
import "github.com/spf13/cobra"
```
# Getting Started
While you are welcome to provide your own organization, a Cobra-based
application typically follows this organizational structure:
```
▾ appName/
▾ cmd/
add.go
your.go
commands.go
here.go
main.go
```
In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra.
```go
package main
import (
"{pathToYourApp}/cmd"
)
func main() {
cmd.Execute()
}
```
## Using the Cobra Generator
Cobra provides its own program that will create your application and add any
commands you want. It's the easiest way to incorporate Cobra into your application.
[Here](https://github.com/spf13/cobra/blob/master/cobra/README.md) you can find more information about it.
## Using the Cobra Library
To manually implement Cobra you need to create a bare main.go file and a rootCmd file.
You will optionally provide additional commands as you see fit.
### Create rootCmd
Cobra doesn't require any special constructors. Simply create your commands.
Ideally you place this in app/cmd/root.go:
```go
var rootCmd = &cobra.Command{
Use: "hugo",
Short: "Hugo is a very fast static site generator",
Long: `A Fast and Flexible Static Site Generator built with
love by spf13 and friends in Go.
Complete documentation is available at http://hugo.spf13.com`,
Run: func(cmd *cobra.Command, args []string) {
// Do Stuff Here
},
}
func Execute() {
if err := rootCmd.Execute(); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
```
You will additionally define flags and handle configuration in your init() function.
For example cmd/root.go:
```go
package cmd
import (
"fmt"
"os"
homedir "github.com/mitchellh/go-homedir"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var (
// Used for flags.
cfgFile string
userLicense string
rootCmd = &cobra.Command{
Use: "cobra",
Short: "A generator for Cobra based Applications",
Long: `Cobra is a CLI library for Go that empowers applications.
This application is a tool to generate the needed files
to quickly create a Cobra application.`,
}
)
// Execute executes the root command.
func Execute() error {
return rootCmd.Execute()
}
func init() {
cobra.OnInitialize(initConfig)
rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)")
rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "author name for copyright attribution")
rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "name of license for the project")
rootCmd.PersistentFlags().Bool("viper", true, "use Viper for configuration")
viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper"))
viper.SetDefault("author", "NAME HERE <EMAIL ADDRESS>")
viper.SetDefault("license", "apache")
rootCmd.AddCommand(addCmd)
rootCmd.AddCommand(initCmd)
}
func er(msg interface{}) {
fmt.Println("Error:", msg)
os.Exit(1)
}
func initConfig() {
if cfgFile != "" {
// Use config file from the flag.
viper.SetConfigFile(cfgFile)
} else {
// Find home directory.
home, err := homedir.Dir()
if err != nil {
er(err)
}
// Search config in home directory with name ".cobra" (without extension).
viper.AddConfigPath(home)
viper.SetConfigName(".cobra")
}
viper.AutomaticEnv()
if err := viper.ReadInConfig(); err == nil {
fmt.Println("Using config file:", viper.ConfigFileUsed())
}
}
```
### Create your main.go
With the root command you need to have your main function execute it.
Execute should be run on the root for clarity, though it can be called on any command.
In a Cobra app, typically the main.go file is very bare. It serves one purpose: to initialize Cobra.
```go
package main
import (
"{pathToYourApp}/cmd"
)
func main() {
cmd.Execute()
}
```
### Create additional commands
Additional commands can be defined and typically are each given their own file
inside of the cmd/ directory.
If you wanted to create a version command you would create cmd/version.go and
populate it with the following:
```go
package cmd
import (
"fmt"
"github.com/spf13/cobra"
)
func init() {
rootCmd.AddCommand(versionCmd)
}
var versionCmd = &cobra.Command{
Use: "version",
Short: "Print the version number of Hugo",
Long: `All software has versions. This is Hugo's`,
Run: func(cmd *cobra.Command, args []string) {
fmt.Println("Hugo Static Site Generator v0.9 -- HEAD")
},
}
```
### Returning and handling errors
If you wish to return an error to the caller of a command, `RunE` can be used.
```go
package cmd
import (
"fmt"
"github.com/spf13/cobra"
)
func init() {
rootCmd.AddCommand(tryCmd)
}
var tryCmd = &cobra.Command{
Use: "try",
Short: "Try and possibly fail at something",
RunE: func(cmd *cobra.Command, args []string) error {
if err := someFunc(); err != nil {
return err
}
return nil
},
}
```
The error can then be caught at the execute function call.
## Working with Flags
Flags provide modifiers to control how the action command operates.
### Assign flags to a command
Since flags are defined and used in different locations, we need to
declare a variable with the appropriate scope outside the command
definitions so that the flag value can be assigned to it.
```go
var Verbose bool
var Source string
```
There are two different approaches to assign a flag.
### Persistent Flags
A flag can be 'persistent' meaning that this flag will be available to the
command it's assigned to as well as every command under that command. For
global flags, assign a flag as a persistent flag on the root.
```go
rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output")
```
### Local Flags
A flag can also be assigned locally which will only apply to that specific command.
```go
localCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from")
```
### Local Flag on Parent Commands
By default, Cobra only parses local flags on the target command; any local flags on
parent commands are ignored. By enabling `Command.TraverseChildren`, Cobra will
parse local flags on each command before executing the target command.
```go
command := cobra.Command{
Use: "print [OPTIONS] [COMMANDS]",
TraverseChildren: true,
}
```
### Bind Flags with Config
You can also bind your flags with [viper](https://github.com/spf13/viper):
```go
var author string
func init() {
rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution")
viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
}
```
In this example the persistent flag `author` is bound with `viper`.
**Note** that the variable `author` will not be set to the value from the config file
when the `--author` flag is not provided by the user.
More in [viper documentation](https://github.com/spf13/viper#working-with-flags).
### Required flags
Flags are optional by default. If instead you wish your command to report an error
when a flag has not been set, mark it as required:
```go
rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)")
rootCmd.MarkFlagRequired("region")
```
Or, for persistent flags:
```go
rootCmd.PersistentFlags().StringVarP(&Region, "region", "r", "", "AWS region (required)")
rootCmd.MarkPersistentFlagRequired("region")
```
## Positional and Custom Arguments
Validation of positional arguments can be specified using the `Args` field
of `Command`.
The following validators are built in:
- `NoArgs` - the command will report an error if there are any positional args.
- `ArbitraryArgs` - the command will accept any args.
- `OnlyValidArgs` - the command will report an error if there are any positional args that are not in the `ValidArgs` field of `Command`.
- `MinimumNArgs(int)` - the command will report an error if there are not at least N positional args.
- `MaximumNArgs(int)` - the command will report an error if there are more than N positional args.
- `ExactArgs(int)` - the command will report an error if there are not exactly N positional args.
- `ExactValidArgs(int)` - the command will report an error if there are not exactly N positional args OR if there are any positional args that are not in the `ValidArgs` field of `Command`
- `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args.
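A minimal sketch using one of the built-in validators (the `cp` command here is purely illustrative):

```go
var cpCmd = &cobra.Command{
  Use:   "cp <source> <destination>",
  Short: "Copy a file",
  Args:  cobra.ExactArgs(2),
  Run: func(cmd *cobra.Command, args []string) {
    fmt.Printf("copying %s to %s\n", args[0], args[1])
  },
}
```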
An example of setting the custom validator:
```go
var cmd = &cobra.Command{
Short: "hello",
Args: func(cmd *cobra.Command, args []string) error {
if len(args) < 1 {
return errors.New("requires a color argument")
}
if myapp.IsValidColor(args[0]) {
return nil
}
return fmt.Errorf("invalid color specified: %s", args[0])
},
Run: func(cmd *cobra.Command, args []string) {
fmt.Println("Hello, World!")
},
}
```
## Example
In the example below, we have defined three commands. Two are at the top level
and one (cmdTimes) is a child of one of the top commands. In this case the root
is not executable meaning that a subcommand is required. This is accomplished
by not providing a 'Run' for the 'rootCmd'.
We have only defined one flag for a single command.
More documentation about flags is available at https://github.com/spf13/pflag
```go
package main
import (
"fmt"
"strings"
"github.com/spf13/cobra"
)
func main() {
var echoTimes int
var cmdPrint = &cobra.Command{
Use: "print [string to print]",
Short: "Print anything to the screen",
Long: `print is for printing anything back to the screen.
For many years people have printed back to the screen.`,
Args: cobra.MinimumNArgs(1),
Run: func(cmd *cobra.Command, args []string) {
fmt.Println("Print: " + strings.Join(args, " "))
},
}
var cmdEcho = &cobra.Command{
Use: "echo [string to echo]",
Short: "Echo anything to the screen",
Long: `echo is for echoing anything back.
Echo works a lot like print, except it has a child command.`,
Args: cobra.MinimumNArgs(1),
Run: func(cmd *cobra.Command, args []string) {
fmt.Println("Echo: " + strings.Join(args, " "))
},
}
var cmdTimes = &cobra.Command{
Use: "times [string to echo]",
Short: "Echo anything to the screen more times",
Long: `echo things multiple times back to the user by providing
a count and a string.`,
Args: cobra.MinimumNArgs(1),
Run: func(cmd *cobra.Command, args []string) {
for i := 0; i < echoTimes; i++ {
fmt.Println("Echo: " + strings.Join(args, " "))
}
},
}
cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input")
var rootCmd = &cobra.Command{Use: "app"}
rootCmd.AddCommand(cmdPrint, cmdEcho)
cmdEcho.AddCommand(cmdTimes)
rootCmd.Execute()
}
```
For a more complete example of a larger application, please checkout [Hugo](http://gohugo.io/).
## Help Command
Cobra automatically adds a help command to your application when you have subcommands.
This will be called when a user runs 'app help'. Additionally, help will also
support all other commands as input. Say, for instance, you have a command called
'create' without any additional configuration; Cobra will work when 'app help
create' is called. Every command will automatically have the '--help' flag added.
### Example
The following output is automatically generated by Cobra. Nothing beyond the
command and flag definitions is needed.
$ cobra help
Cobra is a CLI library for Go that empowers applications.
This application is a tool to generate the needed files
to quickly create a Cobra application.
Usage:
cobra [command]
Available Commands:
add Add a command to a Cobra Application
help Help about any command
init Initialize a Cobra Application
Flags:
-a, --author string author name for copyright attribution (default "YOUR NAME")
--config string config file (default is $HOME/.cobra.yaml)
-h, --help help for cobra
-l, --license string name of license for the project
--viper use Viper for configuration (default true)
Use "cobra [command] --help" for more information about a command.
Help is just a command like any other. There is no special logic or behavior
around it. In fact, you can provide your own if you want.
### Defining your own help
You can provide your own Help command or your own template for the default command to use
with following functions:
```go
cmd.SetHelpCommand(cmd *Command)
cmd.SetHelpFunc(f func(*Command, []string))
cmd.SetHelpTemplate(s string)
```
The latter two will also apply to any children commands.
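For example, a minimal custom help template might look like this (a sketch; adapt the template to your needs):

```go
rootCmd.SetHelpTemplate(`{{with (or .Long .Short)}}{{.}}

{{end}}{{.UsageString}}`)
```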
## Usage Message
When the user provides an invalid flag or invalid command, Cobra responds by
showing the user the 'usage'.
### Example
You may recognize this from the help above. That's because the default help
embeds the usage as part of its output.
$ cobra --invalid
Error: unknown flag: --invalid
Usage:
cobra [command]
Available Commands:
add Add a command to a Cobra Application
help Help about any command
init Initialize a Cobra Application
Flags:
-a, --author string author name for copyright attribution (default "YOUR NAME")
--config string config file (default is $HOME/.cobra.yaml)
-h, --help help for cobra
-l, --license string name of license for the project
--viper use Viper for configuration (default true)
Use "cobra [command] --help" for more information about a command.
### Defining your own usage
You can provide your own usage function or template for Cobra to use.
Like help, the function and template are overridable through public methods:
```go
cmd.SetUsageFunc(f func(*Command) error)
cmd.SetUsageTemplate(s string)
```
## Version Flag
Cobra adds a top-level '--version' flag if the Version field is set on the root command.
Running an application with the '--version' flag will print the version to stdout using
the version template. The template can be customized using the
`cmd.SetVersionTemplate(s string)` function.
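For example (a sketch with an arbitrary version string):

```go
var rootCmd = &cobra.Command{
  Use:     "app",
  Short:   "An example application",
  Version: "0.1.0",
}

func init() {
  // Optional: customize the output of 'app --version'.
  rootCmd.SetVersionTemplate("{{.Name}} version {{.Version}}\n")
}
```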
## PreRun and PostRun Hooks
It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order:
- `PersistentPreRun`
- `PreRun`
- `Run`
- `PostRun`
- `PersistentPostRun`
An example of two commands which use all of these features is below. When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`:
```go
package main
import (
"fmt"
"github.com/spf13/cobra"
)
func main() {
var rootCmd = &cobra.Command{
Use: "root [sub]",
Short: "My root command",
PersistentPreRun: func(cmd *cobra.Command, args []string) {
fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args)
},
PreRun: func(cmd *cobra.Command, args []string) {
fmt.Printf("Inside rootCmd PreRun with args: %v\n", args)
},
Run: func(cmd *cobra.Command, args []string) {
fmt.Printf("Inside rootCmd Run with args: %v\n", args)
},
PostRun: func(cmd *cobra.Command, args []string) {
fmt.Printf("Inside rootCmd PostRun with args: %v\n", args)
},
PersistentPostRun: func(cmd *cobra.Command, args []string) {
fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args)
},
}
var subCmd = &cobra.Command{
Use: "sub [no options!]",
Short: "My subcommand",
PreRun: func(cmd *cobra.Command, args []string) {
fmt.Printf("Inside subCmd PreRun with args: %v\n", args)
},
Run: func(cmd *cobra.Command, args []string) {
fmt.Printf("Inside subCmd Run with args: %v\n", args)
},
PostRun: func(cmd *cobra.Command, args []string) {
fmt.Printf("Inside subCmd PostRun with args: %v\n", args)
},
PersistentPostRun: func(cmd *cobra.Command, args []string) {
fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args)
},
}
rootCmd.AddCommand(subCmd)
rootCmd.SetArgs([]string{""})
rootCmd.Execute()
fmt.Println()
rootCmd.SetArgs([]string{"sub", "arg1", "arg2"})
rootCmd.Execute()
}
```
Output:
```
Inside rootCmd PersistentPreRun with args: []
Inside rootCmd PreRun with args: []
Inside rootCmd Run with args: []
Inside rootCmd PostRun with args: []
Inside rootCmd PersistentPostRun with args: []
Inside rootCmd PersistentPreRun with args: [arg1 arg2]
Inside subCmd PreRun with args: [arg1 arg2]
Inside subCmd Run with args: [arg1 arg2]
Inside subCmd PostRun with args: [arg1 arg2]
Inside subCmd PersistentPostRun with args: [arg1 arg2]
```
## Suggestions when "unknown command" happens
Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example:
```
$ hugo srever
Error: unknown command "srever" for "hugo"
Did you mean this?
server
Run 'hugo --help' for usage.
```
Suggestions are automatic based on every subcommand registered and use an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion.
If you need to disable suggestions or tweak the string distance in your command, use:
```go
command.DisableSuggestions = true
```
or
```go
command.SuggestionsMinimumDistance = 1
```
You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance but still make sense in your set of commands, as well as for commands you don't want to create aliases for. Example:
```
$ kubectl remove
Error: unknown command "remove" for "kubectl"
Did you mean this?
delete
Run 'kubectl help' for usage.
```
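The `SuggestFor` field is set on the command that should be suggested. A sketch (the command and the suggested-for names are illustrative):

```go
var deleteCmd = &cobra.Command{
  Use:        "delete",
  Short:      "Delete a resource",
  SuggestFor: []string{"remove", "rm"},
  Run: func(cmd *cobra.Command, args []string) {
    // ...
  },
}
```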
## Generating documentation for your command
Cobra can generate documentation based on subcommands, flags, etc. Read more about it in the [docs generation documentation](doc/README.md).
## Generating shell completions
Cobra can generate a shell-completion file for the following shells: Bash, Zsh, Fish, Powershell. If you add more information to your commands, these completions can be amazingly powerful and flexible. Read more about it in [Shell Completions](shell_completions.md).
# License
Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt)
| 8,546 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/cobra/bash_completions.md | # Generating Bash Completions For Your cobra.Command
Please refer to [Shell Completions](shell_completions.md) for details.
## Bash legacy dynamic completions
For backwards-compatibility, Cobra still supports its legacy dynamic completion solution (described below). Unlike the `ValidArgsFunction` solution, the legacy solution will only work for Bash shell-completion and not for other shells. This legacy solution can be used along-side `ValidArgsFunction` and `RegisterFlagCompletionFunc()`, as long as both solutions are not used for the same command. This provides a path to gradually migrate from the legacy solution to the new solution.
The legacy solution allows you to inject bash functions into the bash completion script. Those bash functions are responsible for providing the completion choices for your own completions.
Some code that works in kubernetes:
```bash
const (
bash_completion_func = `__kubectl_parse_get()
{
local kubectl_output out
if kubectl_output=$(kubectl get --no-headers "$1" 2>/dev/null); then
out=($(echo "${kubectl_output}" | awk '{print $1}'))
COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) )
fi
}
__kubectl_get_resource()
{
if [[ ${#nouns[@]} -eq 0 ]]; then
return 1
fi
__kubectl_parse_get ${nouns[${#nouns[@]} -1]}
if [[ $? -eq 0 ]]; then
return 0
fi
}
__kubectl_custom_func() {
case ${last_command} in
kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop)
__kubectl_get_resource
return
;;
*)
;;
esac
}
`)
```
And then I set that in my command definition:
```go
cmds := &cobra.Command{
Use: "kubectl",
Short: "kubectl controls the Kubernetes cluster manager",
Long: `kubectl controls the Kubernetes cluster manager.
Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`,
Run: runHelp,
BashCompletionFunction: bash_completion_func,
}
```
The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__kubectl_custom_func()` (`__<command-use>_custom_func()`) to be called when the built-in processor was unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` the `__kubectl_custom_func()` will run because the cobra.Command only understood "kubectl" and "get." `__kubectl_custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods!
Similarly, for flags:
```go
annotation := make(map[string][]string)
annotation[cobra.BashCompCustom] = []string{"__kubectl_get_namespaces"}
flag := &pflag.Flag{
Name: "namespace",
Usage: usage,
Annotations: annotation,
}
cmd.Flags().AddFlag(flag)
```
In addition add the `__kubectl_get_namespaces` implementation in the `BashCompletionFunction`
value, e.g.:
```bash
__kubectl_get_namespaces()
{
local template
template="{{ range .items }}{{ .metadata.name }} {{ end }}"
local kubectl_out
if kubectl_out=$(kubectl get -o template --template="${template}" namespace 2>/dev/null); then
COMPREPLY=( $( compgen -W "${kubectl_out}[*]" -- "$cur" ) )
fi
}
```
| 8,547 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/cobra/zsh_completions.md | ## Generating Zsh Completion For Your cobra.Command
Please refer to [Shell Completions](shell_completions.md) for details.
## Zsh completions standardization
Cobra 1.1 standardized its zsh completion support to align it with its other shell completions. Although the API was kept backwards-compatible, some small changes in behavior were introduced.
### Deprecation summary
See further below for more details on these deprecations.
* `cmd.MarkZshCompPositionalArgumentFile(pos, []string{})` is no longer needed. It is therefore **deprecated** and silently ignored.
* `cmd.MarkZshCompPositionalArgumentFile(pos, glob[])` is **deprecated** and silently ignored.
* Instead use `ValidArgsFunction` with `ShellCompDirectiveFilterFileExt`.
* `cmd.MarkZshCompPositionalArgumentWords()` is **deprecated** and silently ignored.
* Instead use `ValidArgsFunction`.
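For example, positional file completion filtered by extension, which previously relied on `MarkZshCompPositionalArgumentFile` with a glob, can be expressed with `ValidArgsFunction` (a sketch; the command name and extensions are illustrative):

```go
cmd := &cobra.Command{
  Use: "apply <manifest>",
  ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
    // Complete only files ending in .yaml or .yml (extensions, not globs).
    return []string{"yaml", "yml"}, cobra.ShellCompDirectiveFilterFileExt
  },
  Run: func(cmd *cobra.Command, args []string) {},
}
```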
### Behavioral changes
**Noun completion**
|Old behavior|New behavior|
|---|---|
|No file completion by default (opposite of bash)|File completion by default; use `ValidArgsFunction` with `ShellCompDirectiveNoFileComp` to turn off file completion on a per-argument basis|
|Completion of flag names without the `-` prefix having been typed|Flag names are only completed if the user has typed the first `-`|
|`cmd.MarkZshCompPositionalArgumentFile(pos, []string{})` used to turn on file completion on a per-argument position basis|File completion for all arguments by default; `cmd.MarkZshCompPositionalArgumentFile()` is **deprecated** and silently ignored|
|`cmd.MarkZshCompPositionalArgumentFile(pos, glob[])` used to turn on file completion **with glob filtering** on a per-argument position basis (zsh-specific)|`cmd.MarkZshCompPositionalArgumentFile()` is **deprecated** and silently ignored; use `ValidArgsFunction` with `ShellCompDirectiveFilterFileExt` for file **extension** filtering (not full glob filtering)|
|`cmd.MarkZshCompPositionalArgumentWords(pos, words[])` used to provide completion choices on a per-argument position basis (zsh-specific)|`cmd.MarkZshCompPositionalArgumentWords()` is **deprecated** and silently ignored; use `ValidArgsFunction` to achieve the same behavior|
**Flag-value completion**
|Old behavior|New behavior|
|---|---|
|No file completion by default (opposite of bash)|File completion by default; use `RegisterFlagCompletionFunc()` with `ShellCompDirectiveNoFileComp` to turn off file completion|
|`cmd.MarkFlagFilename(flag, []string{})` and similar used to turn on file completion|File completion by default; `cmd.MarkFlagFilename(flag, []string{})` no longer needed in this context and silently ignored|
|`cmd.MarkFlagFilename(flag, glob[])` used to turn on file completion **with glob filtering** (syntax of `[]string{"*.yaml", "*.yml"}` incompatible with bash)|Will continue to work, however, support for bash syntax is added and should be used instead so as to work for all shells (`[]string{"yaml", "yml"}`)|
|`cmd.MarkFlagDirname(flag)` only completes directories (zsh-specific)|Has been added for all shells|
|Completion of a flag name does not repeat, unless flag is of type `*Array` or `*Slice` (not supported by bash)|Retained for `zsh` and added to `fish`|
|Completion of a flag name does not provide the `=` form (unlike bash)|Retained for `zsh` and added to `fish`|
**Improvements**
* Custom completion support (`ValidArgsFunction` and `RegisterFlagCompletionFunc()`)
* File completion by default if no other completions found
* Handling of required flags
* File extension filtering no longer mutually exclusive with bash usage
* Completion of directory names *within* another directory
* Support for `=` form of flags
| 8,548 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/cobra/LICENSE.txt | Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
| 8,549 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/cobra/args.go | package cobra
import (
"fmt"
"strings"
)
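// PositionalArgs is the type of the validators assigned to a Command's Args
// field, e.g. (illustrative):
//
//	cmd := &Command{Use: "get", Args: MinimumNArgs(1)}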
type PositionalArgs func(cmd *Command, args []string) error
// Legacy arg validation has the following behaviour:
// - root commands with no subcommands can take arbitrary arguments
// - root commands with subcommands will do subcommand validity checking
// - subcommands will always accept arbitrary arguments
func legacyArgs(cmd *Command, args []string) error {
// no subcommand, always take args
if !cmd.HasSubCommands() {
return nil
}
// root command with subcommands, do subcommand checking.
if !cmd.HasParent() && len(args) > 0 {
return fmt.Errorf("unknown command %q for %q%s", args[0], cmd.CommandPath(), cmd.findSuggestions(args[0]))
}
return nil
}
// NoArgs returns an error if any args are included.
func NoArgs(cmd *Command, args []string) error {
if len(args) > 0 {
return fmt.Errorf("unknown command %q for %q", args[0], cmd.CommandPath())
}
return nil
}
// OnlyValidArgs returns an error if any args are not in the list of ValidArgs.
func OnlyValidArgs(cmd *Command, args []string) error {
if len(cmd.ValidArgs) > 0 {
// Remove any description that may be included in ValidArgs.
// A description is following a tab character.
var validArgs []string
for _, v := range cmd.ValidArgs {
validArgs = append(validArgs, strings.Split(v, "\t")[0])
}
for _, v := range args {
if !stringInSlice(v, validArgs) {
return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0]))
}
}
}
return nil
}
// ArbitraryArgs never returns an error.
func ArbitraryArgs(cmd *Command, args []string) error {
return nil
}
// MinimumNArgs returns an error if there is not at least N args.
func MinimumNArgs(n int) PositionalArgs {
return func(cmd *Command, args []string) error {
if len(args) < n {
return fmt.Errorf("requires at least %d arg(s), only received %d", n, len(args))
}
return nil
}
}
// MaximumNArgs returns an error if there are more than N args.
func MaximumNArgs(n int) PositionalArgs {
return func(cmd *Command, args []string) error {
if len(args) > n {
return fmt.Errorf("accepts at most %d arg(s), received %d", n, len(args))
}
return nil
}
}
// ExactArgs returns an error if there are not exactly n args.
func ExactArgs(n int) PositionalArgs {
return func(cmd *Command, args []string) error {
if len(args) != n {
return fmt.Errorf("accepts %d arg(s), received %d", n, len(args))
}
return nil
}
}
// ExactValidArgs returns an error if
// there are not exactly N positional args OR
// there are any positional args that are not in the `ValidArgs` field of `Command`
func ExactValidArgs(n int) PositionalArgs {
return func(cmd *Command, args []string) error {
if err := ExactArgs(n)(cmd, args); err != nil {
return err
}
return OnlyValidArgs(cmd, args)
}
}
// RangeArgs returns an error if the number of args is not within the expected range.
func RangeArgs(min int, max int) PositionalArgs {
return func(cmd *Command, args []string) error {
if len(args) < min || len(args) > max {
return fmt.Errorf("accepts between %d and %d arg(s), received %d", min, max, len(args))
}
return nil
}
}
| 8,550 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/cobra/custom_completions.go | package cobra
import (
"fmt"
"os"
"strings"
"github.com/spf13/pflag"
)
const (
// ShellCompRequestCmd is the name of the hidden command that is used to request
// completion results from the program. It is used by the shell completion scripts.
ShellCompRequestCmd = "__complete"
// ShellCompNoDescRequestCmd is the name of the hidden command that is used to request
// completion results without their description. It is used by the shell completion scripts.
ShellCompNoDescRequestCmd = "__completeNoDesc"
)
// Global map of flag completion functions.
var flagCompletionFunctions = map[*pflag.Flag]func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective){}
// ShellCompDirective is a bit map representing the different behaviors the shell
// can be instructed to have once completions have been provided.
type ShellCompDirective int
const (
// ShellCompDirectiveError indicates an error occurred and completions should be ignored.
ShellCompDirectiveError ShellCompDirective = 1 << iota
// ShellCompDirectiveNoSpace indicates that the shell should not add a space
// after the completion even if there is a single completion provided.
ShellCompDirectiveNoSpace
// ShellCompDirectiveNoFileComp indicates that the shell should not provide
// file completion even when no completion is provided.
// This currently does not work for zsh or bash < 4
ShellCompDirectiveNoFileComp
// ShellCompDirectiveFilterFileExt indicates that the provided completions
// should be used as file extension filters.
// For flags, using Command.MarkFlagFilename() and Command.MarkPersistentFlagFilename()
// is a shortcut to using this directive explicitly. The BashCompFilenameExt
// annotation can also be used to obtain the same behavior for flags.
ShellCompDirectiveFilterFileExt
// ShellCompDirectiveFilterDirs indicates that only directory names should
// be provided in file completion. To request directory names within another
// directory, the returned completions should specify the directory within
// which to search. The BashCompSubdirsInDir annotation can be used to
// obtain the same behavior but only for flags.
ShellCompDirectiveFilterDirs
// ===========================================================================
// All directives using iota should be above this one.
// For internal use.
shellCompDirectiveMaxValue
// ShellCompDirectiveDefault indicates to let the shell perform its default
// behavior after completions have been provided.
// This one must be last to avoid messing up the iota count.
ShellCompDirectiveDefault ShellCompDirective = 0
)
// RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag.
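//
// A usage sketch (the flag name and completion values are illustrative only):
//
//	cmd.RegisterFlagCompletionFunc("output",
//		func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
//			return []string{"json", "yaml", "table"}, ShellCompDirectiveNoFileComp
//		})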
func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)) error {
flag := c.Flag(flagName)
if flag == nil {
return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' does not exist", flagName)
}
if _, exists := flagCompletionFunctions[flag]; exists {
return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' already registered", flagName)
}
flagCompletionFunctions[flag] = f
return nil
}
// Returns a string listing the different directive enabled in the specified parameter
func (d ShellCompDirective) string() string {
var directives []string
if d&ShellCompDirectiveError != 0 {
directives = append(directives, "ShellCompDirectiveError")
}
if d&ShellCompDirectiveNoSpace != 0 {
directives = append(directives, "ShellCompDirectiveNoSpace")
}
if d&ShellCompDirectiveNoFileComp != 0 {
directives = append(directives, "ShellCompDirectiveNoFileComp")
}
if d&ShellCompDirectiveFilterFileExt != 0 {
directives = append(directives, "ShellCompDirectiveFilterFileExt")
}
if d&ShellCompDirectiveFilterDirs != 0 {
directives = append(directives, "ShellCompDirectiveFilterDirs")
}
if len(directives) == 0 {
directives = append(directives, "ShellCompDirectiveDefault")
}
if d >= shellCompDirectiveMaxValue {
return fmt.Sprintf("ERROR: unexpected ShellCompDirective value: %d", d)
}
return strings.Join(directives, ", ")
}
// Adds a special hidden command that can be used to request custom completions.
func (c *Command) initCompleteCmd(args []string) {
completeCmd := &Command{
Use: fmt.Sprintf("%s [command-line]", ShellCompRequestCmd),
Aliases: []string{ShellCompNoDescRequestCmd},
DisableFlagsInUseLine: true,
Hidden: true,
DisableFlagParsing: true,
Args: MinimumNArgs(1),
Short: "Request shell completion choices for the specified command-line",
Long: fmt.Sprintf("%[2]s is a special command that is used by the shell completion logic\n%[1]s",
"to request completion choices for the specified command-line.", ShellCompRequestCmd),
Run: func(cmd *Command, args []string) {
finalCmd, completions, directive, err := cmd.getCompletions(args)
if err != nil {
CompErrorln(err.Error())
// Keep going for multiple reasons:
// 1- There could be some valid completions even though there was an error
// 2- Even without completions, we need to print the directive
}
noDescriptions := (cmd.CalledAs() == ShellCompNoDescRequestCmd)
for _, comp := range completions {
if noDescriptions {
// Remove any description that may be included following a tab character.
comp = strings.Split(comp, "\t")[0]
}
// Make sure we only write the first line to the output.
// This is needed if a description contains a linebreak.
// Otherwise the shell scripts will interpret the other lines as new flags
// and could therefore provide a wrong completion.
comp = strings.Split(comp, "\n")[0]
// Finally trim the completion. This is especially important to get rid
// of a trailing tab when there are no description following it.
// For example, a sub-command without a description should not be completed
// with a tab at the end (or else zsh will show a -- following it
// although there is no description).
comp = strings.TrimSpace(comp)
// Print each possible completion to stdout for the completion script to consume.
fmt.Fprintln(finalCmd.OutOrStdout(), comp)
}
if directive >= shellCompDirectiveMaxValue {
directive = ShellCompDirectiveDefault
}
// As the last printout, print the completion directive for the completion script to parse.
// The directive integer must be that last character following a single colon (:).
// The completion script expects :<directive>
fmt.Fprintf(finalCmd.OutOrStdout(), ":%d\n", directive)
// Print some helpful info to stderr for the user to understand.
// Output from stderr must be ignored by the completion script.
fmt.Fprintf(finalCmd.ErrOrStderr(), "Completion ended with directive: %s\n", directive.string())
},
}
c.AddCommand(completeCmd)
subCmd, _, err := c.Find(args)
if err != nil || subCmd.Name() != ShellCompRequestCmd {
// Only create this special command if it is actually being called.
// This reduces possible side-effects of creating such a command;
// for example, having this command would cause problems to a
// cobra program that only consists of the root command, since this
// command would cause the root command to suddenly have a subcommand.
c.RemoveCommand(completeCmd)
}
}
func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDirective, error) {
// The last argument, which is not completely typed by the user,
// should not be part of the list of arguments
toComplete := args[len(args)-1]
trimmedArgs := args[:len(args)-1]
var finalCmd *Command
var finalArgs []string
var err error
// Find the real command for which completion must be performed
// check if we need to traverse here to parse local flags on parent commands
if c.Root().TraverseChildren {
finalCmd, finalArgs, err = c.Root().Traverse(trimmedArgs)
} else {
finalCmd, finalArgs, err = c.Root().Find(trimmedArgs)
}
if err != nil {
// Unable to find the real command. E.g., <program> someInvalidCmd <TAB>
return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Unable to find a command for arguments: %v", trimmedArgs)
}
// Check if we are doing flag value completion before parsing the flags.
// This is important because if we are completing a flag value, we need to also
// remove the flag name argument from the list of finalArgs or else the parsing
// could fail due to an invalid value (incomplete) for the flag.
flag, finalArgs, toComplete, err := checkIfFlagCompletion(finalCmd, finalArgs, toComplete)
if err != nil {
// Error while attempting to parse flags
return finalCmd, []string{}, ShellCompDirectiveDefault, err
}
// Parse the flags early so we can check if required flags are set
if err = finalCmd.ParseFlags(finalArgs); err != nil {
return finalCmd, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error())
}
if flag != nil {
// Check if we are completing a flag value subject to annotations
if validExts, present := flag.Annotations[BashCompFilenameExt]; present {
if len(validExts) != 0 {
// File completion filtered by extensions
return finalCmd, validExts, ShellCompDirectiveFilterFileExt, nil
}
// The annotation requests simple file completion. There is no reason to do
// that since it is the default behavior anyway. Let's ignore this annotation
// in case the program also registered a completion function for this flag.
// Even though it is a mistake on the program's side, let's be nice when we can.
}
if subDir, present := flag.Annotations[BashCompSubdirsInDir]; present {
if len(subDir) == 1 {
// Directory completion from within a directory
return finalCmd, subDir, ShellCompDirectiveFilterDirs, nil
}
// Directory completion
return finalCmd, []string{}, ShellCompDirectiveFilterDirs, nil
}
}
// When doing completion of a flag name, as soon as an argument starts with
// a '-' we know it is a flag. We cannot use isFlagArg() here as it requires
// the flag name to be complete
if flag == nil && len(toComplete) > 0 && toComplete[0] == '-' && !strings.Contains(toComplete, "=") {
var completions []string
// First check for required flags
completions = completeRequireFlags(finalCmd, toComplete)
// If we have not found any required flags, only then can we show regular flags
if len(completions) == 0 {
doCompleteFlags := func(flag *pflag.Flag) {
if !flag.Changed ||
strings.Contains(flag.Value.Type(), "Slice") ||
strings.Contains(flag.Value.Type(), "Array") {
// If the flag is not already present, or if it can be specified multiple times (Array or Slice)
// we suggest it as a completion
completions = append(completions, getFlagNameCompletions(flag, toComplete)...)
}
}
// We cannot use finalCmd.Flags() because we may not have called ParseFlags() for commands
// that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and
// non-inherited flags.
finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
doCompleteFlags(flag)
})
finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
doCompleteFlags(flag)
})
}
directive := ShellCompDirectiveNoFileComp
if len(completions) == 1 && strings.HasSuffix(completions[0], "=") {
// If there is a single completion, the shell usually adds a space
// after the completion. We don't want that if the flag ends with an =
directive = ShellCompDirectiveNoSpace
}
return finalCmd, completions, directive, nil
}
// We only remove the flags from the arguments if DisableFlagParsing is not set.
// This is important for commands which have requested to do their own flag completion.
if !finalCmd.DisableFlagParsing {
finalArgs = finalCmd.Flags().Args()
}
var completions []string
directive := ShellCompDirectiveDefault
if flag == nil {
foundLocalNonPersistentFlag := false
// If TraverseChildren is true on the root command we don't check for
// local flags because we can use a local flag on a parent command
if !finalCmd.Root().TraverseChildren {
// Check if there are any local, non-persistent flags on the command-line
localNonPersistentFlags := finalCmd.LocalNonPersistentFlags()
finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
if localNonPersistentFlags.Lookup(flag.Name) != nil && flag.Changed {
foundLocalNonPersistentFlag = true
}
})
}
// Complete subcommand names, including the help command
if len(finalArgs) == 0 && !foundLocalNonPersistentFlag {
// We only complete sub-commands if:
// - there are no arguments on the command-line and
// - there are no local, non-persistent flags on the command-line or TraverseChildren is true
for _, subCmd := range finalCmd.Commands() {
if subCmd.IsAvailableCommand() || subCmd == finalCmd.helpCommand {
if strings.HasPrefix(subCmd.Name(), toComplete) {
completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short))
}
directive = ShellCompDirectiveNoFileComp
}
}
}
// Complete required flags even without the '-' prefix
completions = append(completions, completeRequireFlags(finalCmd, toComplete)...)
// Always complete ValidArgs, even if we are completing a subcommand name.
// This is for commands that have both subcommands and ValidArgs.
if len(finalCmd.ValidArgs) > 0 {
if len(finalArgs) == 0 {
// ValidArgs are only for the first argument
for _, validArg := range finalCmd.ValidArgs {
if strings.HasPrefix(validArg, toComplete) {
completions = append(completions, validArg)
}
}
directive = ShellCompDirectiveNoFileComp
// If no completions were found within commands or ValidArgs,
// see if there are any ArgAliases that should be completed.
if len(completions) == 0 {
for _, argAlias := range finalCmd.ArgAliases {
if strings.HasPrefix(argAlias, toComplete) {
completions = append(completions, argAlias)
}
}
}
}
// If there are ValidArgs specified (even if they don't match), we stop completion.
// Only one of ValidArgs or ValidArgsFunction can be used for a single command.
return finalCmd, completions, directive, nil
}
// Let the logic continue so as to add any ValidArgsFunction completions,
// even if we already found sub-commands.
// This is for commands that have subcommands but also specify a ValidArgsFunction.
}
// Find the completion function for the flag or command
var completionFn func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)
if flag != nil {
completionFn = flagCompletionFunctions[flag]
} else {
completionFn = finalCmd.ValidArgsFunction
}
if completionFn != nil {
// Go custom completion defined for this flag or command.
// Call the registered completion function to get the completions.
var comps []string
comps, directive = completionFn(finalCmd, finalArgs, toComplete)
completions = append(completions, comps...)
}
return finalCmd, completions, directive, nil
}
func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []string {
if nonCompletableFlag(flag) {
return []string{}
}
var completions []string
flagName := "--" + flag.Name
if strings.HasPrefix(flagName, toComplete) {
// Flag without the =
completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage))
// Why suggest both long forms: --flag and --flag= ?
// This forces the user to *always* have to type either an = or a space after the flag name.
// Let's be nice and avoid making users have to do that.
// Since boolean flags and shortname flags don't show the = form, let's go that route and never show it.
// The = form will still work, we just won't suggest it.
// This also makes the list of suggested flags shorter as we avoid all the = forms.
//
// if len(flag.NoOptDefVal) == 0 {
// // Flag requires a value, so it can be suffixed with =
// flagName += "="
// completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage))
// }
}
flagName = "-" + flag.Shorthand
if len(flag.Shorthand) > 0 && strings.HasPrefix(flagName, toComplete) {
completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage))
}
return completions
}
func completeRequireFlags(finalCmd *Command, toComplete string) []string {
var completions []string
doCompleteRequiredFlags := func(flag *pflag.Flag) {
if _, present := flag.Annotations[BashCompOneRequiredFlag]; present {
if !flag.Changed {
// If the flag is not already present, we suggest it as a completion
completions = append(completions, getFlagNameCompletions(flag, toComplete)...)
}
}
}
// We cannot use finalCmd.Flags() because we may not have called ParseFlags() for commands
// that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and
// non-inherited flags.
finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
doCompleteRequiredFlags(flag)
})
finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
doCompleteRequiredFlags(flag)
})
return completions
}
func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*pflag.Flag, []string, string, error) {
if finalCmd.DisableFlagParsing {
// We only do flag completion if we are allowed to parse flags
// This is important for commands which have requested to do their own flag completion.
return nil, args, lastArg, nil
}
var flagName string
trimmedArgs := args
flagWithEqual := false
// When doing completion of a flag name, as soon as an argument starts with
// a '-' we know it is a flag. We cannot use isFlagArg() here as that function
// requires the flag name to be complete
if len(lastArg) > 0 && lastArg[0] == '-' {
if index := strings.Index(lastArg, "="); index >= 0 {
// Flag with an =
flagName = strings.TrimLeft(lastArg[:index], "-")
lastArg = lastArg[index+1:]
flagWithEqual = true
} else {
// Normal flag completion
return nil, args, lastArg, nil
}
}
if len(flagName) == 0 {
if len(args) > 0 {
prevArg := args[len(args)-1]
if isFlagArg(prevArg) {
// Only consider the case where the flag does not contain an =.
// If the flag contains an = it means it has already been fully processed,
// so we don't need to deal with it here.
if index := strings.Index(prevArg, "="); index < 0 {
flagName = strings.TrimLeft(prevArg, "-")
// Remove the uncompleted flag or else there could be an error created
// for an invalid value for that flag
trimmedArgs = args[:len(args)-1]
}
}
}
}
if len(flagName) == 0 {
// Not doing flag completion
return nil, trimmedArgs, lastArg, nil
}
flag := findFlag(finalCmd, flagName)
if flag == nil {
// Flag not supported by this command, nothing to complete
err := fmt.Errorf("Subcommand '%s' does not support flag '%s'", finalCmd.Name(), flagName)
return nil, nil, "", err
}
if !flagWithEqual {
if len(flag.NoOptDefVal) != 0 {
// We had assumed we were dealing with a two-word flag but the flag is a boolean flag.
// In that case, there is no value following it, so we are not really doing flag completion.
// Reset everything to do noun completion.
trimmedArgs = args
flag = nil
}
}
return flag, trimmedArgs, lastArg, nil
}
func findFlag(cmd *Command, name string) *pflag.Flag {
flagSet := cmd.Flags()
if len(name) == 1 {
// First convert the short flag into a long flag
// as the cmd.Flag() search only accepts long flags
if short := flagSet.ShorthandLookup(name); short != nil {
name = short.Name
} else {
set := cmd.InheritedFlags()
if short = set.ShorthandLookup(name); short != nil {
name = short.Name
} else {
return nil
}
}
}
return cmd.Flag(name)
}
// CompDebug prints the specified string to the same file as where the
// completion script prints its logs.
// Note that completion printouts should never be on stdout as they would
// be wrongly interpreted as actual completion choices by the completion script.
func CompDebug(msg string, printToStdErr bool) {
msg = fmt.Sprintf("[Debug] %s", msg)
// Such logs are only printed when the user has set the environment
// variable BASH_COMP_DEBUG_FILE to the path of some file to be used.
if path := os.Getenv("BASH_COMP_DEBUG_FILE"); path != "" {
f, err := os.OpenFile(path,
os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err == nil {
defer f.Close()
f.WriteString(msg)
}
}
if printToStdErr {
// Must print to stderr for this not to be read by the completion script.
fmt.Fprint(os.Stderr, msg)
}
}
// CompDebugln prints the specified string with a newline at the end
// to the same file as where the completion script prints its logs.
// Such logs are only printed when the user has set the environment
// variable BASH_COMP_DEBUG_FILE to the path of some file to be used.
func CompDebugln(msg string, printToStdErr bool) {
CompDebug(fmt.Sprintf("%s\n", msg), printToStdErr)
}
// CompError prints the specified completion message to stderr.
func CompError(msg string) {
msg = fmt.Sprintf("[Error] %s", msg)
CompDebug(msg, true)
}
// CompErrorln prints the specified completion message to stderr with a newline at the end.
func CompErrorln(msg string) {
CompError(fmt.Sprintf("%s\n", msg))
}
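// Illustrative sketch (not part of the upstream file): a program could call
// CompDebugln from inside a ValidArgsFunction to trace completion requests
// without polluting stdout; the values "alpha" and "beta" below are made up.
// The log lines only appear when BASH_COMP_DEBUG_FILE points to a writable file.
//
//	cmd.ValidArgsFunction = func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
//		CompDebugln("completing: "+toComplete, false)
//		return []string{"alpha", "beta"}, ShellCompDirectiveNoFileComp
//	}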
| 8,551 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/cobra/CHANGELOG.md | # Cobra Changelog
## Pending
* Fix man page doc generation - no auto generated tag when `cmd.DisableAutoGenTag = true` @jpmcb
## v1.0.0
Announcing v1.0.0 of Cobra. 🎉
**Notable Changes**
* Fish completion (including support for Go custom completion) @marckhouzam
* API (urgent): Rename BashCompDirectives to ShellCompDirectives @marckhouzam
* Remove/replace SetOutput on Command - deprecated @jpmcb
* add support for autolabel stale PR @xchapter7x
* Add Labeler Actions @xchapter7x
* Custom completions coded in Go (instead of Bash) @marckhouzam
* Partial Revert of #922 @jharshman
* Add Makefile to project @jharshman
* Correct documentation for InOrStdin @desponda
* Apply formatting to templates @jharshman
* Revert change so help is printed on stdout again @marckhouzam
* Update md2man to v2.0.0 @pdf
* update viper to v1.4.0 @umarcor
* Update cmd/root.go example in README.md @jharshman
| 8,552 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/cobra/powershell_completions.md | # Generating PowerShell Completions For Your Own cobra.Command
Cobra can generate PowerShell completion scripts. Users need PowerShell version 5.0 or above, which comes with Windows 10 and can be downloaded separately for Windows 7 or 8.1. They can then write the completions to a file and source this file from their PowerShell profile, which is referenced by the `$Profile` automatic variable. See `Get-Help about_Profiles` for more info about PowerShell profiles.
*Note*: PowerShell completions have not (yet?) been aligned to Cobra's generic shell completion support. This implies the PowerShell completions are not as rich as for other shells (see [What's not yet supported](#whats-not-yet-supported)), and may behave slightly differently. They are still very useful for PowerShell users.
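As a minimal sketch (the program name `myapp` and the output path are hypothetical), a program might write the completion script to a file with `GenPowerShellCompletionFile` and let users source that file from their PowerShell profile:

```go
package main

import (
	"log"

	"github.com/spf13/cobra"
)

var rootCmd = &cobra.Command{Use: "myapp"}

func main() {
	// Write the PowerShell completion script; users then dot-source
	// myapp.ps1 from their PowerShell profile.
	if err := rootCmd.GenPowerShellCompletionFile("myapp.ps1"); err != nil {
		log.Fatal(err)
	}
}
```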
# What's supported
- Completion for subcommands using their `.Short` description
- Completion for non-hidden flags using their `.Name` and `.Shorthand`
# What's not yet supported
- Command aliases
- Required, filename or custom flags (they will work like normal flags)
- Custom completion scripts
| 8,553 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/cobra/Makefile | BIN="./bin"
SRC=$(shell find . -name "*.go")
ifeq (, $(shell which richgo))
$(warning "could not find richgo in $(PATH), run: go get github.com/kyoh86/richgo")
endif
.PHONY: fmt vet test cobra_generator install_deps clean
default: all
all: fmt vet test cobra_generator
fmt:
$(info ******************** checking formatting ********************)
@test -z $(shell gofmt -l $(SRC)) || (gofmt -d $(SRC); exit 1)
test: install_deps vet
$(info ******************** running tests ********************)
richgo test -v ./...
cobra_generator: install_deps
$(info ******************** building generator ********************)
mkdir -p $(BIN)
make -C cobra all
install_deps:
$(info ******************** downloading dependencies ********************)
go get -v ./...
vet:
$(info ******************** vetting ********************)
go vet ./...
clean:
rm -rf $(BIN)
| 8,554 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/cobra/CONTRIBUTING.md | # Contributing to Cobra
Thank you so much for contributing to Cobra. We appreciate your time and help.
Here are some guidelines to help you get started.
## Code of Conduct
Be kind and respectful to the members of the community. Take time to educate
others who are seeking help. Harassment of any kind will not be tolerated.
## Questions
If you have questions regarding Cobra, feel free to ask it in the community
[#cobra Slack channel][cobra-slack]
## Filing a bug or feature
1. Before filing an issue, please check the existing issues to see if a
similar one was already opened. If there is one already opened, feel free
to comment on it.
1. If you believe you've found a bug, please provide detailed steps of
reproduction, the version of Cobra and anything else you believe will be
useful to help troubleshoot it (e.g. OS environment, environment variables,
etc...). Also state the current behavior vs. the expected behavior.
1. If you'd like to see a feature or an enhancement please open an issue with
a clear title and description of what the feature is and why it would be
beneficial to the project and its users.
## Submitting changes
1. CLA: Upon submitting a Pull Request (PR), contributors will be prompted to
sign a CLA. Please sign the CLA :slightly_smiling_face:
1. Tests: If you are submitting code, please ensure you have adequate tests
for the feature. Tests can be run via `go test ./...` or `make test`.
1. Since this is a Go project, ensure the new code is properly formatted to
ensure code consistency. Run `make all`.
### Quick steps to contribute
1. Fork the project.
1. Download your fork to your PC (`git clone https://github.com/your_username/cobra && cd cobra`)
1. Create your feature branch (`git checkout -b my-new-feature`)
1. Make changes and run tests (`make test`)
1. Add them to staging (`git add .`)
1. Commit your changes (`git commit -m 'Add some feature'`)
1. Push to the branch (`git push origin my-new-feature`)
1. Create new pull request
<!-- Links -->
[cobra-slack]: https://gophers.slack.com/archives/CD3LP1199
| 8,555 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/cobra/command.go | // Copyright © 2013 Steve Francia <[email protected]>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces.
// In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code.
package cobra
import (
"bytes"
"context"
"fmt"
"io"
"os"
"path/filepath"
"sort"
"strings"
flag "github.com/spf13/pflag"
)
// FParseErrWhitelist configures Flag parse errors to be ignored
type FParseErrWhitelist flag.ParseErrorsWhitelist
// Command is just that, a command for your application.
// E.g. 'go run ...' - 'run' is the command. Cobra requires
// you to define the usage and description as part of your command
// definition to ensure usability.
type Command struct {
// Use is the one-line usage message.
// Recommended syntax is as follows:
// [ ] identifies an optional argument. Arguments that are not enclosed in brackets are required.
// ... indicates that you can specify multiple values for the previous argument.
// | indicates mutually exclusive information. You can use the argument to the left of the separator or the
// argument to the right of the separator. You cannot use both arguments in a single use of the command.
// { } delimits a set of mutually exclusive arguments when one of the arguments is required. If the arguments are
// optional, they are enclosed in brackets ([ ]).
// Example: add [-F file | -D dir]... [-f format] profile
Use string
// Aliases is an array of aliases that can be used instead of the first word in Use.
Aliases []string
// SuggestFor is an array of command names for which this command will be suggested -
// similar to aliases but only suggests.
SuggestFor []string
// Short is the short description shown in the 'help' output.
Short string
// Long is the long message shown in the 'help <this-command>' output.
Long string
// Example is examples of how to use the command.
Example string
// ValidArgs is a list of all valid non-flag arguments that are accepted in bash completions
ValidArgs []string
// ValidArgsFunction is an optional function that provides valid non-flag arguments for bash completion.
// It is a dynamic version of using ValidArgs.
// Only one of ValidArgs and ValidArgsFunction can be used for a command.
ValidArgsFunction func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)
// Expected arguments
Args PositionalArgs
// ArgAliases is a list of aliases for ValidArgs.
// These are not suggested to the user in the bash completion,
// but accepted if entered manually.
ArgAliases []string
// BashCompletionFunction is a custom function used by the bash autocompletion generator.
BashCompletionFunction string
// Deprecated, if non-empty, marks this command as deprecated; the string is printed when the command is used.
Deprecated string
// Hidden defines whether this command is hidden and should NOT show up in the list of available commands.
Hidden bool
// Annotations are key/value pairs that can be used by applications to identify or
// group commands.
Annotations map[string]string
// Version defines the version for this command. If this value is non-empty and the command does not
// define a "version" flag, a "version" boolean flag will be added to the command and, if specified,
// will print content of the "Version" variable. A shorthand "v" flag will also be added if the
// command does not define one.
Version string
// The *Run functions are executed in the following order:
// * PersistentPreRun()
// * PreRun()
// * Run()
// * PostRun()
// * PersistentPostRun()
// All functions get the same args, the arguments after the command name.
//
// PersistentPreRun: children of this command will inherit and execute.
PersistentPreRun func(cmd *Command, args []string)
// PersistentPreRunE: PersistentPreRun but returns an error.
PersistentPreRunE func(cmd *Command, args []string) error
// PreRun: children of this command will not inherit.
PreRun func(cmd *Command, args []string)
// PreRunE: PreRun but returns an error.
PreRunE func(cmd *Command, args []string) error
// Run: Typically the actual work function. Most commands will only implement this.
Run func(cmd *Command, args []string)
// RunE: Run but returns an error.
RunE func(cmd *Command, args []string) error
// PostRun: run after the Run command.
PostRun func(cmd *Command, args []string)
// PostRunE: PostRun but returns an error.
PostRunE func(cmd *Command, args []string) error
// PersistentPostRun: children of this command will inherit and execute after PostRun.
PersistentPostRun func(cmd *Command, args []string)
// PersistentPostRunE: PersistentPostRun but returns an error.
PersistentPostRunE func(cmd *Command, args []string) error
// SilenceErrors is an option to quiet errors down stream.
SilenceErrors bool
// SilenceUsage is an option to silence usage when an error occurs.
SilenceUsage bool
// DisableFlagParsing disables the flag parsing.
// If this is true all flags will be passed to the command as arguments.
DisableFlagParsing bool
// DisableAutoGenTag defines whether the gen tag ("Auto generated by spf13/cobra...")
// will be printed when generating docs for this command.
DisableAutoGenTag bool
// DisableFlagsInUseLine will disable the addition of [flags] to the usage
// line of a command when printing help or generating docs
DisableFlagsInUseLine bool
// DisableSuggestions disables the suggestions based on Levenshtein distance
// that go along with 'unknown command' messages.
DisableSuggestions bool
// SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions.
// Must be > 0.
SuggestionsMinimumDistance int
// TraverseChildren parses flags on all parents before executing child command.
TraverseChildren bool
// FParseErrWhitelist flag parse errors to be ignored
FParseErrWhitelist FParseErrWhitelist
ctx context.Context
// commands is the list of commands supported by this program.
commands []*Command
// parent is a parent command for this command.
parent *Command
// Max lengths of commands' string lengths for use in padding.
commandsMaxUseLen int
commandsMaxCommandPathLen int
commandsMaxNameLen int
// commandsAreSorted defines whether the command slice is sorted or not.
commandsAreSorted bool
// commandCalledAs is the name or alias value used to call this command.
commandCalledAs struct {
name string
called bool
}
// args is actual args parsed from flags.
args []string
// flagErrorBuf contains all error messages from pflag.
flagErrorBuf *bytes.Buffer
// flags is full set of flags.
flags *flag.FlagSet
// pflags contains persistent flags.
pflags *flag.FlagSet
// lflags contains local flags.
lflags *flag.FlagSet
// iflags contains inherited flags.
iflags *flag.FlagSet
// parentsPflags is all persistent flags of cmd's parents.
parentsPflags *flag.FlagSet
// globNormFunc is the global normalization function
// that we can use on every pflag set and children commands
globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName
// usageFunc is usage func defined by user.
usageFunc func(*Command) error
// usageTemplate is usage template defined by user.
usageTemplate string
// flagErrorFunc is func defined by user and it's called when the parsing of
// flags returns an error.
flagErrorFunc func(*Command, error) error
// helpTemplate is help template defined by user.
helpTemplate string
// helpFunc is help func defined by user.
helpFunc func(*Command, []string)
// helpCommand is command with usage 'help'. If it's not defined by user,
// cobra uses default help command.
helpCommand *Command
// versionTemplate is the version template defined by user.
versionTemplate string
// inReader is a reader defined by the user that replaces stdin
inReader io.Reader
// outWriter is a writer defined by the user that replaces stdout
outWriter io.Writer
// errWriter is a writer defined by the user that replaces stderr
errWriter io.Writer
}
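// Illustrative sketch (the "serve" command below is hypothetical, not part of
// this file): a child command is just a Command value with its hooks set; when
// executed, the hooks run in the documented order (PersistentPreRun, PreRun,
// Run, PostRun, PersistentPostRun).
//
//	var serveCmd = &Command{
//		Use:   "serve [port]",
//		Short: "Start the server",
//		Args:  ExactArgs(1),
//		PreRun: func(cmd *Command, args []string) {
//			cmd.Println("about to listen on port", args[0])
//		},
//		Run: func(cmd *Command, args []string) {
//			cmd.Println("listening on port", args[0])
//		},
//	}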
// Context returns underlying command context. If command wasn't
// executed with ExecuteContext Context returns Background context.
func (c *Command) Context() context.Context {
return c.ctx
}
// SetArgs sets arguments for the command. It is set to os.Args[1:] by default; if desired, it can be overridden,
// which is particularly useful when testing.
func (c *Command) SetArgs(a []string) {
c.args = a
}
// SetOutput sets the destination for usage and error messages.
// If output is nil, os.Stderr is used.
// Deprecated: Use SetOut and/or SetErr instead
func (c *Command) SetOutput(output io.Writer) {
c.outWriter = output
c.errWriter = output
}
// SetOut sets the destination for usage messages.
// If newOut is nil, os.Stdout is used.
func (c *Command) SetOut(newOut io.Writer) {
c.outWriter = newOut
}
// SetErr sets the destination for error messages.
// If newErr is nil, os.Stderr is used.
func (c *Command) SetErr(newErr io.Writer) {
c.errWriter = newErr
}
// SetIn sets the source for input data
// If newIn is nil, os.Stdin is used.
func (c *Command) SetIn(newIn io.Reader) {
c.inReader = newIn
}
// SetUsageFunc sets usage function. Usage can be defined by application.
func (c *Command) SetUsageFunc(f func(*Command) error) {
c.usageFunc = f
}
// SetUsageTemplate sets usage template. Can be defined by Application.
func (c *Command) SetUsageTemplate(s string) {
c.usageTemplate = s
}
// SetFlagErrorFunc sets a function to generate an error when flag parsing
// fails.
func (c *Command) SetFlagErrorFunc(f func(*Command, error) error) {
c.flagErrorFunc = f
}
// SetHelpFunc sets help function. Can be defined by Application.
func (c *Command) SetHelpFunc(f func(*Command, []string)) {
c.helpFunc = f
}
// SetHelpCommand sets help command.
func (c *Command) SetHelpCommand(cmd *Command) {
c.helpCommand = cmd
}
// SetHelpTemplate sets help template to be used. Application can use it to set custom template.
func (c *Command) SetHelpTemplate(s string) {
c.helpTemplate = s
}
// SetVersionTemplate sets version template to be used. Application can use it to set custom template.
func (c *Command) SetVersionTemplate(s string) {
c.versionTemplate = s
}
// SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands.
// The user should not have a cyclic dependency on commands.
func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) {
c.Flags().SetNormalizeFunc(n)
c.PersistentFlags().SetNormalizeFunc(n)
c.globNormFunc = n
for _, command := range c.commands {
command.SetGlobalNormalizationFunc(n)
}
}
// OutOrStdout returns output to stdout.
func (c *Command) OutOrStdout() io.Writer {
return c.getOut(os.Stdout)
}
// OutOrStderr returns output to stderr
func (c *Command) OutOrStderr() io.Writer {
return c.getOut(os.Stderr)
}
// ErrOrStderr returns output to stderr
func (c *Command) ErrOrStderr() io.Writer {
return c.getErr(os.Stderr)
}
// InOrStdin returns input to stdin
func (c *Command) InOrStdin() io.Reader {
return c.getIn(os.Stdin)
}
func (c *Command) getOut(def io.Writer) io.Writer {
if c.outWriter != nil {
return c.outWriter
}
if c.HasParent() {
return c.parent.getOut(def)
}
return def
}
func (c *Command) getErr(def io.Writer) io.Writer {
if c.errWriter != nil {
return c.errWriter
}
if c.HasParent() {
return c.parent.getErr(def)
}
return def
}
func (c *Command) getIn(def io.Reader) io.Reader {
if c.inReader != nil {
return c.inReader
}
if c.HasParent() {
return c.parent.getIn(def)
}
return def
}
// UsageFunc returns either the function set by SetUsageFunc for this command
// or a parent, or it returns a default usage function.
func (c *Command) UsageFunc() (f func(*Command) error) {
if c.usageFunc != nil {
return c.usageFunc
}
if c.HasParent() {
return c.Parent().UsageFunc()
}
return func(c *Command) error {
c.mergePersistentFlags()
err := tmpl(c.OutOrStderr(), c.UsageTemplate(), c)
if err != nil {
c.PrintErrln(err)
}
return err
}
}
// Usage puts out the usage for the command.
// Used when a user provides invalid input.
// Can be defined by user by overriding UsageFunc.
func (c *Command) Usage() error {
return c.UsageFunc()(c)
}
// HelpFunc returns either the function set by SetHelpFunc for this command
// or a parent, or it returns a function with default help behavior.
func (c *Command) HelpFunc() func(*Command, []string) {
if c.helpFunc != nil {
return c.helpFunc
}
if c.HasParent() {
return c.Parent().HelpFunc()
}
return func(c *Command, a []string) {
c.mergePersistentFlags()
// The help should be sent to stdout
// See https://github.com/spf13/cobra/issues/1002
err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c)
if err != nil {
c.PrintErrln(err)
}
}
}
// Help puts out the help for the command.
// Used when a user calls help [command].
// Can be defined by user by overriding HelpFunc.
func (c *Command) Help() error {
c.HelpFunc()(c, []string{})
return nil
}
// UsageString returns usage string.
func (c *Command) UsageString() string {
// Storing normal writers
tmpOutput := c.outWriter
tmpErr := c.errWriter
bb := new(bytes.Buffer)
c.outWriter = bb
c.errWriter = bb
c.Usage()
// Setting things back to normal
c.outWriter = tmpOutput
c.errWriter = tmpErr
return bb.String()
}
// FlagErrorFunc returns either the function set by SetFlagErrorFunc for this
// command or a parent, or it returns a function which returns the original
// error.
func (c *Command) FlagErrorFunc() (f func(*Command, error) error) {
if c.flagErrorFunc != nil {
return c.flagErrorFunc
}
if c.HasParent() {
return c.parent.FlagErrorFunc()
}
return func(c *Command, err error) error {
return err
}
}
var minUsagePadding = 25
// UsagePadding returns padding for the usage.
func (c *Command) UsagePadding() int {
if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen {
return minUsagePadding
}
return c.parent.commandsMaxUseLen
}
var minCommandPathPadding = 11
// CommandPathPadding returns padding for the command path.
func (c *Command) CommandPathPadding() int {
if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen {
return minCommandPathPadding
}
return c.parent.commandsMaxCommandPathLen
}
var minNamePadding = 11
// NamePadding returns padding for the name.
func (c *Command) NamePadding() int {
if c.parent == nil || minNamePadding > c.parent.commandsMaxNameLen {
return minNamePadding
}
return c.parent.commandsMaxNameLen
}
// UsageTemplate returns usage template for the command.
func (c *Command) UsageTemplate() string {
if c.usageTemplate != "" {
return c.usageTemplate
}
if c.HasParent() {
return c.parent.UsageTemplate()
}
return `Usage:{{if .Runnable}}
{{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
{{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}
Aliases:
{{.NameAndAliases}}{{end}}{{if .HasExample}}
Examples:
{{.Example}}{{end}}{{if .HasAvailableSubCommands}}
Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
{{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
Flags:
{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}}
Global Flags:
{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}}
Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}}
{{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}}
Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}}
`
}
// HelpTemplate returns the help template for the command.
func (c *Command) HelpTemplate() string {
if c.helpTemplate != "" {
return c.helpTemplate
}
if c.HasParent() {
return c.parent.HelpTemplate()
}
return `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}}
{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
}
// VersionTemplate returns the version template for the command.
func (c *Command) VersionTemplate() string {
if c.versionTemplate != "" {
return c.versionTemplate
}
if c.HasParent() {
return c.parent.VersionTemplate()
}
return `{{with .Name}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}}
`
}
func hasNoOptDefVal(name string, fs *flag.FlagSet) bool {
flag := fs.Lookup(name)
if flag == nil {
return false
}
return flag.NoOptDefVal != ""
}
func shortHasNoOptDefVal(name string, fs *flag.FlagSet) bool {
if len(name) == 0 {
return false
}
flag := fs.ShorthandLookup(name[:1])
if flag == nil {
return false
}
return flag.NoOptDefVal != ""
}
func stripFlags(args []string, c *Command) []string {
if len(args) == 0 {
return args
}
c.mergePersistentFlags()
commands := []string{}
flags := c.Flags()
Loop:
for len(args) > 0 {
s := args[0]
args = args[1:]
switch {
case s == "--":
// "--" terminates the flags
break Loop
case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags):
// If '--flag arg' then
// delete arg from args.
fallthrough // (do the same as below)
case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags):
// If '-f arg' then
// delete 'arg' from args or break the loop if len(args) <= 1.
if len(args) <= 1 {
break Loop
} else {
args = args[1:]
continue
}
case s != "" && !strings.HasPrefix(s, "-"):
commands = append(commands, s)
}
}
return commands
}
// argsMinusFirstX removes only the first occurrence of x from args. Without this, commands that look like
// 'openshift admin policy add-role-to-user admin my-user' would lose the 'admin' argument (arg[4]).
func argsMinusFirstX(args []string, x string) []string {
for i, y := range args {
if x == y {
ret := []string{}
ret = append(ret, args[:i]...)
ret = append(ret, args[i+1:]...)
return ret
}
}
return args
}
func isFlagArg(arg string) bool {
return ((len(arg) >= 3 && arg[1] == '-') ||
(len(arg) >= 2 && arg[0] == '-' && arg[1] != '-'))
}
// Find the target command given the args and command tree
// Meant to be run on the highest node. Only searches down.
func (c *Command) Find(args []string) (*Command, []string, error) {
var innerfind func(*Command, []string) (*Command, []string)
innerfind = func(c *Command, innerArgs []string) (*Command, []string) {
argsWOflags := stripFlags(innerArgs, c)
if len(argsWOflags) == 0 {
return c, innerArgs
}
nextSubCmd := argsWOflags[0]
cmd := c.findNext(nextSubCmd)
if cmd != nil {
return innerfind(cmd, argsMinusFirstX(innerArgs, nextSubCmd))
}
return c, innerArgs
}
commandFound, a := innerfind(c, args)
if commandFound.Args == nil {
return commandFound, a, legacyArgs(commandFound, stripFlags(a, commandFound))
}
return commandFound, a, nil
}
func (c *Command) findSuggestions(arg string) string {
if c.DisableSuggestions {
return ""
}
if c.SuggestionsMinimumDistance <= 0 {
c.SuggestionsMinimumDistance = 2
}
suggestionsString := ""
if suggestions := c.SuggestionsFor(arg); len(suggestions) > 0 {
suggestionsString += "\n\nDid you mean this?\n"
for _, s := range suggestions {
suggestionsString += fmt.Sprintf("\t%v\n", s)
}
}
return suggestionsString
}
func (c *Command) findNext(next string) *Command {
matches := make([]*Command, 0)
for _, cmd := range c.commands {
if cmd.Name() == next || cmd.HasAlias(next) {
cmd.commandCalledAs.name = next
return cmd
}
if EnablePrefixMatching && cmd.hasNameOrAliasPrefix(next) {
matches = append(matches, cmd)
}
}
if len(matches) == 1 {
return matches[0]
}
return nil
}
// Traverse the command tree to find the command, and parse args for
// each parent.
func (c *Command) Traverse(args []string) (*Command, []string, error) {
flags := []string{}
inFlag := false
for i, arg := range args {
switch {
// A long flag with a space separated value
case strings.HasPrefix(arg, "--") && !strings.Contains(arg, "="):
// TODO: this isn't quite right, we should really check ahead for 'true' or 'false'
inFlag = !hasNoOptDefVal(arg[2:], c.Flags())
flags = append(flags, arg)
continue
// A short flag with a space separated value
case strings.HasPrefix(arg, "-") && !strings.Contains(arg, "=") && len(arg) == 2 && !shortHasNoOptDefVal(arg[1:], c.Flags()):
inFlag = true
flags = append(flags, arg)
continue
// The value for a flag
case inFlag:
inFlag = false
flags = append(flags, arg)
continue
// A flag without a value, or with an `=` separated value
case isFlagArg(arg):
flags = append(flags, arg)
continue
}
cmd := c.findNext(arg)
if cmd == nil {
return c, args, nil
}
if err := c.ParseFlags(flags); err != nil {
return nil, args, err
}
return cmd.Traverse(args[i+1:])
}
return c, args, nil
}
// SuggestionsFor provides suggestions for the typedName.
func (c *Command) SuggestionsFor(typedName string) []string {
suggestions := []string{}
for _, cmd := range c.commands {
if cmd.IsAvailableCommand() {
levenshteinDistance := ld(typedName, cmd.Name(), true)
suggestByLevenshtein := levenshteinDistance <= c.SuggestionsMinimumDistance
suggestByPrefix := strings.HasPrefix(strings.ToLower(cmd.Name()), strings.ToLower(typedName))
if suggestByLevenshtein || suggestByPrefix {
suggestions = append(suggestions, cmd.Name())
}
for _, explicitSuggestion := range cmd.SuggestFor {
if strings.EqualFold(typedName, explicitSuggestion) {
suggestions = append(suggestions, cmd.Name())
}
}
}
}
return suggestions
}
// VisitParents visits all parents of the command and invokes fn on each parent.
func (c *Command) VisitParents(fn func(*Command)) {
if c.HasParent() {
fn(c.Parent())
c.Parent().VisitParents(fn)
}
}
// Root finds root command.
func (c *Command) Root() *Command {
if c.HasParent() {
return c.Parent().Root()
}
return c
}
// ArgsLenAtDash will return the length of c.Flags().Args at the moment
// when a -- was found during args parsing.
func (c *Command) ArgsLenAtDash() int {
return c.Flags().ArgsLenAtDash()
}
func (c *Command) execute(a []string) (err error) {
if c == nil {
return fmt.Errorf("Called Execute() on a nil Command")
}
if len(c.Deprecated) > 0 {
c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated)
}
// initialize help and version flag at the last point possible to allow for user
// overriding
c.InitDefaultHelpFlag()
c.InitDefaultVersionFlag()
err = c.ParseFlags(a)
if err != nil {
return c.FlagErrorFunc()(c, err)
}
// If help is called, regardless of other flags, return we want help.
// Also say we need help if the command isn't runnable.
helpVal, err := c.Flags().GetBool("help")
if err != nil {
// should be impossible to get here as we always declare a help
// flag in InitDefaultHelpFlag()
c.Println("\"help\" flag declared as non-bool. Please correct your code")
return err
}
if helpVal {
return flag.ErrHelp
}
// for back-compat, only add version flag behavior if version is defined
if c.Version != "" {
versionVal, err := c.Flags().GetBool("version")
if err != nil {
c.Println("\"version\" flag declared as non-bool. Please correct your code")
return err
}
if versionVal {
err := tmpl(c.OutOrStdout(), c.VersionTemplate(), c)
if err != nil {
c.Println(err)
}
return err
}
}
if !c.Runnable() {
return flag.ErrHelp
}
c.preRun()
argWoFlags := c.Flags().Args()
if c.DisableFlagParsing {
argWoFlags = a
}
if err := c.ValidateArgs(argWoFlags); err != nil {
return err
}
for p := c; p != nil; p = p.Parent() {
if p.PersistentPreRunE != nil {
if err := p.PersistentPreRunE(c, argWoFlags); err != nil {
return err
}
break
} else if p.PersistentPreRun != nil {
p.PersistentPreRun(c, argWoFlags)
break
}
}
if c.PreRunE != nil {
if err := c.PreRunE(c, argWoFlags); err != nil {
return err
}
} else if c.PreRun != nil {
c.PreRun(c, argWoFlags)
}
if err := c.validateRequiredFlags(); err != nil {
return err
}
if c.RunE != nil {
if err := c.RunE(c, argWoFlags); err != nil {
return err
}
} else {
c.Run(c, argWoFlags)
}
if c.PostRunE != nil {
if err := c.PostRunE(c, argWoFlags); err != nil {
return err
}
} else if c.PostRun != nil {
c.PostRun(c, argWoFlags)
}
for p := c; p != nil; p = p.Parent() {
if p.PersistentPostRunE != nil {
if err := p.PersistentPostRunE(c, argWoFlags); err != nil {
return err
}
break
} else if p.PersistentPostRun != nil {
p.PersistentPostRun(c, argWoFlags)
break
}
}
return nil
}
func (c *Command) preRun() {
for _, x := range initializers {
x()
}
}
// ExecuteContext is the same as Execute(), but sets the ctx on the command.
// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle functions.
func (c *Command) ExecuteContext(ctx context.Context) error {
c.ctx = ctx
return c.Execute()
}
// Execute uses the args (os.Args[1:] by default)
// and run through the command tree finding appropriate matches
// for commands and then corresponding flags.
func (c *Command) Execute() error {
_, err := c.ExecuteC()
return err
}
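// Illustrative sketch (rootCmd is hypothetical): a typical program calls
// Execute exactly once on the root command from main, letting cobra resolve
// the subcommand and flags from os.Args:
//
//	func main() {
//		if err := rootCmd.Execute(); err != nil {
//			os.Exit(1)
//		}
//	}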
// ExecuteC executes the command.
func (c *Command) ExecuteC() (cmd *Command, err error) {
if c.ctx == nil {
c.ctx = context.Background()
}
// Regardless of which command Execute is called on, run on Root only
if c.HasParent() {
return c.Root().ExecuteC()
}
// windows hook
if preExecHookFn != nil {
preExecHookFn(c)
}
// initialize help as the last point possible to allow for user
// overriding
c.InitDefaultHelpCmd()
args := c.args
// Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155
if c.args == nil && filepath.Base(os.Args[0]) != "cobra.test" {
args = os.Args[1:]
}
// initialize the hidden command to be used for bash completion
c.initCompleteCmd(args)
var flags []string
if c.TraverseChildren {
cmd, flags, err = c.Traverse(args)
} else {
cmd, flags, err = c.Find(args)
}
if err != nil {
// If we managed to resolve a subcommand before failing, report the error in terms of that subcommand
if cmd != nil {
c = cmd
}
if !c.SilenceErrors {
c.PrintErrln("Error:", err.Error())
c.PrintErrf("Run '%v --help' for usage.\n", c.CommandPath())
}
return c, err
}
cmd.commandCalledAs.called = true
if cmd.commandCalledAs.name == "" {
cmd.commandCalledAs.name = cmd.Name()
}
// We have to pass global context to children command
// if context is present on the parent command.
if cmd.ctx == nil {
cmd.ctx = c.ctx
}
err = cmd.execute(flags)
if err != nil {
// Always show help if requested, even if SilenceErrors is in
// effect
if err == flag.ErrHelp {
cmd.HelpFunc()(cmd, args)
return cmd, nil
}
// If root command has SilenceErrors flagged,
// all subcommands should respect it
if !cmd.SilenceErrors && !c.SilenceErrors {
c.PrintErrln("Error:", err.Error())
}
// If root command has SilenceUsage flagged,
// all subcommands should respect it
if !cmd.SilenceUsage && !c.SilenceUsage {
c.Println(cmd.UsageString())
}
}
return cmd, err
}
func (c *Command) ValidateArgs(args []string) error {
if c.Args == nil {
return nil
}
return c.Args(c, args)
}
func (c *Command) validateRequiredFlags() error {
if c.DisableFlagParsing {
return nil
}
flags := c.Flags()
missingFlagNames := []string{}
flags.VisitAll(func(pflag *flag.Flag) {
requiredAnnotation, found := pflag.Annotations[BashCompOneRequiredFlag]
if !found {
return
}
if (requiredAnnotation[0] == "true") && !pflag.Changed {
missingFlagNames = append(missingFlagNames, pflag.Name)
}
})
if len(missingFlagNames) > 0 {
return fmt.Errorf(`required flag(s) "%s" not set`, strings.Join(missingFlagNames, `", "`))
}
return nil
}
// InitDefaultHelpFlag adds default help flag to c.
// It is called automatically when executing c or when calling help and usage.
// If c already has help flag, it will do nothing.
func (c *Command) InitDefaultHelpFlag() {
c.mergePersistentFlags()
if c.Flags().Lookup("help") == nil {
usage := "help for "
if c.Name() == "" {
usage += "this command"
} else {
usage += c.Name()
}
c.Flags().BoolP("help", "h", false, usage)
}
}
// InitDefaultVersionFlag adds default version flag to c.
// It is called automatically when executing c.
// If c already has a version flag, it will do nothing.
// If c.Version is empty, it will do nothing.
func (c *Command) InitDefaultVersionFlag() {
if c.Version == "" {
return
}
c.mergePersistentFlags()
if c.Flags().Lookup("version") == nil {
usage := "version for "
if c.Name() == "" {
usage += "this command"
} else {
usage += c.Name()
}
if c.Flags().ShorthandLookup("v") == nil {
c.Flags().BoolP("version", "v", false, usage)
} else {
c.Flags().Bool("version", false, usage)
}
}
}
// InitDefaultHelpCmd adds default help command to c.
// It is called automatically when executing c or when calling help and usage.
// If c already has help command or c has no subcommands, it will do nothing.
func (c *Command) InitDefaultHelpCmd() {
if !c.HasSubCommands() {
return
}
if c.helpCommand == nil {
c.helpCommand = &Command{
Use: "help [command]",
Short: "Help about any command",
Long: `Help provides help for any command in the application.
Simply type ` + c.Name() + ` help [path to command] for full details.`,
ValidArgsFunction: func(c *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
var completions []string
cmd, _, e := c.Root().Find(args)
if e != nil {
return nil, ShellCompDirectiveNoFileComp
}
if cmd == nil {
// Root help command.
cmd = c.Root()
}
for _, subCmd := range cmd.Commands() {
if subCmd.IsAvailableCommand() || subCmd == cmd.helpCommand {
if strings.HasPrefix(subCmd.Name(), toComplete) {
completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short))
}
}
}
return completions, ShellCompDirectiveNoFileComp
},
Run: func(c *Command, args []string) {
cmd, _, e := c.Root().Find(args)
if cmd == nil || e != nil {
c.Printf("Unknown help topic %#q\n", args)
c.Root().Usage()
} else {
cmd.InitDefaultHelpFlag() // make it possible for the 'help' flag to be shown
cmd.Help()
}
},
}
}
c.RemoveCommand(c.helpCommand)
c.AddCommand(c.helpCommand)
}
// ResetCommands deletes the parent, subcommands and help command from c.
func (c *Command) ResetCommands() {
c.parent = nil
c.commands = nil
c.helpCommand = nil
c.parentsPflags = nil
}
// Sorts commands by their names.
type commandSorterByName []*Command
func (c commandSorterByName) Len() int { return len(c) }
func (c commandSorterByName) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
func (c commandSorterByName) Less(i, j int) bool { return c[i].Name() < c[j].Name() }
// Commands returns a sorted slice of child commands.
func (c *Command) Commands() []*Command {
// do not sort commands if it already sorted or sorting was disabled
if EnableCommandSorting && !c.commandsAreSorted {
sort.Sort(commandSorterByName(c.commands))
c.commandsAreSorted = true
}
return c.commands
}
// AddCommand adds one or more commands to this parent command.
func (c *Command) AddCommand(cmds ...*Command) {
for i, x := range cmds {
if cmds[i] == c {
panic("Command can't be a child of itself")
}
cmds[i].parent = c
// update max lengths
usageLen := len(x.Use)
if usageLen > c.commandsMaxUseLen {
c.commandsMaxUseLen = usageLen
}
commandPathLen := len(x.CommandPath())
if commandPathLen > c.commandsMaxCommandPathLen {
c.commandsMaxCommandPathLen = commandPathLen
}
nameLen := len(x.Name())
if nameLen > c.commandsMaxNameLen {
c.commandsMaxNameLen = nameLen
}
// If global normalization function exists, update all children
if c.globNormFunc != nil {
x.SetGlobalNormalizationFunc(c.globNormFunc)
}
c.commands = append(c.commands, x)
c.commandsAreSorted = false
}
}
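// Illustrative sketch (rootCmd, serveCmd and versionCmd are hypothetical):
// building a command tree is just attaching children to their parent; the
// children then inherit the parent's persistent flags and appear in its help.
//
//	rootCmd.AddCommand(serveCmd, versionCmd)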
// RemoveCommand removes one or more commands from a parent command.
func (c *Command) RemoveCommand(cmds ...*Command) {
commands := []*Command{}
main:
for _, command := range c.commands {
for _, cmd := range cmds {
if command == cmd {
command.parent = nil
continue main
}
}
commands = append(commands, command)
}
c.commands = commands
// recompute all lengths
c.commandsMaxUseLen = 0
c.commandsMaxCommandPathLen = 0
c.commandsMaxNameLen = 0
for _, command := range c.commands {
usageLen := len(command.Use)
if usageLen > c.commandsMaxUseLen {
c.commandsMaxUseLen = usageLen
}
commandPathLen := len(command.CommandPath())
if commandPathLen > c.commandsMaxCommandPathLen {
c.commandsMaxCommandPathLen = commandPathLen
}
nameLen := len(command.Name())
if nameLen > c.commandsMaxNameLen {
c.commandsMaxNameLen = nameLen
}
}
}
// Print is a convenience method to Print to the defined output, fallback to Stderr if not set.
func (c *Command) Print(i ...interface{}) {
fmt.Fprint(c.OutOrStderr(), i...)
}
// Println is a convenience method to Println to the defined output, fallback to Stderr if not set.
func (c *Command) Println(i ...interface{}) {
c.Print(fmt.Sprintln(i...))
}
// Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set.
func (c *Command) Printf(format string, i ...interface{}) {
c.Print(fmt.Sprintf(format, i...))
}
// PrintErr is a convenience method to Print to the defined Err output, fallback to Stderr if not set.
func (c *Command) PrintErr(i ...interface{}) {
fmt.Fprint(c.ErrOrStderr(), i...)
}
// PrintErrln is a convenience method to Println to the defined Err output, fallback to Stderr if not set.
func (c *Command) PrintErrln(i ...interface{}) {
c.PrintErr(fmt.Sprintln(i...))
}
// PrintErrf is a convenience method to Printf to the defined Err output, fallback to Stderr if not set.
func (c *Command) PrintErrf(format string, i ...interface{}) {
c.PrintErr(fmt.Sprintf(format, i...))
}
// CommandPath returns the full path to this command.
func (c *Command) CommandPath() string {
if c.HasParent() {
return c.Parent().CommandPath() + " " + c.Name()
}
return c.Name()
}
// UseLine puts out the full usage for a given command (including parents).
func (c *Command) UseLine() string {
var useline string
if c.HasParent() {
useline = c.parent.CommandPath() + " " + c.Use
} else {
useline = c.Use
}
if c.DisableFlagsInUseLine {
return useline
}
if c.HasAvailableFlags() && !strings.Contains(useline, "[flags]") {
useline += " [flags]"
}
return useline
}
// DebugFlags is used to determine which flags have been assigned to which commands
// and which persist.
func (c *Command) DebugFlags() {
c.Println("DebugFlags called on", c.Name())
var debugflags func(*Command)
debugflags = func(x *Command) {
if x.HasFlags() || x.HasPersistentFlags() {
c.Println(x.Name())
}
if x.HasFlags() {
x.flags.VisitAll(func(f *flag.Flag) {
if x.HasPersistentFlags() && x.persistentFlag(f.Name) != nil {
c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [LP]")
} else {
c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]")
}
})
}
if x.HasPersistentFlags() {
x.pflags.VisitAll(func(f *flag.Flag) {
if x.HasFlags() {
if x.flags.Lookup(f.Name) == nil {
c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]")
}
} else {
c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]")
}
})
}
c.Println(x.flagErrorBuf)
if x.HasSubCommands() {
for _, y := range x.commands {
debugflags(y)
}
}
}
debugflags(c)
}
// Name returns the command's name: the first word in the use line.
func (c *Command) Name() string {
name := c.Use
i := strings.Index(name, " ")
if i >= 0 {
name = name[:i]
}
return name
}
// HasAlias determines if a given string is an alias of the command.
func (c *Command) HasAlias(s string) bool {
for _, a := range c.Aliases {
if a == s {
return true
}
}
return false
}
// CalledAs returns the command name or alias that was used to invoke
// this command or an empty string if the command has not been called.
func (c *Command) CalledAs() string {
if c.commandCalledAs.called {
return c.commandCalledAs.name
}
return ""
}
// hasNameOrAliasPrefix returns true if the Name or any of the aliases starts
// with prefix
func (c *Command) hasNameOrAliasPrefix(prefix string) bool {
if strings.HasPrefix(c.Name(), prefix) {
c.commandCalledAs.name = c.Name()
return true
}
for _, alias := range c.Aliases {
if strings.HasPrefix(alias, prefix) {
c.commandCalledAs.name = alias
return true
}
}
return false
}
// NameAndAliases returns a list of the command name and all aliases
func (c *Command) NameAndAliases() string {
return strings.Join(append([]string{c.Name()}, c.Aliases...), ", ")
}
// HasExample determines if the command has an example.
func (c *Command) HasExample() bool {
return len(c.Example) > 0
}
// Runnable determines if the command is itself runnable.
func (c *Command) Runnable() bool {
return c.Run != nil || c.RunE != nil
}
// HasSubCommands determines if the command has children commands.
func (c *Command) HasSubCommands() bool {
return len(c.commands) > 0
}
// IsAvailableCommand determines if a command is available as a non-help command
// (this includes all non deprecated/hidden commands).
func (c *Command) IsAvailableCommand() bool {
if len(c.Deprecated) != 0 || c.Hidden {
return false
}
if c.HasParent() && c.Parent().helpCommand == c {
return false
}
if c.Runnable() || c.HasAvailableSubCommands() {
return true
}
return false
}
// IsAdditionalHelpTopicCommand determines if a command is an additional
// help topic command; additional help topic command is determined by the
// fact that it is NOT runnable/hidden/deprecated, and has no sub commands that
// are runnable/hidden/deprecated.
// Concrete example: https://github.com/spf13/cobra/issues/393#issuecomment-282741924.
func (c *Command) IsAdditionalHelpTopicCommand() bool {
// if a command is runnable, deprecated, or hidden it is not a 'help' command
if c.Runnable() || len(c.Deprecated) != 0 || c.Hidden {
return false
}
// if any non-help sub commands are found, the command is not a 'help' command
for _, sub := range c.commands {
if !sub.IsAdditionalHelpTopicCommand() {
return false
}
}
// the command either has no sub commands, or no non-help sub commands
return true
}
// HasHelpSubCommands determines if a command has any available 'help' sub commands
// that need to be shown in the usage/help default template under 'additional help
// topics'.
func (c *Command) HasHelpSubCommands() bool {
// return true on the first found available 'help' sub command
for _, sub := range c.commands {
if sub.IsAdditionalHelpTopicCommand() {
return true
}
}
// the command either has no sub commands, or no available 'help' sub commands
return false
}
// HasAvailableSubCommands determines if a command has available sub commands that
// need to be shown in the usage/help default template under 'available commands'.
func (c *Command) HasAvailableSubCommands() bool {
// return true on the first found available (non deprecated/help/hidden)
// sub command
for _, sub := range c.commands {
if sub.IsAvailableCommand() {
return true
}
}
// the command either has no sub commands, or no available (non deprecated/help/hidden)
// sub commands
return false
}
// HasParent determines if the command is a child command.
func (c *Command) HasParent() bool {
return c.parent != nil
}
// GlobalNormalizationFunc returns the global normalization function or nil if it doesn't exist.
func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName {
return c.globNormFunc
}
// Flags returns the complete FlagSet that applies
// to this command (local and persistent declared here and by all parents).
func (c *Command) Flags() *flag.FlagSet {
if c.flags == nil {
c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
if c.flagErrorBuf == nil {
c.flagErrorBuf = new(bytes.Buffer)
}
c.flags.SetOutput(c.flagErrorBuf)
}
return c.flags
}
// LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands.
func (c *Command) LocalNonPersistentFlags() *flag.FlagSet {
persistentFlags := c.PersistentFlags()
out := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
c.LocalFlags().VisitAll(func(f *flag.Flag) {
if persistentFlags.Lookup(f.Name) == nil {
out.AddFlag(f)
}
})
return out
}
// LocalFlags returns the local FlagSet specifically set in the current command.
func (c *Command) LocalFlags() *flag.FlagSet {
c.mergePersistentFlags()
if c.lflags == nil {
c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
if c.flagErrorBuf == nil {
c.flagErrorBuf = new(bytes.Buffer)
}
c.lflags.SetOutput(c.flagErrorBuf)
}
c.lflags.SortFlags = c.Flags().SortFlags
if c.globNormFunc != nil {
c.lflags.SetNormalizeFunc(c.globNormFunc)
}
addToLocal := func(f *flag.Flag) {
if c.lflags.Lookup(f.Name) == nil && c.parentsPflags.Lookup(f.Name) == nil {
c.lflags.AddFlag(f)
}
}
c.Flags().VisitAll(addToLocal)
c.PersistentFlags().VisitAll(addToLocal)
return c.lflags
}
// InheritedFlags returns all flags which were inherited from parent commands.
func (c *Command) InheritedFlags() *flag.FlagSet {
c.mergePersistentFlags()
if c.iflags == nil {
c.iflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
if c.flagErrorBuf == nil {
c.flagErrorBuf = new(bytes.Buffer)
}
c.iflags.SetOutput(c.flagErrorBuf)
}
local := c.LocalFlags()
if c.globNormFunc != nil {
c.iflags.SetNormalizeFunc(c.globNormFunc)
}
c.parentsPflags.VisitAll(func(f *flag.Flag) {
if c.iflags.Lookup(f.Name) == nil && local.Lookup(f.Name) == nil {
c.iflags.AddFlag(f)
}
})
return c.iflags
}
// NonInheritedFlags returns all flags which were not inherited from parent commands.
func (c *Command) NonInheritedFlags() *flag.FlagSet {
return c.LocalFlags()
}
// PersistentFlags returns the persistent FlagSet specifically set in the current command.
func (c *Command) PersistentFlags() *flag.FlagSet {
if c.pflags == nil {
c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
if c.flagErrorBuf == nil {
c.flagErrorBuf = new(bytes.Buffer)
}
c.pflags.SetOutput(c.flagErrorBuf)
}
return c.pflags
}
// ResetFlags deletes all flags from command.
func (c *Command) ResetFlags() {
c.flagErrorBuf = new(bytes.Buffer)
c.flagErrorBuf.Reset()
c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
c.flags.SetOutput(c.flagErrorBuf)
c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
c.pflags.SetOutput(c.flagErrorBuf)
c.lflags = nil
c.iflags = nil
c.parentsPflags = nil
}
// HasFlags checks if the command contains any flags (local plus persistent from the entire structure).
func (c *Command) HasFlags() bool {
return c.Flags().HasFlags()
}
// HasPersistentFlags checks if the command contains persistent flags.
func (c *Command) HasPersistentFlags() bool {
return c.PersistentFlags().HasFlags()
}
// HasLocalFlags checks if the command has flags specifically declared locally.
func (c *Command) HasLocalFlags() bool {
return c.LocalFlags().HasFlags()
}
// HasInheritedFlags checks if the command has flags inherited from its parent command.
func (c *Command) HasInheritedFlags() bool {
return c.InheritedFlags().HasFlags()
}
// HasAvailableFlags checks if the command contains any flags (local plus persistent from the entire
// structure) which are not hidden or deprecated.
func (c *Command) HasAvailableFlags() bool {
return c.Flags().HasAvailableFlags()
}
// HasAvailablePersistentFlags checks if the command contains persistent flags which are not hidden or deprecated.
func (c *Command) HasAvailablePersistentFlags() bool {
return c.PersistentFlags().HasAvailableFlags()
}
// HasAvailableLocalFlags checks if the command has flags specifically declared locally which are not hidden
// or deprecated.
func (c *Command) HasAvailableLocalFlags() bool {
return c.LocalFlags().HasAvailableFlags()
}
// HasAvailableInheritedFlags checks if the command has flags inherited from its parent command which are
// not hidden or deprecated.
func (c *Command) HasAvailableInheritedFlags() bool {
return c.InheritedFlags().HasAvailableFlags()
}
// Flag climbs up the command tree looking for matching flag.
func (c *Command) Flag(name string) (flag *flag.Flag) {
flag = c.Flags().Lookup(name)
if flag == nil {
flag = c.persistentFlag(name)
}
return
}
// Recursively find matching persistent flag.
func (c *Command) persistentFlag(name string) (flag *flag.Flag) {
if c.HasPersistentFlags() {
flag = c.PersistentFlags().Lookup(name)
}
if flag == nil {
c.updateParentsPflags()
flag = c.parentsPflags.Lookup(name)
}
return
}
// ParseFlags parses persistent flag tree and local flags.
func (c *Command) ParseFlags(args []string) error {
if c.DisableFlagParsing {
return nil
}
if c.flagErrorBuf == nil {
c.flagErrorBuf = new(bytes.Buffer)
}
beforeErrorBufLen := c.flagErrorBuf.Len()
c.mergePersistentFlags()
// do it here after merging all flags and just before parse
c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist)
err := c.Flags().Parse(args)
// Print warnings if they occurred (e.g. deprecated flag messages).
if c.flagErrorBuf.Len()-beforeErrorBufLen > 0 && err == nil {
c.Print(c.flagErrorBuf.String())
}
return err
}
// Parent returns a commands parent command.
func (c *Command) Parent() *Command {
return c.parent
}
// mergePersistentFlags merges c.PersistentFlags() to c.Flags()
// and adds missing persistent flags of all parents.
func (c *Command) mergePersistentFlags() {
c.updateParentsPflags()
c.Flags().AddFlagSet(c.PersistentFlags())
c.Flags().AddFlagSet(c.parentsPflags)
}
// updateParentsPflags updates c.parentsPflags by adding
// new persistent flags of all parents.
// If c.parentsPflags == nil, it makes new.
func (c *Command) updateParentsPflags() {
if c.parentsPflags == nil {
c.parentsPflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
c.parentsPflags.SetOutput(c.flagErrorBuf)
c.parentsPflags.SortFlags = false
}
if c.globNormFunc != nil {
c.parentsPflags.SetNormalizeFunc(c.globNormFunc)
}
c.Root().PersistentFlags().AddFlagSet(flag.CommandLine)
c.VisitParents(func(parent *Command) {
c.parentsPflags.AddFlagSet(parent.PersistentFlags())
})
}
| 8,556 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/cobra/shell_completions.md | # Generating shell completions
Cobra can generate shell completions for multiple shells.
The currently supported shells are:
- Bash
- Zsh
- Fish
- PowerShell
If you are using the generator you can create a completion command by running
```bash
cobra add completion
```
and then modifying the generated `cmd/completion.go` file to look something like this
(writing the shell script to stdout allows the most flexible use):
```go
var completionCmd = &cobra.Command{
Use: "completion [bash|zsh|fish|powershell]",
Short: "Generate completion script",
Long: `To load completions:
Bash:
$ source <(yourprogram completion bash)
# To load completions for each session, execute once:
Linux:
$ yourprogram completion bash > /etc/bash_completion.d/yourprogram
MacOS:
$ yourprogram completion bash > /usr/local/etc/bash_completion.d/yourprogram
Zsh:
# If shell completion is not already enabled in your environment you will need
# to enable it. You can execute the following once:
$ echo "autoload -U compinit; compinit" >> ~/.zshrc
# To load completions for each session, execute once:
$ yourprogram completion zsh > "${fpath[1]}/_yourprogram"
# You will need to start a new shell for this setup to take effect.
Fish:
$ yourprogram completion fish | source
# To load completions for each session, execute once:
$ yourprogram completion fish > ~/.config/fish/completions/yourprogram.fish
`,
DisableFlagsInUseLine: true,
ValidArgs: []string{"bash", "zsh", "fish", "powershell"},
Args: cobra.ExactValidArgs(1),
Run: func(cmd *cobra.Command, args []string) {
switch args[0] {
case "bash":
cmd.Root().GenBashCompletion(os.Stdout)
case "zsh":
cmd.Root().GenZshCompletion(os.Stdout)
case "fish":
cmd.Root().GenFishCompletion(os.Stdout, true)
case "powershell":
cmd.Root().GenPowerShellCompletion(os.Stdout)
}
},
}
```
**Note:** The code produced by the cobra generator may print messages to stdout (for example, when a config file is loaded). Such output will break the auto-completion script and must be removed.
# Customizing completions
The generated completion scripts will automatically handle completing commands and flags. However, you can make your completions much more powerful by providing information to complete your program's nouns and flag values.
## Completion of nouns
### Static completion of nouns
Cobra allows you to provide a pre-defined list of completion choices for your nouns using the `ValidArgs` field.
For example, if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them.
Some simplified code from `kubectl get` looks like:
```go
validArgs := []string{"pod", "node", "service", "replicationcontroller"}
cmd := &cobra.Command{
Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)",
Short: "Display one or many resources",
Long: get_long,
Example: get_example,
Run: func(cmd *cobra.Command, args []string) {
err := RunGet(f, out, cmd, args)
util.CheckErr(err)
},
ValidArgs: validArgs,
}
```
Notice we put the `ValidArgs` field on the `get` sub-command. Doing so will give results like:
```bash
$ kubectl get [tab][tab]
node pod replicationcontroller service
```
#### Aliases for nouns
If your nouns have aliases, you can define them alongside `ValidArgs` using `ArgAliases`:
```go
argAliases := []string{"pods", "nodes", "services", "svc", "replicationcontrollers", "rc"}
cmd := &cobra.Command{
...
ValidArgs: validArgs,
	ArgAliases: argAliases,
}
```
The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by
the completion algorithm if entered manually, e.g. in:
```bash
$ kubectl get rc [tab][tab]
backend frontend database
```
Note that without declaring `rc` as an alias, the completion algorithm would not know to show the list of
replication controllers following `rc`.
### Dynamic completion of nouns
In some cases it is not possible to provide a list of completions in advance. Instead, the list of completions must be determined at execution-time. In a similar fashion as for static completions, you can use the `ValidArgsFunction` field to provide a Go function that Cobra will execute when it needs the list of completion choices for the nouns of a command. Note that either `ValidArgs` or `ValidArgsFunction` can be used for a single cobra command, but not both.
Simplified code from `helm status` looks like:
```go
cmd := &cobra.Command{
Use: "status RELEASE_NAME",
Short: "Display the status of the named release",
Long: status_long,
	RunE: func(cmd *cobra.Command, args []string) error {
		return RunGet(args[0])
	},
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
if len(args) != 0 {
return nil, cobra.ShellCompDirectiveNoFileComp
}
return getReleasesFromCluster(toComplete), cobra.ShellCompDirectiveNoFileComp
},
}
```
Where `getReleasesFromCluster()` is a Go function that obtains the list of current Helm releases running on the Kubernetes cluster.
Notice we put the `ValidArgsFunction` on the `status` sub-command. Let's assume the Helm releases on the cluster are `harbor`, `notary`, `rook` and `thanos`; this dynamic completion will then give results like:
```bash
$ helm status [tab][tab]
harbor notary rook thanos
```
You may have noticed the use of `cobra.ShellCompDirective`. These directives are bit flags that let you control certain shell completion behaviors for your particular completion. You can combine them with the bit-or operator, such as `cobra.ShellCompDirectiveNoSpace | cobra.ShellCompDirectiveNoFileComp`; a short sketch combining two directives follows the list below.
```go
// Indicates that the shell will perform its default behavior after completions
// have been provided (this implies none of the other directives).
ShellCompDirectiveDefault
// Indicates an error occurred and completions should be ignored.
ShellCompDirectiveError
// Indicates that the shell should not add a space after the completion,
// even if there is a single completion provided.
ShellCompDirectiveNoSpace
// Indicates that the shell should not provide file completion even when
// no completion is provided.
ShellCompDirectiveNoFileComp
// Indicates that the returned completions should be used as file extension filters.
// For example, to complete only files of the form *.json or *.yaml:
// return []string{"yaml", "json"}, ShellCompDirectiveFilterFileExt
// For flags, using MarkFlagFilename() and MarkPersistentFlagFilename()
// is a shortcut to using this directive explicitly.
//
ShellCompDirectiveFilterFileExt
// Indicates that only directory names should be provided in file completion.
// For example:
// return nil, ShellCompDirectiveFilterDirs
// For flags, using MarkFlagDirname() is a shortcut to using this directive explicitly.
//
// To request directory names within another directory, the returned completions
// should specify a single directory name within which to search. For example,
// to complete directories within "themes/":
// return []string{"themes"}, ShellCompDirectiveFilterDirs
//
ShellCompDirectiveFilterDirs
```
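As a minimal sketch of combining two directives with the bit-or operator (the completion values `region=` and `zone=` are invented for illustration):

```go
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
	// Suggest key= prefixes; ask the shell not to append a space so the user
	// can keep typing the value, and suppress file completion as well.
	return []string{"region=", "zone="}, cobra.ShellCompDirectiveNoSpace | cobra.ShellCompDirectiveNoFileComp
},
```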
***Note***: When using the `ValidArgsFunction`, Cobra will call your registered function after having parsed all flags and arguments provided in the command-line. You therefore don't need to do this parsing yourself. For example, when a user calls `helm status --namespace my-rook-ns [tab][tab]`, Cobra will call your registered `ValidArgsFunction` after having parsed the `--namespace` flag, as it would have done when calling the `RunE` function.
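As a sketch of what this enables, a `ValidArgsFunction` can simply read the already-parsed flag value (`getReleasesFromNamespace` is a hypothetical helper, not part of Cobra):

```go
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
	// Flags have already been parsed by Cobra when this function is called.
	ns, _ := cmd.Flags().GetString("namespace")
	return getReleasesFromNamespace(ns, toComplete), cobra.ShellCompDirectiveNoFileComp
},
```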
#### Debugging
Cobra achieves dynamic completion through the use of a hidden command called by the completion script. To debug your Go completion code, you can call this hidden command directly:
```bash
$ helm __complete status har<ENTER>
harbor
:4
Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr
```
***Important:*** If the noun to complete is empty (when the user has not yet typed any letters of that noun), you must pass an empty parameter to the `__complete` command:
```bash
$ helm __complete status ""<ENTER>
harbor
notary
rook
thanos
:4
Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr
```
Calling the `__complete` command directly allows you to run the Go debugger to troubleshoot your code. You can also add printouts to your code; Cobra provides the following functions to use for printouts in Go completion code:
```go
// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE
// is set to a file path) and optionally prints to stderr.
cobra.CompDebug(msg string, printToStdErr bool)
cobra.CompDebugln(msg string, printToStdErr bool)
// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE
// is set to a file path) and to stderr.
cobra.CompError(msg string)
cobra.CompErrorln(msg string)
```
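For example, a completion function might log its input like this (a sketch; the message text is arbitrary and `toComplete` is the parameter of the completion function shown earlier):

```go
// Written to the file named by BASH_COMP_DEBUG_FILE (if set), not to stdout.
cobra.CompDebugln("completing nouns for: "+toComplete, false)
```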
***Important:*** You should **not** leave traces that print directly to stdout in your completion code as they will be interpreted as completion choices by the completion script. Instead, use the cobra-provided debugging traces functions mentioned above.
## Completions for flags
### Mark flags as required
Most of the time completions will only show sub-commands. But if a flag is required to make a sub-command work, you probably want it to show up when the user types [tab][tab]. You can mark a flag as 'Required' like so:
```go
cmd.MarkFlagRequired("pod")
cmd.MarkFlagRequired("container")
```
and you'll get something like
```bash
$ kubectl exec [tab][tab]
-c --container= -p --pod=
```
### Specify dynamic flag completion
As for nouns, Cobra provides a way of defining dynamic completion of flags. To provide a Go function that Cobra will execute when it needs the list of completion choices for a flag, you must register the function using the `command.RegisterFlagCompletionFunc()` function.
```go
flagName := "output"
cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return []string{"json", "table", "yaml"}, cobra.ShellCompDirectiveDefault
})
```
Notice that calling `RegisterFlagCompletionFunc()` is done through the `command` with which the flag is associated. In our example this dynamic completion will give results like so:
```bash
$ helm status --output [tab][tab]
json table yaml
```
#### Debugging
You can also easily debug your Go completion code for flags:
```bash
$ helm __complete status --output ""
json
table
yaml
:4
Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr
```
***Important:*** You should **not** leave traces that print to stdout in your completion code as they will be interpreted as completion choices by the completion script. Instead, use the cobra-provided debugging traces functions mentioned further above.
### Specify valid filename extensions for flags that take a filename
To limit completions of flag values to file names with certain extensions you can either use the different `MarkFlagFilename()` functions or a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterFileExt`, like so:
```go
flagName := "output"
cmd.MarkFlagFilename(flagName, "yaml", "json")
```
or
```go
flagName := "output"
cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
	return []string{"yaml", "json"}, cobra.ShellCompDirectiveFilterFileExt
})
```
### Limit flag completions to directory names
To limit completions of flag values to directory names you can either use the `MarkFlagDirname()` functions or a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterDirs`, like so:
```go
flagName := "output"
cmd.MarkFlagDirname(flagName)
```
or
```go
flagName := "output"
cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return nil, cobra.ShellCompDirectiveFilterDirs
})
```
To limit completions of flag values to directory names *within another directory* you can use a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterDirs` like so:
```go
flagName := "output"
cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return []string{"themes"}, cobra.ShellCompDirectiveFilterDirs
})
```
### Descriptions for completions
Both `zsh` and `fish` allow for descriptions to annotate completion choices. For commands and flags, Cobra will provide the descriptions automatically, based on usage information. For example, using zsh:
```
$ helm s[tab]
search -- search for a keyword in charts
show -- show information of a chart
status -- displays the status of the named release
```
while using fish:
```
$ helm s[tab]
search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release)
```
Cobra allows you to add annotations to your own completions. Simply add the annotation text after each completion, following a `\t` separator. This technique applies to completions returned by `ValidArgs`, `ValidArgsFunction` and `RegisterFlagCompletionFunc()`. For example:
```go
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return []string{"harbor\tAn image registry", "thanos\tLong-term metrics"}, cobra.ShellCompDirectiveNoFileComp
},
```
or
```go
ValidArgs: []string{"bash\tCompletions for bash", "zsh\tCompletions for zsh"}
```
## Bash completions
### Dependencies
The bash completion script generated by Cobra requires the `bash_completion` package. You should update the help text of your completion command to show how to install the `bash_completion` package ([Kubectl docs](https://kubernetes.io/docs/tasks/tools/install-kubectl/#enabling-shell-autocompletion))
### Aliases
You can also configure `bash` aliases for your program, and they will support completions as well.
```bash
alias aliasname=origcommand
complete -o default -F __start_origcommand aliasname
# and now when you run `aliasname` completion will make
# suggestions as it did for `origcommand`.
$ aliasname <tab><tab>
completion firstcommand secondcommand
```
### Bash legacy dynamic completions
For backwards-compatibility, Cobra still supports its bash legacy dynamic completion solution.
Please refer to [Bash Completions](bash_completions.md) for details.
## Zsh completions
Cobra supports native Zsh completion generated from the root `cobra.Command`.
The generated completion script should be put somewhere in your `$fpath` and be named
`_<yourProgram>`. You will need to start a new shell for the completions to become available.
Zsh supports descriptions for completions. Cobra will provide the description automatically,
based on usage information. Cobra provides a way to completely disable such descriptions by
using `GenZshCompletionNoDesc()` or `GenZshCompletionFileNoDesc()`. You can choose to make
this a configurable option to your users.
```
# With descriptions
$ helm s[tab]
search -- search for a keyword in charts
show -- show information of a chart
status -- displays the status of the named release
# Without descriptions
$ helm s[tab]
search show status
```
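One possible way to expose this choice, sketched here under the assumption that your completion command defines a hypothetical `--no-descriptions` flag, is:

```go
if noDesc, _ := cmd.Flags().GetBool("no-descriptions"); noDesc {
	cmd.Root().GenZshCompletionNoDesc(os.Stdout)
} else {
	cmd.Root().GenZshCompletion(os.Stdout)
}
```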
*Note*: Because of backwards-compatibility requirements, we were forced to have a different API to disable completion descriptions between `Zsh` and `Fish`.
### Limitations
* Custom completions implemented in Bash scripting (legacy) are not supported and will be ignored for `zsh` (including the use of the `BashCompCustom` flag annotation).
* You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`).
* The function `MarkFlagCustom()` is not supported and will be ignored for `zsh`.
* You should instead use `RegisterFlagCompletionFunc()`.
### Zsh completions standardization
Cobra 1.1 standardized its zsh completion support to align it with its other shell completions. Although the API was kept backwards-compatible, some small changes in behavior were introduced.
Please refer to [Zsh Completions](zsh_completions.md) for details.
## Fish completions
Cobra supports native Fish completions generated from the root `cobra.Command`. You can use the `command.GenFishCompletion()` or `command.GenFishCompletionFile()` functions. You must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users.
```
# With descriptions
$ helm s[tab]
search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release)
# Without descriptions
$ helm s[tab]
search show status
```
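A minimal sketch of writing the script to a file, for example at install time (the `rootCmd` variable and the output path are assumptions, not Cobra requirements):

```go
// Generate the fish completion script, including descriptions, into a file.
if err := rootCmd.GenFishCompletionFile("completions/myprogram.fish", true); err != nil {
	fmt.Fprintln(os.Stderr, err)
}
```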
*Note*: Because of backwards-compatibility requirements, we were forced to have a different API to disable completion descriptions between `Zsh` and `Fish`.
### Limitations
* Custom completions implemented in Bash scripting (legacy) are not supported and will be ignored for `fish` (including the use of the `BashCompCustom` flag annotation).
* You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`).
* The function `MarkFlagCustom()` is not supported and will be ignored for `fish`.
* You should instead use `RegisterFlagCompletionFunc()`.
* The following flag completion annotations are not supported and will be ignored for `fish`:
* `BashCompFilenameExt` (filtering by file extension)
* `BashCompSubdirsInDir` (filtering by directory)
* The functions corresponding to the above annotations are consequently not supported and will be ignored for `fish`:
* `MarkFlagFilename()` and `MarkPersistentFlagFilename()` (filtering by file extension)
* `MarkFlagDirname()` and `MarkPersistentFlagDirname()` (filtering by directory)
* Similarly, the following completion directives are not supported and will be ignored for `fish`:
* `ShellCompDirectiveFilterFileExt` (filtering by file extension)
* `ShellCompDirectiveFilterDirs` (filtering by directory)
## PowerShell completions
Please refer to [PowerShell Completions](powershell_completions.md) for details.
| 8,557 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/cobra/fish_completions.go | package cobra
import (
"bytes"
"fmt"
"io"
"os"
"strings"
)
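// genFishComp writes the fish completion script for the program called name into buf,
// optionally including completion descriptions.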
func genFishComp(buf *bytes.Buffer, name string, includeDesc bool) {
// Variables should not contain a '-' or ':' character
nameForVar := name
nameForVar = strings.Replace(nameForVar, "-", "_", -1)
nameForVar = strings.Replace(nameForVar, ":", "_", -1)
compCmd := ShellCompRequestCmd
if !includeDesc {
compCmd = ShellCompNoDescRequestCmd
}
buf.WriteString(fmt.Sprintf("# fish completion for %-36s -*- shell-script -*-\n", name))
buf.WriteString(fmt.Sprintf(`
function __%[1]s_debug
set file "$BASH_COMP_DEBUG_FILE"
if test -n "$file"
echo "$argv" >> $file
end
end
function __%[1]s_perform_completion
__%[1]s_debug "Starting __%[1]s_perform_completion with: $argv"
set args (string split -- " " "$argv")
set lastArg "$args[-1]"
__%[1]s_debug "args: $args"
__%[1]s_debug "last arg: $lastArg"
set emptyArg ""
if test -z "$lastArg"
__%[1]s_debug "Setting emptyArg"
set emptyArg \"\"
end
__%[1]s_debug "emptyArg: $emptyArg"
if not type -q "$args[1]"
# This can happen when "complete --do-complete %[2]s" is called when running this script.
__%[1]s_debug "Cannot find $args[1]. No completions."
return
end
set requestComp "$args[1] %[3]s $args[2..-1] $emptyArg"
__%[1]s_debug "Calling $requestComp"
set results (eval $requestComp 2> /dev/null)
set comps $results[1..-2]
set directiveLine $results[-1]
# For Fish, when completing a flag with an = (e.g., <program> -n=<TAB>)
# completions must be prefixed with the flag
set flagPrefix (string match -r -- '-.*=' "$lastArg")
__%[1]s_debug "Comps: $comps"
__%[1]s_debug "DirectiveLine: $directiveLine"
__%[1]s_debug "flagPrefix: $flagPrefix"
for comp in $comps
printf "%%s%%s\n" "$flagPrefix" "$comp"
end
printf "%%s\n" "$directiveLine"
end
# This function does three things:
# 1- Obtain the completions and store them in the global __%[1]s_comp_results
# 2- Set the __%[1]s_comp_do_file_comp flag if file completion should be performed
# and unset it otherwise
# 3- Return true if the completion results are not empty
function __%[1]s_prepare_completions
# Start fresh
set --erase __%[1]s_comp_do_file_comp
set --erase __%[1]s_comp_results
# Check if the command-line is already provided. This is useful for testing.
if not set --query __%[1]s_comp_commandLine
# Use the -c flag to allow for completion in the middle of the line
set __%[1]s_comp_commandLine (commandline -c)
end
__%[1]s_debug "commandLine is: $__%[1]s_comp_commandLine"
set results (__%[1]s_perform_completion "$__%[1]s_comp_commandLine")
set --erase __%[1]s_comp_commandLine
__%[1]s_debug "Completion results: $results"
if test -z "$results"
__%[1]s_debug "No completion, probably due to a failure"
# Might as well do file completion, in case it helps
set --global __%[1]s_comp_do_file_comp 1
return 1
end
set directive (string sub --start 2 $results[-1])
set --global __%[1]s_comp_results $results[1..-2]
__%[1]s_debug "Completions are: $__%[1]s_comp_results"
__%[1]s_debug "Directive is: $directive"
set shellCompDirectiveError %[4]d
set shellCompDirectiveNoSpace %[5]d
set shellCompDirectiveNoFileComp %[6]d
set shellCompDirectiveFilterFileExt %[7]d
set shellCompDirectiveFilterDirs %[8]d
if test -z "$directive"
set directive 0
end
set compErr (math (math --scale 0 $directive / $shellCompDirectiveError) %% 2)
if test $compErr -eq 1
__%[1]s_debug "Received error directive: aborting."
# Might as well do file completion, in case it helps
set --global __%[1]s_comp_do_file_comp 1
return 1
end
set filefilter (math (math --scale 0 $directive / $shellCompDirectiveFilterFileExt) %% 2)
set dirfilter (math (math --scale 0 $directive / $shellCompDirectiveFilterDirs) %% 2)
if test $filefilter -eq 1; or test $dirfilter -eq 1
__%[1]s_debug "File extension filtering or directory filtering not supported"
# Do full file completion instead
set --global __%[1]s_comp_do_file_comp 1
return 1
end
set nospace (math (math --scale 0 $directive / $shellCompDirectiveNoSpace) %% 2)
set nofiles (math (math --scale 0 $directive / $shellCompDirectiveNoFileComp) %% 2)
__%[1]s_debug "nospace: $nospace, nofiles: $nofiles"
# Important not to quote the variable for count to work
set numComps (count $__%[1]s_comp_results)
__%[1]s_debug "numComps: $numComps"
if test $numComps -eq 1; and test $nospace -ne 0
# To support the "nospace" directive we trick the shell
# by outputting an extra, longer completion.
__%[1]s_debug "Adding second completion to perform nospace directive"
set --append __%[1]s_comp_results $__%[1]s_comp_results[1].
end
if test $numComps -eq 0; and test $nofiles -eq 0
__%[1]s_debug "Requesting file completion"
set --global __%[1]s_comp_do_file_comp 1
end
# If we don't want file completion, we must return true even if there
# are no completions found. This is because fish will perform the last
# completion command, even if its condition is false, if no other
# completion command was triggered
return (not set --query __%[1]s_comp_do_file_comp)
end
# Since Fish completions are only loaded once the user triggers them, we trigger them ourselves
# so we can properly delete any completions provided by another script.
# The space after the program name is essential to trigger completion for the program
# and not completion of the program name itself.
complete --do-complete "%[2]s " > /dev/null 2>&1
# Using '> /dev/null 2>&1' since '&>' is not supported in older versions of fish.
# Remove any pre-existing completions for the program since we will be handling all of them.
complete -c %[2]s -e
# The order in which the below two lines are defined is very important so that __%[1]s_prepare_completions
# is called first. It is __%[1]s_prepare_completions that sets up the __%[1]s_comp_do_file_comp variable.
#
# This completion will be run second as complete commands are added FILO.
# It triggers file completion choices when __%[1]s_comp_do_file_comp is set.
complete -c %[2]s -n 'set --query __%[1]s_comp_do_file_comp'
# This completion will be run first as complete commands are added FILO.
# The call to __%[1]s_prepare_completions will setup both __%[1]s_comp_results and __%[1]s_comp_do_file_comp.
# It provides the program's completion choices.
complete -c %[2]s -n '__%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results'
`, nameForVar, name, compCmd,
ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs))
}
// GenFishCompletion generates fish completion file and writes to the passed writer.
func (c *Command) GenFishCompletion(w io.Writer, includeDesc bool) error {
buf := new(bytes.Buffer)
genFishComp(buf, c.Name(), includeDesc)
_, err := buf.WriteTo(w)
return err
}
// GenFishCompletionFile generates fish completion file.
func (c *Command) GenFishCompletionFile(filename string, includeDesc bool) error {
outFile, err := os.Create(filename)
if err != nil {
return err
}
defer outFile.Close()
return c.GenFishCompletion(outFile, includeDesc)
}
| 8,558 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/cobra/command_win.go | // +build windows
package cobra
import (
"fmt"
"os"
"time"
"github.com/inconshreveable/mousetrap"
)
var preExecHookFn = preExecHook
func preExecHook(c *Command) {
if MousetrapHelpText != "" && mousetrap.StartedByExplorer() {
c.Print(MousetrapHelpText)
if MousetrapDisplayDuration > 0 {
time.Sleep(MousetrapDisplayDuration)
} else {
c.Println("Press return to continue...")
fmt.Scanln()
}
os.Exit(1)
}
}
| 8,559 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/cobra/command_notwin.go | // +build !windows
package cobra
var preExecHookFn func(*Command)
| 8,560 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/cobra/.mailmap | Steve Francia <[email protected]>
Bjørn Erik Pedersen <[email protected]>
Fabiano Franz <[email protected]> <[email protected]>
| 8,561 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/cobra/cobra.go | // Copyright © 2013 Steve Francia <[email protected]>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Commands similar to git, go tools and other modern CLI tools
// inspired by go, go-Commander, gh and subcommand
package cobra
import (
"fmt"
"io"
"reflect"
"strconv"
"strings"
"text/template"
"time"
"unicode"
)
var templateFuncs = template.FuncMap{
"trim": strings.TrimSpace,
"trimRightSpace": trimRightSpace,
"trimTrailingWhitespaces": trimRightSpace,
"appendIfNotPresent": appendIfNotPresent,
"rpad": rpad,
"gt": Gt,
"eq": Eq,
}
var initializers []func()
// EnablePrefixMatching allows setting automatic prefix matching. Automatic prefix matching can be a dangerous thing
// to enable automatically in CLI tools.
// Set this to true to enable it.
var EnablePrefixMatching = false
// EnableCommandSorting controls sorting of the slice of commands, which is turned on by default.
// To disable sorting, set it to false.
var EnableCommandSorting = true
// MousetrapHelpText enables an information splash screen on Windows
// if the CLI is started from explorer.exe.
// To disable the mousetrap, just set this variable to blank string ("").
// Works only on Microsoft Windows.
var MousetrapHelpText = `This is a command line tool.
You need to open cmd.exe and run it from there.
`
// MousetrapDisplayDuration controls how long the MousetrapHelpText message is displayed on Windows
// if the CLI is started from explorer.exe. Set to 0 to wait for the return key to be pressed.
// To disable the mousetrap, just set MousetrapHelpText to blank string ("").
// Works only on Microsoft Windows.
var MousetrapDisplayDuration = 5 * time.Second
// AddTemplateFunc adds a template function that's available to Usage and Help
// template generation.
func AddTemplateFunc(name string, tmplFunc interface{}) {
templateFuncs[name] = tmplFunc
}
// AddTemplateFuncs adds multiple template functions that are available to Usage and
// Help template generation.
func AddTemplateFuncs(tmplFuncs template.FuncMap) {
for k, v := range tmplFuncs {
templateFuncs[k] = v
}
}
// OnInitialize sets the passed functions to be run when each command's
// Execute method is called.
func OnInitialize(y ...func()) {
initializers = append(initializers, y...)
}
// FIXME Gt is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
// Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans,
// Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as
// ints and then compared.
func Gt(a interface{}, b interface{}) bool {
var left, right int64
av := reflect.ValueOf(a)
switch av.Kind() {
case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
left = int64(av.Len())
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
left = av.Int()
case reflect.String:
left, _ = strconv.ParseInt(av.String(), 10, 64)
}
bv := reflect.ValueOf(b)
switch bv.Kind() {
case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
right = int64(bv.Len())
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
right = bv.Int()
case reflect.String:
right, _ = strconv.ParseInt(bv.String(), 10, 64)
}
return left > right
}
// FIXME Eq is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
// Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic.
func Eq(a interface{}, b interface{}) bool {
av := reflect.ValueOf(a)
bv := reflect.ValueOf(b)
switch av.Kind() {
case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
panic("Eq called on unsupported type")
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return av.Int() == bv.Int()
case reflect.String:
return av.String() == bv.String()
}
return false
}
func trimRightSpace(s string) string {
return strings.TrimRightFunc(s, unicode.IsSpace)
}
// FIXME appendIfNotPresent is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s.
func appendIfNotPresent(s, stringToAppend string) string {
if strings.Contains(s, stringToAppend) {
return s
}
return s + " " + stringToAppend
}
// rpad adds padding to the right of a string.
func rpad(s string, padding int) string {
template := fmt.Sprintf("%%-%ds", padding)
return fmt.Sprintf(template, s)
}
// tmpl executes the given template text on data, writing the result to w.
func tmpl(w io.Writer, text string, data interface{}) error {
t := template.New("top")
t.Funcs(templateFuncs)
template.Must(t.Parse(text))
return t.Execute(w, data)
}
// ld compares two strings and returns the levenshtein distance between them.
func ld(s, t string, ignoreCase bool) int {
if ignoreCase {
s = strings.ToLower(s)
t = strings.ToLower(t)
}
d := make([][]int, len(s)+1)
for i := range d {
d[i] = make([]int, len(t)+1)
}
for i := range d {
d[i][0] = i
}
for j := range d[0] {
d[0][j] = j
}
for j := 1; j <= len(t); j++ {
for i := 1; i <= len(s); i++ {
if s[i-1] == t[j-1] {
d[i][j] = d[i-1][j-1]
} else {
min := d[i-1][j]
if d[i][j-1] < min {
min = d[i][j-1]
}
if d[i-1][j-1] < min {
min = d[i-1][j-1]
}
d[i][j] = min + 1
}
}
}
return d[len(s)][len(t)]
}
func stringInSlice(a string, list []string) bool {
for _, b := range list {
if b == a {
return true
}
}
return false
}
| 8,562 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/cobra/powershell_completions.go | // PowerShell completions are based on the amazing work from clap:
// https://github.com/clap-rs/clap/blob/3294d18efe5f264d12c9035f404c7d189d4824e1/src/completions/powershell.rs
//
// The generated scripts require PowerShell v5.0+ (which comes with Windows 10, but
// can be downloaded separately for Windows 7 or 8.1).
package cobra
import (
"bytes"
"fmt"
"io"
"os"
"strings"
"github.com/spf13/pflag"
)
var powerShellCompletionTemplate = `using namespace System.Management.Automation
using namespace System.Management.Automation.Language
Register-ArgumentCompleter -Native -CommandName '%s' -ScriptBlock {
param($wordToComplete, $commandAst, $cursorPosition)
$commandElements = $commandAst.CommandElements
$command = @(
'%s'
for ($i = 1; $i -lt $commandElements.Count; $i++) {
$element = $commandElements[$i]
if ($element -isnot [StringConstantExpressionAst] -or
$element.StringConstantType -ne [StringConstantType]::BareWord -or
$element.Value.StartsWith('-')) {
break
}
$element.Value
}
) -join ';'
$completions = @(switch ($command) {%s
})
$completions.Where{ $_.CompletionText -like "$wordToComplete*" } |
Sort-Object -Property ListItemText
}`
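// generatePowerShellSubcommandCases recursively emits one switch case per command path,
// listing that command's flags and sub-commands as completion results.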
func generatePowerShellSubcommandCases(out io.Writer, cmd *Command, previousCommandName string) {
var cmdName string
if previousCommandName == "" {
cmdName = cmd.Name()
} else {
cmdName = fmt.Sprintf("%s;%s", previousCommandName, cmd.Name())
}
fmt.Fprintf(out, "\n '%s' {", cmdName)
cmd.Flags().VisitAll(func(flag *pflag.Flag) {
if nonCompletableFlag(flag) {
return
}
usage := escapeStringForPowerShell(flag.Usage)
if len(flag.Shorthand) > 0 {
fmt.Fprintf(out, "\n [CompletionResult]::new('-%s', '%s', [CompletionResultType]::ParameterName, '%s')", flag.Shorthand, flag.Shorthand, usage)
}
fmt.Fprintf(out, "\n [CompletionResult]::new('--%s', '%s', [CompletionResultType]::ParameterName, '%s')", flag.Name, flag.Name, usage)
})
for _, subCmd := range cmd.Commands() {
usage := escapeStringForPowerShell(subCmd.Short)
fmt.Fprintf(out, "\n [CompletionResult]::new('%s', '%s', [CompletionResultType]::ParameterValue, '%s')", subCmd.Name(), subCmd.Name(), usage)
}
fmt.Fprint(out, "\n break\n }")
for _, subCmd := range cmd.Commands() {
generatePowerShellSubcommandCases(out, subCmd, cmdName)
}
}
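// escapeStringForPowerShell doubles single quotes so the string can be embedded
// in a single-quoted PowerShell string literal.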
func escapeStringForPowerShell(s string) string {
return strings.Replace(s, "'", "''", -1)
}
// GenPowerShellCompletion generates PowerShell completion file and writes to the passed writer.
func (c *Command) GenPowerShellCompletion(w io.Writer) error {
buf := new(bytes.Buffer)
var subCommandCases bytes.Buffer
generatePowerShellSubcommandCases(&subCommandCases, c, "")
fmt.Fprintf(buf, powerShellCompletionTemplate, c.Name(), c.Name(), subCommandCases.String())
_, err := buf.WriteTo(w)
return err
}
// GenPowerShellCompletionFile generates PowerShell completion file.
func (c *Command) GenPowerShellCompletionFile(filename string) error {
outFile, err := os.Create(filename)
if err != nil {
return err
}
defer outFile.Close()
return c.GenPowerShellCompletion(outFile)
}
| 8,563 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/cobra/go.sum | cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
| 8,564 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/cobra/bash_completions.go | package cobra
import (
"bytes"
"fmt"
"io"
"os"
"sort"
"strings"
"github.com/spf13/pflag"
)
// Annotations for Bash completion.
const (
BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extensions"
BashCompCustom = "cobra_annotation_bash_completion_custom"
BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag"
BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir"
)
func writePreamble(buf *bytes.Buffer, name string) {
buf.WriteString(fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name))
buf.WriteString(fmt.Sprintf(`
__%[1]s_debug()
{
if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then
echo "$*" >> "${BASH_COMP_DEBUG_FILE}"
fi
}
# Homebrew on Macs has version 1.3 of bash-completion which doesn't include
# _init_completion. This is a very minimal version of that function.
__%[1]s_init_completion()
{
COMPREPLY=()
_get_comp_words_by_ref "$@" cur prev words cword
}
__%[1]s_index_of_word()
{
local w word=$1
shift
index=0
for w in "$@"; do
[[ $w = "$word" ]] && return
index=$((index+1))
done
index=-1
}
__%[1]s_contains_word()
{
local w word=$1; shift
for w in "$@"; do
[[ $w = "$word" ]] && return
done
return 1
}
__%[1]s_handle_go_custom_completion()
{
__%[1]s_debug "${FUNCNAME[0]}: cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}"
local shellCompDirectiveError=%[3]d
local shellCompDirectiveNoSpace=%[4]d
local shellCompDirectiveNoFileComp=%[5]d
local shellCompDirectiveFilterFileExt=%[6]d
local shellCompDirectiveFilterDirs=%[7]d
local out requestComp lastParam lastChar comp directive args
# Prepare the command to request completions for the program.
    # Calling ${words[0]} instead of directly %[1]s allows us to handle aliases
args=("${words[@]:1}")
requestComp="${words[0]} %[2]s ${args[*]}"
lastParam=${words[$((${#words[@]}-1))]}
lastChar=${lastParam:$((${#lastParam}-1)):1}
__%[1]s_debug "${FUNCNAME[0]}: lastParam ${lastParam}, lastChar ${lastChar}"
if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then
# If the last parameter is complete (there is a space following it)
# We add an extra empty parameter so we can indicate this to the go method.
__%[1]s_debug "${FUNCNAME[0]}: Adding extra empty parameter"
requestComp="${requestComp} \"\""
fi
__%[1]s_debug "${FUNCNAME[0]}: calling ${requestComp}"
# Use eval to handle any environment variables and such
out=$(eval "${requestComp}" 2>/dev/null)
# Extract the directive integer at the very end of the output following a colon (:)
directive=${out##*:}
# Remove the directive
out=${out%%:*}
if [ "${directive}" = "${out}" ]; then
        # There is no directive specified
directive=0
fi
__%[1]s_debug "${FUNCNAME[0]}: the completion directive is: ${directive}"
__%[1]s_debug "${FUNCNAME[0]}: the completions are: ${out[*]}"
if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then
# Error code. No completion.
__%[1]s_debug "${FUNCNAME[0]}: received error from custom completion go code"
return
else
if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then
if [[ $(type -t compopt) = "builtin" ]]; then
__%[1]s_debug "${FUNCNAME[0]}: activating no space"
compopt -o nospace
fi
fi
if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then
if [[ $(type -t compopt) = "builtin" ]]; then
__%[1]s_debug "${FUNCNAME[0]}: activating no file completion"
compopt +o default
fi
fi
fi
if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then
# File extension filtering
local fullFilter filter filteringCmd
# Do not use quotes around the $out variable or else newline
# characters will be kept.
for filter in ${out[*]}; do
fullFilter+="$filter|"
done
filteringCmd="_filedir $fullFilter"
__%[1]s_debug "File filtering command: $filteringCmd"
$filteringCmd
elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then
# File completion for directories only
local subDir
# Use printf to strip any trailing newline
subdir=$(printf "%%s" "${out[0]}")
if [ -n "$subdir" ]; then
__%[1]s_debug "Listing directories in $subdir"
__%[1]s_handle_subdirs_in_dir_flag "$subdir"
else
__%[1]s_debug "Listing directories in ."
_filedir -d
fi
else
while IFS='' read -r comp; do
COMPREPLY+=("$comp")
done < <(compgen -W "${out[*]}" -- "$cur")
fi
}
__%[1]s_handle_reply()
{
__%[1]s_debug "${FUNCNAME[0]}"
local comp
case $cur in
-*)
if [[ $(type -t compopt) = "builtin" ]]; then
compopt -o nospace
fi
local allflags
if [ ${#must_have_one_flag[@]} -ne 0 ]; then
allflags=("${must_have_one_flag[@]}")
else
allflags=("${flags[*]} ${two_word_flags[*]}")
fi
while IFS='' read -r comp; do
COMPREPLY+=("$comp")
done < <(compgen -W "${allflags[*]}" -- "$cur")
if [[ $(type -t compopt) = "builtin" ]]; then
[[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace
fi
# complete after --flag=abc
if [[ $cur == *=* ]]; then
if [[ $(type -t compopt) = "builtin" ]]; then
compopt +o nospace
fi
local index flag
flag="${cur%%=*}"
__%[1]s_index_of_word "${flag}" "${flags_with_completion[@]}"
COMPREPLY=()
if [[ ${index} -ge 0 ]]; then
PREFIX=""
cur="${cur#*=}"
${flags_completion[${index}]}
if [ -n "${ZSH_VERSION}" ]; then
# zsh completion needs --flag= prefix
eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )"
fi
fi
fi
return 0;
;;
esac
    # check if we are handling a flag with special word handling
local index
__%[1]s_index_of_word "${prev}" "${flags_with_completion[@]}"
if [[ ${index} -ge 0 ]]; then
${flags_completion[${index}]}
return
fi
# we are parsing a flag and don't have a special handler, no completion
if [[ ${cur} != "${words[cword]}" ]]; then
return
fi
local completions
completions=("${commands[@]}")
if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then
completions+=("${must_have_one_noun[@]}")
elif [[ -n "${has_completion_function}" ]]; then
# if a go completion function is provided, defer to that function
__%[1]s_handle_go_custom_completion
fi
if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then
completions+=("${must_have_one_flag[@]}")
fi
while IFS='' read -r comp; do
COMPREPLY+=("$comp")
done < <(compgen -W "${completions[*]}" -- "$cur")
if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then
while IFS='' read -r comp; do
COMPREPLY+=("$comp")
done < <(compgen -W "${noun_aliases[*]}" -- "$cur")
fi
if [[ ${#COMPREPLY[@]} -eq 0 ]]; then
if declare -F __%[1]s_custom_func >/dev/null; then
# try command name qualified custom func
__%[1]s_custom_func
else
# otherwise fall back to unqualified for compatibility
declare -F __custom_func >/dev/null && __custom_func
fi
fi
# available in bash-completion >= 2, not always present on macOS
if declare -F __ltrim_colon_completions >/dev/null; then
__ltrim_colon_completions "$cur"
fi
# If there is only 1 completion and it is a flag with an = it will be completed
# but we don't want a space after the =
if [[ "${#COMPREPLY[@]}" -eq "1" ]] && [[ $(type -t compopt) = "builtin" ]] && [[ "${COMPREPLY[0]}" == --*= ]]; then
compopt -o nospace
fi
}
# The arguments should be in the form "ext1|ext2|extn"
__%[1]s_handle_filename_extension_flag()
{
local ext="$1"
_filedir "@(${ext})"
}
__%[1]s_handle_subdirs_in_dir_flag()
{
local dir="$1"
pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return
}
__%[1]s_handle_flag()
{
__%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
# if a command required a flag, and we found it, unset must_have_one_flag()
local flagname=${words[c]}
local flagvalue
# if the word contained an =
if [[ ${words[c]} == *"="* ]]; then
flagvalue=${flagname#*=} # take in as flagvalue after the =
flagname=${flagname%%=*} # strip everything after the =
flagname="${flagname}=" # but put the = back
fi
__%[1]s_debug "${FUNCNAME[0]}: looking for ${flagname}"
if __%[1]s_contains_word "${flagname}" "${must_have_one_flag[@]}"; then
must_have_one_flag=()
fi
# if you set a flag which only applies to this command, don't show subcommands
if __%[1]s_contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then
commands=()
fi
# keep flag value with flagname as flaghash
# flaghash variable is an associative array which is only supported in bash > 3.
if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then
if [ -n "${flagvalue}" ] ; then
flaghash[${flagname}]=${flagvalue}
elif [ -n "${words[ $((c+1)) ]}" ] ; then
flaghash[${flagname}]=${words[ $((c+1)) ]}
else
flaghash[${flagname}]="true" # pad "true" for bool flag
fi
fi
# skip the argument to a two word flag
if [[ ${words[c]} != *"="* ]] && __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then
__%[1]s_debug "${FUNCNAME[0]}: found a flag ${words[c]}, skip the next argument"
c=$((c+1))
# if we are looking for a flags value, don't show commands
if [[ $c -eq $cword ]]; then
commands=()
fi
fi
c=$((c+1))
}
__%[1]s_handle_noun()
{
__%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
if __%[1]s_contains_word "${words[c]}" "${must_have_one_noun[@]}"; then
must_have_one_noun=()
elif __%[1]s_contains_word "${words[c]}" "${noun_aliases[@]}"; then
must_have_one_noun=()
fi
nouns+=("${words[c]}")
c=$((c+1))
}
__%[1]s_handle_command()
{
__%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
local next_command
if [[ -n ${last_command} ]]; then
next_command="_${last_command}_${words[c]//:/__}"
else
if [[ $c -eq 0 ]]; then
next_command="_%[1]s_root_command"
else
next_command="_${words[c]//:/__}"
fi
fi
c=$((c+1))
__%[1]s_debug "${FUNCNAME[0]}: looking for ${next_command}"
declare -F "$next_command" >/dev/null && $next_command
}
__%[1]s_handle_word()
{
if [[ $c -ge $cword ]]; then
__%[1]s_handle_reply
return
fi
__%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
if [[ "${words[c]}" == -* ]]; then
__%[1]s_handle_flag
elif __%[1]s_contains_word "${words[c]}" "${commands[@]}"; then
__%[1]s_handle_command
elif [[ $c -eq 0 ]]; then
__%[1]s_handle_command
elif __%[1]s_contains_word "${words[c]}" "${command_aliases[@]}"; then
# aliashash variable is an associative array which is only supported in bash > 3.
if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then
words[c]=${aliashash[${words[c]}]}
__%[1]s_handle_command
else
__%[1]s_handle_noun
fi
else
__%[1]s_handle_noun
fi
__%[1]s_handle_word
}
`, name, ShellCompNoDescRequestCmd,
ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs))
}
func writePostscript(buf *bytes.Buffer, name string) {
name = strings.Replace(name, ":", "__", -1)
buf.WriteString(fmt.Sprintf("__start_%s()\n", name))
buf.WriteString(fmt.Sprintf(`{
local cur prev words cword
declare -A flaghash 2>/dev/null || :
declare -A aliashash 2>/dev/null || :
if declare -F _init_completion >/dev/null 2>&1; then
_init_completion -s || return
else
__%[1]s_init_completion -n "=" || return
fi
local c=0
local flags=()
local two_word_flags=()
local local_nonpersistent_flags=()
local flags_with_completion=()
local flags_completion=()
local commands=("%[1]s")
local must_have_one_flag=()
local must_have_one_noun=()
local has_completion_function
local last_command
local nouns=()
__%[1]s_handle_word
}
`, name))
buf.WriteString(fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then
complete -o default -F __start_%s %s
else
complete -o default -o nospace -F __start_%s %s
fi
`, name, name, name, name))
buf.WriteString("# ex: ts=4 sw=4 et filetype=sh\n")
}
func writeCommands(buf *bytes.Buffer, cmd *Command) {
buf.WriteString(" commands=()\n")
for _, c := range cmd.Commands() {
if !c.IsAvailableCommand() && c != cmd.helpCommand {
continue
}
buf.WriteString(fmt.Sprintf(" commands+=(%q)\n", c.Name()))
writeCmdAliases(buf, c)
}
buf.WriteString("\n")
}
func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]string, cmd *Command) {
for key, value := range annotations {
switch key {
case BashCompFilenameExt:
buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
var ext string
if len(value) > 0 {
ext = fmt.Sprintf("__%s_handle_filename_extension_flag ", cmd.Root().Name()) + strings.Join(value, "|")
} else {
ext = "_filedir"
}
buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext))
case BashCompCustom:
buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
if len(value) > 0 {
handlers := strings.Join(value, "; ")
buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", handlers))
} else {
buf.WriteString(" flags_completion+=(:)\n")
}
case BashCompSubdirsInDir:
buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
var ext string
if len(value) == 1 {
ext = fmt.Sprintf("__%s_handle_subdirs_in_dir_flag ", cmd.Root().Name()) + value[0]
} else {
ext = "_filedir -d"
}
buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext))
}
}
}
func writeShortFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) {
name := flag.Shorthand
format := " "
if len(flag.NoOptDefVal) == 0 {
format += "two_word_"
}
format += "flags+=(\"-%s\")\n"
buf.WriteString(fmt.Sprintf(format, name))
writeFlagHandler(buf, "-"+name, flag.Annotations, cmd)
}
func writeFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) {
name := flag.Name
format := " flags+=(\"--%s"
if len(flag.NoOptDefVal) == 0 {
format += "="
}
format += "\")\n"
buf.WriteString(fmt.Sprintf(format, name))
if len(flag.NoOptDefVal) == 0 {
format = " two_word_flags+=(\"--%s\")\n"
buf.WriteString(fmt.Sprintf(format, name))
}
writeFlagHandler(buf, "--"+name, flag.Annotations, cmd)
}
func writeLocalNonPersistentFlag(buf *bytes.Buffer, flag *pflag.Flag) {
name := flag.Name
format := " local_nonpersistent_flags+=(\"--%[1]s\")\n"
if len(flag.NoOptDefVal) == 0 {
format += " local_nonpersistent_flags+=(\"--%[1]s=\")\n"
}
buf.WriteString(fmt.Sprintf(format, name))
if len(flag.Shorthand) > 0 {
buf.WriteString(fmt.Sprintf(" local_nonpersistent_flags+=(\"-%s\")\n", flag.Shorthand))
}
}
// Setup annotations for go completions for registered flags
func prepareCustomAnnotationsForFlags(cmd *Command) {
for flag := range flagCompletionFunctions {
// Make sure the completion script calls the __*_go_custom_completion function for
// every registered flag. We need to do this here (and not when the flag was registered
// for completion) so that we can know the root command name for the prefix
// of __<prefix>_go_custom_completion
if flag.Annotations == nil {
flag.Annotations = map[string][]string{}
}
flag.Annotations[BashCompCustom] = []string{fmt.Sprintf("__%[1]s_handle_go_custom_completion", cmd.Root().Name())}
}
}
func writeFlags(buf *bytes.Buffer, cmd *Command) {
prepareCustomAnnotationsForFlags(cmd)
buf.WriteString(` flags=()
two_word_flags=()
local_nonpersistent_flags=()
flags_with_completion=()
flags_completion=()
`)
localNonPersistentFlags := cmd.LocalNonPersistentFlags()
cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
if nonCompletableFlag(flag) {
return
}
writeFlag(buf, flag, cmd)
if len(flag.Shorthand) > 0 {
writeShortFlag(buf, flag, cmd)
}
// localNonPersistentFlags are used to stop the completion of subcommands when one is set
// if TraverseChildren is true we should allow to complete subcommands
if localNonPersistentFlags.Lookup(flag.Name) != nil && !cmd.Root().TraverseChildren {
writeLocalNonPersistentFlag(buf, flag)
}
})
cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
if nonCompletableFlag(flag) {
return
}
writeFlag(buf, flag, cmd)
if len(flag.Shorthand) > 0 {
writeShortFlag(buf, flag, cmd)
}
})
buf.WriteString("\n")
}
func writeRequiredFlag(buf *bytes.Buffer, cmd *Command) {
buf.WriteString(" must_have_one_flag=()\n")
flags := cmd.NonInheritedFlags()
flags.VisitAll(func(flag *pflag.Flag) {
if nonCompletableFlag(flag) {
return
}
for key := range flag.Annotations {
switch key {
case BashCompOneRequiredFlag:
format := " must_have_one_flag+=(\"--%s"
if flag.Value.Type() != "bool" {
format += "="
}
format += "\")\n"
buf.WriteString(fmt.Sprintf(format, flag.Name))
if len(flag.Shorthand) > 0 {
buf.WriteString(fmt.Sprintf(" must_have_one_flag+=(\"-%s\")\n", flag.Shorthand))
}
}
}
})
}
func writeRequiredNouns(buf *bytes.Buffer, cmd *Command) {
buf.WriteString(" must_have_one_noun=()\n")
sort.Sort(sort.StringSlice(cmd.ValidArgs))
for _, value := range cmd.ValidArgs {
// Remove any description that may be included following a tab character.
// Descriptions are not supported by bash completion.
value = strings.Split(value, "\t")[0]
buf.WriteString(fmt.Sprintf(" must_have_one_noun+=(%q)\n", value))
}
if cmd.ValidArgsFunction != nil {
buf.WriteString(" has_completion_function=1\n")
}
}
func writeCmdAliases(buf *bytes.Buffer, cmd *Command) {
if len(cmd.Aliases) == 0 {
return
}
sort.Sort(sort.StringSlice(cmd.Aliases))
buf.WriteString(fmt.Sprint(` if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then`, "\n"))
for _, value := range cmd.Aliases {
buf.WriteString(fmt.Sprintf(" command_aliases+=(%q)\n", value))
buf.WriteString(fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name()))
}
buf.WriteString(` fi`)
buf.WriteString("\n")
}
func writeArgAliases(buf *bytes.Buffer, cmd *Command) {
buf.WriteString(" noun_aliases=()\n")
sort.Sort(sort.StringSlice(cmd.ArgAliases))
for _, value := range cmd.ArgAliases {
buf.WriteString(fmt.Sprintf(" noun_aliases+=(%q)\n", value))
}
}
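// gen recursively writes a completion function for cmd and each of its available subcommands.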
func gen(buf *bytes.Buffer, cmd *Command) {
for _, c := range cmd.Commands() {
if !c.IsAvailableCommand() && c != cmd.helpCommand {
continue
}
gen(buf, c)
}
commandName := cmd.CommandPath()
commandName = strings.Replace(commandName, " ", "_", -1)
commandName = strings.Replace(commandName, ":", "__", -1)
if cmd.Root() == cmd {
buf.WriteString(fmt.Sprintf("_%s_root_command()\n{\n", commandName))
} else {
buf.WriteString(fmt.Sprintf("_%s()\n{\n", commandName))
}
buf.WriteString(fmt.Sprintf(" last_command=%q\n", commandName))
buf.WriteString("\n")
buf.WriteString(" command_aliases=()\n")
buf.WriteString("\n")
writeCommands(buf, cmd)
writeFlags(buf, cmd)
writeRequiredFlag(buf, cmd)
writeRequiredNouns(buf, cmd)
writeArgAliases(buf, cmd)
buf.WriteString("}\n\n")
}
// GenBashCompletion generates a bash completion file and writes it to the passed writer.
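// A minimal usage sketch (assuming rootCmd is your application's root *Command):
//
//	if err := rootCmd.GenBashCompletion(os.Stdout); err != nil {
//		log.Fatal(err)
//	}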
func (c *Command) GenBashCompletion(w io.Writer) error {
buf := new(bytes.Buffer)
writePreamble(buf, c.Name())
if len(c.BashCompletionFunction) > 0 {
buf.WriteString(c.BashCompletionFunction + "\n")
}
gen(buf, c)
writePostscript(buf, c.Name())
_, err := buf.WriteTo(w)
return err
}
func nonCompletableFlag(flag *pflag.Flag) bool {
return flag.Hidden || len(flag.Deprecated) > 0
}
// GenBashCompletionFile generates a bash completion file.
func (c *Command) GenBashCompletionFile(filename string) error {
outFile, err := os.Create(filename)
if err != nil {
return err
}
defer outFile.Close()
return c.GenBashCompletion(outFile)
}
| 8,565 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/cobra/zsh_completions.go | package cobra
import (
"bytes"
"fmt"
"io"
"os"
)
// GenZshCompletionFile generates a zsh completion file including descriptions.
func (c *Command) GenZshCompletionFile(filename string) error {
return c.genZshCompletionFile(filename, true)
}
// GenZshCompletion generates a zsh completion file including descriptions
// and writes it to the passed writer.
func (c *Command) GenZshCompletion(w io.Writer) error {
return c.genZshCompletion(w, true)
}
// GenZshCompletionFileNoDesc generates a zsh completion file without descriptions.
func (c *Command) GenZshCompletionFileNoDesc(filename string) error {
return c.genZshCompletionFile(filename, false)
}
// GenZshCompletionNoDesc generates a zsh completion file without descriptions
// and writes it to the passed writer.
func (c *Command) GenZshCompletionNoDesc(w io.Writer) error {
return c.genZshCompletion(w, false)
}
// MarkZshCompPositionalArgumentFile only worked for zsh and its behavior was
// not consistent with Bash completion. It has therefore been disabled.
// Instead, when no other completion is specified, file completion is done by
// default for every argument. One can disable file completion on a per-argument
// basis by using ValidArgsFunction and ShellCompDirectiveNoFileComp.
// To achieve file extension filtering, one can use ValidArgsFunction and
// ShellCompDirectiveFilterFileExt.
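// A sketch of that approach (the extensions shown are illustrative):
//
//	cmd.ValidArgsFunction = func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
//		return []string{"yaml", "yml", "json"}, ShellCompDirectiveFilterFileExt
//	}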
//
// Deprecated
func (c *Command) MarkZshCompPositionalArgumentFile(argPosition int, patterns ...string) error {
return nil
}
// MarkZshCompPositionalArgumentWords only worked for zsh. It has therefore
// been disabled.
// To achieve the same behavior across all shells, one can use
// ValidArgs (for the first argument only) or ValidArgsFunction for
// any argument (can include the first one also).
//
// Deprecated
func (c *Command) MarkZshCompPositionalArgumentWords(argPosition int, words ...string) error {
return nil
}
func (c *Command) genZshCompletionFile(filename string, includeDesc bool) error {
outFile, err := os.Create(filename)
if err != nil {
return err
}
defer outFile.Close()
return c.genZshCompletion(outFile, includeDesc)
}
func (c *Command) genZshCompletion(w io.Writer, includeDesc bool) error {
buf := new(bytes.Buffer)
genZshComp(buf, c.Name(), includeDesc)
_, err := buf.WriteTo(w)
return err
}
func genZshComp(buf *bytes.Buffer, name string, includeDesc bool) {
compCmd := ShellCompRequestCmd
if !includeDesc {
compCmd = ShellCompNoDescRequestCmd
}
buf.WriteString(fmt.Sprintf(`#compdef _%[1]s %[1]s
# zsh completion for %-36[1]s -*- shell-script -*-
__%[1]s_debug()
{
local file="$BASH_COMP_DEBUG_FILE"
if [[ -n ${file} ]]; then
echo "$*" >> "${file}"
fi
}
_%[1]s()
{
local shellCompDirectiveError=%[3]d
local shellCompDirectiveNoSpace=%[4]d
local shellCompDirectiveNoFileComp=%[5]d
local shellCompDirectiveFilterFileExt=%[6]d
local shellCompDirectiveFilterDirs=%[7]d
local lastParam lastChar flagPrefix requestComp out directive compCount comp lastComp
local -a completions
__%[1]s_debug "\n========= starting completion logic =========="
__%[1]s_debug "CURRENT: ${CURRENT}, words[*]: ${words[*]}"
# The user could have moved the cursor backwards on the command-line.
# We need to trigger completion from the $CURRENT location, so we need
# to truncate the command-line ($words) up to the $CURRENT location.
# (We cannot use $CURSOR as its value does not work when a command is an alias.)
words=("${=words[1,CURRENT]}")
__%[1]s_debug "Truncated words[*]: ${words[*]},"
lastParam=${words[-1]}
lastChar=${lastParam[-1]}
__%[1]s_debug "lastParam: ${lastParam}, lastChar: ${lastChar}"
# For zsh, when completing a flag with an = (e.g., %[1]s -n=<TAB>)
# completions must be prefixed with the flag
setopt local_options BASH_REMATCH
if [[ "${lastParam}" =~ '-.*=' ]]; then
# We are dealing with a flag with an =
flagPrefix="-P ${BASH_REMATCH}"
fi
# Prepare the command to obtain completions
requestComp="${words[1]} %[2]s ${words[2,-1]}"
if [ "${lastChar}" = "" ]; then
# If the last parameter is complete (there is a space following it)
# We add an extra empty parameter so we can indicate this to the go completion code.
__%[1]s_debug "Adding extra empty parameter"
requestComp="${requestComp} \"\""
fi
__%[1]s_debug "About to call: eval ${requestComp}"
# Use eval to handle any environment variables and such
out=$(eval ${requestComp} 2>/dev/null)
__%[1]s_debug "completion output: ${out}"
# Extract the directive integer following a : from the last line
local lastLine
while IFS='\n' read -r line; do
lastLine=${line}
done < <(printf "%%s\n" "${out[@]}")
__%[1]s_debug "last line: ${lastLine}"
if [ "${lastLine[1]}" = : ]; then
directive=${lastLine[2,-1]}
# Remove the directive including the : and the newline
local suffix
(( suffix=${#lastLine}+2))
out=${out[1,-$suffix]}
else
# There is no directive specified. Leave $out as is.
        __%[1]s_debug "No directive found. Setting to default"
directive=0
fi
__%[1]s_debug "directive: ${directive}"
__%[1]s_debug "completions: ${out}"
__%[1]s_debug "flagPrefix: ${flagPrefix}"
if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then
__%[1]s_debug "Completion received error. Ignoring completions."
return
fi
compCount=0
while IFS='\n' read -r comp; do
if [ -n "$comp" ]; then
# If requested, completions are returned with a description.
# The description is preceded by a TAB character.
# For zsh's _describe, we need to use a : instead of a TAB.
# We first need to escape any : as part of the completion itself.
comp=${comp//:/\\:}
local tab=$(printf '\t')
comp=${comp//$tab/:}
((compCount++))
__%[1]s_debug "Adding completion: ${comp}"
completions+=${comp}
lastComp=$comp
fi
done < <(printf "%%s\n" "${out[@]}")
if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then
# File extension filtering
local filteringCmd
filteringCmd='_files'
for filter in ${completions[@]}; do
if [ ${filter[1]} != '*' ]; then
# zsh requires a glob pattern to do file filtering
filter="\*.$filter"
fi
filteringCmd+=" -g $filter"
done
filteringCmd+=" ${flagPrefix}"
__%[1]s_debug "File filtering command: $filteringCmd"
_arguments '*:filename:'"$filteringCmd"
elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then
# File completion for directories only
local subDir
subdir="${completions[1]}"
if [ -n "$subdir" ]; then
__%[1]s_debug "Listing directories in $subdir"
pushd "${subdir}" >/dev/null 2>&1
else
__%[1]s_debug "Listing directories in ."
fi
_arguments '*:dirname:_files -/'" ${flagPrefix}"
if [ -n "$subdir" ]; then
popd >/dev/null 2>&1
fi
elif [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ] && [ ${compCount} -eq 1 ]; then
__%[1]s_debug "Activating nospace."
# We can use compadd here as there is no description when
# there is only one completion.
compadd -S '' "${lastComp}"
elif [ ${compCount} -eq 0 ]; then
if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then
__%[1]s_debug "deactivating file completion"
else
# Perform file completion
__%[1]s_debug "activating file completion"
_arguments '*:filename:_files'" ${flagPrefix}"
fi
else
_describe "completions" completions $(echo $flagPrefix)
fi
}
# don't run the completion function when being source-ed or eval-ed
if [ "$funcstack[1]" = "_%[1]s" ]; then
_%[1]s
fi
`, name, compCmd,
ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs))
}
| 8,566 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/cobra/projects_using_cobra.md | ## Projects using Cobra
- [Arduino CLI](https://github.com/arduino/arduino-cli)
- [Bleve](http://www.blevesearch.com/)
- [CockroachDB](http://www.cockroachlabs.com/)
- [Cosmos SDK](https://github.com/cosmos/cosmos-sdk)
- [Delve](https://github.com/derekparker/delve)
- [Docker (distribution)](https://github.com/docker/distribution)
- [Etcd](https://etcd.io/)
- [Gardener](https://github.com/gardener/gardenctl)
- [Giant Swarm's gsctl](https://github.com/giantswarm/gsctl)
- [Git Bump](https://github.com/erdaltsksn/git-bump)
- [Github CLI](https://github.com/cli/cli)
- [GitHub Labeler](https://github.com/erdaltsksn/gh-label)
- [Golangci-lint](https://golangci-lint.run)
- [GopherJS](http://www.gopherjs.org/)
- [Helm](https://helm.sh)
- [Hugo](https://gohugo.io)
- [Istio](https://istio.io)
- [Kool](https://github.com/kool-dev/kool)
- [Kubernetes](http://kubernetes.io/)
- [Linkerd](https://linkerd.io/)
- [Mattermost-server](https://github.com/mattermost/mattermost-server)
- [Metal Stack CLI](https://github.com/metal-stack/metalctl)
- [Moby (former Docker)](https://github.com/moby/moby)
- [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack)
- [OpenShift](https://www.openshift.com/)
- [Pouch](https://github.com/alibaba/pouch)
- [ProjectAtomic (enterprise)](http://www.projectatomic.io/)
- [Prototool](https://github.com/uber/prototool)
- [Random](https://github.com/erdaltsksn/random)
- [Rclone](https://rclone.org/)
- [Skaffold](https://skaffold.dev/)
- [Tendermint](https://github.com/tendermint/tendermint)
- [Werf](https://werf.io/)
| 8,567 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/cobra/.travis.yml | language: go
stages:
- diff
- test
- build
go:
- 1.12.x
- 1.13.x
- tip
before_install:
- go get -u github.com/kyoh86/richgo
- go get -u github.com/mitchellh/gox
matrix:
allow_failures:
- go: tip
include:
- stage: diff
go: 1.13.x
script: make fmt
- stage: build
go: 1.13.x
script: make cobra_generator
script:
- make test
| 8,568 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/viper/go.mod | module github.com/spf13/viper
go 1.12
require (
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c
github.com/coreos/bbolt v1.3.2 // indirect
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e // indirect
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect
github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
github.com/fsnotify/fsnotify v1.4.7
github.com/gogo/protobuf v1.2.1 // indirect
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef // indirect
github.com/gorilla/websocket v1.4.2 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway v1.9.0 // indirect
github.com/hashicorp/hcl v1.0.0
github.com/jonboulle/clockwork v0.1.0 // indirect
github.com/magiconair/properties v1.8.1
github.com/mitchellh/mapstructure v1.1.2
github.com/pelletier/go-toml v1.2.0
github.com/prometheus/client_golang v0.9.3 // indirect
github.com/smartystreets/goconvey v1.6.4 // indirect
github.com/soheilhy/cmux v0.1.4 // indirect
github.com/spf13/afero v1.1.2
github.com/spf13/cast v1.3.0
github.com/spf13/jwalterweatherman v1.0.0
github.com/spf13/pflag v1.0.3
github.com/stretchr/testify v1.3.0
github.com/subosito/gotenv v1.2.0
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect
go.etcd.io/bbolt v1.3.2 // indirect
go.uber.org/atomic v1.4.0 // indirect
go.uber.org/multierr v1.1.0 // indirect
go.uber.org/zap v1.10.0 // indirect
gopkg.in/ini.v1 v1.51.0
gopkg.in/yaml.v2 v2.2.4
)
| 8,569 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/viper/README.md | 
[](https://github.com/avelino/awesome-go#configuration)
[](https://github.com/spf13/viper/actions?query=workflow%3ACI)
[](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[](https://goreportcard.com/report/github.com/spf13/viper)
[](https://pkg.go.dev/mod/github.com/spf13/viper)
**Go configuration with fangs!**
Many Go projects are built using Viper including:
* [Hugo](http://gohugo.io)
* [EMC RexRay](http://rexray.readthedocs.org/en/stable/)
* [Imgur’s Incus](https://github.com/Imgur/incus)
* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack)
* [Docker Notary](https://github.com/docker/Notary)
* [BloomApi](https://www.bloomapi.com/)
* [doctl](https://github.com/digitalocean/doctl)
* [Clairctl](https://github.com/jgsqware/clairctl)
* [Mercure](https://mercure.rocks)
## Install
```console
go get github.com/spf13/viper
```
## What is Viper?
Viper is a complete configuration solution for Go applications including 12-Factor apps. It is designed
to work within an application, and can handle all types of configuration needs
and formats. It supports:
* setting defaults
* reading from JSON, TOML, YAML, HCL, envfile and Java properties config files
* live watching and re-reading of config files (optional)
* reading from environment variables
* reading from remote config systems (etcd or Consul), and watching changes
* reading from command line flags
* reading from buffer
* setting explicit values
Viper can be thought of as a registry for all of your application's configuration needs.
## Why Viper?
When building a modern application, you don’t want to worry about
configuration file formats; you want to focus on building awesome software.
Viper is here to help with that.
Viper does the following for you:
1. Find, load, and unmarshal a configuration file in JSON, TOML, YAML, HCL, INI, envfile or Java properties formats.
2. Provide a mechanism to set default values for your different configuration options.
3. Provide a mechanism to set override values for options specified through command line flags.
4. Provide an alias system to easily rename parameters without breaking existing code.
5. Make it easy to tell the difference between when a user has provided a value via command line or config file and when that value is simply the default.
Viper uses the following precedence order. Each item takes precedence over the item below it:
* explicit call to `Set`
* flag
* env
* config
* key/value store
* default
**Important:** Viper configuration keys are case insensitive.
There are ongoing discussions about making that optional.
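For example, an explicit call to `Set` always wins over a default (a minimal sketch; the key name is illustrative):
```go
viper.SetDefault("verbose", false) // default: lowest precedence
viper.Set("verbose", true)         // explicit Set: highest precedence
viper.GetBool("verbose")           // true, because Set overrides the default
```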
## Putting Values into Viper
### Establishing Defaults
A good configuration system will support default values. A default value is not
required for a key, but it’s useful in the event that a key hasn't been set via
config file, environment variable, remote configuration or flag.
Examples:
```go
viper.SetDefault("ContentDir", "content")
viper.SetDefault("LayoutDir", "layouts")
viper.SetDefault("Taxonomies", map[string]string{"tag": "tags", "category": "categories"})
```
### Reading Config Files
Viper requires minimal configuration so it knows where to look for config files.
Viper supports JSON, TOML, YAML, HCL, INI, envfile and Java Properties files. Viper can search multiple paths, but
currently a single Viper instance only supports a single configuration file.
Viper does not default to any configuration search paths, leaving the decision about defaults to the application.
Here is an example of how to use Viper to search for and read a configuration file.
None of the specific paths are required, but at least one path should be provided
where a configuration file is expected.
```go
viper.SetConfigName("config") // name of config file (without extension)
viper.SetConfigType("yaml") // REQUIRED if the config file does not have the extension in the name
viper.AddConfigPath("/etc/appname/") // path to look for the config file in
viper.AddConfigPath("$HOME/.appname") // call multiple times to add many search paths
viper.AddConfigPath(".") // optionally look for config in the working directory
err := viper.ReadInConfig() // Find and read the config file
if err != nil { // Handle errors reading the config file
panic(fmt.Errorf("Fatal error config file: %s \n", err))
}
```
You can handle the specific case where no config file is found like this:
```go
if err := viper.ReadInConfig(); err != nil {
if _, ok := err.(viper.ConfigFileNotFoundError); ok {
// Config file not found; ignore error if desired
} else {
// Config file was found but another error was produced
}
}
// Config file found and successfully parsed
```
*NOTE [since 1.6]:* You can also have a file without an extension and specify the format programmatically. This is useful for configuration files that live in the user's home directory without any extension, such as `.bashrc`.
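For example (a sketch; the file name and format are illustrative):
```go
viper.AddConfigPath("$HOME")
viper.SetConfigName("myconfig") // the file is literally named "myconfig", with no extension
viper.SetConfigType("yaml")     // REQUIRED since the file name carries no extension
err := viper.ReadInConfig()
```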
### Writing Config Files
Reading from config files is useful, but at times you want to store all modifications made at run time.
For that, a bunch of commands are available, each with its own purpose:
* WriteConfig - writes the current viper configuration to the predefined path, if exists. Errors if no predefined path. Will overwrite the current config file, if it exists.
* SafeWriteConfig - writes the current viper configuration to the predefined path. Errors if no predefined path. Will not overwrite the current config file, if it exists.
* WriteConfigAs - writes the current viper configuration to the given filepath. Will overwrite the given file, if it exists.
* SafeWriteConfigAs - writes the current viper configuration to the given filepath. Will not overwrite the given file, if it exists.
As a rule of thumb, everything marked with safe won't overwrite any file, but will just create it if it doesn't exist, whilst the default behavior is to create or truncate.
A few small examples:
```go
viper.WriteConfig() // writes current config to predefined path set by 'viper.AddConfigPath()' and 'viper.SetConfigName'
viper.SafeWriteConfig()
viper.WriteConfigAs("/path/to/my/.config")
viper.SafeWriteConfigAs("/path/to/my/.config") // will error since it has already been written
viper.SafeWriteConfigAs("/path/to/my/.other_config")
```
### Watching and re-reading config files
Viper supports the ability to have your application live read a config file while running.
Gone are the days of needing to restart a server to have a config take effect;
viper-powered applications can read an update to a config file while running and
not miss a beat.
Simply tell the viper instance to `WatchConfig()`.
Optionally you can provide a function for Viper to run each time a change occurs.
**Make sure you add all of the configPaths prior to calling `WatchConfig()`**
```go
viper.WatchConfig()
viper.OnConfigChange(func(e fsnotify.Event) {
fmt.Println("Config file changed:", e.Name)
})
```
### Reading Config from io.Reader
Viper predefines many configuration sources such as files, environment
variables, flags, and remote K/V store, but you are not bound to them. You can
also implement your own required configuration source and feed it to viper.
```go
viper.SetConfigType("yaml") // or viper.SetConfigType("YAML")
// any approach to require this configuration into your program.
var yamlExample = []byte(`
Hacker: true
name: steve
hobbies:
- skateboarding
- snowboarding
- go
clothing:
jacket: leather
trousers: denim
age: 35
eyes : brown
beard: true
`)
viper.ReadConfig(bytes.NewBuffer(yamlExample))
viper.Get("name") // this would be "steve"
```
### Setting Overrides
These could be from a command line flag, or from your own application logic.
```go
viper.Set("Verbose", true)
viper.Set("LogFile", LogFile)
```
### Registering and Using Aliases
Aliases permit a single value to be referenced by multiple keys
```go
viper.RegisterAlias("loud", "Verbose")
viper.Set("verbose", true) // same result as next line
viper.Set("loud", true) // same result as prior line
viper.GetBool("loud") // true
viper.GetBool("verbose") // true
```
### Working with Environment Variables
Viper has full support for environment variables. This enables 12 factor
applications out of the box. There are five methods that exist to aid working
with ENV:
* `AutomaticEnv()`
* `BindEnv(string...) : error`
* `SetEnvPrefix(string)`
* `SetEnvKeyReplacer(string...) *strings.Replacer`
* `AllowEmptyEnv(bool)`
_When working with ENV variables, it’s important to recognize that Viper
treats ENV variables as case sensitive._
Viper provides a mechanism to try to ensure that ENV variables are unique. By
using `SetEnvPrefix`, you can tell Viper to use a prefix while reading from
the environment variables. Both `BindEnv` and `AutomaticEnv` will use this
prefix.
`BindEnv` takes one or two parameters. The first parameter is the key name, the
second is the name of the environment variable. The name of the environment
variable is case sensitive. If the ENV variable name is not provided, then
Viper will automatically assume that the ENV variable matches the following format: prefix + "_" + the key name in ALL CAPS. When you explicitly provide the ENV variable name (the second parameter),
it **does not** automatically add the prefix. For example if the second parameter is "id",
Viper will look for the ENV variable "ID".
One important thing to recognize when working with ENV variables is that the
value will be read each time it is accessed. Viper does not fix the value when
the `BindEnv` is called.
`AutomaticEnv` is a powerful helper especially when combined with
`SetEnvPrefix`. When called, Viper will check for an environment variable any
time a `viper.Get` request is made. It will apply the following rules. It will
check for an environment variable with a name matching the key uppercased and
prefixed with the `EnvPrefix` if set.
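A minimal sketch of `AutomaticEnv` combined with `SetEnvPrefix` (the key and variable names are illustrative):
```go
viper.SetEnvPrefix("spf") // will be uppercased automatically
viper.AutomaticEnv()
os.Setenv("SPF_PORT", "8080") // typically done outside of the app
port := viper.GetInt("port")  // 8080, resolved from SPF_PORT
```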
`SetEnvKeyReplacer` allows you to use a `strings.Replacer` object to rewrite Env
keys to an extent. This is useful if you want to use `-` or something in your
`Get()` calls, but want your environmental variables to use `_` delimiters. An
example of using it can be found in `viper_test.go`.
Alternatively, you can use `EnvKeyReplacer` with `NewWithOptions` factory function.
Unlike `SetEnvKeyReplacer`, it accepts a `StringReplacer` interface allowing you to write custom string replacing logic.
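A minimal sketch of `SetEnvKeyReplacer` (the key and variable names are illustrative):
```go
viper.SetEnvPrefix("spf")
viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
viper.AutomaticEnv()
os.Setenv("SPF_API_TOKEN", "secret")  // typically done outside of the app
token := viper.GetString("api-token") // looked up as SPF_API_TOKEN
```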
By default empty environment variables are considered unset and will fall back to
the next configuration source. To treat empty environment variables as set, use
the `AllowEmptyEnv` method.
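For example (a sketch, reusing the `id` key from the Env example below):
```go
viper.SetEnvPrefix("spf")
viper.BindEnv("id")
viper.AllowEmptyEnv(true)
os.Setenv("SPF_ID", "") // empty, but now treated as set
id := viper.Get("id")   // "" instead of falling back to the next source
```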
#### Env example
```go
SetEnvPrefix("spf") // will be uppercased automatically
BindEnv("id")
os.Setenv("SPF_ID", "13") // typically done outside of the app
id := Get("id") // 13
```
### Working with Flags
Viper has the ability to bind to flags. Specifically, Viper supports `Pflags`
as used in the [Cobra](https://github.com/spf13/cobra) library.
Like `BindEnv`, the value is not set when the binding method is called, but when
it is accessed. This means you can bind as early as you want, even in an
`init()` function.
For individual flags, the `BindPFlag()` method provides this functionality.
Example:
```go
serverCmd.Flags().Int("port", 1138, "Port to run Application server on")
viper.BindPFlag("port", serverCmd.Flags().Lookup("port"))
```
You can also bind an existing set of pflags (pflag.FlagSet):
Example:
```go
pflag.Int("flagname", 1234, "help message for flagname")
pflag.Parse()
viper.BindPFlags(pflag.CommandLine)
i := viper.GetInt("flagname") // retrieve values from viper instead of pflag
```
The use of [pflag](https://github.com/spf13/pflag/) in Viper does not preclude
the use of other packages that use the [flag](https://golang.org/pkg/flag/)
package from the standard library. The pflag package can handle the flags
defined for the flag package by importing these flags. This is accomplished by calling a convenience function provided by the pflag package, `AddGoFlagSet()`.
Example:
```go
package main
import (
	"flag"
	"github.com/spf13/pflag"
	"github.com/spf13/viper"
)
func main() {
// using standard library "flag" package
flag.Int("flagname", 1234, "help message for flagname")
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
pflag.Parse()
viper.BindPFlags(pflag.CommandLine)
i := viper.GetInt("flagname") // retrieve value from viper
...
}
```
#### Flag interfaces
Viper provides two Go interfaces to bind other flag systems if you don’t use `Pflags`.
`FlagValue` represents a single flag. This is a very simple example on how to implement this interface:
```go
type myFlag struct {}
func (f myFlag) HasChanged() bool { return false }
func (f myFlag) Name() string { return "my-flag-name" }
func (f myFlag) ValueString() string { return "my-flag-value" }
func (f myFlag) ValueType() string { return "string" }
```
Once your flag implements this interface, you can simply tell Viper to bind it:
```go
viper.BindFlagValue("my-flag-name", myFlag{})
```
`FlagValueSet` represents a group of flags. This is a very simple example on how to implement this interface:
```go
type myFlagSet struct {
flags []myFlag
}
func (f myFlagSet) VisitAll(fn func(FlagValue)) {
	for _, flag := range f.flags {
fn(flag)
}
}
```
Once your flag set implements this interface, you can simply tell Viper to bind it:
```go
fSet := myFlagSet{
flags: []myFlag{myFlag{}, myFlag{}},
}
viper.BindFlagValues("my-flags", fSet)
```
### Remote Key/Value Store Support
To enable remote support in Viper, do a blank import of the `viper/remote`
package:
`import _ "github.com/spf13/viper/remote"`
Viper will read a config string (as JSON, TOML, YAML, HCL or envfile) retrieved from a path
in a Key/Value store such as etcd or Consul. These values take precedence over
default values, but are overridden by configuration values retrieved from disk,
flags, or environment variables.
Viper uses [crypt](https://github.com/bketelsen/crypt) to retrieve
configuration from the K/V store, which means that you can store your
configuration values encrypted and have them automatically decrypted if you have
the correct gpg keyring. Encryption is optional.
You can use remote configuration in conjunction with local configuration, or
independently of it.
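As a rough sketch of combining the two (the endpoint and paths here are placeholders):
```go
// local file first (optional)
viper.SetConfigName("config")
viper.AddConfigPath(".")
_ = viper.ReadInConfig()
// then layer in the remote source; per the precedence rules above,
// values read from disk still override the remote ones
_ = viper.AddRemoteProvider("etcd", "http://127.0.0.1:4001", "/config/myapp.json")
viper.SetConfigType("json")
_ = viper.ReadRemoteConfig()
```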
`crypt` has a command-line helper that you can use to put configurations in your
K/V store. `crypt` defaults to etcd on http://127.0.0.1:4001.
```bash
$ go get github.com/bketelsen/crypt/bin/crypt
$ crypt set -plaintext /config/hugo.json /Users/hugo/settings/config.json
```
Confirm that your value was set:
```bash
$ crypt get -plaintext /config/hugo.json
```
See the `crypt` documentation for examples of how to set encrypted values, or
how to use Consul.
### Remote Key/Value Store Example - Unencrypted
#### etcd
```go
viper.AddRemoteProvider("etcd", "http://127.0.0.1:4001","/config/hugo.json")
viper.SetConfigType("json") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop", "env", "dotenv"
err := viper.ReadRemoteConfig()
```
#### Consul
You need to set a key in Consul's key/value store with a JSON value containing your desired config.
For example, create a Consul key/value store key `MY_CONSUL_KEY` with value:
```json
{
"port": 8080,
"hostname": "myhostname.com"
}
```
```go
viper.AddRemoteProvider("consul", "localhost:8500", "MY_CONSUL_KEY")
viper.SetConfigType("json") // Need to explicitly set this to json
err := viper.ReadRemoteConfig()
fmt.Println(viper.Get("port")) // 8080
fmt.Println(viper.Get("hostname")) // myhostname.com
```
#### Firestore
```go
viper.AddRemoteProvider("firestore", "google-cloud-project-id", "collection/document")
viper.SetConfigType("json") // Config's format: "json", "toml", "yaml", "yml"
err := viper.ReadRemoteConfig()
```
Of course, you're also allowed to use `AddSecureRemoteProvider`.
### Remote Key/Value Store Example - Encrypted
```go
viper.AddSecureRemoteProvider("etcd","http://127.0.0.1:4001","/config/hugo.json","/etc/secrets/mykeyring.gpg")
viper.SetConfigType("json") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop", "env", "dotenv"
err := viper.ReadRemoteConfig()
```
### Watching Changes in etcd - Unencrypted
```go
// alternatively, you can create a new viper instance.
var runtime_viper = viper.New()
runtime_viper.AddRemoteProvider("etcd", "http://127.0.0.1:4001", "/config/hugo.yml")
runtime_viper.SetConfigType("yaml") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop", "env", "dotenv"
// read from remote config the first time.
err := runtime_viper.ReadRemoteConfig()
// unmarshal config
runtime_viper.Unmarshal(&runtime_conf)
// open a goroutine to watch remote changes forever
go func(){
for {
time.Sleep(time.Second * 5) // delay after each request
// currently, only tested with etcd support
err := runtime_viper.WatchRemoteConfig()
if err != nil {
log.Errorf("unable to read remote config: %v", err)
continue
}
// unmarshal new config into our runtime config struct. you can also use channel
// to implement a signal to notify the system of the changes
runtime_viper.Unmarshal(&runtime_conf)
}
}()
```
## Getting Values From Viper
In Viper, there are a few ways to get a value depending on the value’s type.
The following functions and methods exist:
* `Get(key string) : interface{}`
* `GetBool(key string) : bool`
* `GetFloat64(key string) : float64`
* `GetInt(key string) : int`
* `GetIntSlice(key string) : []int`
* `GetString(key string) : string`
* `GetStringMap(key string) : map[string]interface{}`
* `GetStringMapString(key string) : map[string]string`
* `GetStringSlice(key string) : []string`
* `GetTime(key string) : time.Time`
* `GetDuration(key string) : time.Duration`
* `IsSet(key string) : bool`
* `AllSettings() : map[string]interface{}`
One important thing to recognize is that each Get function will return a zero
value if it’s not found. To check if a given key exists, the `IsSet()` method
has been provided.
Example:
```go
viper.GetString("logfile") // case-insensitive Setting & Getting
if viper.GetBool("verbose") {
fmt.Println("verbose enabled")
}
```
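A minimal sketch of guarding a lookup with `IsSet()` (the key name is only illustrative):
```go
if viper.IsSet("logfile") {
	fmt.Println("logging to", viper.GetString("logfile"))
} else {
	fmt.Println("logfile not configured, using stdout")
}
```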
### Accessing nested keys
The accessor methods also accept formatted paths to deeply nested keys. For
example, if the following JSON file is loaded:
```json
{
"host": {
"address": "localhost",
"port": 5799
},
"datastore": {
"metric": {
"host": "127.0.0.1",
"port": 3099
},
"warehouse": {
"host": "198.0.0.1",
"port": 2112
}
}
}
```
Viper can access a nested field by passing a `.` delimited path of keys:
```go
GetString("datastore.metric.host") // (returns "127.0.0.1")
```
This obeys the precedence rules established above; the search for the path
will cascade through the remaining configuration registries until found.
For example, given this configuration file, both `datastore.metric.host` and
`datastore.metric.port` are already defined (and may be overridden). If in addition
`datastore.metric.protocol` was defined in the defaults, Viper would also find it.
However, if `datastore.metric` was overridden (by a flag, an environment variable,
the `Set()` method, …) with an immediate value, then all sub-keys of `datastore.metric` become undefined; they are “shadowed” by the higher-priority configuration level.
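A small sketch of that shadowing effect, building on the configuration file above:
```go
GetString("datastore.metric.host") // "127.0.0.1", from the config file
// overriding the parent key with an immediate value shadows its sub-keys
Set("datastore.metric", "overridden")
GetString("datastore.metric.host") // "" (the nested key is now undefined)
```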
Lastly, if there exists a key that matches the delimited key path, its value
will be returned instead. E.g.
```json
{
"datastore.metric.host": "0.0.0.0",
"host": {
"address": "localhost",
"port": 5799
},
"datastore": {
"metric": {
"host": "127.0.0.1",
"port": 3099
},
"warehouse": {
"host": "198.0.0.1",
"port": 2112
}
}
}
GetString("datastore.metric.host") // returns "0.0.0.0"
```
### Extract sub-tree
You can extract a sub-tree from Viper's configuration.
For example, `viper` represents:
```yaml
app:
cache1:
max-items: 100
item-size: 64
cache2:
max-items: 200
item-size: 80
```
After executing:
```go
subv := viper.Sub("app.cache1")
```
`subv` represents:
```yaml
max-items: 100
item-size: 64
```
Suppose we have:
```go
func NewCache(cfg *Viper) *Cache {...}
```
which creates a cache based on config information formatted as `subv`.
Now it’s easy to create these 2 caches separately as:
```go
cfg1 := viper.Sub("app.cache1")
cache1 := NewCache(cfg1)
cfg2 := viper.Sub("app.cache2")
cache2 := NewCache(cfg2)
```
### Unmarshaling
You also have the option of Unmarshaling all or a specific value to a struct, map,
etc.
There are two methods to do this:
* `Unmarshal(rawVal interface{}) : error`
* `UnmarshalKey(key string, rawVal interface{}) : error`
Example:
```go
type config struct {
Port int
Name string
PathMap string `mapstructure:"path_map"`
}
var C config
err := viper.Unmarshal(&C)
if err != nil {
t.Fatalf("unable to decode into struct, %v", err)
}
```
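`UnmarshalKey` works the same way but decodes only a single section. A hedged sketch, reusing the `host` block from the nested-keys example above (the `hostConfig` struct is hypothetical):
```go
type hostConfig struct {
	Address string
	Port    int
}
var h hostConfig
if err := viper.UnmarshalKey("host", &h); err != nil {
	log.Fatalf("unable to decode host section: %v", err)
}
```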
If you want to unmarshal configuration where the keys themselves contain dot (the default key delimiter),
you have to change the delimiter:
```go
v := viper.NewWithOptions(viper.KeyDelimiter("::"))
v.SetDefault("chart::values", map[string]interface{}{
"ingress": map[string]interface{}{
"annotations": map[string]interface{}{
"traefik.frontend.rule.type": "PathPrefix",
"traefik.ingress.kubernetes.io/ssl-redirect": "true",
},
},
})
type config struct {
Chart struct{
Values map[string]interface{}
}
}
var C config
v.Unmarshal(&C)
```
Viper also supports unmarshaling into embedded structs:
```go
/*
Example config:
module:
enabled: true
token: 89h3f98hbwf987h3f98wenf89ehf
*/
type config struct {
Module struct {
Enabled bool
moduleConfig `mapstructure:",squash"`
}
}
// moduleConfig could be in a module specific package
type moduleConfig struct {
Token string
}
var C config
err := viper.Unmarshal(&C)
if err != nil {
t.Fatalf("unable to decode into struct, %v", err)
}
```
Viper uses [github.com/mitchellh/mapstructure](https://github.com/mitchellh/mapstructure) under the hood for unmarshaling values which uses `mapstructure` tags by default.
### Marshalling to string
You may need to marshal all the settings held in viper into a string rather than write them to a file.
You can use your favorite format's marshaller with the config returned by `AllSettings()`.
```go
import (
yaml "gopkg.in/yaml.v2"
// ...
)
func yamlStringSettings() string {
c := viper.AllSettings()
bs, err := yaml.Marshal(c)
if err != nil {
log.Fatalf("unable to marshal config to YAML: %v", err)
}
return string(bs)
}
```
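The same idea works with any other marshaller; for example, a JSON variant might look like this (the function name is just illustrative):
```go
import (
	"encoding/json"
	"log"
)
func jsonStringSettings() string {
	bs, err := json.Marshal(viper.AllSettings())
	if err != nil {
		log.Fatalf("unable to marshal config to JSON: %v", err)
	}
	return string(bs)
}
```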
## Viper or Vipers?
Viper comes ready to use out of the box. There is no configuration or
initialization needed to begin using Viper. Since most applications will want
to use a single central repository for their configuration, the viper package
provides this. It is similar to a singleton.
All of the examples above demonstrate using viper in its singleton-style approach.
### Working with multiple vipers
You can also create many different vipers for use in your application. Each will
have its own unique set of configurations and values. Each can read from a
different config file, key value store, etc. All of the functions that viper
package supports are mirrored as methods on a viper.
Example:
```go
x := viper.New()
y := viper.New()
x.SetDefault("ContentDir", "content")
y.SetDefault("ContentDir", "foobar")
//...
```
When working with multiple vipers, it is up to the user to keep track of the
different vipers.
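A brief sketch of two independent instances (file names and keys are placeholders):
```go
appViper := viper.New()
appViper.SetConfigName("app")
appViper.AddConfigPath("./config")
_ = appViper.ReadInConfig()
dbViper := viper.New()
dbViper.SetConfigName("database")
dbViper.AddConfigPath("./config")
_ = dbViper.ReadInConfig()
// each instance resolves keys only against its own sources
appPort := appViper.GetInt("port")
dbHost := dbViper.GetString("host")
```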
## Q & A
Q: Why is it called “Viper”?
A: Viper is designed to be a [companion](http://en.wikipedia.org/wiki/Viper_(G.I._Joe))
to [Cobra](https://github.com/spf13/cobra). While both can operate completely
independently, together they make a powerful pair to handle much of your
application foundation needs.
Q: Why is it called “Cobra”?
A: Is there a better name for a [commander](http://en.wikipedia.org/wiki/Cobra_Commander)?
| 8,570 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/viper/Makefile | # A Self-Documenting Makefile: http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html
OS = $(shell uname | tr A-Z a-z)
export PATH := $(abspath bin/):${PATH}
# Build variables
BUILD_DIR ?= build
export CGO_ENABLED ?= 0
export GOOS = $(shell go env GOOS)
ifeq (${VERBOSE}, 1)
ifeq ($(filter -v,${GOARGS}),)
GOARGS += -v
endif
TEST_FORMAT = short-verbose
endif
# Dependency versions
GOTESTSUM_VERSION = 0.4.0
GOLANGCI_VERSION = 1.21.0
# Add the ability to override some variables
# Use with care
-include override.mk
.PHONY: clear
clear: ## Clear the working area and the project
rm -rf bin/
.PHONY: check
check: test lint ## Run tests and linters
bin/gotestsum: bin/gotestsum-${GOTESTSUM_VERSION}
@ln -sf gotestsum-${GOTESTSUM_VERSION} bin/gotestsum
bin/gotestsum-${GOTESTSUM_VERSION}:
@mkdir -p bin
curl -L https://github.com/gotestyourself/gotestsum/releases/download/v${GOTESTSUM_VERSION}/gotestsum_${GOTESTSUM_VERSION}_${OS}_amd64.tar.gz | tar -zOxf - gotestsum > ./bin/gotestsum-${GOTESTSUM_VERSION} && chmod +x ./bin/gotestsum-${GOTESTSUM_VERSION}
TEST_PKGS ?= ./...
.PHONY: test
test: TEST_FORMAT ?= short
test: SHELL = /bin/bash
test: export CGO_ENABLED=1
test: bin/gotestsum ## Run tests
@mkdir -p ${BUILD_DIR}
bin/gotestsum --no-summary=skipped --junitfile ${BUILD_DIR}/coverage.xml --format ${TEST_FORMAT} -- -race -coverprofile=${BUILD_DIR}/coverage.txt -covermode=atomic $(filter-out -v,${GOARGS}) $(if ${TEST_PKGS},${TEST_PKGS},./...)
bin/golangci-lint: bin/golangci-lint-${GOLANGCI_VERSION}
@ln -sf golangci-lint-${GOLANGCI_VERSION} bin/golangci-lint
bin/golangci-lint-${GOLANGCI_VERSION}:
@mkdir -p bin
curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | bash -s -- -b ./bin/ v${GOLANGCI_VERSION}
@mv bin/golangci-lint $@
.PHONY: lint
lint: bin/golangci-lint ## Run linter
bin/golangci-lint run
.PHONY: fix
fix: bin/golangci-lint ## Fix lint violations
bin/golangci-lint run --fix
# Add custom targets here
-include custom.mk
.PHONY: list
list: ## List all make targets
@${MAKE} -pRrn : -f $(MAKEFILE_LIST) 2>/dev/null | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' | egrep -v -e '^[^[:alnum:]]' -e '^$@$$' | sort
.PHONY: help
.DEFAULT_GOAL := help
help:
@grep -h -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
# Variable outputting/exporting rules
var-%: ; @echo $($*)
varexport-%: ; @echo $*=$($*)
| 8,571 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/viper/util.go | // Copyright © 2014 Steve Francia <[email protected]>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// Viper is an application configuration system.
// It believes that applications can be configured a variety of ways
// via flags, ENVIRONMENT variables, configuration files retrieved
// from the file system, or a remote key/value store.
package viper
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"unicode"
"github.com/spf13/afero"
"github.com/spf13/cast"
jww "github.com/spf13/jwalterweatherman"
)
// ConfigParseError denotes failing to parse configuration file.
type ConfigParseError struct {
err error
}
// Error returns the formatted configuration error.
func (pe ConfigParseError) Error() string {
return fmt.Sprintf("While parsing config: %s", pe.err.Error())
}
// toCaseInsensitiveValue checks if the value is a map;
// if so, create a copy and lower-case the keys recursively.
func toCaseInsensitiveValue(value interface{}) interface{} {
switch v := value.(type) {
case map[interface{}]interface{}:
value = copyAndInsensitiviseMap(cast.ToStringMap(v))
case map[string]interface{}:
value = copyAndInsensitiviseMap(v)
}
return value
}
// copyAndInsensitiviseMap behaves like insensitiviseMap, but creates a copy of
// any map it makes case insensitive.
func copyAndInsensitiviseMap(m map[string]interface{}) map[string]interface{} {
nm := make(map[string]interface{})
for key, val := range m {
lkey := strings.ToLower(key)
switch v := val.(type) {
case map[interface{}]interface{}:
nm[lkey] = copyAndInsensitiviseMap(cast.ToStringMap(v))
case map[string]interface{}:
nm[lkey] = copyAndInsensitiviseMap(v)
default:
nm[lkey] = v
}
}
return nm
}
func insensitiviseMap(m map[string]interface{}) {
for key, val := range m {
switch val.(type) {
case map[interface{}]interface{}:
// nested map: cast and recursively insensitivise
val = cast.ToStringMap(val)
insensitiviseMap(val.(map[string]interface{}))
case map[string]interface{}:
// nested map: recursively insensitivise
insensitiviseMap(val.(map[string]interface{}))
}
lower := strings.ToLower(key)
if key != lower {
// remove old key (not lower-cased)
delete(m, key)
}
// update map
m[lower] = val
}
}
func absPathify(inPath string) string {
jww.INFO.Println("Trying to resolve absolute path to", inPath)
if strings.HasPrefix(inPath, "$HOME") {
inPath = userHomeDir() + inPath[5:]
}
if strings.HasPrefix(inPath, "$") {
end := strings.Index(inPath, string(os.PathSeparator))
inPath = os.Getenv(inPath[1:end]) + inPath[end:]
}
if filepath.IsAbs(inPath) {
return filepath.Clean(inPath)
}
p, err := filepath.Abs(inPath)
if err == nil {
return filepath.Clean(p)
}
jww.ERROR.Println("Couldn't discover absolute path")
jww.ERROR.Println(err)
return ""
}
// exists checks if the given path exists and is not a directory.
func exists(fs afero.Fs, path string) (bool, error) {
stat, err := fs.Stat(path)
if err == nil {
return !stat.IsDir(), nil
}
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
func stringInSlice(a string, list []string) bool {
for _, b := range list {
if b == a {
return true
}
}
return false
}
func userHomeDir() string {
if runtime.GOOS == "windows" {
home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
if home == "" {
home = os.Getenv("USERPROFILE")
}
return home
}
return os.Getenv("HOME")
}
func safeMul(a, b uint) uint {
c := a * b
if a > 1 && b > 1 && c/b != a {
return 0
}
return c
}
// parseSizeInBytes converts strings like 1GB or 12 mb into an unsigned integer number of bytes
func parseSizeInBytes(sizeStr string) uint {
sizeStr = strings.TrimSpace(sizeStr)
lastChar := len(sizeStr) - 1
multiplier := uint(1)
if lastChar > 0 {
if sizeStr[lastChar] == 'b' || sizeStr[lastChar] == 'B' {
if lastChar > 1 {
switch unicode.ToLower(rune(sizeStr[lastChar-1])) {
case 'k':
multiplier = 1 << 10
sizeStr = strings.TrimSpace(sizeStr[:lastChar-1])
case 'm':
multiplier = 1 << 20
sizeStr = strings.TrimSpace(sizeStr[:lastChar-1])
case 'g':
multiplier = 1 << 30
sizeStr = strings.TrimSpace(sizeStr[:lastChar-1])
default:
multiplier = 1
sizeStr = strings.TrimSpace(sizeStr[:lastChar])
}
}
}
}
size := cast.ToInt(sizeStr)
if size < 0 {
size = 0
}
return safeMul(uint(size), multiplier)
}
// deepSearch scans deep maps, following the key indexes listed in the
// sequence "path".
// The last value is expected to be another map, and is returned.
//
// In case intermediate keys do not exist, or map to a non-map value,
// a new map is created and inserted, and the search continues from there:
// the initial map "m" may be modified!
func deepSearch(m map[string]interface{}, path []string) map[string]interface{} {
for _, k := range path {
m2, ok := m[k]
if !ok {
// intermediate key does not exist
// => create it and continue from there
m3 := make(map[string]interface{})
m[k] = m3
m = m3
continue
}
m3, ok := m2.(map[string]interface{})
if !ok {
// intermediate key is a value
// => replace with a new map
m3 = make(map[string]interface{})
m[k] = m3
}
// continue search from here
m = m3
}
return m
}
| 8,572 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/viper/.golangci.yml | linters-settings:
golint:
min-confidence: 0.1
goimports:
local-prefixes: github.com/spf13/viper
linters:
enable-all: true
disable:
- funlen
- maligned
# TODO: fix me
- wsl
- gochecknoinits
- gosimple
- gochecknoglobals
- errcheck
- lll
- godox
- scopelint
- gocyclo
- gocognit
- gocritic
service:
golangci-lint-version: 1.21.x
| 8,573 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/viper/LICENSE | The MIT License (MIT)
Copyright (c) 2014 Steve Francia
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. | 8,574 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/viper/viper.go | // Copyright © 2014 Steve Francia <[email protected]>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// Viper is an application configuration system.
// It believes that applications can be configured a variety of ways
// via flags, ENVIRONMENT variables, configuration files retrieved
// from the file system, or a remote key/value store.
// Each item takes precedence over the item below it:
// overrides
// flag
// env
// config
// key/value store
// default
package viper
import (
"bytes"
"encoding/csv"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"os"
"path/filepath"
"reflect"
"strings"
"sync"
"time"
"github.com/fsnotify/fsnotify"
"github.com/hashicorp/hcl"
"github.com/hashicorp/hcl/hcl/printer"
"github.com/magiconair/properties"
"github.com/mitchellh/mapstructure"
"github.com/pelletier/go-toml"
"github.com/spf13/afero"
"github.com/spf13/cast"
jww "github.com/spf13/jwalterweatherman"
"github.com/spf13/pflag"
"github.com/subosito/gotenv"
"gopkg.in/ini.v1"
"gopkg.in/yaml.v2"
)
// ConfigMarshalError happens when failing to marshal the configuration.
type ConfigMarshalError struct {
err error
}
// Error returns the formatted configuration error.
func (e ConfigMarshalError) Error() string {
return fmt.Sprintf("While marshaling config: %s", e.err.Error())
}
var v *Viper
type RemoteResponse struct {
Value []byte
Error error
}
func init() {
v = New()
}
type remoteConfigFactory interface {
Get(rp RemoteProvider) (io.Reader, error)
Watch(rp RemoteProvider) (io.Reader, error)
WatchChannel(rp RemoteProvider) (<-chan *RemoteResponse, chan bool)
}
// RemoteConfig is optional, see the remote package
var RemoteConfig remoteConfigFactory
// UnsupportedConfigError denotes encountering an unsupported
// configuration filetype.
type UnsupportedConfigError string
// Error returns the formatted configuration error.
func (str UnsupportedConfigError) Error() string {
return fmt.Sprintf("Unsupported Config Type %q", string(str))
}
// UnsupportedRemoteProviderError denotes encountering an unsupported remote
// provider. Currently etcd, Consul and firestore are supported.
type UnsupportedRemoteProviderError string
// Error returns the formatted remote provider error.
func (str UnsupportedRemoteProviderError) Error() string {
return fmt.Sprintf("Unsupported Remote Provider Type %q", string(str))
}
// RemoteConfigError denotes encountering an error while trying to
// pull the configuration from the remote provider.
type RemoteConfigError string
// Error returns the formatted remote provider error
func (rce RemoteConfigError) Error() string {
return fmt.Sprintf("Remote Configurations Error: %s", string(rce))
}
// ConfigFileNotFoundError denotes failing to find configuration file.
type ConfigFileNotFoundError struct {
name, locations string
}
// Error returns the formatted configuration error.
func (fnfe ConfigFileNotFoundError) Error() string {
return fmt.Sprintf("Config File %q Not Found in %q", fnfe.name, fnfe.locations)
}
// ConfigFileAlreadyExistsError denotes failure to write new configuration file.
type ConfigFileAlreadyExistsError string
// Error returns the formatted error when configuration already exists.
func (faee ConfigFileAlreadyExistsError) Error() string {
return fmt.Sprintf("Config File %q Already Exists", string(faee))
}
// A DecoderConfigOption can be passed to viper.Unmarshal to configure
// mapstructure.DecoderConfig options
type DecoderConfigOption func(*mapstructure.DecoderConfig)
// DecodeHook returns a DecoderConfigOption which overrides the default
// DecoderConfig.DecodeHook value, the default is:
//
// mapstructure.ComposeDecodeHookFunc(
// mapstructure.StringToTimeDurationHookFunc(),
// mapstructure.StringToSliceHookFunc(","),
// )
func DecodeHook(hook mapstructure.DecodeHookFunc) DecoderConfigOption {
return func(c *mapstructure.DecoderConfig) {
c.DecodeHook = hook
}
}
// Viper is a prioritized configuration registry. It
// maintains a set of configuration sources, fetches
// values to populate those, and provides them according
// to the source's priority.
// The priority of the sources is the following:
// 1. overrides
// 2. flags
// 3. env. variables
// 4. config file
// 5. key/value store
// 6. defaults
//
// For example, if values from the following sources were loaded:
//
// Defaults : {
// "secret": "",
// "user": "default",
// "endpoint": "https://localhost"
// }
// Config : {
// "user": "root"
// "secret": "defaultsecret"
// }
// Env : {
// "secret": "somesecretkey"
// }
//
// The resulting config will have the following values:
//
// {
// "secret": "somesecretkey",
// "user": "root",
// "endpoint": "https://localhost"
// }
type Viper struct {
// Delimiter that separates a list of keys
// used to access a nested value in one go
keyDelim string
// A set of paths to look for the config file in
configPaths []string
// The filesystem to read config from.
fs afero.Fs
// A set of remote providers to search for the configuration
remoteProviders []*defaultRemoteProvider
// Name of file to look for inside the path
configName string
configFile string
configType string
configPermissions os.FileMode
envPrefix string
automaticEnvApplied bool
envKeyReplacer StringReplacer
allowEmptyEnv bool
config map[string]interface{}
override map[string]interface{}
defaults map[string]interface{}
kvstore map[string]interface{}
pflags map[string]FlagValue
env map[string]string
aliases map[string]string
typeByDefValue bool
// Store read properties on the object so that we can write back in order with comments.
// This will only be used if the configuration read is a properties file.
properties *properties.Properties
onConfigChange func(fsnotify.Event)
}
// New returns an initialized Viper instance.
func New() *Viper {
v := new(Viper)
v.keyDelim = "."
v.configName = "config"
v.configPermissions = os.FileMode(0644)
v.fs = afero.NewOsFs()
v.config = make(map[string]interface{})
v.override = make(map[string]interface{})
v.defaults = make(map[string]interface{})
v.kvstore = make(map[string]interface{})
v.pflags = make(map[string]FlagValue)
v.env = make(map[string]string)
v.aliases = make(map[string]string)
v.typeByDefValue = false
return v
}
// Option configures Viper using the functional options paradigm popularized by Rob Pike and Dave Cheney.
// If you're unfamiliar with this style,
// see https://commandcenter.blogspot.com/2014/01/self-referential-functions-and-design.html and
// https://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis.
type Option interface {
apply(v *Viper)
}
type optionFunc func(v *Viper)
func (fn optionFunc) apply(v *Viper) {
fn(v)
}
// KeyDelimiter sets the delimiter used for determining key parts.
// By default its value is ".".
func KeyDelimiter(d string) Option {
return optionFunc(func(v *Viper) {
v.keyDelim = d
})
}
// StringReplacer applies a set of replacements to a string.
type StringReplacer interface {
// Replace returns a copy of s with all replacements performed.
Replace(s string) string
}
// EnvKeyReplacer sets a replacer used for mapping environment variables to internal keys.
func EnvKeyReplacer(r StringReplacer) Option {
return optionFunc(func(v *Viper) {
v.envKeyReplacer = r
})
}
// NewWithOptions creates a new Viper instance.
func NewWithOptions(opts ...Option) *Viper {
v := New()
for _, opt := range opts {
opt.apply(v)
}
return v
}
// Reset is intended for testing, will reset all to default settings.
// In the public interface for the viper package so applications
// can use it in their testing as well.
func Reset() {
v = New()
SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "dotenv", "env", "ini"}
SupportedRemoteProviders = []string{"etcd", "consul", "firestore"}
}
type defaultRemoteProvider struct {
provider string
endpoint string
path string
secretKeyring string
}
func (rp defaultRemoteProvider) Provider() string {
return rp.provider
}
func (rp defaultRemoteProvider) Endpoint() string {
return rp.endpoint
}
func (rp defaultRemoteProvider) Path() string {
return rp.path
}
func (rp defaultRemoteProvider) SecretKeyring() string {
return rp.secretKeyring
}
// RemoteProvider stores the configuration necessary
// to connect to a remote key/value store.
// Optional secretKeyring to unencrypt encrypted values
// can be provided.
type RemoteProvider interface {
Provider() string
Endpoint() string
Path() string
SecretKeyring() string
}
// SupportedExts are universally supported extensions.
var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "dotenv", "env", "ini"}
// SupportedRemoteProviders are universally supported remote providers.
var SupportedRemoteProviders = []string{"etcd", "consul", "firestore"}
func OnConfigChange(run func(in fsnotify.Event)) { v.OnConfigChange(run) }
func (v *Viper) OnConfigChange(run func(in fsnotify.Event)) {
v.onConfigChange = run
}
func WatchConfig() { v.WatchConfig() }
func (v *Viper) WatchConfig() {
initWG := sync.WaitGroup{}
initWG.Add(1)
go func() {
watcher, err := fsnotify.NewWatcher()
if err != nil {
log.Fatal(err)
}
defer watcher.Close()
// we have to watch the entire directory to pick up renames/atomic saves in a cross-platform way
filename, err := v.getConfigFile()
if err != nil {
log.Printf("error: %v\n", err)
initWG.Done()
return
}
configFile := filepath.Clean(filename)
configDir, _ := filepath.Split(configFile)
realConfigFile, _ := filepath.EvalSymlinks(filename)
eventsWG := sync.WaitGroup{}
eventsWG.Add(1)
go func() {
for {
select {
case event, ok := <-watcher.Events:
if !ok { // 'Events' channel is closed
eventsWG.Done()
return
}
currentConfigFile, _ := filepath.EvalSymlinks(filename)
// we only care about the config file with the following cases:
// 1 - if the config file was modified or created
// 2 - if the real path to the config file changed (eg: k8s ConfigMap replacement)
const writeOrCreateMask = fsnotify.Write | fsnotify.Create
if (filepath.Clean(event.Name) == configFile &&
event.Op&writeOrCreateMask != 0) ||
(currentConfigFile != "" && currentConfigFile != realConfigFile) {
realConfigFile = currentConfigFile
err := v.ReadInConfig()
if err != nil {
log.Printf("error reading config file: %v\n", err)
}
if v.onConfigChange != nil {
v.onConfigChange(event)
}
} else if filepath.Clean(event.Name) == configFile &&
							event.Op&fsnotify.Remove != 0 {
eventsWG.Done()
return
}
case err, ok := <-watcher.Errors:
if ok { // 'Errors' channel is not closed
log.Printf("watcher error: %v\n", err)
}
eventsWG.Done()
return
}
}
}()
watcher.Add(configDir)
initWG.Done() // done initializing the watch in this go routine, so the parent routine can move on...
eventsWG.Wait() // now, wait for event loop to end in this go-routine...
}()
initWG.Wait() // make sure that the go routine above fully ended before returning
}
// SetConfigFile explicitly defines the path, name and extension of the config file.
// Viper will use this and not check any of the config paths.
func SetConfigFile(in string) { v.SetConfigFile(in) }
func (v *Viper) SetConfigFile(in string) {
if in != "" {
v.configFile = in
}
}
// SetEnvPrefix defines a prefix that ENVIRONMENT variables will use.
// E.g. if your prefix is "spf", the env registry will look for env
// variables that start with "SPF_".
func SetEnvPrefix(in string) { v.SetEnvPrefix(in) }
func (v *Viper) SetEnvPrefix(in string) {
if in != "" {
v.envPrefix = in
}
}
func (v *Viper) mergeWithEnvPrefix(in string) string {
if v.envPrefix != "" {
return strings.ToUpper(v.envPrefix + "_" + in)
}
return strings.ToUpper(in)
}
// AllowEmptyEnv tells Viper to consider set,
// but empty environment variables as valid values instead of falling back.
// For backward compatibility reasons this is false by default.
func AllowEmptyEnv(allowEmptyEnv bool) { v.AllowEmptyEnv(allowEmptyEnv) }
func (v *Viper) AllowEmptyEnv(allowEmptyEnv bool) {
v.allowEmptyEnv = allowEmptyEnv
}
// TODO: should the getEnv logic be moved into find()? Key rewriting could be
// generalized to many things, e.g. Get('someKey') -> some_key
// (camel case to snake case for JSON keys perhaps).
// getEnv is a wrapper around os.Getenv which replaces characters in the original
// key. This allows env vars which have different keys than the config object
// keys.
func (v *Viper) getEnv(key string) (string, bool) {
if v.envKeyReplacer != nil {
key = v.envKeyReplacer.Replace(key)
}
val, ok := os.LookupEnv(key)
return val, ok && (v.allowEmptyEnv || val != "")
}
// ConfigFileUsed returns the file used to populate the config registry.
func ConfigFileUsed() string { return v.ConfigFileUsed() }
func (v *Viper) ConfigFileUsed() string { return v.configFile }
// AddConfigPath adds a path for Viper to search for the config file in.
// Can be called multiple times to define multiple search paths.
func AddConfigPath(in string) { v.AddConfigPath(in) }
func (v *Viper) AddConfigPath(in string) {
if in != "" {
absin := absPathify(in)
jww.INFO.Println("adding", absin, "to paths to search")
if !stringInSlice(absin, v.configPaths) {
v.configPaths = append(v.configPaths, absin)
}
}
}
// AddRemoteProvider adds a remote configuration source.
// Remote Providers are searched in the order they are added.
// provider is a string value: "etcd", "consul" or "firestore" are currently supported.
// endpoint is the url. etcd requires http://ip:port consul requires ip:port
// path is the path in the k/v store to retrieve configuration
// To retrieve a config file called myapp.json from /configs/myapp.json
// you should set path to /configs and set config name (SetConfigName()) to
// "myapp"
func AddRemoteProvider(provider, endpoint, path string) error {
return v.AddRemoteProvider(provider, endpoint, path)
}
func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error {
if !stringInSlice(provider, SupportedRemoteProviders) {
return UnsupportedRemoteProviderError(provider)
}
if provider != "" && endpoint != "" {
jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint)
rp := &defaultRemoteProvider{
endpoint: endpoint,
provider: provider,
path: path,
}
if !v.providerPathExists(rp) {
v.remoteProviders = append(v.remoteProviders, rp)
}
}
return nil
}
// AddSecureRemoteProvider adds a remote configuration source.
// Secure Remote Providers are searched in the order they are added.
// provider is a string value: "etcd", "consul" or "firestore" are currently supported.
// endpoint is the url. etcd requires http://ip:port consul requires ip:port
// secretkeyring is the filepath to your openpgp secret keyring. e.g. /etc/secrets/myring.gpg
// path is the path in the k/v store to retrieve configuration
// To retrieve a config file called myapp.json from /configs/myapp.json
// you should set path to /configs and set config name (SetConfigName()) to
// "myapp"
// Secure Remote Providers are implemented with github.com/bketelsen/crypt
func AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error {
return v.AddSecureRemoteProvider(provider, endpoint, path, secretkeyring)
}
func (v *Viper) AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error {
if !stringInSlice(provider, SupportedRemoteProviders) {
return UnsupportedRemoteProviderError(provider)
}
if provider != "" && endpoint != "" {
jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint)
rp := &defaultRemoteProvider{
endpoint: endpoint,
provider: provider,
path: path,
secretKeyring: secretkeyring,
}
if !v.providerPathExists(rp) {
v.remoteProviders = append(v.remoteProviders, rp)
}
}
return nil
}
func (v *Viper) providerPathExists(p *defaultRemoteProvider) bool {
for _, y := range v.remoteProviders {
if reflect.DeepEqual(y, p) {
return true
}
}
return false
}
// searchMap recursively searches for a value for path in source map.
// Returns nil if not found.
// Note: This assumes that the path entries and map keys are lower cased.
func (v *Viper) searchMap(source map[string]interface{}, path []string) interface{} {
if len(path) == 0 {
return source
}
next, ok := source[path[0]]
if ok {
// Fast path
if len(path) == 1 {
return next
}
// Nested case
switch next.(type) {
case map[interface{}]interface{}:
return v.searchMap(cast.ToStringMap(next), path[1:])
case map[string]interface{}:
// Type assertion is safe here since it is only reached
// if the type of `next` is the same as the type being asserted
return v.searchMap(next.(map[string]interface{}), path[1:])
default:
// got a value but nested key expected, return "nil" for not found
return nil
}
}
return nil
}
// searchMapWithPathPrefixes recursively searches for a value for path in source map.
//
// While searchMap() considers each path element as a single map key, this
// function searches for, and prioritizes, merged path elements.
// e.g., if in the source, "foo" is defined with a sub-key "bar", and "foo.bar"
// is also defined, this latter value is returned for path ["foo", "bar"].
//
// This should be useful only at config level (other maps may not contain dots
// in their keys).
//
// Note: This assumes that the path entries and map keys are lower cased.
func (v *Viper) searchMapWithPathPrefixes(source map[string]interface{}, path []string) interface{} {
if len(path) == 0 {
return source
}
// search for path prefixes, starting from the longest one
for i := len(path); i > 0; i-- {
prefixKey := strings.ToLower(strings.Join(path[0:i], v.keyDelim))
next, ok := source[prefixKey]
if ok {
// Fast path
if i == len(path) {
return next
}
// Nested case
var val interface{}
switch next.(type) {
case map[interface{}]interface{}:
val = v.searchMapWithPathPrefixes(cast.ToStringMap(next), path[i:])
case map[string]interface{}:
// Type assertion is safe here since it is only reached
// if the type of `next` is the same as the type being asserted
val = v.searchMapWithPathPrefixes(next.(map[string]interface{}), path[i:])
default:
// got a value but nested key expected, do nothing and look for next prefix
}
if val != nil {
return val
}
}
}
// not found
return nil
}
// isPathShadowedInDeepMap makes sure the given path is not shadowed somewhere
// on its path in the map.
// e.g., if "foo.bar" has a value in the given map, it “shadows”
// "foo.bar.baz" in a lower-priority map
func (v *Viper) isPathShadowedInDeepMap(path []string, m map[string]interface{}) string {
var parentVal interface{}
for i := 1; i < len(path); i++ {
parentVal = v.searchMap(m, path[0:i])
if parentVal == nil {
// not found, no need to add more path elements
return ""
}
switch parentVal.(type) {
case map[interface{}]interface{}:
continue
case map[string]interface{}:
continue
default:
// parentVal is a regular value which shadows "path"
return strings.Join(path[0:i], v.keyDelim)
}
}
return ""
}
// isPathShadowedInFlatMap makes sure the given path is not shadowed somewhere
// in a sub-path of the map.
// e.g., if "foo.bar" has a value in the given map, it “shadows”
// "foo.bar.baz" in a lower-priority map
func (v *Viper) isPathShadowedInFlatMap(path []string, mi interface{}) string {
// unify input map
var m map[string]interface{}
switch mi.(type) {
case map[string]string, map[string]FlagValue:
m = cast.ToStringMap(mi)
default:
return ""
}
// scan paths
var parentKey string
for i := 1; i < len(path); i++ {
parentKey = strings.Join(path[0:i], v.keyDelim)
if _, ok := m[parentKey]; ok {
return parentKey
}
}
return ""
}
// isPathShadowedInAutoEnv makes sure the given path is not shadowed somewhere
// in the environment, when automatic env is on.
// e.g., if "foo.bar" has a value in the environment, it “shadows”
// "foo.bar.baz" in a lower-priority map
func (v *Viper) isPathShadowedInAutoEnv(path []string) string {
var parentKey string
for i := 1; i < len(path); i++ {
parentKey = strings.Join(path[0:i], v.keyDelim)
if _, ok := v.getEnv(v.mergeWithEnvPrefix(parentKey)); ok {
return parentKey
}
}
return ""
}
// SetTypeByDefaultValue enables or disables the inference of a key value's
// type when the Get function is used based upon a key's default value as
// opposed to the value returned based on the normal fetch logic.
//
// For example, if a key has a default value of []string{} and the same key
// is set via an environment variable to "a b c", a call to the Get function
// would return a string slice for the key if the key's type is inferred by
// the default value and the Get function would return:
//
// []string {"a", "b", "c"}
//
// Otherwise the Get function would return:
//
// "a b c"
func SetTypeByDefaultValue(enable bool) { v.SetTypeByDefaultValue(enable) }
func (v *Viper) SetTypeByDefaultValue(enable bool) {
v.typeByDefValue = enable
}
// GetViper gets the global Viper instance.
func GetViper() *Viper {
return v
}
// Get can retrieve any value given the key to use.
// Get is case-insensitive for a key.
// Get has the behavior of returning the value associated with the first
// place from where it is set. Viper will check in the following order:
// override, flag, env, config file, key/value store, default
//
// Get returns an interface. For a specific value use one of the Get____ methods.
func Get(key string) interface{} { return v.Get(key) }
func (v *Viper) Get(key string) interface{} {
lcaseKey := strings.ToLower(key)
val := v.find(lcaseKey, true)
if val == nil {
return nil
}
if v.typeByDefValue {
// TODO(bep) this branch isn't covered by a single test.
valType := val
path := strings.Split(lcaseKey, v.keyDelim)
defVal := v.searchMap(v.defaults, path)
if defVal != nil {
valType = defVal
}
switch valType.(type) {
case bool:
return cast.ToBool(val)
case string:
return cast.ToString(val)
case int32, int16, int8, int:
return cast.ToInt(val)
case uint:
return cast.ToUint(val)
case uint32:
return cast.ToUint32(val)
case uint64:
return cast.ToUint64(val)
case int64:
return cast.ToInt64(val)
case float64, float32:
return cast.ToFloat64(val)
case time.Time:
return cast.ToTime(val)
case time.Duration:
return cast.ToDuration(val)
case []string:
return cast.ToStringSlice(val)
case []int:
return cast.ToIntSlice(val)
}
}
return val
}
// Sub returns new Viper instance representing a sub tree of this instance.
// Sub is case-insensitive for a key.
func Sub(key string) *Viper { return v.Sub(key) }
func (v *Viper) Sub(key string) *Viper {
subv := New()
data := v.Get(key)
if data == nil {
return nil
}
if reflect.TypeOf(data).Kind() == reflect.Map {
subv.config = cast.ToStringMap(data)
return subv
}
return nil
}
// GetString returns the value associated with the key as a string.
func GetString(key string) string { return v.GetString(key) }
func (v *Viper) GetString(key string) string {
return cast.ToString(v.Get(key))
}
// GetBool returns the value associated with the key as a boolean.
func GetBool(key string) bool { return v.GetBool(key) }
func (v *Viper) GetBool(key string) bool {
return cast.ToBool(v.Get(key))
}
// GetInt returns the value associated with the key as an integer.
func GetInt(key string) int { return v.GetInt(key) }
func (v *Viper) GetInt(key string) int {
return cast.ToInt(v.Get(key))
}
// GetInt32 returns the value associated with the key as an integer.
func GetInt32(key string) int32 { return v.GetInt32(key) }
func (v *Viper) GetInt32(key string) int32 {
return cast.ToInt32(v.Get(key))
}
// GetInt64 returns the value associated with the key as an integer.
func GetInt64(key string) int64 { return v.GetInt64(key) }
func (v *Viper) GetInt64(key string) int64 {
return cast.ToInt64(v.Get(key))
}
// GetUint returns the value associated with the key as an unsigned integer.
func GetUint(key string) uint { return v.GetUint(key) }
func (v *Viper) GetUint(key string) uint {
return cast.ToUint(v.Get(key))
}
// GetUint32 returns the value associated with the key as an unsigned integer.
func GetUint32(key string) uint32 { return v.GetUint32(key) }
func (v *Viper) GetUint32(key string) uint32 {
return cast.ToUint32(v.Get(key))
}
// GetUint64 returns the value associated with the key as an unsigned integer.
func GetUint64(key string) uint64 { return v.GetUint64(key) }
func (v *Viper) GetUint64(key string) uint64 {
return cast.ToUint64(v.Get(key))
}
// GetFloat64 returns the value associated with the key as a float64.
func GetFloat64(key string) float64 { return v.GetFloat64(key) }
func (v *Viper) GetFloat64(key string) float64 {
return cast.ToFloat64(v.Get(key))
}
// GetTime returns the value associated with the key as time.
func GetTime(key string) time.Time { return v.GetTime(key) }
func (v *Viper) GetTime(key string) time.Time {
return cast.ToTime(v.Get(key))
}
// GetDuration returns the value associated with the key as a duration.
func GetDuration(key string) time.Duration { return v.GetDuration(key) }
func (v *Viper) GetDuration(key string) time.Duration {
return cast.ToDuration(v.Get(key))
}
// GetIntSlice returns the value associated with the key as a slice of int values.
func GetIntSlice(key string) []int { return v.GetIntSlice(key) }
func (v *Viper) GetIntSlice(key string) []int {
return cast.ToIntSlice(v.Get(key))
}
// GetStringSlice returns the value associated with the key as a slice of strings.
func GetStringSlice(key string) []string { return v.GetStringSlice(key) }
func (v *Viper) GetStringSlice(key string) []string {
return cast.ToStringSlice(v.Get(key))
}
// GetStringMap returns the value associated with the key as a map of interfaces.
func GetStringMap(key string) map[string]interface{} { return v.GetStringMap(key) }
func (v *Viper) GetStringMap(key string) map[string]interface{} {
return cast.ToStringMap(v.Get(key))
}
// GetStringMapString returns the value associated with the key as a map of strings.
func GetStringMapString(key string) map[string]string { return v.GetStringMapString(key) }
func (v *Viper) GetStringMapString(key string) map[string]string {
return cast.ToStringMapString(v.Get(key))
}
// GetStringMapStringSlice returns the value associated with the key as a map to a slice of strings.
func GetStringMapStringSlice(key string) map[string][]string { return v.GetStringMapStringSlice(key) }
func (v *Viper) GetStringMapStringSlice(key string) map[string][]string {
return cast.ToStringMapStringSlice(v.Get(key))
}
// GetSizeInBytes returns the size of the value associated with the given key
// in bytes.
func GetSizeInBytes(key string) uint { return v.GetSizeInBytes(key) }
func (v *Viper) GetSizeInBytes(key string) uint {
sizeStr := cast.ToString(v.Get(key))
return parseSizeInBytes(sizeStr)
}
// UnmarshalKey takes a single key and unmarshals it into a Struct.
func UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error {
return v.UnmarshalKey(key, rawVal, opts...)
}
func (v *Viper) UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error {
err := decode(v.Get(key), defaultDecoderConfig(rawVal, opts...))
if err != nil {
return err
}
return nil
}
// Unmarshal unmarshals the config into a Struct. Make sure that the tags
// on the fields of the structure are properly set.
func Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error {
return v.Unmarshal(rawVal, opts...)
}
func (v *Viper) Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error {
err := decode(v.AllSettings(), defaultDecoderConfig(rawVal, opts...))
if err != nil {
return err
}
return nil
}
// defaultDecoderConfig returns a default mapstructure.DecoderConfig with support
// for time.Duration values & string slices.
func defaultDecoderConfig(output interface{}, opts ...DecoderConfigOption) *mapstructure.DecoderConfig {
c := &mapstructure.DecoderConfig{
Metadata: nil,
Result: output,
WeaklyTypedInput: true,
DecodeHook: mapstructure.ComposeDecodeHookFunc(
mapstructure.StringToTimeDurationHookFunc(),
mapstructure.StringToSliceHookFunc(","),
),
}
for _, opt := range opts {
opt(c)
}
return c
}
// A wrapper around mapstructure.Decode that mimics the WeakDecode functionality
func decode(input interface{}, config *mapstructure.DecoderConfig) error {
decoder, err := mapstructure.NewDecoder(config)
if err != nil {
return err
}
return decoder.Decode(input)
}
// UnmarshalExact unmarshals the config into a Struct, erroring if a field is nonexistent
// in the destination struct.
func UnmarshalExact(rawVal interface{}, opts ...DecoderConfigOption) error {
return v.UnmarshalExact(rawVal, opts...)
}
func (v *Viper) UnmarshalExact(rawVal interface{}, opts ...DecoderConfigOption) error {
config := defaultDecoderConfig(rawVal, opts...)
config.ErrorUnused = true
err := decode(v.AllSettings(), config)
if err != nil {
return err
}
return nil
}
// BindPFlags binds a full flag set to the configuration, using each flag's long
// name as the config key.
func BindPFlags(flags *pflag.FlagSet) error { return v.BindPFlags(flags) }
func (v *Viper) BindPFlags(flags *pflag.FlagSet) error {
return v.BindFlagValues(pflagValueSet{flags})
}
// BindPFlag binds a specific key to a pflag (as used by cobra).
// Example (where serverCmd is a Cobra instance):
//
// serverCmd.Flags().Int("port", 1138, "Port to run Application server on")
// Viper.BindPFlag("port", serverCmd.Flags().Lookup("port"))
//
func BindPFlag(key string, flag *pflag.Flag) error { return v.BindPFlag(key, flag) }
func (v *Viper) BindPFlag(key string, flag *pflag.Flag) error {
return v.BindFlagValue(key, pflagValue{flag})
}
// BindFlagValues binds a full FlagValue set to the configuration, using each flag's long
// name as the config key.
func BindFlagValues(flags FlagValueSet) error { return v.BindFlagValues(flags) }
func (v *Viper) BindFlagValues(flags FlagValueSet) (err error) {
flags.VisitAll(func(flag FlagValue) {
if err = v.BindFlagValue(flag.Name(), flag); err != nil {
return
}
})
return nil
}
// BindFlagValue binds a specific key to a FlagValue.
func BindFlagValue(key string, flag FlagValue) error { return v.BindFlagValue(key, flag) }
func (v *Viper) BindFlagValue(key string, flag FlagValue) error {
if flag == nil {
return fmt.Errorf("flag for %q is nil", key)
}
v.pflags[strings.ToLower(key)] = flag
return nil
}
// BindEnv binds a Viper key to an ENV variable.
// ENV variables are case sensitive.
// If only a key is provided, it will use the env key matching the key, uppercased.
// If the env name is not provided, the EnvPrefix will be used when set.
func BindEnv(input ...string) error { return v.BindEnv(input...) }
func (v *Viper) BindEnv(input ...string) error {
var key, envkey string
if len(input) == 0 {
return fmt.Errorf("missing key to bind to")
}
key = strings.ToLower(input[0])
if len(input) == 1 {
envkey = v.mergeWithEnvPrefix(key)
} else {
envkey = input[1]
}
v.env[key] = envkey
return nil
}
// Given a key, find the value.
//
// Viper will check to see if an alias exists first.
// Viper will then check in the following order:
// flag, env, config file, key/value store.
// Lastly, if no value was found and flagDefault is true, and if the key
// corresponds to a flag, the flag's default value is returned.
//
// Note: this assumes a lower-cased key given.
func (v *Viper) find(lcaseKey string, flagDefault bool) interface{} {
var (
val interface{}
exists bool
path = strings.Split(lcaseKey, v.keyDelim)
nested = len(path) > 1
)
// compute the path through the nested maps to the nested value
if nested && v.isPathShadowedInDeepMap(path, castMapStringToMapInterface(v.aliases)) != "" {
return nil
}
// if the requested key is an alias, then return the proper key
lcaseKey = v.realKey(lcaseKey)
path = strings.Split(lcaseKey, v.keyDelim)
nested = len(path) > 1
// Set() override first
val = v.searchMap(v.override, path)
if val != nil {
return val
}
if nested && v.isPathShadowedInDeepMap(path, v.override) != "" {
return nil
}
// PFlag override next
flag, exists := v.pflags[lcaseKey]
if exists && flag.HasChanged() {
switch flag.ValueType() {
case "int", "int8", "int16", "int32", "int64":
return cast.ToInt(flag.ValueString())
case "bool":
return cast.ToBool(flag.ValueString())
case "stringSlice":
s := strings.TrimPrefix(flag.ValueString(), "[")
s = strings.TrimSuffix(s, "]")
res, _ := readAsCSV(s)
return res
case "intSlice":
s := strings.TrimPrefix(flag.ValueString(), "[")
s = strings.TrimSuffix(s, "]")
res, _ := readAsCSV(s)
return cast.ToIntSlice(res)
case "stringToString":
return stringToStringConv(flag.ValueString())
default:
return flag.ValueString()
}
}
if nested && v.isPathShadowedInFlatMap(path, v.pflags) != "" {
return nil
}
// Env override next
if v.automaticEnvApplied {
// even if it hasn't been registered, if automaticEnv is used,
// check any Get request
if val, ok := v.getEnv(v.mergeWithEnvPrefix(lcaseKey)); ok {
return val
}
if nested && v.isPathShadowedInAutoEnv(path) != "" {
return nil
}
}
envkey, exists := v.env[lcaseKey]
if exists {
if val, ok := v.getEnv(envkey); ok {
return val
}
}
if nested && v.isPathShadowedInFlatMap(path, v.env) != "" {
return nil
}
// Config file next
val = v.searchMapWithPathPrefixes(v.config, path)
if val != nil {
return val
}
if nested && v.isPathShadowedInDeepMap(path, v.config) != "" {
return nil
}
// K/V store next
val = v.searchMap(v.kvstore, path)
if val != nil {
return val
}
if nested && v.isPathShadowedInDeepMap(path, v.kvstore) != "" {
return nil
}
// Default next
val = v.searchMap(v.defaults, path)
if val != nil {
return val
}
if nested && v.isPathShadowedInDeepMap(path, v.defaults) != "" {
return nil
}
if flagDefault {
// last chance: if no value is found and a flag does exist for the key,
// get the flag's default value even if the flag's value has not been set.
if flag, exists := v.pflags[lcaseKey]; exists {
switch flag.ValueType() {
case "int", "int8", "int16", "int32", "int64":
return cast.ToInt(flag.ValueString())
case "bool":
return cast.ToBool(flag.ValueString())
case "stringSlice":
s := strings.TrimPrefix(flag.ValueString(), "[")
s = strings.TrimSuffix(s, "]")
res, _ := readAsCSV(s)
return res
case "intSlice":
s := strings.TrimPrefix(flag.ValueString(), "[")
s = strings.TrimSuffix(s, "]")
res, _ := readAsCSV(s)
return cast.ToIntSlice(res)
case "stringToString":
return stringToStringConv(flag.ValueString())
default:
return flag.ValueString()
}
}
// last item, no need to check shadowing
}
return nil
}
func readAsCSV(val string) ([]string, error) {
if val == "" {
return []string{}, nil
}
stringReader := strings.NewReader(val)
csvReader := csv.NewReader(stringReader)
return csvReader.Read()
}
// mostly copied from pflag's implementation of this operation here https://github.com/spf13/pflag/blob/master/string_to_string.go#L79
// alterations are: errors are swallowed, map[string]interface{} is returned in order to enable cast.ToStringMap
func stringToStringConv(val string) interface{} {
val = strings.Trim(val, "[]")
// An empty string would cause an empty map
if len(val) == 0 {
return map[string]interface{}{}
}
r := csv.NewReader(strings.NewReader(val))
ss, err := r.Read()
if err != nil {
return nil
}
out := make(map[string]interface{}, len(ss))
for _, pair := range ss {
kv := strings.SplitN(pair, "=", 2)
if len(kv) != 2 {
return nil
}
out[kv[0]] = kv[1]
}
return out
}
// IsSet checks to see if the key has been set in any of the data locations.
// IsSet is case-insensitive for a key.
func IsSet(key string) bool { return v.IsSet(key) }
func (v *Viper) IsSet(key string) bool {
lcaseKey := strings.ToLower(key)
val := v.find(lcaseKey, false)
return val != nil
}
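// Illustrative sketch (assumed usage, not upstream documentation): IsSet reports
// whether any source provides a value for the key, even a zero value.
//
//	v := viper.New()
//	v.Set("feature", false)
//	v.IsSet("feature") // true
//	v.IsSet("missing") // false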
// AutomaticEnv has Viper check ENV variables for all
// keys set in config, default & flags.
func AutomaticEnv() { v.AutomaticEnv() }
func (v *Viper) AutomaticEnv() {
v.automaticEnvApplied = true
}
// SetEnvKeyReplacer sets the strings.Replacer on the viper object
// Useful for mapping an environmental variable to a key that does
// not match it.
func SetEnvKeyReplacer(r *strings.Replacer) { v.SetEnvKeyReplacer(r) }
func (v *Viper) SetEnvKeyReplacer(r *strings.Replacer) {
v.envKeyReplacer = r
}
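// Illustrative sketch (assumed usage, not upstream documentation): combining
// AutomaticEnv with a key replacer lets dotted keys resolve to underscored
// environment variables.
//
//	v := viper.New()
//	v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
//	v.AutomaticEnv()
//	// with DATABASE_HOST=localhost exported, v.Get("database.host") == "localhost"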
// RegisterAlias creates an alias that provides another accessor for the same key.
// This enables one to change a name without breaking the application.
func RegisterAlias(alias string, key string) { v.RegisterAlias(alias, key) }
func (v *Viper) RegisterAlias(alias string, key string) {
v.registerAlias(alias, strings.ToLower(key))
}
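// Illustrative sketch (assumed usage, not upstream documentation): after
// registering an alias, both names resolve to the same value.
//
//	v := viper.New()
//	v.Set("verbose", true)
//	v.RegisterAlias("loud", "verbose")
//	v.GetBool("loud")    // true
//	v.GetBool("verbose") // true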
func (v *Viper) registerAlias(alias string, key string) {
alias = strings.ToLower(alias)
if alias != key && alias != v.realKey(key) {
_, exists := v.aliases[alias]
if !exists {
// if we alias something that exists in one of the maps to another
// name, we'll never be able to get that value using the original
// name, so move the config value to the new realkey.
if val, ok := v.config[alias]; ok {
delete(v.config, alias)
v.config[key] = val
}
if val, ok := v.kvstore[alias]; ok {
delete(v.kvstore, alias)
v.kvstore[key] = val
}
if val, ok := v.defaults[alias]; ok {
delete(v.defaults, alias)
v.defaults[key] = val
}
if val, ok := v.override[alias]; ok {
delete(v.override, alias)
v.override[key] = val
}
v.aliases[alias] = key
}
} else {
jww.WARN.Println("Creating circular reference alias", alias, key, v.realKey(key))
}
}
func (v *Viper) realKey(key string) string {
newkey, exists := v.aliases[key]
if exists {
jww.DEBUG.Println("Alias", key, "to", newkey)
return v.realKey(newkey)
}
return key
}
// InConfig checks to see if the given key (or an alias) is in the config file.
func InConfig(key string) bool { return v.InConfig(key) }
func (v *Viper) InConfig(key string) bool {
// if the requested key is an alias, then return the proper key
key = v.realKey(key)
_, exists := v.config[key]
return exists
}
// SetDefault sets the default value for this key.
// SetDefault is case-insensitive for a key.
// The default is only used when no value is provided by the user via flag, config or ENV.
func SetDefault(key string, value interface{}) { v.SetDefault(key, value) }
func (v *Viper) SetDefault(key string, value interface{}) {
// If alias passed in, then set the proper default
key = v.realKey(strings.ToLower(key))
value = toCaseInsensitiveValue(value)
path := strings.Split(key, v.keyDelim)
lastKey := strings.ToLower(path[len(path)-1])
deepestMap := deepSearch(v.defaults, path[0:len(path)-1])
// set innermost value
deepestMap[lastKey] = value
}
// Set sets the value for the key in the override register.
// Set is case-insensitive for a key.
// Will be used instead of values obtained via
// flags, config file, ENV, default, or key/value store.
func Set(key string, value interface{}) { v.Set(key, value) }
func (v *Viper) Set(key string, value interface{}) {
// If alias passed in, then set the proper override
key = v.realKey(strings.ToLower(key))
value = toCaseInsensitiveValue(value)
path := strings.Split(key, v.keyDelim)
lastKey := strings.ToLower(path[len(path)-1])
deepestMap := deepSearch(v.override, path[0:len(path)-1])
// set innermost value
deepestMap[lastKey] = value
}
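// Illustrative sketch (assumed usage, not upstream documentation): an explicit
// Set overrides a default, while the default applies when nothing else is set.
//
//	v := viper.New()
//	v.SetDefault("port", 8080)
//	v.GetInt("port") // 8080
//	v.Set("port", 9090)
//	v.GetInt("port") // 9090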
// ReadInConfig will discover and load the configuration file from disk,
// searching in one of the defined paths.
func ReadInConfig() error { return v.ReadInConfig() }
func (v *Viper) ReadInConfig() error {
jww.INFO.Println("Attempting to read in config file")
filename, err := v.getConfigFile()
if err != nil {
return err
}
if !stringInSlice(v.getConfigType(), SupportedExts) {
return UnsupportedConfigError(v.getConfigType())
}
jww.DEBUG.Println("Reading file: ", filename)
file, err := afero.ReadFile(v.fs, filename)
if err != nil {
return err
}
config := make(map[string]interface{})
err = v.unmarshalReader(bytes.NewReader(file), config)
if err != nil {
return err
}
v.config = config
return nil
}
// MergeInConfig merges a new configuration with an existing config.
func MergeInConfig() error { return v.MergeInConfig() }
func (v *Viper) MergeInConfig() error {
jww.INFO.Println("Attempting to merge in config file")
filename, err := v.getConfigFile()
if err != nil {
return err
}
if !stringInSlice(v.getConfigType(), SupportedExts) {
return UnsupportedConfigError(v.getConfigType())
}
file, err := afero.ReadFile(v.fs, filename)
if err != nil {
return err
}
return v.MergeConfig(bytes.NewReader(file))
}
// ReadConfig reads new configuration from the given io.Reader, replacing the
// existing config; keys that are not present in the new input are discarded.
func ReadConfig(in io.Reader) error { return v.ReadConfig(in) }
func (v *Viper) ReadConfig(in io.Reader) error {
v.config = make(map[string]interface{})
return v.unmarshalReader(in, v.config)
}
// MergeConfig merges a new configuration with an existing config.
func MergeConfig(in io.Reader) error { return v.MergeConfig(in) }
func (v *Viper) MergeConfig(in io.Reader) error {
cfg := make(map[string]interface{})
if err := v.unmarshalReader(in, cfg); err != nil {
return err
}
return v.MergeConfigMap(cfg)
}
// MergeConfigMap merges the configuration from the map given with an existing config.
// Note that the map given may be modified.
func MergeConfigMap(cfg map[string]interface{}) error { return v.MergeConfigMap(cfg) }
func (v *Viper) MergeConfigMap(cfg map[string]interface{}) error {
if v.config == nil {
v.config = make(map[string]interface{})
}
insensitiviseMap(cfg)
mergeMaps(cfg, v.config, nil)
return nil
}
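// Illustrative sketch (assumed usage, not upstream documentation): ReadConfig
// replaces the in-memory config, while MergeConfig layers new keys on top of it.
//
//	v := viper.New()
//	v.SetConfigType("yaml")
//	v.ReadConfig(strings.NewReader("a: 1\nb: 2"))
//	v.MergeConfig(strings.NewReader("b: 3\nc: 4"))
//	// a == 1, b == 3, c == 4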
// WriteConfig writes the current configuration to a file.
func WriteConfig() error { return v.WriteConfig() }
func (v *Viper) WriteConfig() error {
filename, err := v.getConfigFile()
if err != nil {
return err
}
return v.writeConfig(filename, true)
}
// SafeWriteConfig writes current configuration to file only if the file does not exist.
func SafeWriteConfig() error { return v.SafeWriteConfig() }
func (v *Viper) SafeWriteConfig() error {
if len(v.configPaths) < 1 {
return errors.New("missing configuration for 'configPath'")
}
return v.SafeWriteConfigAs(filepath.Join(v.configPaths[0], v.configName+"."+v.configType))
}
// WriteConfigAs writes current configuration to a given filename.
func WriteConfigAs(filename string) error { return v.WriteConfigAs(filename) }
func (v *Viper) WriteConfigAs(filename string) error {
return v.writeConfig(filename, true)
}
// SafeWriteConfigAs writes current configuration to a given filename if it does not exist.
func SafeWriteConfigAs(filename string) error { return v.SafeWriteConfigAs(filename) }
func (v *Viper) SafeWriteConfigAs(filename string) error {
alreadyExists, err := afero.Exists(v.fs, filename)
if alreadyExists && err == nil {
return ConfigFileAlreadyExistsError(filename)
}
return v.writeConfig(filename, false)
}
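// Illustrative sketch (assumed usage, not upstream documentation; the path is
// hypothetical): WriteConfigAs overwrites an existing file, SafeWriteConfigAs
// refuses to.
//
//	v.WriteConfigAs("/tmp/app.yaml")            // creates or overwrites
//	err := v.SafeWriteConfigAs("/tmp/app.yaml") // ConfigFileAlreadyExistsError if the file is already there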
func (v *Viper) writeConfig(filename string, force bool) error {
jww.INFO.Println("Attempting to write configuration to file.")
var configType string
ext := filepath.Ext(filename)
if ext != "" {
configType = ext[1:]
} else {
configType = v.configType
}
if configType == "" {
return fmt.Errorf("config type could not be determined for %s", filename)
}
if !stringInSlice(configType, SupportedExts) {
return UnsupportedConfigError(configType)
}
if v.config == nil {
v.config = make(map[string]interface{})
}
flags := os.O_CREATE | os.O_TRUNC | os.O_WRONLY
if !force {
flags |= os.O_EXCL
}
f, err := v.fs.OpenFile(filename, flags, v.configPermissions)
if err != nil {
return err
}
defer f.Close()
if err := v.marshalWriter(f, configType); err != nil {
return err
}
return f.Sync()
}
// Unmarshal a Reader into a map.
// Should probably be an unexported function.
func unmarshalReader(in io.Reader, c map[string]interface{}) error {
return v.unmarshalReader(in, c)
}
func (v *Viper) unmarshalReader(in io.Reader, c map[string]interface{}) error {
buf := new(bytes.Buffer)
buf.ReadFrom(in)
switch strings.ToLower(v.getConfigType()) {
case "yaml", "yml":
if err := yaml.Unmarshal(buf.Bytes(), &c); err != nil {
return ConfigParseError{err}
}
case "json":
if err := json.Unmarshal(buf.Bytes(), &c); err != nil {
return ConfigParseError{err}
}
case "hcl":
obj, err := hcl.Parse(buf.String())
if err != nil {
return ConfigParseError{err}
}
if err = hcl.DecodeObject(&c, obj); err != nil {
return ConfigParseError{err}
}
case "toml":
tree, err := toml.LoadReader(buf)
if err != nil {
return ConfigParseError{err}
}
tmap := tree.ToMap()
for k, v := range tmap {
c[k] = v
}
case "dotenv", "env":
env, err := gotenv.StrictParse(buf)
if err != nil {
return ConfigParseError{err}
}
for k, v := range env {
c[k] = v
}
case "properties", "props", "prop":
v.properties = properties.NewProperties()
var err error
if v.properties, err = properties.Load(buf.Bytes(), properties.UTF8); err != nil {
return ConfigParseError{err}
}
for _, key := range v.properties.Keys() {
value, _ := v.properties.Get(key)
// recursively build nested maps
path := strings.Split(key, ".")
lastKey := strings.ToLower(path[len(path)-1])
deepestMap := deepSearch(c, path[0:len(path)-1])
// set innermost value
deepestMap[lastKey] = value
}
case "ini":
cfg := ini.Empty()
err := cfg.Append(buf.Bytes())
if err != nil {
return ConfigParseError{err}
}
sections := cfg.Sections()
for i := 0; i < len(sections); i++ {
section := sections[i]
keys := section.Keys()
for j := 0; j < len(keys); j++ {
key := keys[j]
value := cfg.Section(section.Name()).Key(key.Name()).String()
c[section.Name()+"."+key.Name()] = value
}
}
}
insensitiviseMap(c)
return nil
}
// Marshal a map into Writer.
func (v *Viper) marshalWriter(f afero.File, configType string) error {
c := v.AllSettings()
switch configType {
case "json":
b, err := json.MarshalIndent(c, "", " ")
if err != nil {
return ConfigMarshalError{err}
}
_, err = f.WriteString(string(b))
if err != nil {
return ConfigMarshalError{err}
}
case "hcl":
b, err := json.Marshal(c)
if err != nil {
return ConfigMarshalError{err}
}
ast, err := hcl.Parse(string(b))
if err != nil {
return ConfigMarshalError{err}
}
err = printer.Fprint(f, ast.Node)
if err != nil {
return ConfigMarshalError{err}
}
case "prop", "props", "properties":
if v.properties == nil {
v.properties = properties.NewProperties()
}
p := v.properties
for _, key := range v.AllKeys() {
_, _, err := p.Set(key, v.GetString(key))
if err != nil {
return ConfigMarshalError{err}
}
}
_, err := p.WriteComment(f, "#", properties.UTF8)
if err != nil {
return ConfigMarshalError{err}
}
case "dotenv", "env":
lines := []string{}
for _, key := range v.AllKeys() {
envName := strings.ToUpper(strings.Replace(key, ".", "_", -1))
val := v.Get(key)
lines = append(lines, fmt.Sprintf("%v=%v", envName, val))
}
s := strings.Join(lines, "\n")
if _, err := f.WriteString(s); err != nil {
return ConfigMarshalError{err}
}
case "toml":
t, err := toml.TreeFromMap(c)
if err != nil {
return ConfigMarshalError{err}
}
s := t.String()
if _, err := f.WriteString(s); err != nil {
return ConfigMarshalError{err}
}
case "yaml", "yml":
b, err := yaml.Marshal(c)
if err != nil {
return ConfigMarshalError{err}
}
if _, err = f.WriteString(string(b)); err != nil {
return ConfigMarshalError{err}
}
case "ini":
keys := v.AllKeys()
cfg := ini.Empty()
ini.PrettyFormat = false
for i := 0; i < len(keys); i++ {
key := keys[i]
lastSep := strings.LastIndex(key, ".")
sectionName := key[:(lastSep)]
keyName := key[(lastSep + 1):]
if sectionName == "default" {
sectionName = ""
}
cfg.Section(sectionName).Key(keyName).SetValue(v.Get(key).(string))
}
cfg.WriteTo(f)
}
return nil
}
func keyExists(k string, m map[string]interface{}) string {
lk := strings.ToLower(k)
for mk := range m {
lmk := strings.ToLower(mk)
if lmk == lk {
return mk
}
}
return ""
}
func castToMapStringInterface(
src map[interface{}]interface{}) map[string]interface{} {
tgt := map[string]interface{}{}
for k, v := range src {
tgt[fmt.Sprintf("%v", k)] = v
}
return tgt
}
func castMapStringToMapInterface(src map[string]string) map[string]interface{} {
tgt := map[string]interface{}{}
for k, v := range src {
tgt[k] = v
}
return tgt
}
func castMapFlagToMapInterface(src map[string]FlagValue) map[string]interface{} {
tgt := map[string]interface{}{}
for k, v := range src {
tgt[k] = v
}
return tgt
}
// mergeMaps merges two maps. The `itgt` parameter is for handling go-yaml's
// insistence on parsing nested structures as `map[interface{}]interface{}`
// instead of using a `string` as the key for nested structures beyond one level
// deep. Both map types are supported as there is a go-yaml fork that uses
// `map[string]interface{}` instead.
func mergeMaps(
src, tgt map[string]interface{}, itgt map[interface{}]interface{}) {
for sk, sv := range src {
tk := keyExists(sk, tgt)
if tk == "" {
jww.TRACE.Printf("tk=\"\", tgt[%s]=%v", sk, sv)
tgt[sk] = sv
if itgt != nil {
itgt[sk] = sv
}
continue
}
tv, ok := tgt[tk]
if !ok {
jww.TRACE.Printf("tgt[%s] != ok, tgt[%s]=%v", tk, sk, sv)
tgt[sk] = sv
if itgt != nil {
itgt[sk] = sv
}
continue
}
svType := reflect.TypeOf(sv)
tvType := reflect.TypeOf(tv)
if svType != tvType {
jww.ERROR.Printf(
"svType != tvType; key=%s, st=%v, tt=%v, sv=%v, tv=%v",
sk, svType, tvType, sv, tv)
continue
}
jww.TRACE.Printf("processing key=%s, st=%v, tt=%v, sv=%v, tv=%v",
sk, svType, tvType, sv, tv)
switch ttv := tv.(type) {
case map[interface{}]interface{}:
jww.TRACE.Printf("merging maps (must convert)")
tsv := sv.(map[interface{}]interface{})
ssv := castToMapStringInterface(tsv)
stv := castToMapStringInterface(ttv)
mergeMaps(ssv, stv, ttv)
case map[string]interface{}:
jww.TRACE.Printf("merging maps")
mergeMaps(sv.(map[string]interface{}), ttv, nil)
default:
jww.TRACE.Printf("setting value")
tgt[tk] = sv
if itgt != nil {
itgt[tk] = sv
}
}
}
}
// ReadRemoteConfig attempts to get configuration from a remote source
// and read it in the remote configuration registry.
func ReadRemoteConfig() error { return v.ReadRemoteConfig() }
func (v *Viper) ReadRemoteConfig() error {
return v.getKeyValueConfig()
}
func WatchRemoteConfig() error { return v.WatchRemoteConfig() }
func (v *Viper) WatchRemoteConfig() error {
return v.watchKeyValueConfig()
}
func (v *Viper) WatchRemoteConfigOnChannel() error {
return v.watchKeyValueConfigOnChannel()
}
// Retrieve the first found remote configuration.
func (v *Viper) getKeyValueConfig() error {
if RemoteConfig == nil {
return RemoteConfigError("Enable the remote features by doing a blank import of the viper/remote package: '_ github.com/spf13/viper/remote'")
}
for _, rp := range v.remoteProviders {
val, err := v.getRemoteConfig(rp)
if err != nil {
continue
}
v.kvstore = val
return nil
}
return RemoteConfigError("No Files Found")
}
func (v *Viper) getRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) {
reader, err := RemoteConfig.Get(provider)
if err != nil {
return nil, err
}
err = v.unmarshalReader(reader, v.kvstore)
return v.kvstore, err
}
// Watch the first found remote configuration on a channel.
func (v *Viper) watchKeyValueConfigOnChannel() error {
for _, rp := range v.remoteProviders {
respc, _ := RemoteConfig.WatchChannel(rp)
// Todo: Add quit channel
go func(rc <-chan *RemoteResponse) {
for {
b := <-rc
reader := bytes.NewReader(b.Value)
v.unmarshalReader(reader, v.kvstore)
}
}(respc)
return nil
}
return RemoteConfigError("No Files Found")
}
// Watch the first found remote configuration.
func (v *Viper) watchKeyValueConfig() error {
for _, rp := range v.remoteProviders {
val, err := v.watchRemoteConfig(rp)
if err != nil {
continue
}
v.kvstore = val
return nil
}
return RemoteConfigError("No Files Found")
}
func (v *Viper) watchRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) {
reader, err := RemoteConfig.Watch(provider)
if err != nil {
return nil, err
}
err = v.unmarshalReader(reader, v.kvstore)
return v.kvstore, err
}
// AllKeys returns all keys holding a value, regardless of where they are set.
// Nested keys are returned with a v.keyDelim separator
func AllKeys() []string { return v.AllKeys() }
func (v *Viper) AllKeys() []string {
m := map[string]bool{}
// add all paths, by order of descending priority to ensure correct shadowing
m = v.flattenAndMergeMap(m, castMapStringToMapInterface(v.aliases), "")
m = v.flattenAndMergeMap(m, v.override, "")
m = v.mergeFlatMap(m, castMapFlagToMapInterface(v.pflags))
m = v.mergeFlatMap(m, castMapStringToMapInterface(v.env))
m = v.flattenAndMergeMap(m, v.config, "")
m = v.flattenAndMergeMap(m, v.kvstore, "")
m = v.flattenAndMergeMap(m, v.defaults, "")
// convert set of paths to list
a := make([]string, 0, len(m))
for x := range m {
a = append(a, x)
}
return a
}
// flattenAndMergeMap recursively flattens the given map into a map[string]bool
// of key paths (used as a set, easier to manipulate than a []string):
// - each path is merged into a single key string, delimited with v.keyDelim
// - if a path is shadowed by an earlier value in the initial shadow map,
// it is skipped.
// The resulting set of paths is merged to the given shadow set at the same time.
func (v *Viper) flattenAndMergeMap(shadow map[string]bool, m map[string]interface{}, prefix string) map[string]bool {
if shadow != nil && prefix != "" && shadow[prefix] {
// prefix is shadowed => nothing more to flatten
return shadow
}
if shadow == nil {
shadow = make(map[string]bool)
}
var m2 map[string]interface{}
if prefix != "" {
prefix += v.keyDelim
}
for k, val := range m {
fullKey := prefix + k
switch val.(type) {
case map[string]interface{}:
m2 = val.(map[string]interface{})
case map[interface{}]interface{}:
m2 = cast.ToStringMap(val)
default:
// immediate value
shadow[strings.ToLower(fullKey)] = true
continue
}
// recursively merge to shadow map
shadow = v.flattenAndMergeMap(shadow, m2, fullKey)
}
return shadow
}
// mergeFlatMap merges the given maps, excluding values of the second map
// shadowed by values from the first map.
func (v *Viper) mergeFlatMap(shadow map[string]bool, m map[string]interface{}) map[string]bool {
// scan keys
outer:
for k := range m {
path := strings.Split(k, v.keyDelim)
// scan intermediate paths
var parentKey string
for i := 1; i < len(path); i++ {
parentKey = strings.Join(path[0:i], v.keyDelim)
if shadow[parentKey] {
// path is shadowed, continue
continue outer
}
}
// add key
shadow[strings.ToLower(k)] = true
}
return shadow
}
// AllSettings merges all settings and returns them as a map[string]interface{}.
func AllSettings() map[string]interface{} { return v.AllSettings() }
func (v *Viper) AllSettings() map[string]interface{} {
m := map[string]interface{}{}
// start from the list of keys, and construct the map one value at a time
for _, k := range v.AllKeys() {
value := v.Get(k)
if value == nil {
// should not happen, since AllKeys() returns only keys holding a value,
// check just in case anything changes
continue
}
path := strings.Split(k, v.keyDelim)
lastKey := strings.ToLower(path[len(path)-1])
deepestMap := deepSearch(m, path[0:len(path)-1])
// set innermost value
deepestMap[lastKey] = value
}
return m
}
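// Illustrative sketch (assumed usage, not upstream documentation): AllKeys
// reports flattened, delimited keys, while AllSettings rebuilds the nested map.
//
//	v := viper.New()
//	v.Set("db.host", "localhost")
//	v.AllKeys()     // []string{"db.host"}
//	v.AllSettings() // map[string]interface{}{"db": map[string]interface{}{"host": "localhost"}}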
// SetFs sets the filesystem to use to read configuration.
func SetFs(fs afero.Fs) { v.SetFs(fs) }
func (v *Viper) SetFs(fs afero.Fs) {
v.fs = fs
}
// SetConfigName sets name for the config file.
// Does not include extension.
func SetConfigName(in string) { v.SetConfigName(in) }
func (v *Viper) SetConfigName(in string) {
if in != "" {
v.configName = in
v.configFile = ""
}
}
// SetConfigType sets the type of the configuration content, e.g. "json",
// for both file-based and remote sources.
func SetConfigType(in string) { v.SetConfigType(in) }
func (v *Viper) SetConfigType(in string) {
if in != "" {
v.configType = in
}
}
// SetConfigPermissions sets the permissions for the config file.
func SetConfigPermissions(perm os.FileMode) { v.SetConfigPermissions(perm) }
func (v *Viper) SetConfigPermissions(perm os.FileMode) {
v.configPermissions = perm.Perm()
}
func (v *Viper) getConfigType() string {
if v.configType != "" {
return v.configType
}
cf, err := v.getConfigFile()
if err != nil {
return ""
}
ext := filepath.Ext(cf)
if len(ext) > 1 {
return ext[1:]
}
return ""
}
func (v *Viper) getConfigFile() (string, error) {
if v.configFile == "" {
cf, err := v.findConfigFile()
if err != nil {
return "", err
}
v.configFile = cf
}
return v.configFile, nil
}
func (v *Viper) searchInPath(in string) (filename string) {
jww.DEBUG.Println("Searching for config in ", in)
for _, ext := range SupportedExts {
jww.DEBUG.Println("Checking for", filepath.Join(in, v.configName+"."+ext))
if b, _ := exists(v.fs, filepath.Join(in, v.configName+"."+ext)); b {
jww.DEBUG.Println("Found: ", filepath.Join(in, v.configName+"."+ext))
return filepath.Join(in, v.configName+"."+ext)
}
}
if v.configType != "" {
if b, _ := exists(v.fs, filepath.Join(in, v.configName)); b {
return filepath.Join(in, v.configName)
}
}
return ""
}
// Search all configPaths for any config file.
// Returns the first path that exists (and is a config file).
func (v *Viper) findConfigFile() (string, error) {
jww.INFO.Println("Searching for config in ", v.configPaths)
for _, cp := range v.configPaths {
file := v.searchInPath(cp)
if file != "" {
return file, nil
}
}
return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)}
}
// Debug prints all configuration registries for debugging
// purposes.
func Debug() { v.Debug() }
func (v *Viper) Debug() {
fmt.Printf("Aliases:\n%#v\n", v.aliases)
fmt.Printf("Override:\n%#v\n", v.override)
fmt.Printf("PFlags:\n%#v\n", v.pflags)
fmt.Printf("Env:\n%#v\n", v.env)
fmt.Printf("Key/Value Store:\n%#v\n", v.kvstore)
fmt.Printf("Config:\n%#v\n", v.config)
fmt.Printf("Defaults:\n%#v\n", v.defaults)
}
| 8,575 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/viper/flags.go | package viper
import "github.com/spf13/pflag"
// FlagValueSet is an interface that users can implement
// to bind a set of flags to viper.
type FlagValueSet interface {
VisitAll(fn func(FlagValue))
}
// FlagValue is an interface that users can implement
// to bind different flags to viper.
type FlagValue interface {
HasChanged() bool
Name() string
ValueString() string
ValueType() string
}
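// Illustrative sketch (an assumption about intended use, not upstream
// documentation): any type satisfying FlagValue can be bound through
// viper.BindFlagValue, for example a trivial static flag:
//
//	type staticFlag struct{ name, value string }
//
//	func (f staticFlag) HasChanged() bool    { return true }
//	func (f staticFlag) Name() string        { return f.name }
//	func (f staticFlag) ValueString() string { return f.value }
//	func (f staticFlag) ValueType() string   { return "string" }
//
//	// viper.BindFlagValue("mode", staticFlag{"mode", "fast"})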
// pflagValueSet is a wrapper around *pflag.FlagSet
// that implements FlagValueSet.
type pflagValueSet struct {
flags *pflag.FlagSet
}
// VisitAll iterates over all *pflag.Flag inside the *pflag.FlagSet.
func (p pflagValueSet) VisitAll(fn func(flag FlagValue)) {
p.flags.VisitAll(func(flag *pflag.Flag) {
fn(pflagValue{flag})
})
}
// pflagValue is a wrapper around *pflag.Flag
// that implements FlagValue.
type pflagValue struct {
flag *pflag.Flag
}
// HasChanged returns whether the flag has been changed or not.
func (p pflagValue) HasChanged() bool {
return p.flag.Changed
}
// Name returns the name of the flag.
func (p pflagValue) Name() string {
return p.flag.Name
}
// ValueString returns the value of the flag as a string.
func (p pflagValue) ValueString() string {
return p.flag.Value.String()
}
// ValueType returns the type of the flag as a string.
func (p pflagValue) ValueType() string {
return p.flag.Value.Type()
}
| 8,576 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/viper/go.sum | cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3 h1:AVXDdKsrtX33oR9fbCMu/+c1o8Ofjq6Ku/MInaLVg5Y=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go/bigquery v1.0.1 h1:hL+ycaJpVE9M7nLoiXb/Pn10ENE2u+oddxbD8uu0ZVU=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/datastore v1.0.0 h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/firestore v1.1.0 h1:9x7Bx0A9R5/M9jibeJeZWqjeVEIxYW9fZYqB9a70/bY=
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
cloud.google.com/go/pubsub v1.0.1 h1:W9tAK3E57P75u0XLLR82LZyw8VpAnhmyTOxW9qzmyj8=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/storage v1.0.0 h1:VV2nUM3wwLLGh9lSABFgZMjInyUbJeaRSE64WuAIQ+4=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c h1:+0HFd5KSZ/mm3JmhmrDukiId5iR6w4+BdFtfSy4yWIc=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.13+incompatible h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0 h1:bM6ZAFZmc/wPFaRDi0d5L7hGEZEx/2u+Tmr2evNHDiI=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/hashicorp/consul/api v1.1.0 h1:BNQPM9ytxj6jbjjdRPioQ94T6YXriSopn0i8COv6SRA=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/sdk v0.1.1 h1:LnuDWGNsoajlhGyHJvuWW6FVqRl8JOTPqS6CPTsYjhY=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/memberlist v0.1.3 h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2 h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024 h1:rBMNdlhTLzJjJSDIjNEXX1Pz3Hmwmz91v+zycvx9PJc=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3 h1:9iH4JKXLzFbOAdtqv/a+j8aewx2Y8lAjAydhbaScPF8=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0 h1:7etb9YClo3a6HjLzfl6rIQaU+FDfi0VSX39io3aQ+DM=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084 h1:sofwID9zm4tzrgykg80hfFph1mryUeLRsUfoocVVmRY=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136 h1:A1gGSx58LAGVHUUsOf7IiR0u8Xb6W51gRwfDBhkdcaw=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384 h1:TFlARGu6Czu1z7q93HTxcP1P+/ZFC/IKythI5RzrnRg=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc h1:NCy3Ohtk6Iny5V/reW2Ktypo4zIpWBdRJ1uFMjBxdg8=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0 h1:Q3Ui3V3/CVinFWFiW39Iw0kMuVrRzYX0wN6OPFp0lTA=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a h1:Ob5/580gVHBJZgXnff1cZDbG+xLtMVE5mDRTe+nIsX4=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
| 8,577 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/viper/.editorconfig | root = true
[*]
charset = utf-8
end_of_line = lf
indent_size = 4
indent_style = space
insert_final_newline = true
trim_trailing_whitespace = true
[*.go]
indent_style = tab
[{Makefile, *.mk}]
indent_style = tab
| 8,578 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/jwalterweatherman/go.mod | module github.com/spf13/jwalterweatherman
| 8,579 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/jwalterweatherman/notepad.go | // Copyright © 2016 Steve Francia <[email protected]>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package jwalterweatherman
import (
"fmt"
"io"
"log"
)
type Threshold int
func (t Threshold) String() string {
return prefixes[t]
}
const (
LevelTrace Threshold = iota
LevelDebug
LevelInfo
LevelWarn
LevelError
LevelCritical
LevelFatal
)
var prefixes map[Threshold]string = map[Threshold]string{
LevelTrace: "TRACE",
LevelDebug: "DEBUG",
LevelInfo: "INFO",
LevelWarn: "WARN",
LevelError: "ERROR",
LevelCritical: "CRITICAL",
LevelFatal: "FATAL",
}
// Notepad is where you leave a note!
type Notepad struct {
TRACE *log.Logger
DEBUG *log.Logger
INFO *log.Logger
WARN *log.Logger
ERROR *log.Logger
CRITICAL *log.Logger
FATAL *log.Logger
LOG *log.Logger
FEEDBACK *Feedback
loggers [7]**log.Logger
logHandle io.Writer
outHandle io.Writer
logThreshold Threshold
stdoutThreshold Threshold
prefix string
flags int
// One per Threshold
logCounters [7]*logCounter
}
// NewNotepad creates a new notepad.
func NewNotepad(outThreshold Threshold, logThreshold Threshold, outHandle, logHandle io.Writer, prefix string, flags int) *Notepad {
n := &Notepad{}
n.loggers = [7]**log.Logger{&n.TRACE, &n.DEBUG, &n.INFO, &n.WARN, &n.ERROR, &n.CRITICAL, &n.FATAL}
n.outHandle = outHandle
n.logHandle = logHandle
n.stdoutThreshold = outThreshold
n.logThreshold = logThreshold
if len(prefix) != 0 {
n.prefix = "[" + prefix + "] "
} else {
n.prefix = ""
}
n.flags = flags
n.LOG = log.New(n.logHandle,
"LOG: ",
n.flags)
n.FEEDBACK = &Feedback{out: log.New(outHandle, "", 0), log: n.LOG}
n.init()
return n
}
// init creates the loggers for each level depending on the notepad thresholds.
func (n *Notepad) init() {
logAndOut := io.MultiWriter(n.outHandle, n.logHandle)
for t, logger := range n.loggers {
threshold := Threshold(t)
counter := &logCounter{}
n.logCounters[t] = counter
prefix := n.prefix + threshold.String() + " "
switch {
case threshold >= n.logThreshold && threshold >= n.stdoutThreshold:
*logger = log.New(io.MultiWriter(counter, logAndOut), prefix, n.flags)
case threshold >= n.logThreshold:
*logger = log.New(io.MultiWriter(counter, n.logHandle), prefix, n.flags)
case threshold >= n.stdoutThreshold:
*logger = log.New(io.MultiWriter(counter, n.outHandle), prefix, n.flags)
default:
// counter doesn't care about prefix and flags, so don't use them
// for performance.
*logger = log.New(counter, "", 0)
}
}
}
// SetLogThreshold changes the threshold above which messages are written to the
// log file.
func (n *Notepad) SetLogThreshold(threshold Threshold) {
n.logThreshold = threshold
n.init()
}
// SetLogOutput changes the file where log messages are written.
func (n *Notepad) SetLogOutput(handle io.Writer) {
n.logHandle = handle
n.init()
}
// GetLogThreshold returns the defined Threshold for the log logger.
func (n *Notepad) GetLogThreshold() Threshold {
return n.logThreshold
}
// SetStdoutThreshold changes the threshold above which messages are written to the
// standard output.
func (n *Notepad) SetStdoutThreshold(threshold Threshold) {
n.stdoutThreshold = threshold
n.init()
}
// GetStdoutThreshold returns the Threshold for the stdout logger.
func (n *Notepad) GetStdoutThreshold() Threshold {
return n.stdoutThreshold
}
// SetPrefix changes the prefix used by the notepad. Prefixes are displayed between
// brackets at the beginning of the line. An empty prefix won't be displayed at all.
func (n *Notepad) SetPrefix(prefix string) {
if len(prefix) != 0 {
n.prefix = "[" + prefix + "] "
} else {
n.prefix = ""
}
n.init()
}
// SetFlags chooses which flags the logger will display (after prefix and message
// level). See the package log for more information on this.
func (n *Notepad) SetFlags(flags int) {
n.flags = flags
n.init()
}
// Feedback writes plainly to the outHandle while
// logging with the standard extra information (date, file, etc).
type Feedback struct {
out *log.Logger
log *log.Logger
}
func (fb *Feedback) Println(v ...interface{}) {
fb.output(fmt.Sprintln(v...))
}
func (fb *Feedback) Printf(format string, v ...interface{}) {
fb.output(fmt.Sprintf(format, v...))
}
func (fb *Feedback) Print(v ...interface{}) {
fb.output(fmt.Sprint(v...))
}
func (fb *Feedback) output(s string) {
if fb.out != nil {
fb.out.Output(2, s)
}
if fb.log != nil {
fb.log.Output(2, s)
}
}
| 8,580 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/jwalterweatherman/README.md | jWalterWeatherman
=================
Seamless printing to the terminal (stdout) and logging to an io.Writer
(file) that’s as easy to use as fmt.Println.

Graphic by [JonnyEtc](http://jonnyetc.deviantart.com/art/And-That-s-Why-You-Always-Leave-a-Note-315311422)
JWW is primarily a wrapper around the excellent standard log library. It
provides a few advantages over using the standard log library alone.
1. Ready to go out of the box.
2. One library for both printing to the terminal and logging (to files).
3. Really easy to log to either a temp file or a file you specify.
I really wanted a very straightforward library that could seamlessly do
the following things.
1. Replace all the println, printf, etc statements throughout my code with
something more useful
2. Allow the user to easily control what levels are printed to stdout
3. Allow the user to easily control what levels are logged
4. Provide an easy mechanism (like fmt.Println) to print info to the user
which can be easily logged as well
5. Due to 2 & 3 provide easy verbose mode for output and logs
6. Not have any unnecessary initialization cruft. Just use it.
# Usage
## Step 1. Use it
Put calls throughout your source based on type of feedback.
No initialization or setup needs to happen. Just start calling things.
Available Loggers are:
* TRACE
* DEBUG
* INFO
* WARN
* ERROR
* CRITICAL
* FATAL
These are each loggers based on the standard log library and follow the
standard usage. Eg.
```go
import (
jww "github.com/spf13/jwalterweatherman"
)
...
if err != nil {
// This is a pretty serious error and the user should know about
// it. It will be printed to the terminal as well as logged under the
// default thresholds.
jww.ERROR.Println(err)
}
if err2 != nil {
// This error isn’t going to materially change the behavior of the
// application, but it’s something that may not be what the user
// expects. Under the default thresholds, Warn will be logged, but
// not printed to the terminal.
jww.WARN.Println(err2)
}
// Information that’s relevant to what’s happening, but not very
// important for the user. Under the default thresholds this will be
// discarded.
jww.INFO.Printf("information %q", response)
```
NOTE: You can also use the library in a non-global setting by creating an instance of a Notepad:
```go
notepad = jww.NewNotepad(jww.LevelInfo, jww.LevelTrace, os.Stdout, ioutil.Discard, "", log.Ldate|log.Ltime)
notepad.WARN.Println("Some warning")
```
_Why 7 levels?_
Maybe you think that 7 levels are too many for any application... and you
are probably correct. Just because there are seven levels doesn’t mean
that you should be using all 7 levels. Pick the right set for your needs.
Remember they only have to mean something to your project.
## Step 2. Optionally configure JWW
Under the default thresholds:
* Debug, Trace & Info go to /dev/null
* Warn and above is logged (when a log file/io.Writer is provided)
* Error and above is printed to the terminal (stdout)
### Changing the thresholds
The threshold can be changed at any time, but will only affect calls that
execute after the change was made.
This is very useful if your application has a verbose mode. Of course you
can decide what verbose means to you or even have multiple levels of
verbosity.
```go
import (
jww "github.com/spf13/jwalterweatherman"
)
if Verbose {
jww.SetLogThreshold(jww.LevelTrace)
jww.SetStdoutThreshold(jww.LevelInfo)
}
```
Note that JWW's own internal output uses log levels as well, so set the log
level before making any other calls if you want to see what it's up to.
### Setting a log file
JWW can log to any `io.Writer`:
```go
jww.SetLogOutput(customWriter)
```
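For a quick end-to-end sketch (the file name `jww.log` is just an example, and the `os` and `jww` imports from the earlier snippets are assumed), any `*os.File` works since it satisfies `io.Writer`:
```go
logFile, err := os.OpenFile("jww.log", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
if err != nil {
	panic(err)
}
defer logFile.Close()

jww.SetLogOutput(logFile)
jww.SetLogThreshold(jww.LevelInfo)

// With these settings INFO and above is written to jww.log,
// while the stdout threshold stays at its default.
jww.INFO.Println("this line ends up in jww.log")
```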
# More information
This is an early release. I’ve been using it for a while and this is the
third interface I’ve tried. I like this one pretty well, but no guarantees
that it won’t change a bit.
I wrote this for use in [hugo](https://gohugo.io). If you are looking
for a static website engine that's super fast please check out Hugo.
| 8,581 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/jwalterweatherman/log_counter.go | // Copyright © 2016 Steve Francia <[email protected]>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package jwalterweatherman
import (
"sync/atomic"
)
type logCounter struct {
counter uint64
}
func (c *logCounter) incr() {
atomic.AddUint64(&c.counter, 1)
}
func (c *logCounter) resetCounter() {
atomic.StoreUint64(&c.counter, 0)
}
func (c *logCounter) getCount() uint64 {
return atomic.LoadUint64(&c.counter)
}
func (c *logCounter) Write(p []byte) (n int, err error) {
c.incr()
return len(p), nil
}
// LogCountForLevel returns the number of log invocations for a given threshold.
func (n *Notepad) LogCountForLevel(l Threshold) uint64 {
return n.logCounters[l].getCount()
}
// LogCountForLevelsGreaterThanorEqualTo returns the number of log invocations
// greater than or equal to a given threshold.
func (n *Notepad) LogCountForLevelsGreaterThanorEqualTo(threshold Threshold) uint64 {
var cnt uint64
for i := int(threshold); i < len(n.logCounters); i++ {
cnt += n.LogCountForLevel(Threshold(i))
}
return cnt
}
// ResetLogCounters resets the invocation counters for all levels.
func (n *Notepad) ResetLogCounters() {
for _, np := range n.logCounters {
np.resetCounter()
}
}
| 8,582 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/jwalterweatherman/LICENSE | The MIT License (MIT)
Copyright (c) 2014 Steve Francia
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. | 8,583 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/jwalterweatherman/default_notepad.go | // Copyright © 2016 Steve Francia <[email protected]>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package jwalterweatherman
import (
"io"
"io/ioutil"
"log"
"os"
)
var (
TRACE *log.Logger
DEBUG *log.Logger
INFO *log.Logger
WARN *log.Logger
ERROR *log.Logger
CRITICAL *log.Logger
FATAL *log.Logger
LOG *log.Logger
FEEDBACK *Feedback
defaultNotepad *Notepad
)
func reloadDefaultNotepad() {
TRACE = defaultNotepad.TRACE
DEBUG = defaultNotepad.DEBUG
INFO = defaultNotepad.INFO
WARN = defaultNotepad.WARN
ERROR = defaultNotepad.ERROR
CRITICAL = defaultNotepad.CRITICAL
FATAL = defaultNotepad.FATAL
LOG = defaultNotepad.LOG
FEEDBACK = defaultNotepad.FEEDBACK
}
func init() {
defaultNotepad = NewNotepad(LevelError, LevelWarn, os.Stdout, ioutil.Discard, "", log.Ldate|log.Ltime)
reloadDefaultNotepad()
}
// SetLogThreshold set the log threshold for the default notepad. Trace by default.
func SetLogThreshold(threshold Threshold) {
defaultNotepad.SetLogThreshold(threshold)
reloadDefaultNotepad()
}
// SetLogOutput set the log output for the default notepad. Discarded by default.
func SetLogOutput(handle io.Writer) {
defaultNotepad.SetLogOutput(handle)
reloadDefaultNotepad()
}
// SetStdoutThreshold set the standard output threshold for the default notepad.
// Info by default.
func SetStdoutThreshold(threshold Threshold) {
defaultNotepad.SetStdoutThreshold(threshold)
reloadDefaultNotepad()
}
// SetPrefix set the prefix for the default logger. Empty by default.
func SetPrefix(prefix string) {
defaultNotepad.SetPrefix(prefix)
reloadDefaultNotepad()
}
// SetFlags set the flags for the default logger. "log.Ldate | log.Ltime" by default.
func SetFlags(flags int) {
defaultNotepad.SetFlags(flags)
reloadDefaultNotepad()
}
// LogThreshold returns the current global log threshold.
func LogThreshold() Threshold {
return defaultNotepad.logThreshold
}
// StdoutThreshold returns the current global output threshold.
func StdoutThreshold() Threshold {
return defaultNotepad.stdoutThreshold
}
// GetLogThreshold returns the defined Threshold for the log logger.
func GetLogThreshold() Threshold {
return defaultNotepad.GetLogThreshold()
}
// GetStdoutThreshold returns the Threshold for the stdout logger.
func GetStdoutThreshold() Threshold {
return defaultNotepad.GetStdoutThreshold()
}
// LogCountForLevel returns the number of log invocations for a given threshold.
func LogCountForLevel(l Threshold) uint64 {
return defaultNotepad.LogCountForLevel(l)
}
// LogCountForLevelsGreaterThanorEqualTo returns the number of log invocations
// greater than or equal to a given threshold.
func LogCountForLevelsGreaterThanorEqualTo(threshold Threshold) uint64 {
return defaultNotepad.LogCountForLevelsGreaterThanorEqualTo(threshold)
}
// ResetLogCounters resets the invocation counters for all levels.
func ResetLogCounters() {
defaultNotepad.ResetLogCounters()
}
| 8,584 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/afero/const_win_unix.go | // Copyright © 2016 Steve Francia <[email protected]>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !darwin
// +build !openbsd
// +build !freebsd
// +build !dragonfly
// +build !netbsd
package afero
import (
"syscall"
)
const BADFD = syscall.EBADFD
| 8,585 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/afero/match.go | // Copyright © 2014 Steve Francia <[email protected]>.
// Copyright 2009 The Go Authors. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"path/filepath"
"sort"
"strings"
)
// Glob returns the names of all files matching pattern or nil
// if there is no matching file. The syntax of patterns is the same
// as in Match. The pattern may describe hierarchical names such as
// /usr/*/bin/ed (assuming the Separator is '/').
//
// Glob ignores file system errors such as I/O errors reading directories.
// The only possible returned error is ErrBadPattern, when pattern
// is malformed.
//
// This was adapted from (http://golang.org/pkg/path/filepath) and uses several
// built-ins from that package.
func Glob(fs Fs, pattern string) (matches []string, err error) {
if !hasMeta(pattern) {
// Lstat not supported by all filesystems.
if _, err = lstatIfPossible(fs, pattern); err != nil {
return nil, nil
}
return []string{pattern}, nil
}
dir, file := filepath.Split(pattern)
switch dir {
case "":
dir = "."
case string(filepath.Separator):
// nothing
default:
dir = dir[0 : len(dir)-1] // chop off trailing separator
}
if !hasMeta(dir) {
return glob(fs, dir, file, nil)
}
var m []string
m, err = Glob(fs, dir)
if err != nil {
return
}
for _, d := range m {
matches, err = glob(fs, d, file, matches)
if err != nil {
return
}
}
return
}
// glob searches for files matching pattern in the directory dir
// and appends them to matches. If the directory cannot be
// opened, it returns the existing matches. New matches are
// added in lexicographical order.
func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) {
m = matches
fi, err := fs.Stat(dir)
if err != nil {
return
}
if !fi.IsDir() {
return
}
d, err := fs.Open(dir)
if err != nil {
return
}
defer d.Close()
names, _ := d.Readdirnames(-1)
sort.Strings(names)
for _, n := range names {
matched, err := filepath.Match(pattern, n)
if err != nil {
return m, err
}
if matched {
m = append(m, filepath.Join(dir, n))
}
}
return
}
// hasMeta reports whether path contains any of the magic characters
// recognized by Match.
func hasMeta(path string) bool {
// TODO(niemeyer): Should other magic characters be added here?
return strings.IndexAny(path, "*?[") >= 0
}
| 8,586 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/afero/go.mod | module github.com/spf13/afero
require golang.org/x/text v0.3.0
| 8,587 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/afero/os.go | // Copyright © 2014 Steve Francia <[email protected]>.
// Copyright 2013 tsuru authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"os"
"time"
)
var _ Lstater = (*OsFs)(nil)
// OsFs is a Fs implementation that uses functions provided by the os package.
//
// For details in any method, check the documentation of the os package
// (http://golang.org/pkg/os/).
type OsFs struct{}
func NewOsFs() Fs {
return &OsFs{}
}
func (OsFs) Name() string { return "OsFs" }
func (OsFs) Create(name string) (File, error) {
f, e := os.Create(name)
if f == nil {
// while this looks strange, we need to return a bare nil (of type nil) not
// a nil value of type *os.File or nil won't be nil
return nil, e
}
return f, e
}
func (OsFs) Mkdir(name string, perm os.FileMode) error {
return os.Mkdir(name, perm)
}
func (OsFs) MkdirAll(path string, perm os.FileMode) error {
return os.MkdirAll(path, perm)
}
func (OsFs) Open(name string) (File, error) {
f, e := os.Open(name)
if f == nil {
// while this looks strange, we need to return a bare nil (of type nil) not
// a nil value of type *os.File or nil won't be nil
return nil, e
}
return f, e
}
func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
f, e := os.OpenFile(name, flag, perm)
if f == nil {
// while this looks strange, we need to return a bare nil (of type nil) not
// a nil value of type *os.File or nil won't be nil
return nil, e
}
return f, e
}
func (OsFs) Remove(name string) error {
return os.Remove(name)
}
func (OsFs) RemoveAll(path string) error {
return os.RemoveAll(path)
}
func (OsFs) Rename(oldname, newname string) error {
return os.Rename(oldname, newname)
}
func (OsFs) Stat(name string) (os.FileInfo, error) {
return os.Stat(name)
}
func (OsFs) Chmod(name string, mode os.FileMode) error {
return os.Chmod(name, mode)
}
func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
return os.Chtimes(name, atime, mtime)
}
func (OsFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
fi, err := os.Lstat(name)
return fi, true, err
}
| 8,588 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/afero/README.md | 
A FileSystem Abstraction System for Go
[](https://travis-ci.org/spf13/afero) [](https://ci.appveyor.com/project/spf13/afero) [](https://godoc.org/github.com/spf13/afero) [](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
# Overview
Afero is a filesystem framework providing a simple, uniform and universal API
for interacting with any filesystem, as an abstraction layer providing interfaces,
types and methods. Afero has an exceptionally clean interface and simple design
without needless constructors or initialization methods.
Afero is also a library providing a base set of interoperable backend
filesystems that make it easy to work with afero while retaining all the power
and benefit of the os and ioutil packages.
Afero provides significant improvements over using the os package alone, most
notably the ability to create mock and testing filesystems without relying on the disk.
It is suitable for use in any situation where you would consider using the OS
package as it provides an additional abstraction that makes it easy to use a
memory backed file system during testing. It also adds support for the http
filesystem for full interoperability.
## Afero Features
* A single consistent API for accessing a variety of filesystems
* Interoperation between a variety of file system types
* A set of interfaces to encourage and enforce interoperability between backends
* An atomic cross platform memory backed file system
* Support for compositional (union) file systems by combining multiple file systems acting as one
* Specialized backends which modify existing filesystems (Read Only, Regexp filtered)
* A set of utility functions ported from io, ioutil & hugo to be afero aware
# Using Afero
Afero is easy to use and easier to adopt.
A few different ways you could use Afero:
* Use the interfaces alone to define your own file system.
* As a wrapper for the OS packages.
* Define different filesystems for different parts of your application.
* Use Afero for mock filesystems while testing
## Step 1: Install Afero
First use go get to install the latest version of the library.
$ go get github.com/spf13/afero
Next include Afero in your application.
```go
import "github.com/spf13/afero"
```
## Step 2: Declare a backend
First define a package variable and set it to a pointer to a filesystem.
```go
var AppFs = afero.NewMemMapFs()
// or
var AppFs = afero.NewOsFs()
```
It is important to note that if you repeat the composite literal you
will be using a completely new and isolated filesystem. In the case of
OsFs it will still use the same underlying filesystem but will reduce
the ability to drop in other filesystems as desired.
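For example (a small sketch using only the constructors above), two separate calls to `afero.NewMemMapFs()` produce two unrelated in-memory filesystems:
```go
fsA := afero.NewMemMapFs()
fsB := afero.NewMemMapFs() // a second, completely independent MemMapFs

afero.WriteFile(fsA, "/hello.txt", []byte("hi"), 0644)

_, errA := fsA.Stat("/hello.txt") // nil: the file exists in fsA
_, errB := fsB.Stat("/hello.txt") // "file does not exist": fsB never saw it
```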
## Step 3: Use it like you would the OS package
Throughout your application use any function and method like you normally
would.
So if my application before had:
```go
os.Open("/tmp/foo")
```
We would replace it with:
```go
AppFs.Open("/tmp/foo")
```
`AppFs` being the variable we defined above.
## List of all available functions
File System Methods Available:
```go
Chmod(name string, mode os.FileMode) : error
Chtimes(name string, atime time.Time, mtime time.Time) : error
Create(name string) : File, error
Mkdir(name string, perm os.FileMode) : error
MkdirAll(path string, perm os.FileMode) : error
Name() : string
Open(name string) : File, error
OpenFile(name string, flag int, perm os.FileMode) : File, error
Remove(name string) : error
RemoveAll(path string) : error
Rename(oldname, newname string) : error
Stat(name string) : os.FileInfo, error
```
File Interfaces and Methods Available:
```go
io.Closer
io.Reader
io.ReaderAt
io.Seeker
io.Writer
io.WriterAt
Name() : string
Readdir(count int) : []os.FileInfo, error
Readdirnames(n int) : []string, error
Stat() : os.FileInfo, error
Sync() : error
Truncate(size int64) : error
WriteString(s string) : ret int, err error
```
In some applications it may make sense to define a new package that
simply exports the file system variable for easy access from anywhere.
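A minimal sketch of such a package (the package name `fsys` is purely illustrative):
```go
package fsys

import "github.com/spf13/afero"

// FS is the application-wide filesystem. Production code gets the real OS
// filesystem; tests can swap in afero.NewMemMapFs() before exercising code.
var FS afero.Fs = afero.NewOsFs()
```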
## Using Afero's utility functions
Afero provides a set of functions to make it easier to use the underlying file systems.
These functions have been primarily ported from io & ioutil with some developed for Hugo.
The afero utilities support all afero compatible backends.
The list of utilities includes:
```go
DirExists(path string) (bool, error)
Exists(path string) (bool, error)
FileContainsBytes(filename string, subslice []byte) (bool, error)
GetTempDir(subPath string) string
IsDir(path string) (bool, error)
IsEmpty(path string) (bool, error)
ReadDir(dirname string) ([]os.FileInfo, error)
ReadFile(filename string) ([]byte, error)
SafeWriteReader(path string, r io.Reader) (err error)
TempDir(dir, prefix string) (name string, err error)
TempFile(dir, prefix string) (f File, err error)
Walk(root string, walkFn filepath.WalkFunc) error
WriteFile(filename string, data []byte, perm os.FileMode) error
WriteReader(path string, r io.Reader) (err error)
```
For a complete list see [Afero's GoDoc](https://godoc.org/github.com/spf13/afero)
They are available under two different approaches to use. You can either call
them directly where the first parameter of each function will be the file
system, or you can declare a new `Afero`, a custom type used to bind these
functions as methods to a given filesystem.
### Calling utilities directly
```go
fs := new(afero.MemMapFs)
f, err := afero.TempFile(fs,"", "ioutil-test")
```
### Calling via Afero
```go
fs := afero.NewMemMapFs()
afs := &afero.Afero{Fs: fs}
f, err := afs.TempFile("", "ioutil-test")
```
## Using Afero for Testing
There is a large benefit to using a mock filesystem for testing. It has a
completely blank state every time it is initialized and can be easily
reproducible regardless of OS. You could create files to your heart’s content
and the file access would be fast while also saving you from all the annoying
issues with deleting temporary files, Windows file locking, etc. The MemMapFs
backend is perfect for testing.
* Much faster than performing I/O operations on disk
* Avoid security issues and permissions
* Far more control. 'rm -rf /' with confidence
* Test setup is far easier to do
* No test cleanup needed
One way to accomplish this is to define a variable as mentioned above.
In your application this will be set to afero.NewOsFs(); during testing you
can set it to afero.NewMemMapFs().
It wouldn't be uncommon to have each test initialize a blank slate memory
backend. To do this I would define my `appFS = afero.NewOsFs()` somewhere
appropriate in my application code. This approach ensures that Tests are order
independent, with no test relying on the state left by an earlier test.
Then in my tests I would initialize a new MemMapFs for each test:
```go
func TestExist(t *testing.T) {
appFS := afero.NewMemMapFs()
// create test files and directories
appFS.MkdirAll("src/a", 0755)
afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644)
afero.WriteFile(appFS, "src/c", []byte("file c"), 0644)
name := "src/c"
_, err := appFS.Stat(name)
if os.IsNotExist(err) {
t.Errorf("file \"%s\" does not exist.\n", name)
}
}
```
# Available Backends
## Operating System Native
### OsFs
The first is simply a wrapper around the native OS calls. This makes it
very easy to use as all of the calls are the same as the existing OS
calls. It also makes it trivial to have your code use the OS during
operation and a mock filesystem during testing or as needed.
```go
appfs := afero.NewOsFs()
appfs.MkdirAll("src/a", 0755)
```
## Memory Backed Storage
### MemMapFs
Afero also provides a fully atomic memory backed filesystem perfect for use in
mocking and for avoiding disk io when persistence isn't
necessary. It is fully concurrent and will work within go routines
safely.
```go
mm := afero.NewMemMapFs()
mm.MkdirAll("src/a", 0755)
```
#### InMemoryFile
As part of MemMapFs, Afero also provides an atomic, fully concurrent memory
backed file implementation. This can be used in other memory backed file
systems with ease. Plans are to add a radix tree memory stored file
system using InMemoryFile.
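A rough sketch of driving an in-memory file directly, assuming the `mem` subpackage's `CreateFile` and `NewFileHandle` helpers (treat these names as assumptions and verify them against the subpackage's GoDoc):
```go
// Both helpers below are assumptions about the github.com/spf13/afero/mem subpackage.
data := mem.CreateFile("/virtual/notes.txt") // backing file data
f := mem.NewFileHandle(data)                 // a handle implementing afero.File
f.WriteString("stored entirely in memory")
f.Close()
```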
## Network Interfaces
### SftpFs
Afero has experimental support for secure file transfer protocol (sftp), which can
be used to perform file operations over an encrypted channel.
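A rough sketch of how this could be wired up, assuming the `sftpfs` subpackage exposes a `New(*sftp.Client)` constructor on top of `github.com/pkg/sftp` (treat the exact names as assumptions and check the subpackage before relying on them):
```go
// sshConn is an already-established *ssh.Client (golang.org/x/crypto/ssh).
client, err := sftp.NewClient(sshConn) // github.com/pkg/sftp
if err != nil {
	log.Fatal(err)
}
defer client.Close()

var remote afero.Fs = sftpfs.New(client) // assumed constructor in spf13/afero/sftpfs
fi, _ := remote.Stat("/remote/data")
fmt.Println(fi.Name(), fi.Size())
```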
## Filtering Backends
### BasePathFs
The BasePathFs restricts all operations to a given path within an Fs.
The given file name to the operations on this Fs will be prepended with
the base path before calling the source Fs.
```go
bp := afero.NewBasePathFs(afero.NewOsFs(), "/base/path")
```
### ReadOnlyFs
A thin wrapper around the source Fs providing a read only view.
```go
fs := afero.NewReadOnlyFs(afero.NewOsFs())
_, err := fs.Create("/file.txt")
// err = syscall.EPERM
```
# RegexpFs
A filtered view on file names; any file NOT matching
the passed regexp will be treated as non-existing.
Files not matching the regexp provided will not be created.
Directories are not filtered.
```go
fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`))
_, err := fs.Create("/file.html")
// err = syscall.ENOENT
```
### HttpFs
Afero provides an http compatible backend which can wrap any of the existing
backends.
The Http package requires a slightly specific version of Open which
returns an http.File type.
Afero provides an httpFs file system which satisfies this requirement.
Any Afero FileSystem can be used as an httpFs.
```go
httpFs := afero.NewHttpFs(<ExistingFS>)
fileserver := http.FileServer(httpFs.Dir(<PATH>))
http.Handle("/", fileserver)
```
## Composite Backends
Afero provides the ability to have two filesystems (or more) act as a single
file system.
### CacheOnReadFs
The CacheOnReadFs will lazily make copies of any accessed files from the base
layer into the overlay. Subsequent reads will be pulled from the overlay
directly, provided the request is within the cache duration of when it was
created in the overlay.
If the base filesystem is writeable, any changes to files will be
done first to the base, then to the overlay layer. Write calls to open file
handles like `Write()` or `Truncate()` go to the overlay first.
To write files to the overlay only, you can use the overlay Fs directly (not
via the union Fs).
Files are cached in the layer for the given time.Duration; a cache duration of 0
means "forever", meaning the file will never be re-requested from the base.
A read-only base will make the overlay also read-only but still copy files
from the base to the overlay when they're not present (or outdated) in the
caching layer.
```go
base := afero.NewOsFs()
layer := afero.NewMemMapFs()
ufs := afero.NewCacheOnReadFs(base, layer, 100 * time.Second)
```
### CopyOnWriteFs()
The CopyOnWriteFs is a read only base file system with a potentially
writeable layer on top.
Read operations will first look in the overlay and if not found there, will
serve the file from the base.
Changes to the file system will only be made in the overlay.
Any attempt to modify a file found only in the base will copy the file to the
overlay layer before modification (including opening a file with a writable
handle).
Removing and Renaming files present only in the base layer is not currently
permitted. If a file is present in the base layer and the overlay, only the
overlay will be removed/renamed.
```go
base := afero.NewOsFs()
roBase := afero.NewReadOnlyFs(base)
ufs := afero.NewCopyOnWriteFs(roBase, afero.NewMemMapFs())
fh, _ := ufs.Create("/home/test/file2.txt")
fh.WriteString("This is a test")
fh.Close()
```
In this example all write operations will only occur in memory (MemMapFs)
leaving the base filesystem (OsFs) untouched.
## Desired/possible backends
The following is a short list of possible backends we hope someone will
implement:
* SSH
* ZIP
* TAR
* S3
# About the project
## What's in the name
Afero comes from the latin roots Ad-Facere.
**"Ad"** is a prefix meaning "to".
**"Facere"** is a form of the root "faciō" making "make or do".
The literal meaning of afero is "to make" or "to do" which seems very fitting
for a library that allows one to make files and directories and do things with them.
The English word that shares the same roots as Afero is "affair". Affair shares
the same concept but as a noun it means "something that is made or done" or "an
object of a particular type".
It's also nice that unlike some of my other libraries (hugo, cobra, viper) it
Googles very well.
## Release Notes
* **0.10.0** 2015.12.10
* Full compatibility with Windows
* Introduction of afero utilities
* Test suite rewritten to work cross platform
* Normalize paths for MemMapFs
* Adding Sync to the file interface
* **Breaking Change** Walk and ReadDir have changed parameter order
* Moving types used by MemMapFs to a subpackage
* General bugfixes and improvements
* **0.9.0** 2015.11.05
* New Walk function similar to filepath.Walk
* MemMapFs.OpenFile handles O_CREATE, O_APPEND, O_TRUNC
* MemMapFs.Remove now really deletes the file
* InMemoryFile.Readdir and Readdirnames work correctly
* InMemoryFile functions lock it for concurrent access
* Test suite improvements
* **0.8.0** 2014.10.28
* First public version
* Interfaces feel ready for people to build using
* Interfaces satisfy all known uses
* MemMapFs passes the majority of the OS test suite
* OsFs passes the majority of the OS test suite
## Contributing
1. Fork it
2. Create your feature branch (`git checkout -b my-new-feature`)
3. Commit your changes (`git commit -am 'Add some feature'`)
4. Push to the branch (`git push origin my-new-feature`)
5. Create new Pull Request
## Contributors
Names in no particular order:
* [spf13](https://github.com/spf13)
* [jaqx0r](https://github.com/jaqx0r)
* [mbertschler](https://github.com/mbertschler)
* [xor-gate](https://github.com/xor-gate)
## License
Afero is released under the Apache 2.0 license. See
[LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt)
| 8,589 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/afero/LICENSE.txt | Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
| 8,590 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/afero/basepath.go | package afero
import (
"os"
"path/filepath"
"runtime"
"strings"
"time"
)
var _ Lstater = (*BasePathFs)(nil)
// The BasePathFs restricts all operations to a given path within an Fs.
// The given file name to the operations on this Fs will be prepended with
// the base path before calling the base Fs.
// Any file name (after filepath.Clean()) outside this base path will be
// treated as a non-existing file.
//
// Note that it does not clean the error messages on return, so you may
// reveal the real path on errors.
type BasePathFs struct {
source Fs
path string
}
type BasePathFile struct {
File
path string
}
func (f *BasePathFile) Name() string {
sourcename := f.File.Name()
return strings.TrimPrefix(sourcename, filepath.Clean(f.path))
}
func NewBasePathFs(source Fs, path string) Fs {
return &BasePathFs{source: source, path: path}
}
// on a file outside the base path it returns the given file name and an error,
// else the given file with the base path prepended
func (b *BasePathFs) RealPath(name string) (path string, err error) {
if err := validateBasePathName(name); err != nil {
return name, err
}
bpath := filepath.Clean(b.path)
path = filepath.Clean(filepath.Join(bpath, name))
if !strings.HasPrefix(path, bpath) {
return name, os.ErrNotExist
}
return path, nil
}
func validateBasePathName(name string) error {
if runtime.GOOS != "windows" {
// Not much to do here;
// the virtual file paths all look absolute on *nix.
return nil
}
// On Windows a common mistake would be to provide an absolute OS path
// We could strip out the base part, but that would not be very portable.
if filepath.IsAbs(name) {
return os.ErrNotExist
}
return nil
}
func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) {
if name, err = b.RealPath(name); err != nil {
return &os.PathError{Op: "chtimes", Path: name, Err: err}
}
return b.source.Chtimes(name, atime, mtime)
}
func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) {
if name, err = b.RealPath(name); err != nil {
return &os.PathError{Op: "chmod", Path: name, Err: err}
}
return b.source.Chmod(name, mode)
}
func (b *BasePathFs) Name() string {
return "BasePathFs"
}
func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) {
if name, err = b.RealPath(name); err != nil {
return nil, &os.PathError{Op: "stat", Path: name, Err: err}
}
return b.source.Stat(name)
}
func (b *BasePathFs) Rename(oldname, newname string) (err error) {
if oldname, err = b.RealPath(oldname); err != nil {
return &os.PathError{Op: "rename", Path: oldname, Err: err}
}
if newname, err = b.RealPath(newname); err != nil {
return &os.PathError{Op: "rename", Path: newname, Err: err}
}
return b.source.Rename(oldname, newname)
}
func (b *BasePathFs) RemoveAll(name string) (err error) {
if name, err = b.RealPath(name); err != nil {
return &os.PathError{Op: "remove_all", Path: name, Err: err}
}
return b.source.RemoveAll(name)
}
func (b *BasePathFs) Remove(name string) (err error) {
if name, err = b.RealPath(name); err != nil {
return &os.PathError{Op: "remove", Path: name, Err: err}
}
return b.source.Remove(name)
}
func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) {
if name, err = b.RealPath(name); err != nil {
return nil, &os.PathError{Op: "openfile", Path: name, Err: err}
}
sourcef, err := b.source.OpenFile(name, flag, mode)
if err != nil {
return nil, err
}
return &BasePathFile{sourcef, b.path}, nil
}
func (b *BasePathFs) Open(name string) (f File, err error) {
if name, err = b.RealPath(name); err != nil {
return nil, &os.PathError{Op: "open", Path: name, Err: err}
}
sourcef, err := b.source.Open(name)
if err != nil {
return nil, err
}
return &BasePathFile{File: sourcef, path: b.path}, nil
}
func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) {
if name, err = b.RealPath(name); err != nil {
return &os.PathError{Op: "mkdir", Path: name, Err: err}
}
return b.source.Mkdir(name, mode)
}
func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) {
if name, err = b.RealPath(name); err != nil {
return &os.PathError{Op: "mkdir", Path: name, Err: err}
}
return b.source.MkdirAll(name, mode)
}
func (b *BasePathFs) Create(name string) (f File, err error) {
if name, err = b.RealPath(name); err != nil {
return nil, &os.PathError{Op: "create", Path: name, Err: err}
}
sourcef, err := b.source.Create(name)
if err != nil {
return nil, err
}
return &BasePathFile{File: sourcef, path: b.path}, nil
}
func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
name, err := b.RealPath(name)
if err != nil {
return nil, false, &os.PathError{Op: "lstat", Path: name, Err: err}
}
if lstater, ok := b.source.(Lstater); ok {
return lstater.LstatIfPossible(name)
}
fi, err := b.source.Stat(name)
return fi, false, err
}
// vim: ts=4 sw=4 noexpandtab nolist syn=go
| 8,591 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/afero/httpFs.go | // Copyright © 2014 Steve Francia <[email protected]>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"errors"
"net/http"
"os"
"path"
"path/filepath"
"strings"
"time"
)
type httpDir struct {
basePath string
fs HttpFs
}
func (d httpDir) Open(name string) (http.File, error) {
if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 ||
strings.Contains(name, "\x00") {
return nil, errors.New("http: invalid character in file path")
}
dir := string(d.basePath)
if dir == "" {
dir = "."
}
f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name))))
if err != nil {
return nil, err
}
return f, nil
}
type HttpFs struct {
source Fs
}
func NewHttpFs(source Fs) *HttpFs {
return &HttpFs{source: source}
}
func (h HttpFs) Dir(s string) *httpDir {
return &httpDir{basePath: s, fs: h}
}
func (h HttpFs) Name() string { return "h HttpFs" }
func (h HttpFs) Create(name string) (File, error) {
return h.source.Create(name)
}
func (h HttpFs) Chmod(name string, mode os.FileMode) error {
return h.source.Chmod(name, mode)
}
func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
return h.source.Chtimes(name, atime, mtime)
}
func (h HttpFs) Mkdir(name string, perm os.FileMode) error {
return h.source.Mkdir(name, perm)
}
func (h HttpFs) MkdirAll(path string, perm os.FileMode) error {
return h.source.MkdirAll(path, perm)
}
func (h HttpFs) Open(name string) (http.File, error) {
f, err := h.source.Open(name)
if err == nil {
if httpfile, ok := f.(http.File); ok {
return httpfile, nil
}
}
return nil, err
}
func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
return h.source.OpenFile(name, flag, perm)
}
func (h HttpFs) Remove(name string) error {
return h.source.Remove(name)
}
func (h HttpFs) RemoveAll(path string) error {
return h.source.RemoveAll(path)
}
func (h HttpFs) Rename(oldname, newname string) error {
return h.source.Rename(oldname, newname)
}
func (h HttpFs) Stat(name string) (os.FileInfo, error) {
return h.source.Stat(name)
}
| 8,592 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/afero/util.go | // Copyright ©2015 Steve Francia <[email protected]>
// Portions Copyright ©2015 The Hugo Authors
// Portions Copyright 2016-present Bjørn Erik Pedersen <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"unicode"
"golang.org/x/text/transform"
"golang.org/x/text/unicode/norm"
)
// FilePathSeparator is the filepath separator, as defined by filepath.Separator.
const FilePathSeparator = string(filepath.Separator)
// WriteReader takes a reader and a path and writes the content to the file at that path.
func (a Afero) WriteReader(path string, r io.Reader) (err error) {
return WriteReader(a.Fs, path, r)
}
func WriteReader(fs Fs, path string, r io.Reader) (err error) {
dir, _ := filepath.Split(path)
ospath := filepath.FromSlash(dir)
if ospath != "" {
		err = fs.MkdirAll(ospath, 0777) // rwx for user, group and other
if err != nil {
if err != os.ErrExist {
return err
}
}
}
file, err := fs.Create(path)
if err != nil {
return
}
defer file.Close()
_, err = io.Copy(file, r)
return
}
// Same as WriteReader but checks to see if file/directory already exists.
func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) {
return SafeWriteReader(a.Fs, path, r)
}
func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) {
dir, _ := filepath.Split(path)
ospath := filepath.FromSlash(dir)
if ospath != "" {
		err = fs.MkdirAll(ospath, 0777) // rwx for user, group and other
if err != nil {
return
}
}
exists, err := Exists(fs, path)
if err != nil {
return
}
if exists {
return fmt.Errorf("%v already exists", path)
}
file, err := fs.Create(path)
if err != nil {
return
}
defer file.Close()
_, err = io.Copy(file, r)
return
}
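// Illustrative usage sketch (not part of the upstream afero source):
// WriteReader creates missing parent directories and streams the reader into
// the target file, while SafeWriteReader refuses to overwrite an existing
// path. The file path is hypothetical; the snippet assumes "strings" and
// "github.com/spf13/afero" are imported.
//
//	fs := afero.NewMemMapFs()
//	if err := afero.WriteReader(fs, "/reports/out.txt", strings.NewReader("hello")); err != nil {
//		// handle the error
//	}
//	// Calling afero.SafeWriteReader(fs, "/reports/out.txt", ...) afterwards
//	// would return an "already exists" error instead of truncating the file.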
func (a Afero) GetTempDir(subPath string) string {
return GetTempDir(a.Fs, subPath)
}
// GetTempDir returns the default temp directory with trailing slash
// if subPath is not empty then it will be created recursively with mode 777 rwx rwx rwx
func GetTempDir(fs Fs, subPath string) string {
addSlash := func(p string) string {
if FilePathSeparator != p[len(p)-1:] {
p = p + FilePathSeparator
}
return p
}
dir := addSlash(os.TempDir())
if subPath != "" {
// preserve windows backslash :-(
if FilePathSeparator == "\\" {
subPath = strings.Replace(subPath, "\\", "____", -1)
}
		dir = dir + UnicodeSanitize(subPath)
if FilePathSeparator == "\\" {
dir = strings.Replace(dir, "____", "\\", -1)
}
if exists, _ := Exists(fs, dir); exists {
return addSlash(dir)
}
err := fs.MkdirAll(dir, 0777)
if err != nil {
panic(err)
}
dir = addSlash(dir)
}
return dir
}
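// Illustrative usage sketch (not part of the upstream afero source): the
// sub-path "myapp/cache" is hypothetical and "fmt" is assumed to be imported.
//
//	tmp := afero.GetTempDir(afero.NewOsFs(), "myapp/cache")
//	fmt.Println(tmp) // ends with a separator, e.g. "/tmp/myapp/cache/" on Linux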
// Rewrite string to remove non-standard path characters
func UnicodeSanitize(s string) string {
source := []rune(s)
target := make([]rune, 0, len(source))
for _, r := range source {
if unicode.IsLetter(r) ||
unicode.IsDigit(r) ||
unicode.IsMark(r) ||
r == '.' ||
r == '/' ||
r == '\\' ||
r == '_' ||
r == '-' ||
r == '%' ||
r == ' ' ||
r == '#' {
target = append(target, r)
}
}
return string(target)
}
// Transform characters with accents into plain forms.
func NeuterAccents(s string) string {
t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC)
result, _, _ := transform.String(t, string(s))
return result
}
func isMn(r rune) bool {
return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks
}
func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) {
return FileContainsBytes(a.Fs, filename, subslice)
}
// Check if a file contains a specified byte slice.
func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) {
f, err := fs.Open(filename)
if err != nil {
return false, err
}
defer f.Close()
return readerContainsAny(f, subslice), nil
}
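// Illustrative usage sketch (not part of the upstream afero source): scanning
// a file for a byte pattern without loading it fully into memory, assuming fs
// is an afero.Fs and the file path is hypothetical.
//
//	found, err := afero.FileContainsBytes(fs, "/etc/hosts", []byte("localhost"))
//	if err == nil && found {
//		// the pattern occurs somewhere in the file
//	}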
func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) {
return FileContainsAnyBytes(a.Fs, filename, subslices)
}
// Check if a file contains any of the specified byte slices.
func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) {
f, err := fs.Open(filename)
if err != nil {
return false, err
}
defer f.Close()
return readerContainsAny(f, subslices...), nil
}
// readerContainsAny reports whether any of the subslices is contained in r.
func readerContainsAny(r io.Reader, subslices ...[]byte) bool {
if r == nil || len(subslices) == 0 {
return false
}
largestSlice := 0
for _, sl := range subslices {
if len(sl) > largestSlice {
largestSlice = len(sl)
}
}
if largestSlice == 0 {
return false
}
bufflen := largestSlice * 4
halflen := bufflen / 2
buff := make([]byte, bufflen)
var err error
var n, i int
for {
i++
if i == 1 {
n, err = io.ReadAtLeast(r, buff[:halflen], halflen)
} else {
if i != 2 {
// shift left to catch overlapping matches
copy(buff[:], buff[halflen:])
}
n, err = io.ReadAtLeast(r, buff[halflen:], halflen)
}
if n > 0 {
for _, sl := range subslices {
if bytes.Contains(buff, sl) {
return true
}
}
}
if err != nil {
break
}
}
return false
}
func (a Afero) DirExists(path string) (bool, error) {
return DirExists(a.Fs, path)
}
// DirExists checks if a path exists and is a directory.
func DirExists(fs Fs, path string) (bool, error) {
fi, err := fs.Stat(path)
if err == nil && fi.IsDir() {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
func (a Afero) IsDir(path string) (bool, error) {
return IsDir(a.Fs, path)
}
// IsDir checks if a given path is a directory.
func IsDir(fs Fs, path string) (bool, error) {
fi, err := fs.Stat(path)
if err != nil {
return false, err
}
return fi.IsDir(), nil
}
func (a Afero) IsEmpty(path string) (bool, error) {
return IsEmpty(a.Fs, path)
}
// IsEmpty checks if a given file or directory is empty.
func IsEmpty(fs Fs, path string) (bool, error) {
if b, _ := Exists(fs, path); !b {
return false, fmt.Errorf("%q path does not exist", path)
}
fi, err := fs.Stat(path)
if err != nil {
return false, err
}
if fi.IsDir() {
f, err := fs.Open(path)
if err != nil {
return false, err
}
defer f.Close()
		list, err := f.Readdir(-1)
		if err != nil {
			return false, err
		}
		return len(list) == 0, nil
}
return fi.Size() == 0, nil
}
func (a Afero) Exists(path string) (bool, error) {
return Exists(a.Fs, path)
}
// Check if a file or directory exists.
func Exists(fs Fs, path string) (bool, error) {
_, err := fs.Stat(path)
if err == nil {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
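// Illustrative usage sketch (not part of the upstream afero source): the
// existence helpers are also available as methods on the Afero wrapper type.
// The paths are hypothetical and "fmt" is assumed to be imported.
//
//	a := afero.Afero{Fs: afero.NewMemMapFs()}
//	exists, _ := a.Exists("/data/config.yml") // false until the file is written
//	isDir, _ := a.DirExists("/data")          // false until MkdirAll("/data", 0755)
//	fmt.Println(exists, isDir)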
func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string {
combinedPath := filepath.Join(basePathFs.path, relativePath)
if parent, ok := basePathFs.source.(*BasePathFs); ok {
return FullBaseFsPath(parent, combinedPath)
}
return combinedPath
}
| 8,593 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/afero/const_bsds.go | // Copyright © 2016 Steve Francia <[email protected]>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build darwin openbsd freebsd netbsd dragonfly
package afero
import (
"syscall"
)
const BADFD = syscall.EBADF
| 8,594 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/afero/appveyor.yml | version: '{build}'
clone_folder: C:\gopath\src\github.com\spf13\afero
environment:
GOPATH: C:\gopath
build_script:
- cmd: >-
go version
go env
go get -v github.com/spf13/afero/...
go build github.com/spf13/afero
test_script:
- cmd: go test -race -v github.com/spf13/afero/...
| 8,595 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/afero/lstater.go | // Copyright © 2018 Steve Francia <[email protected]>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"os"
)
// Lstater is an optional interface in Afero. It is only implemented by the
// filesystems saying so.
// It will call Lstat if the filesystem itself is, or it delegates to, the os filesystem.
// Else it will call Stat.
// In addition to the FileInfo, it will return a boolean telling whether Lstat was called or not.
type Lstater interface {
LstatIfPossible(name string) (os.FileInfo, bool, error)
}
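// Illustrative usage sketch (not part of the upstream afero source): callers
// probe for this optional interface with a type assertion and fall back to
// Stat. The surrounding helper is hypothetical, e.g.
// func lstatIfPossible(fs afero.Fs, name string) (os.FileInfo, bool, error):
//
//	if lstater, ok := fs.(afero.Lstater); ok {
//		return lstater.LstatIfPossible(name)
//	}
//	fi, err := fs.Stat(name)
//	return fi, false, err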
| 8,596 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/afero/copyOnWriteFs.go | package afero
import (
"fmt"
"os"
"path/filepath"
"syscall"
"time"
)
var _ Lstater = (*CopyOnWriteFs)(nil)
// The CopyOnWriteFs is a union filesystem: a read only base file system with
// a possibly writeable layer on top. Changes to the file system will only
// be made in the overlay: Changing an existing file in the base layer which
// is not present in the overlay will copy the file to the overlay ("changing"
// includes also calls to e.g. Chtimes() and Chmod()).
//
// Reading directories is currently only supported via Open(), not OpenFile().
type CopyOnWriteFs struct {
base Fs
layer Fs
}
func NewCopyOnWriteFs(base Fs, layer Fs) Fs {
return &CopyOnWriteFs{base: base, layer: layer}
}
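// Illustrative usage sketch (not part of the upstream afero source): a
// read-only base layer combined with an in-memory overlay, so every
// modification lands in memory and the OS filesystem is never touched. The
// file path is hypothetical and "strings" is assumed to be imported.
//
//	base := afero.NewReadOnlyFs(afero.NewOsFs())
//	overlay := afero.NewMemMapFs()
//	ufs := afero.NewCopyOnWriteFs(base, overlay)
//	// Writing through ufs copies any affected base file into the overlay first.
//	_ = afero.WriteReader(ufs, "/notes/draft.txt", strings.NewReader("draft"))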
// Returns true if the file is not in the overlay
func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) {
if _, err := u.layer.Stat(name); err == nil {
return false, nil
}
_, err := u.base.Stat(name)
if err != nil {
if oerr, ok := err.(*os.PathError); ok {
if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR {
return false, nil
}
}
if err == syscall.ENOENT {
return false, nil
}
}
return true, err
}
func (u *CopyOnWriteFs) copyToLayer(name string) error {
return copyToLayer(u.base, u.layer, name)
}
func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error {
b, err := u.isBaseFile(name)
if err != nil {
return err
}
if b {
if err := u.copyToLayer(name); err != nil {
return err
}
}
return u.layer.Chtimes(name, atime, mtime)
}
func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error {
b, err := u.isBaseFile(name)
if err != nil {
return err
}
if b {
if err := u.copyToLayer(name); err != nil {
return err
}
}
return u.layer.Chmod(name, mode)
}
func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) {
fi, err := u.layer.Stat(name)
if err != nil {
isNotExist := u.isNotExist(err)
if isNotExist {
return u.base.Stat(name)
}
return nil, err
}
return fi, nil
}
func (u *CopyOnWriteFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
llayer, ok1 := u.layer.(Lstater)
lbase, ok2 := u.base.(Lstater)
if ok1 {
fi, b, err := llayer.LstatIfPossible(name)
if err == nil {
return fi, b, nil
}
if !u.isNotExist(err) {
return nil, b, err
}
}
if ok2 {
fi, b, err := lbase.LstatIfPossible(name)
if err == nil {
return fi, b, nil
}
if !u.isNotExist(err) {
return nil, b, err
}
}
fi, err := u.Stat(name)
return fi, false, err
}
func (u *CopyOnWriteFs) isNotExist(err error) bool {
if e, ok := err.(*os.PathError); ok {
err = e.Err
}
if err == os.ErrNotExist || err == syscall.ENOENT || err == syscall.ENOTDIR {
return true
}
return false
}
// Renaming files present only in the base layer is not permitted
func (u *CopyOnWriteFs) Rename(oldname, newname string) error {
b, err := u.isBaseFile(oldname)
if err != nil {
return err
}
if b {
return syscall.EPERM
}
return u.layer.Rename(oldname, newname)
}
// Removing files present only in the base layer is not permitted. If
// a file is present in the base layer and the overlay, only the overlay
// will be removed.
func (u *CopyOnWriteFs) Remove(name string) error {
err := u.layer.Remove(name)
switch err {
case syscall.ENOENT:
_, err = u.base.Stat(name)
if err == nil {
return syscall.EPERM
}
return syscall.ENOENT
default:
return err
}
}
func (u *CopyOnWriteFs) RemoveAll(name string) error {
err := u.layer.RemoveAll(name)
switch err {
case syscall.ENOENT:
_, err = u.base.Stat(name)
if err == nil {
return syscall.EPERM
}
return syscall.ENOENT
default:
return err
}
}
func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
b, err := u.isBaseFile(name)
if err != nil {
return nil, err
}
if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
if b {
if err = u.copyToLayer(name); err != nil {
return nil, err
}
return u.layer.OpenFile(name, flag, perm)
}
dir := filepath.Dir(name)
isaDir, err := IsDir(u.base, dir)
if err != nil && !os.IsNotExist(err) {
return nil, err
}
if isaDir {
if err = u.layer.MkdirAll(dir, 0777); err != nil {
return nil, err
}
return u.layer.OpenFile(name, flag, perm)
}
isaDir, err = IsDir(u.layer, dir)
if err != nil {
return nil, err
}
if isaDir {
return u.layer.OpenFile(name, flag, perm)
}
return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist?
}
if b {
return u.base.OpenFile(name, flag, perm)
}
return u.layer.OpenFile(name, flag, perm)
}
// This function handles the 9 different possibilities caused
// by the union which are the intersection of the following...
// layer: doesn't exist, exists as a file, and exists as a directory
// base: doesn't exist, exists as a file, and exists as a directory
func (u *CopyOnWriteFs) Open(name string) (File, error) {
// Since the overlay overrides the base we check that first
b, err := u.isBaseFile(name)
if err != nil {
return nil, err
}
// If overlay doesn't exist, return the base (base state irrelevant)
if b {
return u.base.Open(name)
}
// If overlay is a file, return it (base state irrelevant)
dir, err := IsDir(u.layer, name)
if err != nil {
return nil, err
}
if !dir {
return u.layer.Open(name)
}
// Overlay is a directory, base state now matters.
// Base state has 3 states to check but 2 outcomes:
// A. It's a file or non-readable in the base (return just the overlay)
// B. It's an accessible directory in the base (return a UnionFile)
// If base is file or nonreadable, return overlay
dir, err = IsDir(u.base, name)
if !dir || err != nil {
return u.layer.Open(name)
}
// Both base & layer are directories
// Return union file (if opens are without error)
bfile, bErr := u.base.Open(name)
lfile, lErr := u.layer.Open(name)
// If either have errors at this point something is very wrong. Return nil and the errors
if bErr != nil || lErr != nil {
return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr)
}
return &UnionFile{Base: bfile, Layer: lfile}, nil
}
func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error {
dir, err := IsDir(u.base, name)
if err != nil {
return u.layer.MkdirAll(name, perm)
}
if dir {
return ErrFileExists
}
return u.layer.MkdirAll(name, perm)
}
func (u *CopyOnWriteFs) Name() string {
return "CopyOnWriteFs"
}
func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error {
dir, err := IsDir(u.base, name)
if err != nil {
return u.layer.MkdirAll(name, perm)
}
if dir {
// This is in line with how os.MkdirAll behaves.
return nil
}
return u.layer.MkdirAll(name, perm)
}
func (u *CopyOnWriteFs) Create(name string) (File, error) {
return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666)
}
| 8,597 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/afero/unionFile.go | package afero
import (
"io"
"os"
"path/filepath"
"syscall"
)
// The UnionFile implements the afero.File interface and will be returned
// when reading a directory present at least in the overlay or opening a file
// for writing.
//
// The calls to
// Readdir() and Readdirnames() merge the file os.FileInfo / names from the
// base and the overlay - for files present in both layers, only those
// from the overlay will be used.
//
// When opening files for writing (Create() / OpenFile() with the right flags)
// the operations will be done in both layers, starting with the overlay. A
// successful read in the overlay will move the cursor position in the base layer
// by the number of bytes read.
type UnionFile struct {
Base File
Layer File
Merger DirsMerger
off int
files []os.FileInfo
}
func (f *UnionFile) Close() error {
// first close base, so we have a newer timestamp in the overlay. If we'd close
// the overlay first, we'd get a cacheStale the next time we access this file
// -> cache would be useless ;-)
if f.Base != nil {
f.Base.Close()
}
if f.Layer != nil {
return f.Layer.Close()
}
return BADFD
}
func (f *UnionFile) Read(s []byte) (int, error) {
if f.Layer != nil {
n, err := f.Layer.Read(s)
if (err == nil || err == io.EOF) && f.Base != nil {
// advance the file position also in the base file, the next
// call may be a write at this position (or a seek with SEEK_CUR)
if _, seekErr := f.Base.Seek(int64(n), os.SEEK_CUR); seekErr != nil {
// only overwrite err in case the seek fails: we need to
// report an eventual io.EOF to the caller
err = seekErr
}
}
return n, err
}
if f.Base != nil {
return f.Base.Read(s)
}
return 0, BADFD
}
func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) {
if f.Layer != nil {
n, err := f.Layer.ReadAt(s, o)
if (err == nil || err == io.EOF) && f.Base != nil {
_, err = f.Base.Seek(o+int64(n), os.SEEK_SET)
}
return n, err
}
if f.Base != nil {
return f.Base.ReadAt(s, o)
}
return 0, BADFD
}
func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) {
if f.Layer != nil {
pos, err = f.Layer.Seek(o, w)
if (err == nil || err == io.EOF) && f.Base != nil {
_, err = f.Base.Seek(o, w)
}
return pos, err
}
if f.Base != nil {
return f.Base.Seek(o, w)
}
return 0, BADFD
}
func (f *UnionFile) Write(s []byte) (n int, err error) {
if f.Layer != nil {
n, err = f.Layer.Write(s)
if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark?
_, err = f.Base.Write(s)
}
return n, err
}
if f.Base != nil {
return f.Base.Write(s)
}
return 0, BADFD
}
func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) {
if f.Layer != nil {
n, err = f.Layer.WriteAt(s, o)
if err == nil && f.Base != nil {
_, err = f.Base.WriteAt(s, o)
}
return n, err
}
if f.Base != nil {
return f.Base.WriteAt(s, o)
}
return 0, BADFD
}
func (f *UnionFile) Name() string {
if f.Layer != nil {
return f.Layer.Name()
}
return f.Base.Name()
}
// DirsMerger is how UnionFile weaves two directories together.
// It takes the FileInfo slices from the layer and the base and returns a
// single view.
type DirsMerger func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error)
var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) {
var files = make(map[string]os.FileInfo)
for _, fi := range lofi {
files[fi.Name()] = fi
}
for _, fi := range bofi {
if _, exists := files[fi.Name()]; !exists {
files[fi.Name()] = fi
}
}
rfi := make([]os.FileInfo, len(files))
i := 0
for _, fi := range files {
rfi[i] = fi
i++
}
return rfi, nil
}
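// Illustrative usage sketch (not part of the upstream afero source): a custom
// DirsMerger can replace the default union policy on a UnionFile. The
// "overlay wins, base ignored" policy and the dirFile variable (a File
// returned by Open on a union filesystem) are assumptions for the example.
//
//	var overlayOnly afero.DirsMerger = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) {
//		return lofi, nil
//	}
//	if uf, ok := dirFile.(*afero.UnionFile); ok {
//		uf.Merger = overlayOnly
//	}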
// Readdir will weave the two directories together and
// return a single view of the overlayed directories.
// At the end of the directory view, the error is io.EOF if c > 0.
func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) {
var merge DirsMerger = f.Merger
if merge == nil {
merge = defaultUnionMergeDirsFn
}
if f.off == 0 {
var lfi []os.FileInfo
if f.Layer != nil {
lfi, err = f.Layer.Readdir(-1)
if err != nil {
return nil, err
}
}
var bfi []os.FileInfo
if f.Base != nil {
bfi, err = f.Base.Readdir(-1)
if err != nil {
return nil, err
}
}
merged, err := merge(lfi, bfi)
if err != nil {
return nil, err
}
f.files = append(f.files, merged...)
}
if c <= 0 && len(f.files) == 0 {
return f.files, nil
}
if f.off >= len(f.files) {
return nil, io.EOF
}
if c <= 0 {
return f.files[f.off:], nil
}
	if c > len(f.files)-f.off {
		c = len(f.files) - f.off
	}
	defer func() { f.off += c }()
	return f.files[f.off : f.off+c], nil
}
func (f *UnionFile) Readdirnames(c int) ([]string, error) {
rfi, err := f.Readdir(c)
if err != nil {
return nil, err
}
var names []string
for _, fi := range rfi {
names = append(names, fi.Name())
}
return names, nil
}
func (f *UnionFile) Stat() (os.FileInfo, error) {
if f.Layer != nil {
return f.Layer.Stat()
}
if f.Base != nil {
return f.Base.Stat()
}
return nil, BADFD
}
func (f *UnionFile) Sync() (err error) {
if f.Layer != nil {
err = f.Layer.Sync()
if err == nil && f.Base != nil {
err = f.Base.Sync()
}
return err
}
if f.Base != nil {
return f.Base.Sync()
}
return BADFD
}
func (f *UnionFile) Truncate(s int64) (err error) {
if f.Layer != nil {
err = f.Layer.Truncate(s)
if err == nil && f.Base != nil {
err = f.Base.Truncate(s)
}
return err
}
if f.Base != nil {
return f.Base.Truncate(s)
}
return BADFD
}
func (f *UnionFile) WriteString(s string) (n int, err error) {
if f.Layer != nil {
n, err = f.Layer.WriteString(s)
if err == nil && f.Base != nil {
_, err = f.Base.WriteString(s)
}
return n, err
}
if f.Base != nil {
return f.Base.WriteString(s)
}
return 0, BADFD
}
func copyToLayer(base Fs, layer Fs, name string) error {
bfh, err := base.Open(name)
if err != nil {
return err
}
defer bfh.Close()
// First make sure the directory exists
exists, err := Exists(layer, filepath.Dir(name))
if err != nil {
return err
}
if !exists {
err = layer.MkdirAll(filepath.Dir(name), 0777) // FIXME?
if err != nil {
return err
}
}
// Create the file on the overlay
lfh, err := layer.Create(name)
if err != nil {
return err
}
n, err := io.Copy(lfh, bfh)
if err != nil {
// If anything fails, clean up the file
layer.Remove(name)
lfh.Close()
return err
}
bfi, err := bfh.Stat()
if err != nil || bfi.Size() != n {
layer.Remove(name)
lfh.Close()
return syscall.EIO
}
err = lfh.Close()
if err != nil {
layer.Remove(name)
lfh.Close()
return err
}
return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime())
}
| 8,598 |
0 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13 | kubeflow_public_repos/fate-operator/vendor/github.com/spf13/afero/cacheOnReadFs.go | package afero
import (
"os"
"syscall"
"time"
)
// If the cache duration is 0, cache time will be unlimited, i.e. once
// a file is in the layer, the base will never be read again for this file.
//
// For cache times greater than 0, the modification time of a file is
// checked. Note that a lot of file system implementations only allow a
// resolution of a second for timestamps... or as the godoc for os.Chtimes()
// states: "The underlying filesystem may truncate or round the values to a
// less precise time unit."
//
// This caching union will forward all write calls also to the base file
// system first. To prevent writing to the base Fs, wrap it in a read-only
// filter - Note: this will also make the overlay read-only, for writing files
// in the overlay, use the overlay Fs directly, not via the union Fs.
type CacheOnReadFs struct {
base Fs
layer Fs
cacheTime time.Duration
}
func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs {
return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime}
}
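// Illustrative usage sketch (not part of the upstream afero source): caching
// reads from the OS filesystem in memory with a 10 second staleness window.
// The duration and log path are arbitrary example values; "fmt" and "time"
// are assumed to be imported.
//
//	cached := afero.NewCacheOnReadFs(afero.NewOsFs(), afero.NewMemMapFs(), 10*time.Second)
//	if data, err := afero.ReadFile(cached, "/var/log/app.log"); err == nil {
//		fmt.Printf("read %d cached bytes\n", len(data))
//	}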
type cacheState int
const (
// not present in the overlay, unknown if it exists in the base:
cacheMiss cacheState = iota
// present in the overlay and in base, base file is newer:
cacheStale
// present in the overlay - with cache time == 0 it may exist in the base,
// with cacheTime > 0 it exists in the base and is same age or newer in the
// overlay
cacheHit
// happens if someone writes directly to the overlay without
// going through this union
cacheLocal
)
func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) {
var lfi, bfi os.FileInfo
lfi, err = u.layer.Stat(name)
if err == nil {
if u.cacheTime == 0 {
return cacheHit, lfi, nil
}
if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) {
bfi, err = u.base.Stat(name)
if err != nil {
return cacheLocal, lfi, nil
}
if bfi.ModTime().After(lfi.ModTime()) {
return cacheStale, bfi, nil
}
}
return cacheHit, lfi, nil
}
if err == syscall.ENOENT || os.IsNotExist(err) {
return cacheMiss, nil, nil
}
return cacheMiss, nil, err
}
func (u *CacheOnReadFs) copyToLayer(name string) error {
return copyToLayer(u.base, u.layer, name)
}
func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error {
st, _, err := u.cacheStatus(name)
if err != nil {
return err
}
switch st {
case cacheLocal:
case cacheHit:
err = u.base.Chtimes(name, atime, mtime)
case cacheStale, cacheMiss:
if err := u.copyToLayer(name); err != nil {
return err
}
err = u.base.Chtimes(name, atime, mtime)
}
if err != nil {
return err
}
return u.layer.Chtimes(name, atime, mtime)
}
func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error {
st, _, err := u.cacheStatus(name)
if err != nil {
return err
}
switch st {
case cacheLocal:
case cacheHit:
err = u.base.Chmod(name, mode)
case cacheStale, cacheMiss:
if err := u.copyToLayer(name); err != nil {
return err
}
err = u.base.Chmod(name, mode)
}
if err != nil {
return err
}
return u.layer.Chmod(name, mode)
}
func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) {
st, fi, err := u.cacheStatus(name)
if err != nil {
return nil, err
}
switch st {
case cacheMiss:
return u.base.Stat(name)
default: // cacheStale has base, cacheHit and cacheLocal the layer os.FileInfo
return fi, nil
}
}
func (u *CacheOnReadFs) Rename(oldname, newname string) error {
st, _, err := u.cacheStatus(oldname)
if err != nil {
return err
}
switch st {
case cacheLocal:
case cacheHit:
err = u.base.Rename(oldname, newname)
case cacheStale, cacheMiss:
if err := u.copyToLayer(oldname); err != nil {
return err
}
err = u.base.Rename(oldname, newname)
}
if err != nil {
return err
}
return u.layer.Rename(oldname, newname)
}
func (u *CacheOnReadFs) Remove(name string) error {
st, _, err := u.cacheStatus(name)
if err != nil {
return err
}
switch st {
case cacheLocal:
case cacheHit, cacheStale, cacheMiss:
err = u.base.Remove(name)
}
if err != nil {
return err
}
return u.layer.Remove(name)
}
func (u *CacheOnReadFs) RemoveAll(name string) error {
st, _, err := u.cacheStatus(name)
if err != nil {
return err
}
switch st {
case cacheLocal:
case cacheHit, cacheStale, cacheMiss:
err = u.base.RemoveAll(name)
}
if err != nil {
return err
}
return u.layer.RemoveAll(name)
}
func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
st, _, err := u.cacheStatus(name)
if err != nil {
return nil, err
}
switch st {
case cacheLocal, cacheHit:
default:
if err := u.copyToLayer(name); err != nil {
return nil, err
}
}
if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
bfi, err := u.base.OpenFile(name, flag, perm)
if err != nil {
return nil, err
}
lfi, err := u.layer.OpenFile(name, flag, perm)
if err != nil {
bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...?
return nil, err
}
return &UnionFile{Base: bfi, Layer: lfi}, nil
}
return u.layer.OpenFile(name, flag, perm)
}
func (u *CacheOnReadFs) Open(name string) (File, error) {
st, fi, err := u.cacheStatus(name)
if err != nil {
return nil, err
}
switch st {
case cacheLocal:
return u.layer.Open(name)
case cacheMiss:
bfi, err := u.base.Stat(name)
if err != nil {
return nil, err
}
if bfi.IsDir() {
return u.base.Open(name)
}
if err := u.copyToLayer(name); err != nil {
return nil, err
}
return u.layer.Open(name)
case cacheStale:
if !fi.IsDir() {
if err := u.copyToLayer(name); err != nil {
return nil, err
}
return u.layer.Open(name)
}
case cacheHit:
if !fi.IsDir() {
return u.layer.Open(name)
}
}
// the dirs from cacheHit, cacheStale fall down here:
bfile, _ := u.base.Open(name)
lfile, err := u.layer.Open(name)
if err != nil && bfile == nil {
return nil, err
}
return &UnionFile{Base: bfile, Layer: lfile}, nil
}
func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error {
err := u.base.Mkdir(name, perm)
if err != nil {
return err
}
return u.layer.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache
}
func (u *CacheOnReadFs) Name() string {
return "CacheOnReadFs"
}
func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error {
err := u.base.MkdirAll(name, perm)
if err != nil {
return err
}
return u.layer.MkdirAll(name, perm)
}
func (u *CacheOnReadFs) Create(name string) (File, error) {
bfh, err := u.base.Create(name)
if err != nil {
return nil, err
}
lfh, err := u.layer.Create(name)
if err != nil {
		// oops, see comment about O_TRUNC above, should we remove? then we have to
// remember if the file did not exist before
bfh.Close()
return nil, err
}
return &UnionFile{Base: bfh, Layer: lfh}, nil
}
| 8,599 |