index (int64) | repo_id (string) | file_path (string) | content (string) | __index_level_0__ (int64)
---|---|---|---|---|
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx | kubeflow_public_repos/kfp-tekton-backend/components/tfx/_samples/TFX_pipeline.ipynb | # Put your KFP cluster endpoint URL here if working from GCP notebooks (or local notebooks). ('https://xxxxx.notebooks.googleusercontent.com/')
kfp_endpoint='https://XXXXX.notebooks.googleusercontent.com/'
input_data_uri = 'gs://ml-pipeline-playground/tensorflow-tfx-repo/tfx/components/testdata/external/csv'
#Only S3/GCS is supported for now.
module_file = 'gs://ml-pipeline-playground/tensorflow-tfx-repo/v0.21.4/tfx/examples/chicago_taxi_pipeline/taxi_utils.py'
import kfp
import json
from kfp.components import load_component_from_url
download_from_gcs_op = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/d013b8535666641ca5a5be6ce67e69e044bbf076/components/google-cloud/storage/download/component.yaml')
CsvExampleGen_op = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/8c545b62/components/tfx/ExampleGen/CsvExampleGen/component.yaml')
StatisticsGen_op = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/8c545b62/components/tfx/StatisticsGen/component.yaml')
SchemaGen_op = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/8c545b62/components/tfx/SchemaGen/component.yaml')
ExampleValidator_op = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/8c545b62/components/tfx/ExampleValidator/component.yaml')
Transform_op = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/8c545b62/components/tfx/Transform/component.yaml')
Trainer_op = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/8c545b62/components/tfx/Trainer/component.yaml')
Evaluator_op = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/8c545b62/components/tfx/Evaluator/component.yaml')
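# Note: the component URLs above are pinned to specific kubeflow/pipelines commits (the commit
# hashes embedded in the raw.githubusercontent.com paths), so the loaded component definitions stay stable.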
def tfx_pipeline(
input_data_uri,
):
download_task = download_from_gcs_op(
input_data_uri,
)
examples_task = CsvExampleGen_op(
input=download_task.output,
input_config=json.dumps({
"splits": [
{'name': 'data', 'pattern': '*.csv'},
]
}),
output_config=json.dumps({
"splitConfig": {
"splits": [
{'name': 'train', 'hash_buckets': 2},
{'name': 'eval', 'hash_buckets': 1},
]
}
}),
)
statistics_task = StatisticsGen_op(
examples=examples_task.outputs['examples'],
)
schema_task = SchemaGen_op(
statistics=statistics_task.outputs['statistics'],
)
# Performs anomaly detection based on statistics and data schema.
validator_task = ExampleValidator_op(
statistics=statistics_task.outputs['statistics'],
schema=schema_task.outputs['schema'],
)
# Performs transformations and feature engineering in training and serving.
transform_task = Transform_op(
examples=examples_task.outputs['examples'],
schema=schema_task.outputs['schema'],
module_file=module_file,
)
trainer_task = Trainer_op(
module_file=module_file,
examples=transform_task.outputs['transformed_examples'],
schema=schema_task.outputs['schema'],
transform_graph=transform_task.outputs['transform_graph'],
train_args=json.dumps({'num_steps': 10000}),
eval_args=json.dumps({'num_steps': 5000}),
)
    # Uses TFMA to compute evaluation statistics over features of a model.
model_analyzer = Evaluator_op(
examples=examples_task.outputs['examples'],
model=trainer_task.outputs['model'],
feature_slicing_spec=json.dumps({
'specs': [
{'column_for_slicing': ['trip_start_hour']},
],
}),
)
kfp.Client(host=kfp_endpoint).create_run_from_pipeline_func(
tfx_pipeline,
arguments=dict(
input_data_uri=input_data_uri,
),
) | 8,100 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx | kubeflow_public_repos/kfp-tekton-backend/components/tfx/_samples/TFX_Dataflow_pipeline.ipynb | # Put your KFP cluster endpoint URL here if working from GCP notebooks (or local notebooks). ('https://xxxxx.notebooks.googleusercontent.com/')
kfp_endpoint='https://XXXXX.notebooks.googleusercontent.com/'
# Replace with your GCS bucket, project ID and GCP region
root_output_uri = '<your gcs bucket>'
project_id = '<your project id>'
gcp_region = '<your gcp region>'
beam_pipeline_args = [
'--runner=DataflowRunner',
'--experiments=shuffle_mode=auto',
'--project=' + project_id,
'--temp_location=' + root_output_uri + '/tmp',
'--region=' + gcp_region,
'--disk_size_gb=50',
]
input_data_uri = 'gs://ml-pipeline-playground/tensorflow-tfx-repo/tfx/components/testdata/external/csv'
#Only S3/GCS is supported for now.
module_file = 'gs://ml-pipeline-playground/tensorflow-tfx-repo/v0.21.4/tfx/examples/chicago_taxi_pipeline/taxi_utils.py'
import kfp
import json
from kfp.components import load_component_from_url
CsvExampleGen_op = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/0cc4bbd4/components/tfx/ExampleGen/CsvExampleGen/with_URI_IO/component.yaml')
StatisticsGen_op = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/0cc4bbd4/components/tfx/StatisticsGen/with_URI_IO/component.yaml')
SchemaGen_op = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/0cc4bbd4/components/tfx/SchemaGen/with_URI_IO/component.yaml')
ExampleValidator_op = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/0cc4bbd4/components/tfx/ExampleValidator/with_URI_IO/component.yaml')
Transform_op = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/0cc4bbd4/components/tfx/Transform/with_URI_IO/component.yaml')
Trainer_op = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/0cc4bbd4/components/tfx/Trainer/with_URI_IO/component.yaml')
Evaluator_op = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/0cc4bbd4/components/tfx/Evaluator/with_URI_IO/component.yaml')
def tfx_pipeline(
input_data_uri,
root_output_uri,
):
generated_output_uri = root_output_uri + kfp.dsl.EXECUTION_ID_PLACEHOLDER
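    # Component outputs are written under root_output_uri followed by the run's execution ID
    # (this assumes root_output_uri ends with '/'; otherwise the ID is appended directly to it).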
examples_task = CsvExampleGen_op(
input_uri=input_data_uri,
input_config=json.dumps({
"splits": [
{'name': 'data', 'pattern': '*.csv'},
]
}),
output_config=json.dumps({
"splitConfig": {
"splits": [
{'name': 'train', 'hash_buckets': 2},
{'name': 'eval', 'hash_buckets': 1},
]
}
}),
beam_pipeline_args=beam_pipeline_args,
output_examples_uri=generated_output_uri,
)
statistics_task = StatisticsGen_op(
examples_uri=examples_task.outputs['examples_uri'],
beam_pipeline_args=beam_pipeline_args,
output_statistics_uri=generated_output_uri,
)
schema_task = SchemaGen_op(
statistics_uri=statistics_task.outputs['statistics_uri'],
beam_pipeline_args=beam_pipeline_args,
output_schema_uri=generated_output_uri,
)
# Performs anomaly detection based on statistics and data schema.
validator_task = ExampleValidator_op(
statistics_uri=statistics_task.outputs['statistics_uri'],
schema_uri=schema_task.outputs['schema_uri'],
beam_pipeline_args=beam_pipeline_args,
output_anomalies_uri=generated_output_uri,
)
# Performs transformations and feature engineering in training and serving.
transform_task = Transform_op(
examples_uri=examples_task.outputs['examples_uri'],
schema_uri=schema_task.outputs['schema_uri'],
module_file=module_file,
beam_pipeline_args=beam_pipeline_args,
output_transform_graph_uri=generated_output_uri + '/transform_graph',
output_transformed_examples_uri=generated_output_uri + '/transformed_examples',
)
trainer_task = Trainer_op(
module_file=module_file,
examples_uri=transform_task.outputs['transformed_examples_uri'],
schema_uri=schema_task.outputs['schema_uri'],
transform_graph_uri=transform_task.outputs['transform_graph_uri'],
train_args=json.dumps({'num_steps': 10000}),
eval_args=json.dumps({'num_steps': 5000}),
beam_pipeline_args=beam_pipeline_args,
output_model_uri=generated_output_uri,
)
    # Uses TFMA to compute evaluation statistics over features of a model.
model_analyzer = Evaluator_op(
examples_uri=examples_task.outputs['examples_uri'],
model_uri=trainer_task.outputs['model_uri'],
feature_slicing_spec=json.dumps({
'specs': [
{'column_for_slicing': ['trip_start_hour']},
],
}),
beam_pipeline_args=beam_pipeline_args,
output_evaluation_uri=generated_output_uri + '/evaluation',
output_blessing_uri=generated_output_uri + '/blessing',
)
kfp.Client(host=kfp_endpoint).create_run_from_pipeline_func(
tfx_pipeline,
arguments=dict(
input_data_uri=input_data_uri,
root_output_uri=root_output_uri,
),
) | 8,101 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx | kubeflow_public_repos/kfp-tekton-backend/components/tfx/Evaluator/component.py | # flake8: noqa TODO
from kfp.components import InputPath, OutputPath
def Evaluator(
evaluation_path: OutputPath('ModelEvaluation'),
examples_path: InputPath('Examples'),
model_path: InputPath('Model'),
baseline_model_path: InputPath('Model') = None,
schema_path: InputPath('Schema') = None,
feature_slicing_spec: {'JsonObject': {'data_type': 'proto:tfx.components.evaluator.FeatureSlicingSpec'}} = None, # TODO: Replace feature_slicing_spec with eval_config
eval_config: {'JsonObject': {'data_type': 'proto:tensorflow_model_analysis.EvalConfig'}} = None,
fairness_indicator_thresholds: list = None, # List[str]
#blessing_path: OutputPath('ModelBlessing') = None, # Optional outputs are not supported yet
):
"""
A TFX component to evaluate models trained by a TFX Trainer component.
The Evaluator component performs model evaluations in the TFX pipeline and
the resultant metrics can be viewed in a Jupyter notebook. It uses the
input examples generated from the
[ExampleGen](https://www.tensorflow.org/tfx/guide/examplegen)
component to evaluate the models.
Specifically, it can provide:
- metrics computed on entire training and eval dataset
- tracking metrics over time
- model quality performance on different feature slices
## Exporting the EvalSavedModel in Trainer
In order to setup Evaluator in a TFX pipeline, an EvalSavedModel needs to be
exported during training, which is a special SavedModel containing
annotations for the metrics, features, labels, and so on in your model.
Evaluator uses this EvalSavedModel to compute metrics.
As part of this, the Trainer component creates eval_input_receiver_fn,
analogous to the serving_input_receiver_fn, which will extract the features
and labels from the input data. As with serving_input_receiver_fn, there are
utility functions to help with this.
Please see https://www.tensorflow.org/tfx/model_analysis for more details.
Args:
examples: A Channel of 'Examples' type, usually produced by ExampleGen
component. @Ark-kun: Must have the eval split. _required_
model: A Channel of 'Model' type, usually produced by
Trainer component.
feature_slicing_spec:
[evaluator_pb2.FeatureSlicingSpec](https://github.com/tensorflow/tfx/blob/master/tfx/proto/evaluator.proto)
instance that describes how Evaluator should slice the data.
Returns:
evaluation: Channel of `ModelEvaluation` to store the evaluation results.
Either `model_exports` or `model` must be present in the input arguments.
"""
from tfx.components.evaluator.component import Evaluator as component_class
#Generated code
import json
import os
import tensorflow
from google.protobuf import json_format, message
from tfx.types import Artifact, channel_utils, artifact_utils
arguments = locals().copy()
component_class_args = {}
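    # Parse execution parameters: proto-typed parameters arrive as JSON strings and are
    # converted into proto message objects before being passed to the component class.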
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value_obj = argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # Maybe FIX: execution_parameter.type can also be a tuple
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
component_class_args[name] = argument_value_obj
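    # Wrap each provided input artifact path in a TFX channel so the component spec can consume it.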
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments[name + '_path']
if artifact_path:
artifact = channel_parameter.type()
artifact.uri = artifact_path + '/' # ?
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments.get(name + '_path', None)
if base_artifact_path:
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
#executor = component_class.EXECUTOR_SPEC.executor_class() # Same
executor = component_class_instance.executor_spec.executor_class()
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
if __name__ == '__main__':
import kfp
kfp.components.func_to_container_op(
Evaluator,
base_image='tensorflow/tfx:0.21.4',
output_component_file='component.yaml'
)
| 8,102 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx | kubeflow_public_repos/kfp-tekton-backend/components/tfx/Evaluator/component.yaml | name: Evaluator
description: |-
A TFX component to evaluate models trained by a TFX Trainer component.
The Evaluator component performs model evaluations in the TFX pipeline and
the resultant metrics can be viewed in a Jupyter notebook. It uses the
input examples generated from the
[ExampleGen](https://www.tensorflow.org/tfx/guide/examplegen)
component to evaluate the models.
Specifically, it can provide:
- metrics computed on entire training and eval dataset
- tracking metrics over time
- model quality performance on different feature slices
## Exporting the EvalSavedModel in Trainer
In order to setup Evaluator in a TFX pipeline, an EvalSavedModel needs to be
exported during training, which is a special SavedModel containing
annotations for the metrics, features, labels, and so on in your model.
Evaluator uses this EvalSavedModel to compute metrics.
As part of this, the Trainer component creates eval_input_receiver_fn,
analogous to the serving_input_receiver_fn, which will extract the features
and labels from the input data. As with serving_input_receiver_fn, there are
utility functions to help with this.
Please see https://www.tensorflow.org/tfx/model_analysis for more details.
Args:
examples: A Channel of 'Examples' type, usually produced by ExampleGen
component. @Ark-kun: Must have the eval split. _required_
model: A Channel of 'Model' type, usually produced by
Trainer component.
feature_slicing_spec:
[evaluator_pb2.FeatureSlicingSpec](https://github.com/tensorflow/tfx/blob/master/tfx/proto/evaluator.proto)
instance that describes how Evaluator should slice the data.
Returns:
evaluation: Channel of `ModelEvaluation` to store the evaluation results.
Either `model_exports` or `model` must be present in the input arguments.
inputs:
- {name: examples, type: Examples}
- {name: model, type: Model}
- {name: baseline_model, type: Model, optional: true}
- {name: schema, type: Schema, optional: true}
- name: feature_slicing_spec
type:
JsonObject: {data_type: 'proto:tfx.components.evaluator.FeatureSlicingSpec'}
optional: true
- name: eval_config
type:
JsonObject: {data_type: 'proto:tensorflow_model_analysis.EvalConfig'}
optional: true
- {name: fairness_indicator_thresholds, type: JsonArray, optional: true}
outputs:
- {name: evaluation, type: ModelEvaluation}
implementation:
container:
image: tensorflow/tfx:0.21.4
command:
- python3
- -u
- -c
- |
def _make_parent_dirs_and_return_path(file_path: str):
import os
os.makedirs(os.path.dirname(file_path), exist_ok=True)
return file_path
def Evaluator(
evaluation_path ,
examples_path ,
model_path ,
baseline_model_path = None,
schema_path = None,
feature_slicing_spec = None, # TODO: Replace feature_slicing_spec with eval_config
eval_config = None,
fairness_indicator_thresholds = None, # List[str]
#blessing_path: OutputPath('ModelBlessing') = None, # Optional outputs are not supported yet
):
"""
A TFX component to evaluate models trained by a TFX Trainer component.
The Evaluator component performs model evaluations in the TFX pipeline and
the resultant metrics can be viewed in a Jupyter notebook. It uses the
input examples generated from the
[ExampleGen](https://www.tensorflow.org/tfx/guide/examplegen)
component to evaluate the models.
Specifically, it can provide:
- metrics computed on entire training and eval dataset
- tracking metrics over time
- model quality performance on different feature slices
## Exporting the EvalSavedModel in Trainer
In order to setup Evaluator in a TFX pipeline, an EvalSavedModel needs to be
exported during training, which is a special SavedModel containing
annotations for the metrics, features, labels, and so on in your model.
Evaluator uses this EvalSavedModel to compute metrics.
As part of this, the Trainer component creates eval_input_receiver_fn,
analogous to the serving_input_receiver_fn, which will extract the features
and labels from the input data. As with serving_input_receiver_fn, there are
utility functions to help with this.
Please see https://www.tensorflow.org/tfx/model_analysis for more details.
Args:
examples: A Channel of 'Examples' type, usually produced by ExampleGen
component. @Ark-kun: Must have the eval split. _required_
model: A Channel of 'Model' type, usually produced by
Trainer component.
feature_slicing_spec:
[evaluator_pb2.FeatureSlicingSpec](https://github.com/tensorflow/tfx/blob/master/tfx/proto/evaluator.proto)
instance that describes how Evaluator should slice the data.
Returns:
evaluation: Channel of `ModelEvaluation` to store the evaluation results.
Either `model_exports` or `model` must be present in the input arguments.
"""
from tfx.components.evaluator.component import Evaluator as component_class
#Generated code
import json
import os
import tensorflow
from google.protobuf import json_format, message
from tfx.types import Artifact, channel_utils, artifact_utils
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value_obj = argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # Maybe FIX: execution_parameter.type can also be a tuple
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments[name + '_path']
if artifact_path:
artifact = channel_parameter.type()
artifact.uri = artifact_path + '/' # ?
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments.get(name + '_path', None)
if base_artifact_path:
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
#executor = component_class.EXECUTOR_SPEC.executor_class() # Same
executor = component_class_instance.executor_spec.executor_class()
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
import json
import argparse
_parser = argparse.ArgumentParser(prog='Evaluator', description="A TFX component to evaluate models trained by a TFX Trainer component.\n\n The Evaluator component performs model evaluations in the TFX pipeline and\n the resultant metrics can be viewed in a Jupyter notebook. It uses the\n input examples generated from the\n [ExampleGen](https://www.tensorflow.org/tfx/guide/examplegen)\n component to evaluate the models.\n\n Specifically, it can provide:\n - metrics computed on entire training and eval dataset\n - tracking metrics over time\n - model quality performance on different feature slices\n\n ## Exporting the EvalSavedModel in Trainer\n\n In order to setup Evaluator in a TFX pipeline, an EvalSavedModel needs to be\n exported during training, which is a special SavedModel containing\n annotations for the metrics, features, labels, and so on in your model.\n Evaluator uses this EvalSavedModel to compute metrics.\n\n As part of this, the Trainer component creates eval_input_receiver_fn,\n analogous to the serving_input_receiver_fn, which will extract the features\n and labels from the input data. As with serving_input_receiver_fn, there are\n utility functions to help with this.\n\n Please see https://www.tensorflow.org/tfx/model_analysis for more details.\n\n Args:\n examples: A Channel of 'Examples' type, usually produced by ExampleGen\n component. @Ark-kun: Must have the eval split. _required_\n model: A Channel of 'Model' type, usually produced by\n Trainer component.\n feature_slicing_spec:\n [evaluator_pb2.FeatureSlicingSpec](https://github.com/tensorflow/tfx/blob/master/tfx/proto/evaluator.proto)\n instance that describes how Evaluator should slice the data.\n Returns:\n evaluation: Channel of `ModelEvaluation` to store the evaluation results.\n\n Either `model_exports` or `model` must be present in the input arguments.")
_parser.add_argument("--examples", dest="examples_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--model", dest="model_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--baseline-model", dest="baseline_model_path", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--schema", dest="schema_path", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--feature-slicing-spec", dest="feature_slicing_spec", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--eval-config", dest="eval_config", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--fairness-indicator-thresholds", dest="fairness_indicator_thresholds", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--evaluation", dest="evaluation_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = Evaluator(**_parsed_args)
_output_serializers = [
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- --examples
- {inputPath: examples}
- --model
- {inputPath: model}
- if:
cond: {isPresent: baseline_model}
then:
- --baseline-model
- {inputPath: baseline_model}
- if:
cond: {isPresent: schema}
then:
- --schema
- {inputPath: schema}
- if:
cond: {isPresent: feature_slicing_spec}
then:
- --feature-slicing-spec
- {inputValue: feature_slicing_spec}
- if:
cond: {isPresent: eval_config}
then:
- --eval-config
- {inputValue: eval_config}
- if:
cond: {isPresent: fairness_indicator_thresholds}
then:
- --fairness-indicator-thresholds
- {inputValue: fairness_indicator_thresholds}
- --evaluation
- {outputPath: evaluation}
| 8,103 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx/Evaluator | kubeflow_public_repos/kfp-tekton-backend/components/tfx/Evaluator/with_URI_IO/component.py | # flake8: noqa
from typing import NamedTuple
def Evaluator(
examples_uri: 'ExamplesUri',
model_uri: 'ModelUri',
output_evaluation_uri: 'ModelEvaluationUri',
output_blessing_uri: 'ModelBlessingUri',
baseline_model_uri: 'ModelUri' = None,
schema_uri: 'SchemaUri' = None,
eval_config: {'JsonObject': {'data_type': 'proto:tensorflow_model_analysis.EvalConfig'}} = None,
feature_slicing_spec: {'JsonObject': {'data_type': 'proto:tfx.components.evaluator.FeatureSlicingSpec'}} = None,
fairness_indicator_thresholds: list = None,
beam_pipeline_args: list = None,
) -> NamedTuple('Outputs', [
('evaluation_uri', 'ModelEvaluationUri'),
('blessing_uri', 'ModelBlessingUri'),
]):
from tfx.components import Evaluator as component_class
#Generated code
import json
import os
import tempfile
import tensorflow
from google.protobuf import json_format, message
from tfx.types import channel_utils, artifact_utils
from tfx.components.base import base_executor
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message):
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
else:
argument_value_obj = argument_value
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments.get(name + '_uri') or arguments.get(name + '_path')
if artifact_path:
artifact = channel_parameter.type()
artifact.uri = artifact_path.rstrip('/') + '/' # Some TFX components require that the artifact URIs end with a slash
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
# Workaround for https://github.com/tensorflow/tensorflow/issues/39167
subdirs = [subdir.rstrip('/') for subdir in subdirs]
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = channel_utils.unwrap_channel_dict(component_class_instance.inputs.get_all())
output_dict = channel_utils.unwrap_channel_dict(component_class_instance.outputs.get_all())
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments.get('output_' + name + '_uri') or arguments.get(name + '_path')
if base_artifact_path:
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
# Workaround for a TFX+Beam bug to make DataflowRunner work.
# Remove after the next release that has https://github.com/tensorflow/tfx/commit/ddb01c02426d59e8bd541e3fd3cbaaf68779b2df
import tfx
tfx.version.__version__ += 'dev'
executor_context = base_executor.BaseExecutor.Context(
beam_pipeline_args=beam_pipeline_args,
tmp_dir=tempfile.gettempdir(),
unique_id='tfx_component',
)
executor = component_class_instance.executor_spec.executor_class(executor_context)
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
return (output_evaluation_uri, output_blessing_uri, )
if __name__ == '__main__':
import kfp
kfp.components.create_component_from_func(
Evaluator,
base_image='tensorflow/tfx:0.21.4',
output_component_file='component.yaml'
)
| 8,104 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx/Evaluator | kubeflow_public_repos/kfp-tekton-backend/components/tfx/Evaluator/with_URI_IO/component.yaml | name: Evaluator
inputs:
- {name: examples_uri, type: ExamplesUri}
- {name: model_uri, type: ModelUri}
- {name: output_evaluation_uri, type: ModelEvaluationUri}
- {name: output_blessing_uri, type: ModelBlessingUri}
- {name: baseline_model_uri, type: ModelUri, optional: true}
- {name: schema_uri, type: SchemaUri, optional: true}
- name: eval_config
type:
JsonObject: {data_type: 'proto:tensorflow_model_analysis.EvalConfig'}
optional: true
- name: feature_slicing_spec
type:
JsonObject: {data_type: 'proto:tfx.components.evaluator.FeatureSlicingSpec'}
optional: true
- {name: fairness_indicator_thresholds, type: JsonArray, optional: true}
- {name: beam_pipeline_args, type: JsonArray, optional: true}
outputs:
- {name: evaluation_uri, type: ModelEvaluationUri}
- {name: blessing_uri, type: ModelBlessingUri}
implementation:
container:
image: tensorflow/tfx:0.21.4
command:
- python3
- -u
- -c
- |
def Evaluator(
examples_uri,
model_uri,
output_evaluation_uri,
output_blessing_uri,
baseline_model_uri = None,
schema_uri = None,
eval_config = None,
feature_slicing_spec = None,
fairness_indicator_thresholds = None,
beam_pipeline_args = None,
):
from tfx.components import Evaluator as component_class
#Generated code
import json
import os
import tempfile
import tensorflow
from google.protobuf import json_format, message
from tfx.types import channel_utils, artifact_utils
from tfx.components.base import base_executor
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message):
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
else:
argument_value_obj = argument_value
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments.get(name + '_uri') or arguments.get(name + '_path')
if artifact_path:
artifact = channel_parameter.type()
artifact.uri = artifact_path.rstrip('/') + '/' # Some TFX components require that the artifact URIs end with a slash
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
# Workaround for https://github.com/tensorflow/tensorflow/issues/39167
subdirs = [subdir.rstrip('/') for subdir in subdirs]
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = channel_utils.unwrap_channel_dict(component_class_instance.inputs.get_all())
output_dict = channel_utils.unwrap_channel_dict(component_class_instance.outputs.get_all())
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments.get('output_' + name + '_uri') or arguments.get(name + '_path')
if base_artifact_path:
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
# Workaround for a TFX+Beam bug to make DataflowRunner work.
# Remove after the next release that has https://github.com/tensorflow/tfx/commit/ddb01c02426d59e8bd541e3fd3cbaaf68779b2df
import tfx
tfx.version.__version__ += 'dev'
executor_context = base_executor.BaseExecutor.Context(
beam_pipeline_args=beam_pipeline_args,
tmp_dir=tempfile.gettempdir(),
unique_id='tfx_component',
)
executor = component_class_instance.executor_spec.executor_class(executor_context)
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
return (output_evaluation_uri, output_blessing_uri, )
import json
import argparse
_parser = argparse.ArgumentParser(prog='Evaluator', description='')
_parser.add_argument("--examples-uri", dest="examples_uri", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--model-uri", dest="model_uri", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--output-evaluation-uri", dest="output_evaluation_uri", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--output-blessing-uri", dest="output_blessing_uri", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--baseline-model-uri", dest="baseline_model_uri", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--schema-uri", dest="schema_uri", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--eval-config", dest="eval_config", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--feature-slicing-spec", dest="feature_slicing_spec", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--fairness-indicator-thresholds", dest="fairness_indicator_thresholds", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--beam-pipeline-args", dest="beam_pipeline_args", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("----output-paths", dest="_output_paths", type=str, nargs=2)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = Evaluator(**_parsed_args)
_output_serializers = [
str,
str,
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- --examples-uri
- {inputValue: examples_uri}
- --model-uri
- {inputValue: model_uri}
- --output-evaluation-uri
- {inputValue: output_evaluation_uri}
- --output-blessing-uri
- {inputValue: output_blessing_uri}
- if:
cond: {isPresent: baseline_model_uri}
then:
- --baseline-model-uri
- {inputValue: baseline_model_uri}
- if:
cond: {isPresent: schema_uri}
then:
- --schema-uri
- {inputValue: schema_uri}
- if:
cond: {isPresent: eval_config}
then:
- --eval-config
- {inputValue: eval_config}
- if:
cond: {isPresent: feature_slicing_spec}
then:
- --feature-slicing-spec
- {inputValue: feature_slicing_spec}
- if:
cond: {isPresent: fairness_indicator_thresholds}
then:
- --fairness-indicator-thresholds
- {inputValue: fairness_indicator_thresholds}
- if:
cond: {isPresent: beam_pipeline_args}
then:
- --beam-pipeline-args
- {inputValue: beam_pipeline_args}
- '----output-paths'
- {outputPath: evaluation_uri}
- {outputPath: blessing_uri}
| 8,105 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx | kubeflow_public_repos/kfp-tekton-backend/components/tfx/Transform/component.py | # flake8: noqa TODO
from kfp.components import InputPath, OutputPath
def Transform(
examples_path: InputPath('Examples'),
schema_path: InputPath('Schema'),
transform_graph_path: OutputPath('TransformGraph'),
transformed_examples_path: OutputPath('Examples'),
module_file: str = None,
preprocessing_fn: str = None,
custom_config: dict = None,
):
"""A TFX component to transform the input examples.
The Transform component wraps TensorFlow Transform (tf.Transform) to
preprocess data in a TFX pipeline. This component will load the
preprocessing_fn from input module file, preprocess both 'train' and 'eval'
splits of input examples, generate the `tf.Transform` output, and save both
transform function and transformed examples to orchestrator desired locations.
## Providing a preprocessing function
The TFX executor will use the estimator provided in the `module_file` file
to train the model. The Transform executor will look specifically for the
`preprocessing_fn()` function within that file.
An example of `preprocessing_fn()` can be found in the [user-supplied
code]((https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py))
of the TFX Chicago Taxi pipeline example.
Args:
examples: A Channel of 'Examples' type (required). This should
contain the two splits 'train' and 'eval'.
schema: A Channel of 'SchemaPath' type. This should contain a single
schema artifact.
module_file: The file path to a python module file, from which the
'preprocessing_fn' function will be loaded. The function must have the
following signature.
def preprocessing_fn(inputs: Dict[Text, Any]) -> Dict[Text, Any]:
...
where the values of input and returned Dict are either tf.Tensor or
tf.SparseTensor. Exactly one of 'module_file' or 'preprocessing_fn'
must be supplied.
preprocessing_fn: The path to python function that implements a
'preprocessing_fn'. See 'module_file' for expected signature of the
function. Exactly one of 'module_file' or 'preprocessing_fn' must
be supplied.
Returns:
transform_graph: Optional output 'TransformPath' channel for output of
'tf.Transform', which includes an exported Tensorflow graph suitable for
both training and serving;
transformed_examples: Optional output 'ExamplesPath' channel for
materialized transformed examples, which includes both 'train' and
'eval' splits.
Raises:
ValueError: When both or neither of 'module_file' and 'preprocessing_fn'
is supplied.
"""
from tfx.components.transform.component import Transform
component_class = Transform
#Generated code
import json
import os
import tensorflow
from google.protobuf import json_format, message
from tfx.types import Artifact, channel_utils, artifact_utils
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value_obj = argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # Maybe FIX: execution_parameter.type can also be a tuple
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments[name + '_path']
if artifact_path:
artifact = channel_parameter.type()
artifact.uri = artifact_path + '/' # ?
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments[name + '_path']
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
#executor = component_class.EXECUTOR_SPEC.executor_class() # Same
executor = component_class_instance.executor_spec.executor_class()
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
if __name__ == '__main__':
import kfp
kfp.components.func_to_container_op(
Transform,
base_image='tensorflow/tfx:0.21.4',
output_component_file='component.yaml'
)
| 8,106 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx | kubeflow_public_repos/kfp-tekton-backend/components/tfx/Transform/component.yaml | name: Transform
description: |-
A TFX component to transform the input examples.
The Transform component wraps TensorFlow Transform (tf.Transform) to
preprocess data in a TFX pipeline. This component will load the
preprocessing_fn from input module file, preprocess both 'train' and 'eval'
splits of input examples, generate the `tf.Transform` output, and save both
transform function and transformed examples to orchestrator desired locations.
## Providing a preprocessing function
The TFX executor will use the estimator provided in the `module_file` file
to train the model. The Transform executor will look specifically for the
`preprocessing_fn()` function within that file.
An example of `preprocessing_fn()` can be found in the [user-supplied
code]((https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py))
of the TFX Chicago Taxi pipeline example.
Args:
examples: A Channel of 'Examples' type (required). This should
contain the two splits 'train' and 'eval'.
schema: A Channel of 'SchemaPath' type. This should contain a single
schema artifact.
module_file: The file path to a python module file, from which the
'preprocessing_fn' function will be loaded. The function must have the
following signature.
def preprocessing_fn(inputs: Dict[Text, Any]) -> Dict[Text, Any]:
...
where the values of input and returned Dict are either tf.Tensor or
tf.SparseTensor. Exactly one of 'module_file' or 'preprocessing_fn'
must be supplied.
preprocessing_fn: The path to python function that implements a
'preprocessing_fn'. See 'module_file' for expected signature of the
function. Exactly one of 'module_file' or 'preprocessing_fn' must
be supplied.
Returns:
transform_graph: Optional output 'TransformPath' channel for output of
'tf.Transform', which includes an exported Tensorflow graph suitable for
both training and serving;
transformed_examples: Optional output 'ExamplesPath' channel for
materialized transformed examples, which includes both 'train' and
'eval' splits.
Raises:
ValueError: When both or neither of 'module_file' and 'preprocessing_fn'
is supplied.
inputs:
- {name: examples, type: Examples}
- {name: schema, type: Schema}
- {name: module_file, type: String, optional: true}
- {name: preprocessing_fn, type: String, optional: true}
- {name: custom_config, type: JsonObject, optional: true}
outputs:
- {name: transform_graph, type: TransformGraph}
- {name: transformed_examples, type: Examples}
implementation:
container:
image: tensorflow/tfx:0.21.4
command:
- python3
- -u
- -c
- |
def _make_parent_dirs_and_return_path(file_path: str):
import os
os.makedirs(os.path.dirname(file_path), exist_ok=True)
return file_path
def Transform(
examples_path ,
schema_path ,
transform_graph_path ,
transformed_examples_path ,
module_file = None,
preprocessing_fn = None,
custom_config = None,
):
"""A TFX component to transform the input examples.
The Transform component wraps TensorFlow Transform (tf.Transform) to
preprocess data in a TFX pipeline. This component will load the
preprocessing_fn from input module file, preprocess both 'train' and 'eval'
splits of input examples, generate the `tf.Transform` output, and save both
transform function and transformed examples to orchestrator desired locations.
## Providing a preprocessing function
The TFX executor will use the estimator provided in the `module_file` file
to train the model. The Transform executor will look specifically for the
`preprocessing_fn()` function within that file.
An example of `preprocessing_fn()` can be found in the [user-supplied
code]((https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py))
of the TFX Chicago Taxi pipeline example.
Args:
examples: A Channel of 'Examples' type (required). This should
contain the two splits 'train' and 'eval'.
schema: A Channel of 'SchemaPath' type. This should contain a single
schema artifact.
module_file: The file path to a python module file, from which the
'preprocessing_fn' function will be loaded. The function must have the
following signature.
def preprocessing_fn(inputs: Dict[Text, Any]) -> Dict[Text, Any]:
...
where the values of input and returned Dict are either tf.Tensor or
tf.SparseTensor. Exactly one of 'module_file' or 'preprocessing_fn'
must be supplied.
preprocessing_fn: The path to python function that implements a
'preprocessing_fn'. See 'module_file' for expected signature of the
function. Exactly one of 'module_file' or 'preprocessing_fn' must
be supplied.
Returns:
transform_graph: Optional output 'TransformPath' channel for output of
'tf.Transform', which includes an exported Tensorflow graph suitable for
both training and serving;
transformed_examples: Optional output 'ExamplesPath' channel for
materialized transformed examples, which includes both 'train' and
'eval' splits.
Raises:
ValueError: When both or neither of 'module_file' and 'preprocessing_fn'
is supplied.
"""
from tfx.components.transform.component import Transform
component_class = Transform
#Generated code
import json
import os
import tensorflow
from google.protobuf import json_format, message
from tfx.types import Artifact, channel_utils, artifact_utils
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value_obj = argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # Maybe FIX: execution_parameter.type can also be a tuple
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments[name + '_path']
if artifact_path:
artifact = channel_parameter.type()
artifact.uri = artifact_path + '/' # ?
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments[name + '_path']
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
#executor = component_class.EXECUTOR_SPEC.executor_class() # Same
executor = component_class_instance.executor_spec.executor_class()
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
import json
import argparse
_parser = argparse.ArgumentParser(prog='Transform', description="A TFX component to transform the input examples.\n\n The Transform component wraps TensorFlow Transform (tf.Transform) to\n preprocess data in a TFX pipeline. This component will load the\n preprocessing_fn from input module file, preprocess both 'train' and 'eval'\n splits of input examples, generate the `tf.Transform` output, and save both\n transform function and transformed examples to orchestrator desired locations.\n\n ## Providing a preprocessing function\n The TFX executor will use the estimator provided in the `module_file` file\n to train the model. The Transform executor will look specifically for the\n `preprocessing_fn()` function within that file.\n\n An example of `preprocessing_fn()` can be found in the [user-supplied\n code]((https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py))\n of the TFX Chicago Taxi pipeline example.\n\n Args:\n examples: A Channel of 'Examples' type (required). This should\n contain the two splits 'train' and 'eval'.\n schema: A Channel of 'SchemaPath' type. This should contain a single\n schema artifact.\n module_file: The file path to a python module file, from which the\n 'preprocessing_fn' function will be loaded. The function must have the\n following signature.\n\n def preprocessing_fn(inputs: Dict[Text, Any]) -> Dict[Text, Any]:\n ...\n\n where the values of input and returned Dict are either tf.Tensor or\n tf.SparseTensor. Exactly one of 'module_file' or 'preprocessing_fn'\n must be supplied.\n preprocessing_fn: The path to python function that implements a\n 'preprocessing_fn'. See 'module_file' for expected signature of the\n function. Exactly one of 'module_file' or 'preprocessing_fn' must\n be supplied.\n\n Returns:\n transform_graph: Optional output 'TransformPath' channel for output of\n 'tf.Transform', which includes an exported Tensorflow graph suitable for\n both training and serving;\n transformed_examples: Optional output 'ExamplesPath' channel for\n materialized transformed examples, which includes both 'train' and\n 'eval' splits.\n\n Raises:\n ValueError: When both or neither of 'module_file' and 'preprocessing_fn'\n is supplied.")
_parser.add_argument("--examples", dest="examples_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--schema", dest="schema_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--module-file", dest="module_file", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--preprocessing-fn", dest="preprocessing_fn", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--custom-config", dest="custom_config", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--transform-graph", dest="transform_graph_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--transformed-examples", dest="transformed_examples_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = Transform(**_parsed_args)
_output_serializers = [
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- --examples
- {inputPath: examples}
- --schema
- {inputPath: schema}
- if:
cond: {isPresent: module_file}
then:
- --module-file
- {inputValue: module_file}
- if:
cond: {isPresent: preprocessing_fn}
then:
- --preprocessing-fn
- {inputValue: preprocessing_fn}
- if:
cond: {isPresent: custom_config}
then:
- --custom-config
- {inputValue: custom_config}
- --transform-graph
- {outputPath: transform_graph}
- --transformed-examples
- {outputPath: transformed_examples}
| 8,107 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx/Transform | kubeflow_public_repos/kfp-tekton-backend/components/tfx/Transform/with_URI_IO/component.py | # flake8: noqa
from typing import NamedTuple
def Transform(
examples_uri: 'ExamplesUri',
schema_uri: 'SchemaUri',
output_transform_graph_uri: 'TransformGraphUri',
output_transformed_examples_uri: 'ExamplesUri',
module_file: str = None,
preprocessing_fn: str = None,
custom_config: dict = None,
beam_pipeline_args: list = None,
) -> NamedTuple('Outputs', [
('transform_graph_uri', 'TransformGraphUri'),
('transformed_examples_uri', 'ExamplesUri'),
]):
from tfx.components import Transform as component_class
#Generated code
import json
import os
import tempfile
import tensorflow
from google.protobuf import json_format, message
from tfx.types import channel_utils, artifact_utils
from tfx.components.base import base_executor
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message):
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
else:
argument_value_obj = argument_value
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments.get(name + '_uri') or arguments.get(name + '_path')
if artifact_path:
artifact = channel_parameter.type()
artifact.uri = artifact_path.rstrip('/') + '/' # Some TFX components require that the artifact URIs end with a slash
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
# Workaround for https://github.com/tensorflow/tensorflow/issues/39167
subdirs = [subdir.rstrip('/') for subdir in subdirs]
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = channel_utils.unwrap_channel_dict(component_class_instance.inputs.get_all())
output_dict = channel_utils.unwrap_channel_dict(component_class_instance.outputs.get_all())
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments.get('output_' + name + '_uri') or arguments.get(name + '_path')
if base_artifact_path:
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
# Workaround for a TFX+Beam bug to make DataflowRunner work.
# Remove after the next release that has https://github.com/tensorflow/tfx/commit/ddb01c02426d59e8bd541e3fd3cbaaf68779b2df
import tfx
tfx.version.__version__ += 'dev'
executor_context = base_executor.BaseExecutor.Context(
beam_pipeline_args=beam_pipeline_args,
tmp_dir=tempfile.gettempdir(),
unique_id='tfx_component',
)
executor = component_class_instance.executor_spec.executor_class(executor_context)
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
return (output_transform_graph_uri, output_transformed_examples_uri, )
if __name__ == '__main__':
import kfp
kfp.components.create_component_from_func(
Transform,
base_image='tensorflow/tfx:0.21.4',
output_component_file='component.yaml'
)
| 8,108 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx/Transform | kubeflow_public_repos/kfp-tekton-backend/components/tfx/Transform/with_URI_IO/component.yaml | name: Transform
inputs:
- {name: examples_uri, type: ExamplesUri}
- {name: schema_uri, type: SchemaUri}
- {name: output_transform_graph_uri, type: TransformGraphUri}
- {name: output_transformed_examples_uri, type: ExamplesUri}
- {name: module_file, type: String, optional: true}
- {name: preprocessing_fn, type: String, optional: true}
- {name: custom_config, type: JsonObject, optional: true}
- {name: beam_pipeline_args, type: JsonArray, optional: true}
outputs:
- {name: transform_graph_uri, type: TransformGraphUri}
- {name: transformed_examples_uri, type: ExamplesUri}
implementation:
container:
image: tensorflow/tfx:0.21.4
command:
- python3
- -u
- -c
- |
def Transform(
examples_uri,
schema_uri,
output_transform_graph_uri,
output_transformed_examples_uri,
module_file = None,
preprocessing_fn = None,
custom_config = None,
beam_pipeline_args = None,
):
from tfx.components import Transform as component_class
#Generated code
import json
import os
import tempfile
import tensorflow
from google.protobuf import json_format, message
from tfx.types import channel_utils, artifact_utils
from tfx.components.base import base_executor
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message):
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
else:
argument_value_obj = argument_value
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments.get(name + '_uri') or arguments.get(name + '_path')
if artifact_path:
artifact = channel_parameter.type()
artifact.uri = artifact_path.rstrip('/') + '/' # Some TFX components require that the artifact URIs end with a slash
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
# Workaround for https://github.com/tensorflow/tensorflow/issues/39167
subdirs = [subdir.rstrip('/') for subdir in subdirs]
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = channel_utils.unwrap_channel_dict(component_class_instance.inputs.get_all())
output_dict = channel_utils.unwrap_channel_dict(component_class_instance.outputs.get_all())
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments.get('output_' + name + '_uri') or arguments.get(name + '_path')
if base_artifact_path:
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
# Workaround for a TFX+Beam bug to make DataflowRunner work.
# Remove after the next release that has https://github.com/tensorflow/tfx/commit/ddb01c02426d59e8bd541e3fd3cbaaf68779b2df
import tfx
tfx.version.__version__ += 'dev'
executor_context = base_executor.BaseExecutor.Context(
beam_pipeline_args=beam_pipeline_args,
tmp_dir=tempfile.gettempdir(),
unique_id='tfx_component',
)
executor = component_class_instance.executor_spec.executor_class(executor_context)
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
return (output_transform_graph_uri, output_transformed_examples_uri, )
import json
import argparse
_parser = argparse.ArgumentParser(prog='Transform', description='')
_parser.add_argument("--examples-uri", dest="examples_uri", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--schema-uri", dest="schema_uri", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--output-transform-graph-uri", dest="output_transform_graph_uri", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--output-transformed-examples-uri", dest="output_transformed_examples_uri", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--module-file", dest="module_file", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--preprocessing-fn", dest="preprocessing_fn", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--custom-config", dest="custom_config", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--beam-pipeline-args", dest="beam_pipeline_args", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("----output-paths", dest="_output_paths", type=str, nargs=2)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = Transform(**_parsed_args)
_output_serializers = [
str,
str,
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- --examples-uri
- {inputValue: examples_uri}
- --schema-uri
- {inputValue: schema_uri}
- --output-transform-graph-uri
- {inputValue: output_transform_graph_uri}
- --output-transformed-examples-uri
- {inputValue: output_transformed_examples_uri}
- if:
cond: {isPresent: module_file}
then:
- --module-file
- {inputValue: module_file}
- if:
cond: {isPresent: preprocessing_fn}
then:
- --preprocessing-fn
- {inputValue: preprocessing_fn}
- if:
cond: {isPresent: custom_config}
then:
- --custom-config
- {inputValue: custom_config}
- if:
cond: {isPresent: beam_pipeline_args}
then:
- --beam-pipeline-args
- {inputValue: beam_pipeline_args}
- '----output-paths'
- {outputPath: transform_graph_uri}
- {outputPath: transformed_examples_uri}
| 8,109 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/ffdl | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/ffdl/serve/README.md | # Seldon Core - Serve PyTorch Model
## Intended Use
Serve PyTorch models remotely as a web service using Seldon Core
## Run-Time Parameters:
Name | Description
:--- | :----------
model_id | Required. Model training_id from Fabric for Deep Learning
deployment_name | Required. Deployment name for the seldon service
model_class_name | PyTorch model class name. Default: `ModelClass`
model_class_file | File that contains the PyTorch model class. Default: `model_class.py`
serving_image | Model serving image. Default: `aipipeline/seldon-pytorch:0.1`
## Output:
Name | Description
:--- | :----------
output | Model Serving status
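The `output` value is the JSON status document written by `src/serve.py` (the result of `run_safe`). Below is a minimal sketch of parsing it in a downstream Python step; the helper name is hypothetical, and the keys assume the shape produced by `run_safe` (`deployment_status`, `deployment_url`):
```python
import json

def report_endpoint(serving_status: str):
    # serving_status is the raw string received from this component's `output`.
    status = json.loads(serving_status)
    print('Deployment status:', status.get('deployment_status'))
    print('Scoring endpoint :', status.get('deployment_url'))
```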
## Sample
Note: The sample code below works both in an IPython notebook and as standalone Python code.
### Set sample parameters
```python
# Parameters
model_id = '<Model training_id from the FfDL training run>'
deployment_name = '<Deployment name for the Seldon service>'
model_class_name = '<PyTorch model class name, e.g. ModelClass>'
model_class_file = '<File that contains the PyTorch model class, e.g. model_class.py>'
serving_image = 'aipipeline/seldon-pytorch:0.1'
```
```python
# Additional Parameters
EXPERIMENT_NAME = 'Seldon Core - Serve PyTorch Model'
COMPONENT_SPEC_URI = 'https://raw.githubusercontent.com/kubeflow/pipelines/eb830cd73ca148e5a1a6485a9374c2dc068314bc/components/ibm-components/ffdl/serve/component.yaml'
```
### Install KFP SDK
Install the KFP SDK (uncomment the code below if the SDK is not already installed).
```python
#KFP_PACKAGE = 'https://storage.googleapis.com/ml-pipeline/release/0.1.12/kfp.tar.gz'
#!pip3 install $KFP_PACKAGE --upgrade
```
### Load component definitions
```python
import kfp.components as comp
ffdl_serve_op = comp.load_component_from_url(COMPONENT_SPEC_URI)
display(ffdl_serve_op)
```
### Here is an illustrative pipeline that uses the component
```python
import kfp.dsl as dsl
import ai_pipeline_params as params
import json
@dsl.pipeline(
name='FfDL Serve Pipeline',
    description='FfDL Serve pipeline leveraging Seldon'
)
def ffdl_serve_pipeline(
model_id,
deployment_name,
model_class_name,
model_class_file,
serving_image
):
    ffdl_serve_op(model_id, deployment_name, model_class_name, model_class_file, serving_image).apply(params.use_ai_pipeline_params('kfp-creds'))
```
### Compile the pipeline
```python
pipeline_func = ffdl_serve_pipeline
pipeline_filename = pipeline_func.__name__ + '.pipeline.tar.gz'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
```
### Submit the pipeline for execution
```python
#Specify pipeline argument values
arguments = {}
#Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
```
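### Call the deployed model (optional)
Once the run finishes, the serving component prints the Seldon scoring endpoint (see `src/serve.py`). Below is a minimal sketch of sending a prediction request to that endpoint; the URL is a placeholder to copy from the component output, and the payload assumes Seldon's standard `ndarray` REST format:
```python
import requests

# Placeholder: copy the real URL from the component output.
SCORING_URL = 'http://<public-ip>:<ambassador-port>/seldon/<deployment-name>/api/v0.1/predictions'
# Replace with the feature values your model expects.
payload = {'data': {'ndarray': [[0.0, 0.1, 0.2]]}}

response = requests.post(SCORING_URL, json=payload)
print(response.status_code, response.json())
```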
| 8,110 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/ffdl | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/ffdl/serve/component.yaml | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: 'Serve PyTorch Model - Seldon Core'
description: |
Serve PyTorch Models remotely as web service using Seldon Core
metadata:
annotations: {platform: 'OpenSource'}
inputs:
- {name: model_id, description: 'Required. Model training_id from Fabric for Deep Learning'}
- {name: deployment_name, description: 'Required. Deployment name for the seldon service'}
- {name: model_class_name, description: 'PyTorch model class name', default: 'ModelClass'}
- {name: model_class_file, description: 'File that contains the PyTorch model class', default: 'model_class.py'}
- {name: serving_image, description: 'Model serving images', default: 'aipipeline/seldon-pytorch:0.1'}
outputs:
- {name: output, description: 'Model Serving status'}
implementation:
container:
image: docker.io/aipipeline/ffdl-serve:latest
command: ['python']
args: [
-u, serve.py,
--model_id, {inputValue: model_id},
--deployment_name, {inputValue: deployment_name},
--model_class_name, {inputValue: model_class_name},
--model_class_file, {inputValue: model_class_file},
--serving_image, {inputValue: serving_image}
]
fileOutputs:
output: /tmp/deployment_result.txt
| 8,111 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/ffdl | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/ffdl/serve/Dockerfile | FROM python:3.6-slim
RUN pip install kubernetes Flask flask-cors requests
ENV APP_HOME /app
COPY src $APP_HOME
WORKDIR $APP_HOME
ENTRYPOINT ["python", "serve.py"]
| 8,112 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/ffdl/serve | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/ffdl/serve/src/app.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import logging
import re
import requests
import sys
import traceback
from flask import Flask, request, abort
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
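# This module exposes a small Flask REST API for managing the Seldon deployment of an
# FfDL-trained PyTorch model:
#   POST   /  create or update the SeldonDeployment described by the JSON request body
#   GET    /  report deployment status for the given query parameters
#   DELETE /  delete the deployment
# serve.py imports run_safe() and calls it directly instead of going through HTTP.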
# Setup Logging
logging.basicConfig(level="INFO", format='%(levelname)s: %(message)s')
LOG = logging.getLogger("deploy_seldon")
def apply_oid_token_monkey_patch():
LOG.warning("applying monkey-patch for https://github.com/kubernetes-client/python/issues/525")
import base64
import json
import kubernetes
from datetime import datetime, timezone
from kubernetes.config.kube_config import _is_expired
def load_oid_token_patched(self, provider):
if 'auth-provider' not in self._user:
return
provider = self._user['auth-provider']
if 'name' not in provider or 'config' not in provider or provider['name'] != 'oidc':
return
parts = provider['config']['id-token'].split('.')
if len(parts) != 3: # Not a valid JWT
return None
padding = (4 - len(parts[1]) % 4) * '='
jwt_attributes = json.loads(base64.b64decode(parts[1] + padding).decode('utf-8'))
expire = jwt_attributes.get('exp')
if (expire is not None) and _is_expired(datetime.fromtimestamp(expire, tz=timezone.utc)):
self._refresh_oidc(provider)
if self._config_persister:
self._config_persister(self._config.value)
self.token = "Bearer %s" % provider['config']['id-token']
return self.token
kubernetes.config.kube_config.KubeConfigLoader._load_oid_token = load_oid_token_patched
def load_kube_config(params):
# from six import PY3
# PY3 = sys.version_info.major == 3
#
# # apply monkey-patch for kubernetes client OIDC authentication issue 525 ("binascii.Error: Incorrect padding")
# # before importing client and config from kubernetes
# if PY3:
# apply_oid_token_monkey_patch()
from kubernetes import config
# kube_config_file = "kube/%s/kube-config.yml" % params["public_ip"]
config.load_incluster_config()
def get_api_client_v1():
import kubernetes
api_client_v1 = kubernetes.client.CoreV1Api()
return api_client_v1
def get_custom_objects_api_client():
import kubernetes
api_client = kubernetes.client.CustomObjectsApi()
return api_client
def get_seldon_spec(params):
with open("kube/seldon.json") as f:
spec = json.load(f)
# override the 'SELDON_DEPLOYMENT_ID' and the kubernetes service name with the 'deployment_name' from the parameters
deployment_name = get_deployment_name(params)
spec["metadata"]["name"] = deployment_name # 'fashion-deployment-id' ... SELDON_DEPLOYMENT_ID
spec["spec"]["name"] = deployment_name # 'fashion-service-name'
return spec
def update_seldon_spec(params):
spec = get_seldon_spec(params)
if "container_image" in params:
spec["spec"]["predictors"][0]["componentSpecs"][0]["spec"]["containers"][0]["image"] = params["container_image"]
env_list = spec["spec"]["predictors"][0]["componentSpecs"][0]["spec"]["containers"][0]["env"]
env_dict = {var["name"]: var["value"] for var in env_list}
env_dict["MODEL_FILE_NAME"] = params["model_file_name"]
env_dict["TRAINING_ID"] = params["training_id"]
env_dict["BUCKET_NAME"] = params["training_results_bucket"]
env_dict["BUCKET_ENDPOINT_URL"] = params["aws_endpoint_url"]
env_dict["BUCKET_KEY"] = params['aws_access_key_id']
env_dict["BUCKET_SECRET"] = params['aws_secret_access_key']
env_dict["MODEL_CLASS_NAME"] = params['model_class_name']
env_dict["MODEL_CLASS_FILE"] = params['model_class_file']
env_updated = [{"name": key, "value": value} for key, value in env_dict.items()]
spec["spec"]["predictors"][0]["componentSpecs"][0]["spec"]["containers"][0]["env"] = env_updated
return spec
def deploy_seldon_spec(spec):
name = spec["metadata"]["name"]
namespace = "default" # TODO: the namespace should be configured or be figured out dynamically
plural = spec["kind"].lower()+"s" # TODO: verify the "rule" for constructing plural
group, version = spec["apiVersion"].split("/")
api_client = get_custom_objects_api_client()
api_response = api_client.list_namespaced_custom_object(group, version, namespace, plural)
if name in [deployment["metadata"]["name"] for deployment in api_response["items"]]:
api_response = api_client.patch_namespaced_custom_object(group, version, namespace, plural, name, spec)
else:
api_response = api_client.create_namespaced_custom_object(group, version, namespace, plural, spec)
# api_response_filtered = {key: api_response[key] for key in ["apiVersion", "kind"]}
LOG.info("%s ..." % str(api_response)[:160])
return api_response
def delete_deployment(params):
from kubernetes.client import V1DeleteOptions
spec = get_seldon_spec(params)
name = get_deployment_name(params) # spec["metadata"]["name"]
namespace = "default" # TODO: the namespace should be configured or be figured out dynamically
plural = spec["kind"].lower()+"s" # TODO: verify the "rule" for constructing plural
group, version = spec["apiVersion"].split("/")
del_opts = V1DeleteOptions()
api_client = get_custom_objects_api_client()
api_response = api_client.list_namespaced_custom_object(group, version, namespace, plural)
if name in [deployment["metadata"]["name"] for deployment in api_response["items"]]:
api_response = api_client.delete_namespaced_custom_object(group, version, namespace, plural, name, del_opts)
else:
LOG.error("Could not find the Seldon deployment '%s'" % name)
return {
"status": "Error",
"details": "Could not find a Seldon deployment with name '%s'" % name
}
# api_response_filtered = {key: api_response[key] for key in ["apiVersion", "kind"]}
LOG.info("%s ..." % str(api_response)[:160])
return api_response
def get_service_name(params):
# 'SELDON_DEPLOYMENT_ID': 'fashion-mnist'
# 'PREDICTOR_ID': 'single-model'
# 'PREDICTIVE_UNIT_ID': 'classifier'
seldon_spec = get_seldon_spec(params)
spec_name = get_deployment_name(params) # seldon_spec["spec"]["name"]) # 'fashion-mnist'
predictor_name = seldon_spec["spec"]["predictors"][0]["name"] # 'single-model'
graph_name = seldon_spec["spec"]["predictors"][0]["graph"]["name"] # 'classifier' (== containers[0].name)
pod_name_prefix = "%s-%s-%s" % (spec_name, predictor_name, graph_name)
return pod_name_prefix # 'fashion-mnist-single-model-classifier'
def get_pods(params):
api_client_v1 = get_api_client_v1()
pods = api_client_v1.list_namespaced_pod(namespace="default", watch=False)
pod_name_prefix = get_service_name(params) # 'fashion-mnist-single-model-classifier'
deployment_name = get_deployment_name(params)
training_id = params["training_id"]
def match_seldon_deployment(pod):
if not pod.metadata.name.startswith(pod_name_prefix):
return False
env = {var.name: var.value for var in pod.spec.containers[0].env}
return env["SELDON_DEPLOYMENT_ID"] == deployment_name and \
env["TRAINING_ID"] == training_id
return list(filter(match_seldon_deployment, pods.items))
def get_deployment_status(params):
# AVAILABLE (classifier URL actually available)
# READY (pod status, not url availability)
# UNKNOWN (no pods)
# ERROR (CrashLoopBackOff, Succeeded - if pod terminated, will not be restarted, this should not happen)
# PENDING (Creating..., ContainerCreating, ContainersReady, PodScheduled, Pending, Initialized, Running)
pods = get_pods(params)
if not pods:
status = get_deployment_state(params) or "Unknown"
else:
status_conditions = sorted(pods[0].status.conditions, key=lambda status: status.last_transition_time, reverse=True)
status = status_conditions[0].type
if status in ["Creating...", "ContainerCreating", "ContainersReady", "PodScheduled", "Initialized", "Running"]:
status = "Pending"
if status in ["CrashLoopBackOff", "Unschedulable", "Failed", "Succeeded"]:
status = "Error"
if status == "Ready":
status = "Available"
return status.upper()
def get_deployment_state(params):
deployment_name = get_deployment_name(params)
spec = get_seldon_spec(params)
group, version = spec["apiVersion"].split("/")
namespace = "default" # TODO: the namespace should be configured or be figured out dynamically
plural = spec["kind"].lower() + "s" # TODO: verify the "rule" for constructing plural
api_client = get_custom_objects_api_client()
api_response = api_client.list_namespaced_custom_object(group, version, namespace, plural)
if deployment_name in [deployment["metadata"]["name"] for deployment in api_response["items"]]:
deployed_spec = api_client.get_namespaced_custom_object(group, version, namespace, plural, deployment_name)
env_list = deployed_spec["spec"]["predictors"][0]["componentSpecs"][0]["spec"]["containers"][0]["env"]
env_dict = {var["name"]: var["value"] for var in env_list}
deployed_training_id = env_dict["TRAINING_ID"]
if params["training_id"] == deployed_training_id and "status" in deployed_spec:
return deployed_spec["status"]["state"].upper() # "CREATING...", "FAILED", ...
else:
LOG.info("Could not find a Seldon deployment with name '%s'" % deployment_name)
return None
def get_ambassador_port():
from kubernetes.client.rest import ApiException
api_client_v1 = get_api_client_v1()
try:
svc = api_client_v1.read_namespaced_service(namespace="default", name="seldon-core-ambassador")
except ApiException:
svc = api_client_v1.read_namespaced_service(namespace="default", name="ambassador")
port = svc.spec.ports[0].node_port
return port
def get_deployment_name(params):
# DNS-1123 sub-domain must consist of lower case alphanumeric characters (or Seldon will raise an exception)
regex = r'^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$'
deployment_name = params["deployment_name"]
if not re.match(regex, deployment_name):
LOG.error("deployment name '%s' does not pass Seldon regex filter '%s'" % (deployment_name, regex))
params["deployment_name"] = deployment_name\
.replace("_", "-")\
.replace(" ", "-")\
.lower()
return params["deployment_name"]
def get_deployment_url(params):
# "http://${PUBLIC_IP}:${SELDON_AMBASSADOR_PORT}/seldon/${deployment_name}/api/v0.1/predictions"
ip = params["public_ip"]
port = get_ambassador_port()
name = get_deployment_name(params)
url = "http://%s:%s/seldon/%s/api/v0.1/predictions" % (ip, port, name)
return url
def is_deployment_available(params):
url = get_deployment_url(params)
response = requests.options(url)
return response.status_code == 200
def get_http_method(params):
# GET get deployment status
# POST create or patch existing deployment
# PUT patch existing deployment
# PATCH patch existing deployment
# DELETE delete deployment
# return params.get("__ow_method", "POST").upper() # TODO: default for local testing only, remove
if params.get("check_status_only", False):
return "GET"
if params.get("delete_deployment", False):
return "DELETE"
return params.get("__ow_method", "POST").upper()
def run_safe(params, method):
try:
load_kube_config(params)
# method = get_http_method(params)
if method in ("POST", "PATCH", "PUT"):
# if set(deployment_parameters).issubset(params.keys()):
LOG.info("deploying '%s' on cluster '%s'" % (params["deployment_name"], params["public_ip"]))
spec = update_seldon_spec(params)
deploy_result = deploy_seldon_spec(spec)
deployment_url = get_deployment_url(params)
deployment_state = deploy_result["status"]["state"].upper() if "status" in deploy_result \
else get_deployment_status(params)
result = {
"deployment_status": deployment_state,
"deployment_url": deployment_url,
"details": deploy_result
}
elif method == "GET":
LOG.info("get deployment status of '%s' on cluster '%s'" % (params["deployment_name"], params["public_ip"]))
deployment_url = get_deployment_url(params)
deployment_state = get_deployment_status(params)
result = {
"deployment_status": deployment_state, # "Error" "Creating Container" "CrashLoopBackOff" "Pending"
"deployment_url": deployment_url
}
elif method == "DELETE":
LOG.info("deleting deployment for '%s' on cluster '%s'" % (params["deployment_name"], params["public_ip"]))
delete_result = delete_deployment(params)
result = {
"status": delete_result["status"],
"details": delete_result["details"]
}
else:
result = {
"status": "Failed",
"message": "could not identify HTTP request method"
}
result["status"] = result.get("status", "Success")
return result
except Exception as e:
LOG.exception('%s: %s' % (e.__class__.__name__, str(e)))
return {
"status": "Error",
"details": {
"error": e.__class__.__name__,
"message": str(e),
"trace": traceback.format_exc()
}
}
@app.route('/', methods=['POST'])
def deployment_api_post():
if not request.json:
abort(400)
return json.dumps(run_safe(request.json,"POST"))
@app.route('/', methods=['GET'])
def deployment_api_get():
return json.dumps(run_safe(json.loads(json.dumps(request.args)),"GET"))
@app.route('/', methods=['DELETE'])
def deployment_api_delete():
return json.dumps(run_safe(json.loads(json.dumps(request.args)),"DELETE"))
@app.route('/', methods=['OPTIONS'])
def deployment_api_options():
return "200"
if __name__ == "__main__":
app.run(debug=True,host='0.0.0.0',port=int(os.environ.get('PORT', 8080)))
| 8,113 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/ffdl/serve | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/ffdl/serve/src/serve.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import argparse
from app import run_safe
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--model_id', type=str, help='Training model id')
parser.add_argument('--deployment_name', type=str, help='Deployment name for the seldon service')
parser.add_argument('--model_class_name', type=str, help='PyTorch model class name', default='ModelClass')
parser.add_argument('--model_class_file', type=str, help='File that contains the PyTorch model class', default='model_class.py')
parser.add_argument('--serving_image', type=str, help='Model serving images', default='aipipeline/seldon-pytorch:0.1')
args = parser.parse_args()
with open("/app/secrets/s3_url", 'r') as f:
s3_url = f.readline().strip('\'')
f.close()
with open("/app/secrets/result_bucket", 'r') as f:
bucket_name = f.readline().strip('\'')
f.close()
with open("/app/secrets/s3_access_key_id", 'r') as f:
s3_access_key_id = f.readline().strip('\'')
f.close()
with open("/app/secrets/s3_secret_access_key", 'r') as f:
s3_secret_access_key = f.readline().strip('\'')
f.close()
with open("/app/secrets/k8s_public_nodeport_ip", 'r') as f:
seldon_ip = f.readline().strip('\'')
f.close()
model_id = args.model_id
deployment_name = args.deployment_name
model_class_name = args.model_class_name
model_class_file = args.model_class_file
serving_image = args.serving_image
formData = {
"public_ip": seldon_ip,
"aws_endpoint_url": s3_url,
"aws_access_key_id": s3_access_key_id,
"aws_secret_access_key": s3_secret_access_key,
"training_results_bucket": bucket_name,
"model_file_name": "model.pt",
"deployment_name": deployment_name,
"training_id": model_id,
"container_image": serving_image,
"check_status_only": False,
"model_class_name": model_class_name,
"model_class_file": model_class_file
}
metrics = run_safe(formData, "POST")
print(metrics)
with open('/tmp/deployment_result.txt', "w") as report:
report.write(json.dumps(metrics))
print('\nThe Model is running at ' + metrics['deployment_url'])
| 8,114 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/ffdl/serve/src | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/ffdl/serve/src/kube/seldon.json | {
"apiVersion": "machinelearning.seldon.io/v1alpha2",
"kind": "SeldonDeployment",
"metadata": {
"labels": {
"app": "seldon"
},
"name": "deployment-id"
},
"spec": {
"annotations": {
"project_name": "pytorch-classifier",
"deployment_version": "v1"
},
"name": "pytorch-classifier",
"oauth_key": "oauth-key",
"oauth_secret": "oauth-secret",
"predictors": [
{
"componentSpecs": [{
"spec": {
"containers": [
{
"image": "aipipeline/seldon-pytorch:0.1",
"imagePullPolicy": "IfNotPresent",
"name": "classifier",
"resources": {
"requests": {
"memory": "1Mi"
}
},
"env": [
{
"name": "MODEL_FILE_NAME",
"value": "model.pt"
},
{
"name": "TRAINING_ID",
"value": "training-abcde1234"
},
{
"name": "BUCKET_NAME",
"value": "training-results"
},
{
"name": "BUCKET_ENDPOINT_URL",
"value": "https://s3-api.us-geo.objectstorage.softlayer.net"
},
{
"name": "BUCKET_KEY",
"value": ""
},
{
"name": "BUCKET_SECRET",
"value": ""
},
{
"name": "MODEL_CLASS_NAME",
"value": "ModelClass"
},
{
"name": "MODEL_CLASS_FILE",
"value": "model_class.py"
}
]
}
],
"terminationGracePeriodSeconds": 20
}
}],
"graph": {
"children": [],
"name": "classifier",
"endpoint": {
"type": "REST"
},
"type": "MODEL"
},
"name": "single-model",
"replicas": 1,
"annotations": {
"predictor_version": "v1"
}
}
]
}
}
| 8,115 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/ffdl | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/ffdl/train/README.md |
# Fabric for Deep Learning - Train Model
## Intended Use
Train Machine Learning and Deep Learning Models remotely using Fabric for Deep Learning
## Run-Time Parameters:
Name | Description
:--- | :----------
model_def_file_path | Required. Path for model training code in object storage
manifest_file_path | Required. Path for model manifest definition in object storage
## Output:
Name | Description
:--- | :----------
output | Model training_id
## Sample
Note: The sample code below works both in an IPython notebook and as standalone Python code.
### Set sample parameters
```python
# Required Parameters
MODEL_DEF_FILE_PATH = '<Please put your path for model training code in the object storage bucket>'
MANIFEST_FILE_PATH = '<Please put your path for model manifest definition in the object storage bucket>'
```
```python
# Optional Parameters
EXPERIMENT_NAME = 'Fabric for Deep Learning - Train Model'
COMPONENT_SPEC_URI = 'https://raw.githubusercontent.com/kubeflow/pipelines/eb830cd73ca148e5a1a6485a9374c2dc068314bc/components/ibm-components/ffdl/train/component.yaml'
```
### Install KFP SDK
Install the KFP SDK (uncomment the code below if the SDK is not already installed).
```python
#KFP_PACKAGE = 'https://storage.googleapis.com/ml-pipeline/release/0.1.12/kfp.tar.gz'
#!pip3 install $KFP_PACKAGE --upgrade
```
### Load component definitions
```python
import kfp.components as comp
ffdl_train_op = comp.load_component_from_url(COMPONENT_SPEC_URI)
display(ffdl_train_op)
```
### Here is an illustrative pipeline that uses the component
```python
import kfp.dsl as dsl
import ai_pipeline_params as params
import json
@dsl.pipeline(
name='FfDL train pipeline',
description='FfDL train pipeline'
)
def ffdl_train_pipeline(
model_def_file_path=MODEL_DEF_FILE_PATH,
manifest_file_path=MANIFEST_FILE_PATH
):
ffdl_train_op(model_def_file_path, manifest_file_path).apply(params.use_ai_pipeline_params('kfp-creds'))
```
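### Chain with the serve component (optional)
The `training_id` produced by this component can be fed straight into the FfDL serve component. Below is a minimal sketch; it assumes `ffdl_serve_op` has been loaded from `../serve/component.yaml` (as shown in the serve README) and uses placeholder serving parameters:
```python
import kfp.dsl as dsl
import ai_pipeline_params as params

@dsl.pipeline(
    name='FfDL train and serve pipeline',
    description='Train on FfDL, then serve the model with Seldon Core'
)
def ffdl_train_and_serve_pipeline(
    model_def_file_path=MODEL_DEF_FILE_PATH,
    manifest_file_path=MANIFEST_FILE_PATH,
    deployment_name='<Deployment name for the Seldon service>',
    model_class_name='ModelClass',
    model_class_file='model_class.py',
    serving_image='aipipeline/seldon-pytorch:0.1'
):
    train_step = ffdl_train_op(model_def_file_path, manifest_file_path).apply(params.use_ai_pipeline_params('kfp-creds'))
    ffdl_serve_op(train_step.output, deployment_name, model_class_name, model_class_file, serving_image).apply(params.use_ai_pipeline_params('kfp-creds'))
```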
### Compile the pipeline
```python
pipeline_func = ffdl_train_pipeline
pipeline_filename = pipeline_func.__name__ + '.pipeline.tar.gz'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
```
### Submit the pipeline for execution
```python
#Specify pipeline argument values
arguments = {}
#Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
```
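Optionally, wait for the run to finish and check its final state. The sketch below reuses `client` and `run_result` from the cell above and assumes the installed `kfp` SDK exposes `wait_for_run_completion`:
```python
# Wait up to one hour for the run to finish, then print its final status.
run_detail = client.wait_for_run_completion(run_result.id, timeout=3600)
print('Run finished with status:', run_detail.run.status)
```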
| 8,116 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/ffdl | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/ffdl/train/component.yaml | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: 'Train Model - FfDL'
description: |
Train Machine Learning and Deep Learning Models remotely using Fabric for Deep Learning
metadata:
annotations: {platform: 'OpenSource'}
inputs:
- {name: model_def_file_path, description: 'Required. Path for model training code in object storage'}
- {name: manifest_file_path, description: 'Required. Path for model manifest definition in object storage'}
outputs:
- {name: output, description: 'Model training_id'}
implementation:
container:
image: docker.io/aipipeline/ffdl-train:latest
command: ['python']
args: [
-u, train.py,
--model_def_file_path, {inputValue: model_def_file_path},
--manifest_file_path, {inputValue: manifest_file_path}
]
fileOutputs:
output: /tmp/training_id.txt
| 8,117 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/ffdl | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/ffdl/train/Dockerfile | FROM python:3.6-slim
RUN pip install boto3 ruamel.yaml requests
ENV APP_HOME /app
COPY src $APP_HOME
WORKDIR $APP_HOME
ENTRYPOINT ["python", "train.py"]
| 8,118 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/ffdl/train | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/ffdl/train/src/train.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import boto3
import botocore
import requests
import argparse
import time
from ruamel.yaml import YAML
import subprocess
import os
''' global initialization '''
yaml = YAML(typ='safe')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--model_def_file_path', type=str, help='Object storage bucket file path for the training model definition')
parser.add_argument('--manifest_file_path', type=str, help='Object storage bucket file path for the FfDL manifest')
args = parser.parse_args()
with open("/app/secrets/s3_url", 'r') as f:
s3_url = f.readline().strip('\'')
f.close()
with open("/app/secrets/training_bucket", 'r') as f:
data_bucket_name = f.readline().strip('\'')
f.close()
with open("/app/secrets/result_bucket", 'r') as f:
result_bucket_name = f.readline().strip('\'')
f.close()
with open("/app/secrets/s3_access_key_id", 'r') as f:
s3_access_key_id = f.readline().strip('\'')
f.close()
with open("/app/secrets/s3_secret_access_key", 'r') as f:
s3_secret_access_key = f.readline().strip('\'')
f.close()
with open("/app/secrets/ffdl_rest", 'r') as f:
ffdl_rest = f.readline().strip('\'')
f.close()
model_def_file_path = args.model_def_file_path
manifest_file_path = args.manifest_file_path
''' Download FfDL CLI for log streaming '''
res = requests.get('https://github.com/IBM/FfDL/raw/master/cli/bin/ffdl-linux', allow_redirects=True)
open('ffdl', 'wb').write(res.content)
subprocess.call(['chmod', '755', 'ffdl'])
''' Download the training model definition and FfDL manifest '''
client = boto3.resource(
's3',
endpoint_url=s3_url,
aws_access_key_id=s3_access_key_id,
aws_secret_access_key=s3_secret_access_key,
)
try:
client.Bucket(data_bucket_name).download_file(model_def_file_path, 'model.zip')
client.Bucket(data_bucket_name).download_file(manifest_file_path, 'manifest.yml')
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
print("The object does not exist.")
else:
raise
''' Update FfDL manifest with the corresponding object storage credentials '''
f = open('manifest.yml', 'r')
    manifest = yaml.load(f.read())
f.close()
manifest['data_stores'][0]['connection']['auth_url'] = s3_url
manifest['data_stores'][0]['connection']['user_name'] = s3_access_key_id
manifest['data_stores'][0]['connection']['password'] = s3_secret_access_key
manifest['data_stores'][0]['training_data']['container'] = data_bucket_name
manifest['data_stores'][0]['training_results']['container'] = result_bucket_name
f = open('manifest.yml', 'w')
yaml.default_flow_style = False
yaml.dump(manifest, f)
f.close()
''' Submit Training job to FfDL and monitor its status '''
files = {
'manifest': open('manifest.yml', 'rb'),
'model_definition': open('model.zip', 'rb')
}
headers = {
'accept': 'application/json',
'Authorization': 'test',
'X-Watson-Userinfo': 'bluemix-instance-id=test-user'
}
response = requests.post(ffdl_rest + '/v1/models?version=2017-02-13', files=files, headers=headers)
print(response.json())
id = response.json()['model_id']
print('Training job has started, please visit the FfDL UI for more details')
training_status = 'PENDING'
os.environ['DLAAS_PASSWORD'] = 'test'
os.environ['DLAAS_USERNAME'] = 'test-user'
os.environ['DLAAS_URL'] = ffdl_rest
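    # Poll the FfDL REST API until the training job reaches a terminal state. While the
    # job is PROCESSING, stream its logs with the FfDL CLI downloaded above; on COMPLETED
    # the training id is written to /tmp/training_id.txt for downstream pipeline steps.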
while training_status != 'COMPLETED':
response = requests.get(ffdl_rest + '/v1/models/' + id + '?version=2017-02-13', headers=headers)
training_status = response.json()['training']['training_status']['status']
print('Training Status: ' + training_status)
if training_status == 'COMPLETED':
with open('/tmp/training_id.txt', "w") as report:
report.write(json.dumps(id))
exit(0)
if training_status == 'FAILED':
print('Training failed. Exiting...')
exit(1)
if training_status == 'PROCESSING':
counter = 0
process = subprocess.Popen(['./ffdl', 'logs', id, '--follow'], stdout=subprocess.PIPE)
while True:
output = process.stdout.readline()
if output:
print(output.strip())
elif process.poll() is not None:
break
else:
counter += 1
time.sleep(5)
if counter > 5:
break
time.sleep(10)
| 8,119 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson/store/component.yaml | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: 'Store model - Watson Machine Learning'
description: |
  Store and persist the trained model on Watson Machine Learning.
metadata:
annotations: {platform: 'IBM Watson Machine Learning'}
inputs:
- {name: run_uid, description: 'Required. UID for the Watson Machine Learning training-runs'}
- {name: model_name, description: 'Required. Model Name to store on Watson Machine Learning'}
- {name: framework, description: 'ML/DL Model Framework', default: 'tensorflow'}
- {name: framework_version, description: 'Model Framework version', default: '1.14'}
- {name: runtime_version, description: 'Model Code runtime version', default: '3.6'}
outputs:
- {name: model_uid, description: 'UID for the stored model on Watson Machine Learning'}
implementation:
container:
image: docker.io/aipipeline/wml-store:latest
command: ['python3']
args: [
-u, /app/wml-store.py,
--run-uid, {inputValue: run_uid},
--model-name, {inputValue: model_name},
--framework, {inputValue: framework},
--framework-version, {inputValue: framework_version},
--runtime-version, {inputValue: runtime_version},
--output-model-uid-path, {outputPath: model_uid}
]
| 8,120 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson/store/Dockerfile | FROM python:3.6-slim
# Directories for model codes and secrets
RUN mkdir /app
RUN mkdir /app/secrets
# Watson studio and machine learning python client
RUN pip install watson-machine-learning-client-V4 minio
# Python functions with endpoints to Watson Machine Learning
COPY src/wml-store.py /app
| 8,121 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson/store | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson/store/src/wml-store.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# define the function to store the model
def getSecret(secret):
with open(secret, 'r') as f:
res = f.readline().strip('\'')
f.close()
return res
def store(wml_model_name, run_uid, framework, framework_version, runtime_version, output_model_uid_path):
from watson_machine_learning_client import WatsonMachineLearningAPIClient
from pathlib import Path
# retrieve credentials
wml_url = getSecret("/app/secrets/wml_url")
wml_instance_id = getSecret("/app/secrets/wml_instance_id")
wml_apikey = getSecret("/app/secrets/wml_apikey")
runtime_uid = framework + '_' + framework_version + '-py' + runtime_version
runtime_type = framework + '_' + framework_version
print("runtime_uid:", runtime_uid)
print("runtime_type:", runtime_type)
# set up the WML client
wml_credentials = {
"url": wml_url,
"instance_id": wml_instance_id,
"apikey": wml_apikey
}
client = WatsonMachineLearningAPIClient(wml_credentials)
# store the model
meta_props_tf = {
client.repository.ModelMetaNames.NAME: wml_model_name,
client.repository.ModelMetaNames.RUNTIME_UID: runtime_uid,
client.repository.ModelMetaNames.TYPE: runtime_type
}
model_details = client.repository.store_model(run_uid, meta_props=meta_props_tf)
model_uid = client.repository.get_model_uid(model_details)
print("model_uid: ", model_uid)
Path(output_model_uid_path).parent.mkdir(parents=True, exist_ok=True)
Path(output_model_uid_path).write_text(model_uid)
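    # Fixed delay (assumption): give the WML repository time to finish registering the
    # stored model before any downstream step tries to use it.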
import time
time.sleep(120)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--model-name', type=str, required=True)
parser.add_argument('--run-uid', type=str, required=True)
parser.add_argument('--framework', type=str, required=True)
parser.add_argument('--framework-version', type=str, required=True)
parser.add_argument('--runtime-version', type=str, required=True)
parser.add_argument('--output-model-uid-path', type=str, default='/tmp/model_uid')
args = parser.parse_args()
store(args.model_name,
args.run_uid,
args.framework,
args.framework_version,
args.runtime_version,
args.output_model_uid_path)
| 8,122 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson/manage | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson/manage/subscribe/component.yaml | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: 'Subscribe - Watson OpenScale'
description: |
Binding deployed models and subscribe them to Watson OpenScale service.
metadata:
annotations: {platform: 'IBM Watson OpenScale'}
inputs:
- {name: model_name, description: 'Deployed model name.', default: 'AIOS Spark German Risk Model - Final'}
- {name: model_uid, description: 'Deployed model uid.', default: 'dummy uid'}
- {name: aios_schema, description: 'OpenScale Schema Name', default: 'data_mart_credit_risk'}
- {name: label_column, description: 'Model label column name.', default: 'Risk'}
- {name: aios_manifest_path, description: 'Object storage file path for the aios manifest file', default: ''}
- {name: bucket_name, description: 'Object storage bucket name', default: 'dummy-bucket-name'}
- {name: problem_type, description: 'Model problem type', default: 'BINARY_CLASSIFICATION'}
outputs:
- {name: model_name, description: 'Deployed model name.'}
implementation:
container:
image: docker.io/aipipeline/subscribe:latest
command: ['python']
args: [
-u, subscribe.py,
--model_name, {inputValue: model_name},
--model_uid, {inputValue: model_uid},
--aios_schema, {inputValue: aios_schema},
--label_column, {inputValue: label_column},
--aios_manifest_path, {inputValue: aios_manifest_path},
--bucket_name, {inputValue: bucket_name},
--problem_type, {inputValue: problem_type}
]
fileOutputs:
model_name: /tmp/model_name
| 8,123 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson/manage | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson/manage/subscribe/Dockerfile | FROM python:3.6.8-stretch
RUN pip install --upgrade pip
RUN pip install --upgrade watson-machine-learning-client ibm-ai-openscale Minio --no-cache | tail -n 1
RUN pip install psycopg2-binary | tail -n 1
ENV APP_HOME /app
COPY src $APP_HOME
WORKDIR $APP_HOME
ENTRYPOINT ["python"]
CMD ["subscribe.py"]
| 8,124 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson/manage/subscribe | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson/manage/subscribe/src/subscribe.py | import json
import argparse
import re
from ibm_ai_openscale import APIClient
from ibm_ai_openscale.engines import *
from ibm_ai_openscale.utils import *
from ibm_ai_openscale.supporting_classes import PayloadRecord, Feature
from ibm_ai_openscale.supporting_classes.enums import *
from watson_machine_learning_client import WatsonMachineLearningAPIClient
from minio import Minio
def get_secret_creds(path):
with open(path, 'r') as f:
cred = f.readline().strip('\'')
f.close()
return cred
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--aios_schema', type=str, help='AI OpenScale Schema Name', default="data_mart_credit_risk")
parser.add_argument('--model_name', type=str, help='Deployed model name', default="AIOS Spark German Risk Model - Final")
parser.add_argument('--model_uid', type=str, help='Deployed model uid', default="dummy uid")
parser.add_argument('--label_column', type=str, help='Model label column name', default="Risk")
parser.add_argument('--aios_manifest_path', type=str, help='Object storage file path for the aios manifest file', default="")
parser.add_argument('--bucket_name', type=str, help='Object storage bucket name', default="dummy-bucket-name")
parser.add_argument('--problem_type', type=str, help='Model problem type', default="BINARY_CLASSIFICATION")
args = parser.parse_args()
aios_schema = args.aios_schema
model_name = args.model_name
model_uid = args.model_uid
label_column = args.label_column
aios_manifest_path = args.aios_manifest_path
cos_bucket_name = args.bucket_name
problem_type = args.problem_type
wml_url = get_secret_creds("/app/secrets/wml_url")
wml_instance_id = get_secret_creds("/app/secrets/wml_instance_id")
wml_apikey = get_secret_creds("/app/secrets/wml_apikey")
aios_guid = get_secret_creds("/app/secrets/aios_guid")
cloud_api_key = get_secret_creds("/app/secrets/cloud_api_key")
postgres_uri = get_secret_creds("/app/secrets/postgres_uri")
cos_endpoint = get_secret_creds("/app/secrets/cos_endpoint")
cos_access_key = get_secret_creds("/app/secrets/cos_access_key")
cos_secret_key = get_secret_creds("/app/secrets/cos_secret_key")
    ''' Make sure no http(s) scheme is included in the Minio endpoint '''
url = re.compile(r"https?://")
cos_endpoint = url.sub('', cos_endpoint)
WML_CREDENTIALS = {
"url": wml_url,
"instance_id": wml_instance_id,
"apikey": wml_apikey
}
AIOS_CREDENTIALS = {
"instance_guid": aios_guid,
"apikey": cloud_api_key,
"url": "https://api.aiopenscale.cloud.ibm.com"
}
if postgres_uri == '':
POSTGRES_CREDENTIALS = None
else:
POSTGRES_CREDENTIALS = {
"uri": postgres_uri
}
wml_client = WatsonMachineLearningAPIClient(WML_CREDENTIALS)
ai_client = APIClient(aios_credentials=AIOS_CREDENTIALS)
print('AIOS client version:' + ai_client.version)
''' Setup Postgres SQL and AIOS binding '''
SCHEMA_NAME = aios_schema
try:
data_mart_details = ai_client.data_mart.get_details()
if 'internal_database' in data_mart_details['database_configuration'] and data_mart_details['database_configuration']['internal_database']:
if POSTGRES_CREDENTIALS:
print('Using existing internal datamart')
else:
print('Switching to external datamart')
ai_client.data_mart.delete(force=True)
create_postgres_schema(postgres_credentials=POSTGRES_CREDENTIALS, schema_name=SCHEMA_NAME)
ai_client.data_mart.setup(db_credentials=POSTGRES_CREDENTIALS, schema=SCHEMA_NAME)
else:
print('Using existing external datamart')
except:
if POSTGRES_CREDENTIALS:
print('Setting up internal datamart')
ai_client.data_mart.setup(internal_db=True)
else:
print('Setting up external datamart')
create_postgres_schema(postgres_credentials=POSTGRES_CREDENTIALS, schema_name=SCHEMA_NAME)
ai_client.data_mart.setup(db_credentials=POSTGRES_CREDENTIALS, schema=SCHEMA_NAME)
data_mart_details = ai_client.data_mart.get_details()
binding_uid = ai_client.data_mart.bindings.add('WML instance', WatsonMachineLearningInstance(WML_CREDENTIALS))
if binding_uid is None:
binding_uid = ai_client.data_mart.bindings.get_details()['service_bindings'][0]['metadata']['guid']
bindings_details = ai_client.data_mart.bindings.get_details()
print('\nWML binding ID is ' + binding_uid + '\n')
''' Create subscriptions '''
subscriptions_uids = ai_client.data_mart.subscriptions.get_uids()
for subscription in subscriptions_uids:
sub_name = ai_client.data_mart.subscriptions.get_details(subscription)['entity']['asset']['name']
if sub_name == model_name:
ai_client.data_mart.subscriptions.delete(subscription)
print('Deleted existing subscription for', model_name)
''' Obtain feature and categorical columns '''
# Download aios manifest file
cos = Minio(cos_endpoint,
access_key=cos_access_key,
secret_key=cos_secret_key,
secure=True)
cos.fget_object(cos_bucket_name, aios_manifest_path, aios_manifest_path)
# Extract necessary column names
feature_columns = []
categorical_columns = []
with open(aios_manifest_path) as f:
aios_manifest = json.load(f)
OUTPUT_DATA_SCHEMA = {'fields': aios_manifest['model_schema'], 'type': 'struct'}
for column in aios_manifest['model_schema']:
if column['metadata'].get('modeling_role', '') == 'feature':
feature_columns.append(column['name'])
if column['metadata'].get('measure', '') == 'discrete':
categorical_columns.append(column['name'])
f.close()
PROBLEMTYPE = ProblemType.BINARY_CLASSIFICATION
if problem_type == 'BINARY_CLASSIFICATION':
PROBLEMTYPE = ProblemType.BINARY_CLASSIFICATION
elif problem_type == 'MULTICLASS_CLASSIFICATION':
PROBLEMTYPE = ProblemType.MULTICLASS_CLASSIFICATION
elif problem_type == 'REGRESSION':
PROBLEMTYPE = ProblemType.REGRESSION
subscription = ai_client.data_mart.subscriptions.add(WatsonMachineLearningAsset(
model_uid,
label_column=label_column,
input_data_type=InputDataType.STRUCTURED,
problem_type=PROBLEMTYPE,
prediction_column='predictedLabel',
probability_column='probability',
feature_columns=feature_columns,
categorical_columns=categorical_columns
))
if subscription is None:
print('Exists already')
# subscription already exists; get the existing one
subscriptions_uids = ai_client.data_mart.subscriptions.get_uids()
for sub in subscriptions_uids:
if ai_client.data_mart.subscriptions.get_details(sub)['entity']['asset']['name'] == model_name:
subscription = ai_client.data_mart.subscriptions.get(sub)
subscriptions_uids = ai_client.data_mart.subscriptions.get_uids()
print(subscription.get_details())
''' Scoring the model and make sure the subscriptions are setup properly '''
credit_risk_scoring_endpoint = None
deployment_uid = subscription.get_deployment_uids()[0]
print('\n' + deployment_uid + '\n')
for deployment in wml_client.deployments.get_details()['resources']:
if deployment_uid in deployment['metadata']['guid']:
credit_risk_scoring_endpoint = deployment['entity']['scoring_url']
print('Scoring endpoint is: ' + credit_risk_scoring_endpoint + '\n')
with open("/tmp/model_name", "w") as report:
report.write(model_name)
| 8,125 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson/manage | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson/manage/monitor_fairness/component.yaml | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: 'Monitor Fairness - Watson OpenScale'
description: |
Enable model fairness monitoring on Watson OpenScale.
metadata:
annotations: {platform: 'IBM Watson OpenScale'}
inputs:
- {name: model_name, description: 'Deployed model name on OpenScale.', default: 'AIOS Spark German Risk Model - Final'}
- {name: fairness_threshold, description: 'Amount of threshold for fairness monitoring.', default: '0.95'}
- {name: fairness_min_records, description: 'Minimum amount of records for performing a fairness monitor.', default: '5'}
- {name: aios_manifest_path, description: 'Object storage file path for the aios manifest file.', default: 'aios.json'}
- {name: cos_bucket_name, description: 'Object storage bucket name.', default: 'bucket-name'}
- {name: data_filename, description: 'Name of the data binary', default: ''}
implementation:
container:
image: docker.io/aipipeline/monitor_fairness:latest
command: ['python']
args: [
-u, monitor_fairness.py,
--model_name, {inputValue: model_name},
--fairness_threshold, {inputValue: fairness_threshold},
--fairness_min_records, {inputValue: fairness_min_records},
--aios_manifest_path, {inputValue: aios_manifest_path},
--cos_bucket_name, {inputValue: cos_bucket_name},
--data_filename, {inputValue: data_filename}
]
| 8,126 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson/manage | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson/manage/monitor_fairness/Dockerfile | FROM python:3.6.8-stretch
RUN pip install --upgrade pip
RUN pip install --upgrade watson-machine-learning-client ibm-ai-openscale Minio pandas --no-cache | tail -n 1
RUN pip install psycopg2-binary | tail -n 1
ENV APP_HOME /app
COPY src $APP_HOME
WORKDIR $APP_HOME
ENTRYPOINT ["python"]
CMD ["monitor_fairness.py"]
| 8,127 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson/manage/monitor_fairness | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson/manage/monitor_fairness/src/monitor_fairness.py | import json
import argparse
import re
from ibm_ai_openscale import APIClient
from ibm_ai_openscale.engines import *
from ibm_ai_openscale.utils import *
from ibm_ai_openscale.supporting_classes import PayloadRecord, Feature
from ibm_ai_openscale.supporting_classes.enums import *
from minio import Minio
import pandas as pd
def get_secret_creds(path):
with open(path, 'r') as f:
cred = f.readline().strip('\'')
f.close()
return cred
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--model_name', type=str, help='Deployed model name', default='AIOS Spark German Risk Model - Final')
parser.add_argument('--fairness_threshold', type=float, help='Amount of threshold for fairness monitoring', default=0.95)
parser.add_argument('--fairness_min_records', type=int, help='Minimum amount of records for performing a fairness monitor', default=5)
parser.add_argument('--aios_manifest_path', type=str, help='Object storage file path for the aios manifest file', default='aios.json')
parser.add_argument('--cos_bucket_name', type=str, help='Object storage bucket name', default='bucket-name')
parser.add_argument('--data_filename', type=str, help='Name of the data binary', default="")
args = parser.parse_args()
model_name = args.model_name
fairness_threshold = args.fairness_threshold
fairness_min_records = args.fairness_min_records
cos_bucket_name = args.cos_bucket_name
aios_manifest_path = args.aios_manifest_path
data_filename = args.data_filename
aios_guid = get_secret_creds("/app/secrets/aios_guid")
cloud_api_key = get_secret_creds("/app/secrets/cloud_api_key")
cos_endpoint = get_secret_creds("/app/secrets/cos_endpoint")
cos_access_key = get_secret_creds("/app/secrets/cos_access_key")
cos_secret_key = get_secret_creds("/app/secrets/cos_secret_key")
''' Remove possible http scheme for Minio '''
url = re.compile(r"https?://")
cos_endpoint = url.sub('', cos_endpoint)
    ''' Download data from Cloud object storage '''
cos = Minio(cos_endpoint,
access_key=cos_access_key,
secret_key=cos_secret_key,
secure=True)
cos.fget_object(cos_bucket_name, aios_manifest_path, 'aios.json')
print('Fairness definition file ' + aios_manifest_path + ' is downloaded')
cos.fget_object(cos_bucket_name, data_filename, data_filename)
pd_data = pd.read_csv(data_filename, sep=",", header=0, engine='python')
print('training data ' + data_filename + ' is downloaded and loaded')
""" Load manifest JSON file """
with open('aios.json') as f:
aios_manifest = json.load(f)
""" Initiate AIOS client """
AIOS_CREDENTIALS = {
"instance_guid": aios_guid,
"apikey": cloud_api_key,
"url": "https://api.aiopenscale.cloud.ibm.com"
}
ai_client = APIClient(aios_credentials=AIOS_CREDENTIALS)
print('AIOS client version:' + ai_client.version)
''' Setup fairness monitoring '''
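    # Find the OpenScale subscription whose deployed asset name matches model_name,
    # then enable fairness monitoring with the features and favourable/unfavourable
    # classes declared in the manifest, using the training data for reference statistics.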
subscriptions_uids = ai_client.data_mart.subscriptions.get_uids()
for sub in subscriptions_uids:
if ai_client.data_mart.subscriptions.get_details(sub)['entity']['asset']['name'] == model_name:
subscription = ai_client.data_mart.subscriptions.get(sub)
feature_list = []
for feature in aios_manifest['fairness_features']:
feature_list.append(Feature(feature['feature_name'], majority=feature['majority'], minority=feature['minority'], threshold=feature['threshold']))
subscription.fairness_monitoring.enable(
features=feature_list,
favourable_classes=aios_manifest['fairness_favourable_classes'],
unfavourable_classes=aios_manifest['fairness_unfavourable_classes'],
min_records=fairness_min_records,
training_data=pd_data
)
run_details = subscription.fairness_monitoring.run()
print('Fairness monitoring is enabled.')
| 8,128 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson/manage | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson/manage/monitor_quality/component.yaml | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: 'Monitor quality - Watson OpenScale'
description: |
Enable model quality monitoring on Watson OpenScale.
metadata:
annotations: {platform: 'IBM Watson OpenScale'}
inputs:
- {name: model_name, description: 'Deployed model name on OpenScale.', default: 'AIOS Spark German Risk Model - Final'}
- {name: quality_threshold, description: 'Threshold value for quality monitoring.', default: '0.7'}
- {name: quality_min_records, description: 'Minimum number of records required to run a quality monitor.', default: '5'}
implementation:
container:
image: docker.io/aipipeline/monitor_quality:latest
command: ['python']
args: [
-u, monitor_quality.py,
--model_name, {inputValue: model_name},
--quality_threshold, {inputValue: quality_threshold},
--quality_min_records, {inputValue: quality_min_records}
]
| 8,129 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson/manage | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson/manage/monitor_quality/Dockerfile | FROM python:3.6.8-stretch
RUN pip install --upgrade pip
RUN pip install --upgrade watson-machine-learning-client ibm-ai-openscale --no-cache | tail -n 1
RUN pip install psycopg2-binary | tail -n 1
ENV APP_HOME /app
COPY src $APP_HOME
WORKDIR $APP_HOME
ENTRYPOINT ["python"]
CMD ["monitor_quality.py"]
| 8,130 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson/manage/monitor_quality | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson/manage/monitor_quality/src/monitor_quality.py | import json
import argparse
from ibm_ai_openscale import APIClient
from ibm_ai_openscale.engines import *
from ibm_ai_openscale.utils import *
from ibm_ai_openscale.supporting_classes import PayloadRecord, Feature
from ibm_ai_openscale.supporting_classes.enums import *
def get_secret_creds(path):
with open(path, 'r') as f:
cred = f.readline().strip('\'')
f.close()
return cred
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--model_name', type=str, help='Deployed model name', default="AIOS Spark German Risk Model - Final")
parser.add_argument('--quality_threshold', type=float, help='Amount of threshold for quality monitoring', default=0.7)
parser.add_argument('--quality_min_records', type=int, help='Minimum amount of records for performing a quality monitor', default=5)
args = parser.parse_args()
model_name = args.model_name
quality_threshold = args.quality_threshold
quality_min_records = args.quality_min_records
aios_guid = get_secret_creds("/app/secrets/aios_guid")
cloud_api_key = get_secret_creds("/app/secrets/cloud_api_key")
AIOS_CREDENTIALS = {
"instance_guid": aios_guid,
"apikey": cloud_api_key,
"url": "https://api.aiopenscale.cloud.ibm.com"
}
ai_client = APIClient(aios_credentials=AIOS_CREDENTIALS)
print('AIOS client version:' + ai_client.version)
''' Setup quality monitoring '''
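    # Locate the OpenScale subscription for the deployed model and enable quality
    # (accuracy) monitoring with the given threshold and minimum record count.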
subscriptions_uids = ai_client.data_mart.subscriptions.get_uids()
for sub in subscriptions_uids:
if ai_client.data_mart.subscriptions.get_details(sub)['entity']['asset']['name'] == model_name:
subscription = ai_client.data_mart.subscriptions.get(sub)
subscription.quality_monitoring.enable(threshold=quality_threshold, min_records=quality_min_records)
            # A quality monitoring run can only be triggered after the minimal number of payload records has been posted.
# run_details = subscription.quality_monitoring.run()
print('Quality monitoring is enabled.')
| 8,131 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson/train/component.yaml | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: 'Train Model - Watson Machine Learning'
description: |
Train Machine Learning and Deep Learning Models in the Cloud using Watson Machine Learning
metadata:
annotations: {platform: 'IBM Watson Machine Learning'}
inputs:
- {name: train_code, description: 'Required. Code for training ML/DL models'}
- {name: execution_command, description: 'Required. Execution command to start the model training.'}
- {name: config, description: 'Name of the secret that holds the credential configuration.', default: 'secret_name'}
- {name: framework, description: 'ML/DL Model Framework', default: 'tensorflow'}
- {name: framework_version, description: 'Model Framework version', default: '1.14'}
- {name: runtime, description: 'Model Code runtime language', default: 'python'}
- {name: runtime_version, description: 'Model Code runtime version', default: '3.6'}
- {name: run_definition, description: 'Name for the Watson Machine Learning training definition', default: 'python-tensorflow-definition'}
- {name: run_name, description: 'Name for the Watson Machine Learning training-runs', default: 'python-tensorflow-run'}
- {name: author_name, description: 'Name of this training job author', default: 'default-author'}
- {name: compute_name, description: 'Name of the compute tier on Watson Machine Learning, e.g. k80', default: 'k80'}
- {name: compute_nodes, description: 'Number of compute machines', default: '1'}
outputs:
- {name: run_uid, description: 'UID for the Watson Machine Learning training-runs'}
- {name: training_uid, description: 'Training Location UID for the Watson Machine Learning training-runs'}
implementation:
container:
image: docker.io/aipipeline/wml-train:latest
command: ['python3']
args: [
-u, /app/wml-train.py,
--config, {inputValue: config},
--train-code, {inputValue: train_code},
--execution-command, {inputValue: execution_command},
--framework, {inputValue: framework},
--framework-version, {inputValue: framework_version},
--runtime, {inputValue: runtime},
--runtime-version, {inputValue: runtime_version},
--run-definition, {inputValue: run_definition},
--run-name, {inputValue: run_name},
--author-name, {inputValue: author_name},
--compute-name, {inputValue: compute_name},
--compute-nodes,{inputValue: compute_nodes},
--output-run-uid-path, {outputPath: run_uid},
--output-training-uid-path, {outputPath: training_uid}
]
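# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the component spec); all values are
# placeholders and the WML/COS secrets expected by the container must already
# exist.
#
#   from kfp import components
#
#   train_op = components.load_component_from_file('component.yaml')
#
#   train_task = train_op(
#       train_code='model-code.zip',
#       execution_command='python3 train.py',
#   )
#   # Downstream steps can consume train_task.outputs['run_uid'] and
#   # train_task.outputs['training_uid'].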
| 8,132 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson/train/Dockerfile | FROM python:3.6-slim
# Directories for model codes and secrets
RUN mkdir /app
RUN mkdir /app/secrets
# Watson studio and machine learning python client
RUN pip install watson-machine-learning-client-V4 minio
# Python functions with endpoints to Watson Machine Learning
COPY src/wml-train.py /app
| 8,133 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson/train | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson/train/src/wml-train.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# define the function to train a model on wml
def getSecret(secret):
with open(secret, 'r') as f:
res = f.readline().strip('\'')
f.close()
return res
def train(args):
from watson_machine_learning_client import WatsonMachineLearningAPIClient
from minio import Minio
from urllib.parse import urlsplit
from pathlib import Path
import os,time
wml_train_code = args.train_code
wml_execution_command = args.execution_command.strip('\'')
wml_framework_name = args.framework if args.framework else 'tensorflow'
wml_framework_version = args.framework_version if args.framework_version else '1.14'
wml_runtime_name = args.runtime if args.runtime else 'python'
wml_runtime_version = args.runtime_version if args.runtime_version else '3.6'
wml_run_definition = args.run_definition if args.run_definition else 'python-tensorflow-definition'
wml_run_name = args.run_name if args.run_name else 'python-tensorflow-run'
wml_author_name = args.author_name if args.author_name else 'default-author'
wml_compute_name = args.compute_name if args.compute_name else 'k80'
wml_compute_nodes = args.compute_nodes if args.compute_nodes else '1'
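    # Compose the v4 runtime identifier (e.g. framework version 1.14 with Python 3.6
    # yields '1.14-py3.6') and coerce the requested node count to an integer.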
wml_runtime_version_v4 = wml_framework_version + '-py' + wml_runtime_version
wml_compute_nodes_v4 = int(wml_compute_nodes)
# retrieve credentials
wml_url = getSecret("/app/secrets/wml_url")
wml_apikey = getSecret("/app/secrets/wml_apikey")
wml_instance_id = getSecret("/app/secrets/wml_instance_id")
wml_data_source_type = getSecret("/app/secrets/wml_data_source_type")
cos_endpoint = getSecret("/app/secrets/cos_endpoint")
cos_endpoint_parts = urlsplit(cos_endpoint)
if bool(cos_endpoint_parts.scheme):
cos_endpoint_hostname = cos_endpoint_parts.hostname
else:
cos_endpoint_hostname = cos_endpoint
cos_endpoint = 'https://' + cos_endpoint
cos_access_key = getSecret("/app/secrets/cos_access_key")
cos_secret_key = getSecret("/app/secrets/cos_secret_key")
cos_input_bucket = getSecret("/app/secrets/cos_input_bucket")
cos_output_bucket = getSecret("/app/secrets/cos_output_bucket")
# download model code
model_code = os.path.join('/app', wml_train_code)
cos = Minio(cos_endpoint_hostname,
access_key=cos_access_key,
secret_key=cos_secret_key,
secure=True)
cos.fget_object(cos_input_bucket, wml_train_code, model_code)
# set up the WML client
wml_credentials = {
"url": wml_url,
"instance_id": wml_instance_id,
"apikey": wml_apikey
}
client = WatsonMachineLearningAPIClient(wml_credentials)
# define the model
lib_meta = {
client.runtimes.LibraryMetaNames.NAME: wml_run_definition,
client.runtimes.LibraryMetaNames.VERSION: wml_framework_version,
client.runtimes.LibraryMetaNames.FILEPATH: model_code,
client.runtimes.LibraryMetaNames.PLATFORM: {"name": wml_framework_name, "versions": [wml_framework_version]}
}
    # check for an existing library with the same name
library_details = client.runtimes.get_library_details()
for library_detail in library_details['resources']:
if library_detail['entity']['name'] == wml_run_definition:
            # Delete the library if it already exists because the model code cannot be updated in place
uid = client.runtimes.get_library_uid(library_detail)
client.repository.delete(uid)
break
custom_library_details = client.runtimes.store_library(lib_meta)
custom_library_uid = client.runtimes.get_library_uid(custom_library_details)
# create a pipeline with the model definitions included
doc = {
"doc_type": "pipeline",
"version": "2.0",
"primary_pipeline": wml_framework_name,
"pipelines": [{
"id": wml_framework_name,
"runtime_ref": "hybrid",
"nodes": [{
"id": "training",
"type": "model_node",
"op": "dl_train",
"runtime_ref": wml_run_name,
"inputs": [],
"outputs": [],
"parameters": {
"name": "tf-mnist",
"description": wml_run_definition,
"command": wml_execution_command,
"training_lib_href": "/v4/libraries/"+custom_library_uid,
"compute": {
"name": wml_compute_name,
"nodes": wml_compute_nodes_v4
}
}
}]
}],
"runtimes": [{
"id": wml_run_name,
"name": wml_framework_name,
"version": wml_runtime_version_v4
}]
}
metadata = {
client.repository.PipelineMetaNames.NAME: wml_run_name,
client.repository.PipelineMetaNames.DOCUMENT: doc
}
pipeline_id = client.pipelines.get_uid(client.repository.store_pipeline(meta_props=metadata))
client.pipelines.get_details(pipeline_id)
# start the training run for v4
metadata = {
client.training.ConfigurationMetaNames.TRAINING_RESULTS_REFERENCE: {
"name": "training-results-reference_name",
"connection": {
"endpoint_url": cos_endpoint,
"access_key_id": cos_access_key,
"secret_access_key": cos_secret_key
},
"location": {
"bucket": cos_output_bucket
},
"type": wml_data_source_type
},
client.training.ConfigurationMetaNames.TRAINING_DATA_REFERENCES:[{
"name": "training_input_data",
"type": wml_data_source_type,
"connection": {
"endpoint_url": cos_endpoint,
"access_key_id": cos_access_key,
"secret_access_key": cos_secret_key
},
"location": {
"bucket": cos_input_bucket
}
}],
client.training.ConfigurationMetaNames.PIPELINE_UID: pipeline_id
}
training_id = client.training.get_uid(client.training.run(meta_props=metadata))
print("training_id", client.training.get_details(training_id))
print("get status", client.training.get_status(training_id))
# for v4
run_details = client.training.get_details(training_id)
run_uid = training_id
# print logs
client.training.monitor_logs(run_uid)
client.training.monitor_metrics(run_uid)
# checking the result
status = client.training.get_status(run_uid)
print("status: ", status)
while status['state'] != 'completed':
time.sleep(20)
status = client.training.get_status(run_uid)
print(status)
Path(args.output_run_uid_path).parent.mkdir(parents=True, exist_ok=True)
Path(args.output_run_uid_path).write_text(run_uid)
# Get training details
training_details = client.training.get_details(run_uid)
print("training_details", training_details)
training_uid = training_details['entity']['results_reference']['location']['training']
Path(args.output_training_uid_path).parent.mkdir(parents=True, exist_ok=True)
Path(args.output_training_uid_path).write_text(training_uid)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--train-code', type=str, required=True)
parser.add_argument('--execution-command', type=str, required=True)
parser.add_argument('--framework', type=str)
parser.add_argument('--framework-version', type=str)
parser.add_argument('--runtime', type=str)
parser.add_argument('--runtime-version', type=str)
parser.add_argument('--run-definition', type=str)
parser.add_argument('--run-name', type=str)
parser.add_argument('--author-name', type=str)
parser.add_argument('--config', type=str, default="secret_name")
parser.add_argument('--compute-name', type=str)
parser.add_argument('--compute-nodes', type=str)
parser.add_argument('--output-run-uid-path', type=str, default="/tmp/run_uid")
parser.add_argument('--output-training-uid-path', type=str, default="/tmp/training_uid")
args = parser.parse_args()
# Check secret name is not empty
if (not args.config):
print("Secret for this pipeline is not properly created, exiting with status 1...")
exit(1)
train(args)
| 8,134 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson/deploy/component.yaml | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: 'Deploy Model - Watson Machine Learning'
description: |
Deploy stored model on Watson Machine Learning as a web service.
metadata:
annotations: {platform: 'IBM Watson Machine Learning'}
inputs:
- {name: model_uid, description: 'Required. UID for the stored model on Watson Machine Learning'}
- {name: model_name, description: 'Required. Model Name on Watson Machine Learning'}
- {name: scoring_payload, description: 'Sample Payload file name in the object storage', default: ''}
- {name: deployment_name, description: 'Deployment Name on Watson Machine Learning', default: ''}
outputs:
- {name: scoring_endpoint, description: 'Link to the deployed model web service'}
- {name: model_uid, description: 'UID for the stored model on Watson Machine Learning'}
implementation:
container:
image: docker.io/aipipeline/wml-deploy:latest
command: ['python']
args: [
-u, /app/wml-deploy.py,
--model-uid, {inputValue: model_uid},
--model-name, {inputValue: model_name},
--scoring-payload, {inputValue: scoring_payload},
--deployment-name, {inputValue: deployment_name},
--output-scoring-endpoint-path, {outputPath: scoring_endpoint},
--output-model-uid-path, {outputPath: model_uid}
]
| 8,135 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson/deploy/Dockerfile | FROM python:3.6-slim
# Directories for model codes and secrets
RUN mkdir /app
RUN mkdir /app/secrets
# Watson studio and machine learning python client
RUN pip install watson-machine-learning-client-V4 minio
# Python functions with endpoints to Watson Machine Learning
COPY src/wml-deploy.py /app
| 8,136 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson/deploy | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/watson/deploy/src/wml-deploy.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# define the function to deploy the model
def getSecret(secret):
with open(secret, 'r') as f:
res = f.readline().strip('\'')
f.close()
return res
def deploy(args):
from watson_machine_learning_client import WatsonMachineLearningAPIClient
from minio import Minio
from pathlib import Path
import os
import re
wml_model_name = args.model_name
model_uid = args.model_uid
wml_scoring_payload = args.scoring_payload if args.scoring_payload else ''
deployment_name = args.deployment_name if args.deployment_name else wml_model_name
# retrieve credentials
wml_url = getSecret("/app/secrets/wml_url")
wml_instance_id = getSecret("/app/secrets/wml_instance_id")
wml_apikey = getSecret("/app/secrets/wml_apikey")
# set up the WML client
wml_credentials = {
"url": wml_url,
"instance_id": wml_instance_id,
"apikey": wml_apikey
}
client = WatsonMachineLearningAPIClient(wml_credentials)
client.deployments.list()
# deploy the model
meta_props = {
client.deployments.ConfigurationMetaNames.NAME: deployment_name,
client.deployments.ConfigurationMetaNames.ONLINE: {}
}
deployment_details = client.deployments.create(model_uid, meta_props)
scoring_endpoint = client.deployments.get_scoring_href(deployment_details)
deployment_uid = client.deployments.get_uid(deployment_details)
print("deployment_uid: ", deployment_uid)
if wml_scoring_payload:
        # download the scoring payload if one is provided
cos_endpoint = getSecret("/app/secrets/cos_endpoint")
cos_access_key = getSecret("/app/secrets/cos_access_key")
cos_secret_key = getSecret("/app/secrets/cos_secret_key")
cos_input_bucket = getSecret("/app/secrets/cos_input_bucket")
        # Strip any http(s) scheme from the endpoint for Minio
url = re.compile(r"https?://")
cos_endpoint = url.sub('', cos_endpoint)
payload_file = os.path.join('/app', wml_scoring_payload)
cos = Minio(cos_endpoint,
access_key=cos_access_key,
secret_key=cos_secret_key)
cos.fget_object(cos_input_bucket, wml_scoring_payload, payload_file)
# scoring the deployment
import json
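        # The payload file is assumed to be JSON with a top-level "payload" member
        # (for example {"payload": {"fields": [...], "values": [...]}}, following the
        # WML scoring input schema); only that member is sent to the deployment.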
with open(payload_file) as data_file:
test_data = json.load(data_file)
payload = {client.deployments.ScoringMetaNames.INPUT_DATA: [test_data['payload']]}
data_file.close()
print("Scoring result: ")
result = client.deployments.score(deployment_uid, payload)
else:
result = 'Scoring payload is not provided'
print(result)
Path(args.output_scoring_endpoint_path).parent.mkdir(parents=True, exist_ok=True)
Path(args.output_scoring_endpoint_path).write_text(scoring_endpoint)
Path(args.output_model_uid_path).parent.mkdir(parents=True, exist_ok=True)
Path(args.output_model_uid_path).write_text(model_uid)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--model-name', type=str, required=True)
parser.add_argument('--model-uid', type=str, required=True)
parser.add_argument('--deployment-name', type=str)
parser.add_argument('--scoring-payload', type=str)
parser.add_argument('--output-scoring-endpoint-path', type=str, default='/tmp/scoring_endpoint')
parser.add_argument('--output-model-uid-path', type=str, default='/tmp/model_uid')
args = parser.parse_args()
deploy(args)
| 8,137 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/spark | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/spark/train_spark/component.yaml | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: 'Train Spark Model - IBM Cloud'
description: |
Train a Spark Model using IBM Cloud Spark Service
metadata:
annotations: {platform: 'IBM Cloud Spark Service'}
inputs:
- {name: bucket_name, description: 'Required. Object storage bucket name'}
- {name: data_filename, description: 'Required. Name of the data binary'}
- {name: model_filename, description: 'Required. Name of the training model file'}
- {name: spark_entrypoint, description: 'Required. Entrypoint command for training the spark model'}
outputs:
- {name: model_filepath, description: 'Spark Model binary filepath'}
- {name: train_data_filepath, description: 'Spark training data filepath'}
implementation:
container:
image: docker.io/aipipeline/train_spark:latest
command: ['python']
args: [
-u, train_spark.py,
--bucket_name, {inputValue: bucket_name},
--data_filename, {inputValue: data_filename},
--model_filename, {inputValue: model_filename},
--spark_entrypoint, {inputValue: spark_entrypoint}
]
fileOutputs:
model_filepath: /tmp/model_filepath
train_data_filepath: /tmp/train_data_filepath
| 8,138 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/spark | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/spark/train_spark/Dockerfile | FROM python:3.6.8-stretch
ENV APP_HOME /app
COPY src $APP_HOME
WORKDIR $APP_HOME
| 8,139 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/spark/train_spark | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/spark/train_spark/src/spark-submit.sh | #!/usr/bin/env bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
#
# This script performs the following steps:
# 1. Uploads local files to the cluster host (i.e. '--master').
# The files it uploads are specified in the following parameters:
# --files
# --jars
# --py-files
# The application JAR file or python file
# If you want to use files already on the spark cluster, you can disable
# the uploading of files by setting operating system environment variables
# described below. Uploaded files will be placed on the cluster at
# <TENANT_ID>/data/<generated-UUID>/
# 2. Re-writes paths for files uploaded to the cluster. The re-written paths
# are used when calling submit REST API.
# 3. Captures the submission ID returned by spark-submit and periodically polls for the status
# of the job using the submission ID.
# 4. When the job is FINISHED, downloads 'stdout' and 'stderr' from the
# cluster.
# 5. Delete the job workspace folder <TENANT_ID>/data/<generated-UUID>/ on the cluster
#
# Before running this script, operating system variables must be set.
# Optional:
# SS_APP_MAIN_UPLOAD=<true|false> # Default: 'true' application jar file is uploaded.
# SS_FILES_UPLOAD=<true|false> # Default: 'true'. '--files' and "--py-files" files are uploaded.
# SS_JARS_UPLOAD=<true|false> # Default: 'true'. '--jars' files are uploaded.
# SS_LOG_ENABLE=<true|false> # Default: 'true'. Execution log is created.
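#   For example (illustrative only):
#     export SS_APP_MAIN_UPLOAD=false   # use an application JAR/python file already on the cluster
#     export SS_JARS_UPLOAD=false       # use '--jars' files already on the cluster
#     export SS_LOG_ENABLE=false        # print the execution log to the console instead of a file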
#
# VCAP information needs to be made available to this program in the '--vcap'
# parameter. The VCAP information is obtained from your BlueMix application.
# Here is one way to create a file from your VCAP:
# cat <<EOT > ~/vcap.json
# {
# "credentials": {
# "tenant_id": "xxxxxx",
# "tenant_id_full": "xxxxxx",
# "cluster_master_url": "https://x.x.x.x",
# "instance_id": "xxxxxx",
# "tenant_secret": "xxxxx",
# "plan": "ibm.SparkService.PayGoPersonal"
#  }
# }
# EOT
#
# Example command to run:
#
# ./spark-submit.sh \
# --vcap ~/vcap.json \
# --deploy-mode cluster \
# --class com.ibm.sparkservice.App \
# --master https://x.x.x.x\
# --jars /path/to/mock-library-1.0.jar,/path/to/mock-utils-1.0.jar \
# ~/mock-app-1.0.jar
#
#
###############################################################################
invokeCommand="$(basename $0) $@"
# -- User-modifiable variables ------------------------------------------------
# To modify, set the operating system environment variable to the desired value.
if [ -z ${SS_LOG_ENABLE} ]; then SS_LOG_ENABLE=true; fi # Enable detailed logging
if [ -z ${SS_APP_MAIN_UPLOAD} ]; then SS_APP_MAIN_UPLOAD=true; fi # If true, copy the local application JAR or python file to the spark cluster
if [ -z ${SS_JARS_UPLOAD} ]; then SS_JARS_UPLOAD=true; fi # If true, copy the local JAR files listed in "--jars" to the spark cluster.
if [ -z ${SS_FILES_UPLOAD} ]; then SS_FILES_UPLOAD=true; fi # If true, copy the local files listed in "--files" and "--py-files" to the spark cluster.
if [ -z ${SS_POLL_INTERVAL} ]; then SS_POLL_INTERVAL=10; fi # Number of seconds until script polls spark cluster again.
if [ -z ${SS_SPARK_WORK_DIR} ]; then SS_SPARK_WORK_DIR="workdir"; fi # Work directory on spark cluster
if [ -z ${SS_DEBUG} ]; then SS_DEBUG=false; fi # Detailed debugging
# -- Set working environment variables ----------------------------------------
if [ "${SS_DEBUG}" = "true" ]
then
set -x
fi
EXECUTION_TIMESTAMP="$(date +'%s%N')"
APP_MAIN=
app_parms=
FILES=
JARS=
PY_FILES=
CLASS=
APP_NAME=
DEPLOY_MODE=
LOG_FILE=spark-submit_${EXECUTION_TIMESTAMP}.log
MASTER=
INSTANCE_ID=
TENANT_ID=
TENANT_SECRET=
CLUSTER_MASTER_URL=
SPARK_VERSION=
submissionId=
declare -a CONF_KEY
declare -a CONF_VAL
confI=0
CHECK_STATUS=false
KILL_JOB=false
PY_APP=false
IS_JOB_ERROR=false
HEADER_REQUESTED_WITH=spark-submit
VERSION="1.0.11"
# Determine which sha command to use for UUID calculation
SHASUM_CMD=""
if hash shasum 2>/dev/null; then
SHASUM_CMD="shasum -a 1"
elif hash sha1sum 2>/dev/null; then
SHASUM_CMD="sha1sum"
else
printf "\nCould not find \"sha1sum\" or equivalent command on system. Aborting.\n"
exit -1
fi
# UUID=$(openssl rand -base64 64 | ${SHASUM_CMD} | awk '{print $1}')
SERVER_SUB_DIR="${SS_SPARK_WORK_DIR}/tmp"
uploadList=" "
# =============================================================================
# -- Functions ----------------------------------------------------------------
# =============================================================================
printUsage()
{
printf "\nUsage:"
printf "\n spark-submit.sh --vcap <vcap-file> [options] <app jar | python file> [app arguments]"
printf "\n spark-submit.sh --master [cluster-master-url] --conf 'PROP=VALUE' [options] <app jar | python file> [app arguments]"
printf "\n spark-submit.sh --vcap <vcap-file> --kill [submission ID] "
printf "\n spark-submit.sh --vcap <vcap-file> --status [submission ID] "
printf "\n spark-submit.sh --kill [submission ID] --master [cluster-master-url] --conf 'PROP=VALUE' "
printf "\n spark-submit.sh --status [submission ID] --master [cluster-master-url] --conf 'PROP=VALUE' "
printf "\n spark-submit.sh --help "
printf "\n spark-submit.sh --version "
printf "\n\n vcap-file: json format file that contains spark service credentials, "
printf "\n including cluster_master_url, tenant_id, instance_id, and tenant_secret"
printf "\n cluster_master_url: The value of 'cluster_master_url' on the service credentials page"
printf "\n\n options:"
printf "\n --help Print out usage information."
printf "\n --version Print out the version of spark-submit.sh"
printf "\n --master MASTER_URL MASTER_URL is the value of 'cluster-master-url' from spark service instance credentials"
printf "\n --deploy-mode DEPLOY_MODE DEPLOY_MODE must be 'cluster'"
printf "\n --class CLASS_NAME Your application's main class (for Java / Scala apps)."
printf "\n --name NAME A name of your application."
printf "\n --jars JARS Comma-separated list of local jars to include on the driver and executor classpaths."
printf "\n --files FILES Comma-separated list of files to be placed in the working directory of each executor."
printf "\n --conf PROP=VALUE Arbitrary Spark configuration property. The values of tenant_id, instance_id, tenant_secret, and spark_version can be passed"
printf "\n --py-files PY_FILES Comma-separated list of .zip, .egg, or .py files to place on the PYTHONPATH for Python apps."
printf "\n\n --kill SUBMISSION_ID If given, kills the driver specified."
printf "\n --status SUBMISSION_ID If given, requests the status of the driver specified."
printf "\n"
exit 0
}
printVersion()
{
printf "spark-submit.sh VERSION : '${VERSION}'\n"
exit 0
}
logMessage()
{
if [ "${SS_LOG_ENABLE}" = "true" ]
then
printf "$1" >> ${LOG_FILE}
else
printf "$1"
fi
}
logFile()
{
logMessage "\nContents of $1:\n"
if [ "${SS_LOG_ENABLE}" = "true" ]
then
cat "$1" >> ${LOG_FILE}
else
cat "$1"
fi
}
console()
{
local output_line=$1
printf "${output_line}"
logMessage "${output_line}"
}
endScript()
{
console "\nSubmission complete.\n"
console "spark-submit log file: ${LOG_FILE}\n"
}
endScriptWithCommands()
{
if [ -n "${submissionId}" ]
then
console "Job may still be running.\n"
console "To poll for job status, run the following command:\n"
if [ ! -z "${VCAP_FILE}" ]
then
console "\"spark-submit.sh --status ${submissionId} --vcap ${VCAP_FILE} \" \n"
else
console "\"spark-submit.sh --status ${submissionId} --master ${MASTER} --conf 'spark.service.tenant_id=${TENANT_ID}' --conf 'spark.service.tenant_secret=${TENANT_SECRET}' --conf 'spark.service.instance_id=${INSTANCE_ID}'\" \n"
fi
console "After the job is done, run the following command to download stderr and stdout of the job to local:\n"
console "\"curl ${SS_CURL_OPTIONS} -X GET $(get_http_authentication) -H '$(get_http_instance_id)' https://${HOSTNAME}/tenant/data/${SS_SPARK_WORK_DIR}/${submissionId}/stdout > stdout\" \n"
console "\"curl ${SS_CURL_OPTIONS} -X GET $(get_http_authentication) -H '$(get_http_instance_id)' https://${HOSTNAME}/tenant/data/${SS_SPARK_WORK_DIR}/${submissionId}/stderr > stderr\" \n"
# console "\"curl ${SS_CURL_OPTIONS} -X GET $(get_http_authentication) -H '$(get_http_instance_id)' https://${HOSTNAME}/tenant/data/${SS_SPARK_WORK_DIR}/${submissionId}/model.zip > model.zip\" \n"
if [ "${SS_APP_MAIN_UPLOAD}" = "true" ] || [ "${SS_JARS_UPLOAD}" = "true" ] || [ "${SS_FILES_UPLOAD}" = "true" ]
then
console "After the job is done, we recommend to run the following command to clean the job workspace: \n"
console "\"curl ${SS_CURL_OPTIONS} -X DELETE $(get_http_authentication) -H '$(get_http_instance_id)' https://${HOSTNAME}/tenant/data/${SERVER_SUB_DIR}\" \n"
fi
fi
console "spark-submit log file: ${LOG_FILE}\n"
}
base64Encoder()
{
encoded="`printf $1 | base64`"
echo "${encoded}"
}
get_from_vcap()
{
local vcapFilePath=$1
local vcapKey=$2
# Handle dos2unix issues.
local ctrl_m=$(printf '\015')
echo `grep ${vcapKey}\" ${vcapFilePath} | awk '{print $2}' | sed 's/\"//g' | sed 's/\,//g' | sed "s/${ctrl_m}//g"`
}
get_hostname_from_url()
{
local url=$1
echo ${url} | sed -n 's/[^:]*:\/\/\([^:]*\)[:]*.*/\1/p'
}
get_http_authentication()
{
echo "-u ${TENANT_ID}:${TENANT_SECRET}"
}
get_http_instance_id()
{
echo "X-Spark-service-instance-id: ${INSTANCE_ID}"
}
get_requested_with_header()
{
echo "X-Requested-With: ${HEADER_REQUESTED_WITH}"
}
display_master_url_err_msg()
{
console "ERROR: master URL is missing. Use either --master or --vcap option. Run with --help for usage information.\n"
}
display_err_msg()
{
console "ERROR: $1 is missing. Use either --vcap or --conf option. Run with --help for usage information.\n"
}
display_err_msg_spark_version()
{
console "ERROR: Spark service configuration \"spark.service.spark_version\" is missing. Specify the Spark version using --conf option as \"--conf spark.service.spark_version=<spark version>\". Run with --help for usage information.\n"
}
get_conf_options()
{
logMessage "\nValues passed with --conf option...\n\n"
for ((i=0; i<${#CONF_KEY[@]}; ++i))
do
conf_key=${CONF_KEY[${i}]}
conf_val=${CONF_VAL[${i}]}
logMessage "\t${conf_key} : ${conf_val} \n"
if [[ "${conf_key}" == "spark.service.tenant_id" ]]; then
if [[ -z "${TENANT_ID}" ]]; then
TENANT_ID="${conf_val}"
elif [[ "${conf_val}" != "${TENANT_ID}" ]]; then #if tenant_id is specified in vcap file and in --conf option, and they are not same, then use the one from --conf option.
TENANT_ID="${conf_val}"
logMessage "WARN: configuration \"${conf_key}\" : \"${conf_val}\" does not match with tenant_id in ${VCAP_FILE} file. Using \"${conf_key}\"'s value.\n"
fi
fi
if [[ "${conf_key}" == "spark.service.instance_id" ]]; then
if [[ -z "${INSTANCE_ID}" ]]; then
INSTANCE_ID="${conf_val}"
elif [[ "${conf_val}" != "${INSTANCE_ID}" ]]; then #if instance_id is specified in vcap file and in --conf option, and they are not same, then use the one from --conf option.
INSTANCE_ID="${conf_val}"
logMessage "WARN: configuration \"${conf_key}\" : \"${conf_val}\" does not match with instance_id in ${VCAP_FILE} file. Using \"${conf_key}\"'s value. \n"
fi
fi
if [[ "${conf_key}" == "spark.service.tenant_secret" ]]; then
if [[ -z "${TENANT_SECRET}" ]]; then
TENANT_SECRET="${conf_val}"
elif [[ "${conf_val}" != "${TENANT_SECRET}" ]]; then #if tenant_secret is specified in vcap file and in --conf option, and they are not same, then use the one from --conf option.
TENANT_SECRET="${conf_val}"
logMessage "WARN: configuration \"${conf_key}\" : \"${conf_val}\" does not match with tenant_secret in ${VCAP_FILE} file. Using \"${conf_key}\"'s value. \n"
fi
fi
if [[ "${conf_key}" == "spark.service.spark_version" ]]; then
SPARK_VERSION="${conf_val}"
fi
done
}
local2server()
{
local localPath=$1
local serverPath=$2
local cmd="curl ${SS_CURL_OPTIONS} -X PUT $(get_http_authentication) -H '$(get_http_instance_id)' --data-binary '@${localPath}' https://${HOSTNAME}/tenant/data/${serverPath}"
console "\nUploading ${localPath}\n"
logMessage "local2server command: ${cmd}\n"
local result=$(eval "${cmd}")
uploadList+="$(fileNameFromPath ${localPath})"
logMessage "local2server result: ${result}\n"
}
deleteFolderOnServer()
{
local serverDir=$1
local cmd="curl ${SS_CURL_OPTIONS} -X DELETE $(get_http_authentication) -H '$(get_http_instance_id)' https://${HOSTNAME}/tenant/data/${serverDir}"
console "\nDeleting workspace on server\n"
logMessage "deleteFolderOnServer command: ${cmd}\n"
local result=$(eval "${cmd}")
logMessage "deleteFolderOnServer result: ${result}\n"
}
local2server_list()
{
local localFiles=$1
local files=$2
OIFS=${IFS}
IFS=","
localFileArray=(${localFiles})
fileArray=(${files})
IFS=${OIFS}
for ((i=0; i<${#localFileArray[@]}; ++i))
do
local2server ${localFileArray[${i}]} ${fileArray[${i}]}
done
}
fileNameFromPath()
{
local path=$1
local fileName="`echo ${path} | awk 'BEGIN{FS="/"}{print $NF}'`"
echo "${fileName}"
}
fileNameFromPath_list()
{
local paths=$1
OIFS=${IFS}
IFS=","
pathArray=(${paths})
IFS=${OIFS}
local fileNames=
for ((i=0; i<${#pathArray[@]}; ++i))
do
local fileName=$(fileNameFromPath ${pathArray[${i}]})
if [ -z "${fileNames}" ]
then
fileNames="${fileName}"
else
fileNames="${fileNames},${fileName}"
fi
done
echo "${fileNames}"
}
convert2serverPath()
{
local fileName=$(fileNameFromPath $1)
local serverFile="${SERVER_SUB_DIR}/${fileName}"
echo "${serverFile}"
}
convert2serverPath_list()
{
local localFiles=$1
OIFS=${IFS}
IFS=","
localFileArray=(${localFiles})
IFS=${OIFS}
local serverFiles=
for ((i=0; i<${#localFileArray[@]}; ++i))
do
local serverFile=$(convert2serverPath ${localFileArray[${i}]})
if [ -z "${serverFiles}" ]
then
serverFiles="${serverFile}"
else
serverFiles="${serverFiles},${serverFile}"
fi
done
echo "${serverFiles}"
}
convert2submitPath()
{
local serverFile=$1
echo "${PREFIX_SERVER_PATH}/${serverFile}"
}
convert2submitPath_list()
{
local serverFiles=$1
OIFS=${IFS}
IFS=","
serverFileArray=(${serverFiles})
IFS=${OIFS}
local submitPaths=
for ((i=0; i<${#serverFileArray[@]}; ++i))
do
local submitPath=$(convert2submitPath ${serverFileArray[${i}]})
if [ -z "${submitPaths}" ]
then
submitPaths="${submitPath}"
else
submitPaths="${submitPaths},${submitPath}"
fi
done
echo "${submitPaths}"
}
server2local()
{
local serverPath=$1
local localPath=$2
local cmd="curl ${SS_CURL_OPTIONS} -X GET $(get_http_authentication) -H '$(get_http_instance_id)' -D '${localPath}.header' https://${HOSTNAME}/tenant/data/${serverPath}"
console "\nDownloading ${localPath}\n"
logMessage "server2local command: ${cmd}\n"
local result=$(eval "${cmd}")
   # fileExist counts "404 NOT FOUND" lines in the response headers (0 means the file was found)
   fileExist="`cat "${localPath}.header" | grep "404 NOT FOUND" | wc -l`"
   if [ "${fileExist}" -eq 0 ]
   then
      echo "${result}" > ${localPath}
   fi
rm -f ${localPath}.header
return ${fileExist}
}
terminate_spark()
{
if [ -n "${submissionId}" ]
then
logMessage "WARN: Terminate signal received. Stop spark job: ${submissionId}\n"
local result=$(call_kill_REST)
logMessage "Terminate result : ${result}\n"
# Give it some time before polling for status
sleep ${SS_POLL_INTERVAL}
local resultStatus=$(call_status_REST)
driverStatus="`echo ${resultStatus} | sed -n 's/.*\"driverState\" : \"\([^\"]*\)\",.*/\1/p'`"
echo "Job kill: ${submissionId} status is ${driverStatus}"
fi
endScript
}
ctrlc_handle()
{
while true
do
read -p "Terminate submitted job? (y/n)" isCancel
case $isCancel in
[Yy]* ) isCancel=true; break;;
[Nn]* ) isCancel=false; break;;
* ) echo "Please answer yes or no";;
esac
done
if [[ "$isCancel" = "true" ]]; then
terminate_spark
exit 1
fi
while true
do
read -p "Continue polling for job status? (y/n)" isPolling
case $isPolling in
[Yy]* ) isPolling=true; break;;
[Nn]* ) isPolling=false; break;;
* ) echo "Please answer yes or no";;
esac
done
if [[ "$isPolling" = "false" ]]; then
endScriptWithCommands
exit 0
fi
}
substituteArg()
{
local arg=$1
local fileName="`echo ${arg} | sed -n 's/.*file:\/\/\([^\"]*\)\"/\1/p'`"
local newArg=${arg}
if [ -n "${fileName}" ]
then
if [[ "${uploadList}" =~ "${fileName}" ]]; then
newArg="\"file://${SERVER_SUB_DIR}/${fileName}\""
fi
fi
echo "${newArg}"
}
parsing_appArgs()
{
local argString=$1
OIFS=${IFS}
IFS=","
local argArray=(${argString})
IFS=${OIFS}
local resultArgs=
for ((i=0; i<${#argArray[@]}; ++i))
do
local arg=$(substituteArg ${argArray[${i}]})
if [ -z "${resultArgs}" ]
then
resultArgs="${arg}"
else
resultArgs="${resultArgs},${arg}"
fi
done
echo "${resultArgs}"
}
isSparkServiceConf()
{
local conf_key="$1"
local spark_service_confs="spark.service.tenant_id spark.service.instance_id spark.service.tenant_secret"
[[ $spark_service_confs =~ $conf_key ]] && echo "true" || echo "false"
}
submit_REST_json()
{
local appArgs1="$1"
local appResource="$2"
local mainClass="$3"
local sparkJars="$4"
local sparkFiles="$5"
local sparkPYFiles="$6"
local appArgs=$(parsing_appArgs "${appArgs1}")
local reqJson="{"
reqJson+=" \"action\" : \"CreateSubmissionRequest\", "
if [ "${PY_APP}" = "true" ]
then
local appResourceFileName=$(fileNameFromPath ${appResource})
if [ -n "${sparkPYFiles}" ]
then
local sparkPYFileNames=$(fileNameFromPath_list ${sparkPYFiles})
if [ -n "${appArgs}" ]
then
appArgs="\"--primary-py-file\",\"${appResourceFileName}\",\"--py-files\",\"${sparkPYFileNames}\",${appArgs}"
else
appArgs="\"--primary-py-file\",\"${appResourceFileName}\",\"--py-files\",\"${sparkPYFileNames}\""
fi
else
if [ -n "${appArgs}" ]
then
appArgs="\"--primary-py-file\",\"${appResourceFileName}\",${appArgs}"
else
appArgs="\"--primary-py-file\",\"${appResourceFileName}\""
fi
fi
fi
reqJson+=" \"appArgs\" : [ ${appArgs} ], "
reqJson+=" \"appResource\" : \"${appResource}\","
reqJson+=" \"clientSparkVersion\" : \"${SPARK_VERSION}\","
reqJson+=" \"mainClass\" : \"${mainClass}\", "
reqJson+=" \"sparkProperties\" : { "
##### properties: spark.app.name
reqJson+=" \"spark.app.name\" : \"${APP_NAME}\", "
##### properties: spark.jars - add appResource to jars list if this is java application
if [ -n "${sparkJars}" ]
then
if [ "${PY_APP}" = "false" ]
then
sparkJars+=",${appResource}"
fi
else
if [ "${PY_APP}" = "false" ]
then
sparkJars=${appResource}
fi
fi
if [ -n "${sparkJars}" ]
then
reqJson+=" \"spark.jars\" : \"${sparkJars}\", "
fi
##### properties: spark.files - add appResource to files list if this is python application
if [ -n "${sparkFiles}" ]
then
if [ -n "${sparkPYFiles}" ]
then
      sparkFiles+=",${appResource},${sparkPYFiles}"
elif [ "${PY_APP}" == "true" ]
then
sparkFiles+=",${appResource}"
fi
else
if [ -n "${sparkPYFiles}" ]
then
sparkFiles="${appResource},${sparkPYFiles}"
elif [ "${PY_APP}" == "true" ]
then
sparkFiles="${appResource}"
fi
fi
if [ -n "${sparkFiles}" ]
then
reqJson+=" \"spark.files\" : \"${sparkFiles}\", "
fi
##### properties: spark.submit.pyFiles
if [ -n "${sparkPYFiles}" ]
then
reqJson+=" \"spark.submit.pyFiles\" : \"${sparkPYFiles}\", "
fi
for ((i=0; i<${#CONF_KEY[@]}; ++i))
do
if [[ $(isSparkServiceConf ${CONF_KEY[${i}]}) == "false" ]]; then
reqJson+=" \"${CONF_KEY[${i}]}\" : \"${CONF_VAL[${i}]}\", "
fi
done
##### properties: spark.service.* : all properties specific for spark service
reqJson+=" \"spark.service.tenant_id\" : \"${TENANT_ID}\", "
reqJson+=" \"spark.service.instance_id\" : \"${INSTANCE_ID}\", "
reqJson+=" \"spark.service.tenant_secret\" : \"${TENANT_SECRET}\" "
reqJson+="}"
reqJson+="}"
echo ${reqJson}
}
status_kill_REST_json()
{
reqJson="{"
reqJson+=" \"sparkProperties\" : { "
reqJson+=" \"spark.service.tenant_id\" : \"${TENANT_ID}\", "
reqJson+=" \"spark.service.instance_id\" : \"${INSTANCE_ID}\", "
reqJson+=" \"spark.service.tenant_secret\" : \"${TENANT_SECRET}\", "
reqJson+=" \"spark.service.spark_version\" : \"${SPARK_VERSION}\" "
reqJson+="}"
reqJson+="}"
echo ${reqJson}
}
call_status_REST()
{
local requestBody=$(status_kill_REST_json)
local cmd="curl ${SS_CURL_OPTIONS} -X GET -H '$(get_requested_with_header)' -i --data-binary '${requestBody}' https://${HOSTNAME}/v1/submissions/status/${submissionId}"
console "\nGetting status\n"
logMessage "call_status_REST command: ${cmd}\n"
local statusRequest=$(eval "${cmd}")
logMessage "call_status_REST result: ${statusRequest}\n"
echo "${statusRequest}"
}
call_kill_REST()
{
local requestBody=$(status_kill_REST_json)
local cmd="curl ${SS_CURL_OPTIONS} -X POST -H '$(get_requested_with_header)' -i --data-binary '${requestBody}' https://${HOSTNAME}/v1/submissions/kill/${submissionId}"
console "\nKilling submission\n"
logMessage "call_kill_REST command: ${cmd}\n"
local killRequest=$(eval "${cmd}")
logMessage "call_kill_REST result: ${killRequest}\n"
echo "${killRequest}"
}
# =============================================================================
# -- Main ---------------------------------------------------------------------
# =============================================================================
trap ctrlc_handle SIGINT
# -- Parse command line arguments ---------------------------------------------
if [[ $# == 0 ]]
then
printUsage
exit 1
fi
while [[ $# > 0 ]]
do
key="$1"
case $key in
--help)
printUsage
;;
--version)
printVersion
;;
--master)
MASTER="$2"
HOSTNAME=$(get_hostname_from_url ${MASTER})
logMessage "MASTER HOSTNAME: ${HOSTNAME}\n"
shift
shift
;;
--jars)
JARS="$2"
shift
shift
;;
--files)
FILES="$2"
shift
shift
;;
--class)
CLASS="$2"
shift
shift
;;
--conf)
aconf="$2"
CONF_KEY[${confI}]="`echo ${aconf} | sed -n 's/\([^=].*\)=\(.*\)/\1/p'`"
CONF_VAL[${confI}]="`echo ${aconf} | sed -n 's/\([^=].*\)=\(.*\)/\2/p'`"
((confI++))
shift
shift
;;
--vcap)
VCAP_FILE="$2"
shift
shift
;;
--status)
CHECK_STATUS=true
submissionId="$2"
shift
shift
;;
--kill)
KILL_JOB=true
submissionId="$2"
shift
shift
;;
--name)
APP_NAME="$2"
shift
shift
;;
--py-files)
PY_FILES="$2"
PY_APP=true
shift
shift
;;
--deploy-mode)
DEPLOY_MODE="$2"
shift
shift
;;
*)
if [[ "${key}" =~ ^--.* ]] && [[ -z "${APP_MAIN}" ]]; then
printf "Error: Unrecognized option: ${key} \n"
printUsage
exit 1
else
if [ -z "${APP_MAIN}" ]
then
APP_MAIN="${key}"
shift
else
if [ -z "${app_parms}" ]
then
app_parms=" \"${key}\" "
else
app_parms="${app_parms}, \"${key}\" "
fi
shift
fi
fi
;;
esac
done
# -- Initialize log file ------------------------------------------------------
if [ "${SS_LOG_ENABLE}" = "true" ]
then
rm -f ${LOG_FILE}
console "To see the log, in another terminal window run the following command:\n"
console "tail -f ${LOG_FILE}\n\n"
logMessage "Timestamp: ${EXECUTION_TIMESTAMP}\n"
logMessage "Date: $(date +'%Y-%m-%d %H:%M:%S')\n"
logMessage "VERSION: ${VERSION}\n"
logMessage "\nCommand invocation: ${invokeCommand}\n"
fi
# -- Check variables ----------------------------------------------------------
# Check if both the vcap file and the --master option are missing; if so, raise an error
if [[ -z "${VCAP_FILE}" ]] && [[ -z "${MASTER}" ]];
then
display_master_url_err_msg
exit 1
fi
# -- Pull values from VCAP ----------------------------------------------------
if [ ! -z "${VCAP_FILE}" ]
then
logFile ${VCAP_FILE}
INSTANCE_ID=$(get_from_vcap ${VCAP_FILE} "instance_id")
TENANT_ID=$(get_from_vcap ${VCAP_FILE} "tenant_id")
TENANT_SECRET=$(get_from_vcap ${VCAP_FILE} "tenant_secret")
CLUSTER_MASTER_URL=$(get_from_vcap ${VCAP_FILE} "cluster_master_url")
fi
# -- Check variables ----------------------------------------------------------
# Check if the vcap file does not contain a master url and the --master option is not specified; if so, raise an error.
if [[ -z "${CLUSTER_MASTER_URL}" ]] && [[ -z "${MASTER}" ]]
then
display_master_url_err_msg
exit 1
fi
vcap_hostname=$(get_hostname_from_url ${CLUSTER_MASTER_URL})
if [ ! -z "${MASTER}" ]
then
if [ "${HOSTNAME}" != "${vcap_hostname}" ] # if both the --master option and vcap are specified and they are not same, use the master url from --master option.
then
logMessage "WARN: The URL specified in '--master ${MASTER}' option does not match with the URL in 'cluster_master_url ${CLUSTER_MASTER_URL}' in '--vcap' ${VCAP_FILE}. Using ${MASTER} url.\n"
fi
else
HOSTNAME="${vcap_hostname}" #If --master option is not specified, then use the master url from vcap.
fi
# If IP address (i.e. not a FQDN), then add "--insecure" curl option.
if [[ "${HOSTNAME}" =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
SS_CURL_OPTIONS="${SS_CURL_OPTIONS} --insecure"
fi
# -- Get values from --conf option --------------------------------------------
if [ ! -z "${aconf}" ]
then
get_conf_options
fi
# -- Check variables ----------------------------------------------------------
if [[ -z "${TENANT_ID}" ]]; then
display_err_msg "TENANT_ID"
exit 1
elif [[ -z "${TENANT_SECRET}" ]]; then
display_err_msg "TENANT_SECRET"
exit 1
elif [[ -z "${INSTANCE_ID}" ]]; then
display_err_msg "INSTANCE_ID"
exit 1
fi
if [[ -z "${SPARK_VERSION}" ]]; then
display_err_msg_spark_version
exit 1
fi
# -- Handle request for status or cancel -------------------------------------
if [ "${CHECK_STATUS}" = "true" ]
then
if [ -n "${submissionId}" ]
then
console "$(call_status_REST)\n"
exit 0
else
console "ERROR: You need to specify submission ID after --status option. Run with --help for usage information.\n"
exit 1
fi
fi
if [ "${KILL_JOB}" = "true" ]
then
if [ -n "${submissionId}" ]
then
console "$(call_kill_REST)\n"
exit 0
else
console "ERROR: You need to specify submission ID after --kill option. Run with --help for usage information.\n"
exit 1
fi
fi
# -- Handle request for submit -----------------------------------------------
if [ -z "${DEPLOY_MODE}" ] || [ "${DEPLOY_MODE}" != "cluster" ]
then
console "ERROR: '--deploy-mode' must be set to 'cluster'.\n"
exit 1
fi
if [ -z "${APP_MAIN}" ]
then
console "ERROR: The main application file is not specified correctly. Run with --help for usage information.\n"
exit 1
fi
if [[ "${APP_MAIN}" =~ .*\.py ]]; then
PY_APP=true
fi
if [ -z "${APP_NAME}" ]
then
if [ -z "${CLASS}" ]
then
APP_NAME=${APP_MAIN}
else
APP_NAME=${CLASS}
fi
fi
if [[ "${PY_APP}" = "false" ]] && [[ -z ${CLASS} ]]; then
console "ERROR: Missing option --class \n"
exit 1
fi
# -- Synthesize variables -----------------------------------------------------
if [ -z ${PREFIX_SERVER_PATH} ]; then PREFIX_SERVER_PATH="/gpfs/fs01/user/${TENANT_ID}/data"; fi
# -- Prepare remote path and upload files to the remote path ------------------
posixJars=
if [ "${JARS}" ]
then
if [ "${SS_JARS_UPLOAD}" = "true" ]
then
posixJars=$(convert2serverPath_list ${JARS})
local2server_list ${JARS} ${posixJars}
#posixJars=$(convert2submitPath_list ${posixJars})
else
posixJars="${JARS}"
fi
fi
posixFiles=
if [ "${FILES}" ]
then
if [ "${SS_FILES_UPLOAD}" = "true" ]
then
posixFiles=$(convert2serverPath_list ${FILES})
local2server_list ${FILES} ${posixFiles}
else
posixFiles="${FILES}"
fi
fi
posixPYFiles=
if [ "${PY_FILES}" ]
then
if [ "${SS_FILES_UPLOAD}" = "true" ]
then
posixPYFiles=$(convert2serverPath_list ${PY_FILES})
local2server_list ${PY_FILES} ${posixPYFiles}
else
posixPYFiles="${PY_FILES}"
fi
fi
if [ "${SS_APP_MAIN_UPLOAD}" = "true" ]
then
app_server_path=$(convert2serverPath ${APP_MAIN})
local2server ${APP_MAIN} ${app_server_path}
#app_server_path=$(convert2submitPath ${app_server_path})
else
app_server_path=${APP_MAIN}
fi
# -- Compose spark-submit command ---------------------------------------------
mainClass=${CLASS}
if [ "${PY_APP}" = "true" ]
then
mainClass="org.apache.spark.deploy.PythonRunner"
fi
requestBody=$(submit_REST_json "${app_parms}" "${app_server_path}" "${mainClass}" "${posixJars}" "${posixFiles}" "${posixPYFiles}")
# -- Call spark-submit REST to submit the job to spark cluster ---------------------
cmd="curl ${SS_CURL_OPTIONS} -X POST -H '$(get_requested_with_header)' --data-binary '${requestBody}' https://${HOSTNAME}/v1/submissions/create"
console "\nSubmitting Job\n"
logMessage "Submit job command: ${cmd}\n"
resultSubmit=$(eval "${cmd}")
logMessage "Submit job result: ${resultSubmit}\n"
# -- Parse submit job output to find 'submissionId' value ---------------------
submissionId="`echo ${resultSubmit} | sed -n 's/.*\"submissionId\" : \"\([^\"]*\)\",.*/\1/p'`"
logMessage "\nSubmission ID: ${submissionId}\n"
if [ -z "${submissionId}" ]
then
logMessage "ERROR: Problem submitting job. Exit\n"
endScript
exit 1
fi
console "\nJob submitted : ${submissionId}\n"
# -- Periodically poll job status ---------------------------------------------
driverStatus="NULL"
jobFinished=false
jobFailed=false
try=1
while [[ "${jobFinished}" == false ]]
do
console "\nPolling job status. Poll #${try}.\n"
resultStatus=$(call_status_REST)
((try++))
driverStatus="`echo ${resultStatus} | sed -n 's/.*\"driverState\" : \"\([^\"]*\)\",.*/\1/p'`"
console "driverStatus is ${driverStatus}\n"
case ${driverStatus} in
FINISHED)
console "\nJob finished\n"
jobFinished=true
;;
RUNNING|SUBMITTED)
console "Next poll in ${SS_POLL_INTERVAL} seconds.\n"
sleep ${SS_POLL_INTERVAL}
jobFinished=false
;;
*)
IS_JOB_ERROR=true
logMessage "\n\n==== Failed Status output =====================================================\n"
logMessage "${resultStatus}\n"
logMessage "===============================================================================\n\n"
jobFinished=true
jobFailed=true
;;
esac
done
# -- Download stdout and stderr files -----------------------------------------
logMessage=""
if [ -n "${submissionId}" ]
then
LOCAL_STDOUT_FILENAME="stdout"
LOCAL_STDERR_FILENAME="stderr"
# MODEL_FILENAME="model.zip"
stdout_server_path="${SS_SPARK_WORK_DIR}/${submissionId}/stdout"
server2local ${stdout_server_path} ${LOCAL_STDOUT_FILENAME}
if [ "$?" != 0 ]
then
console "Failed to download from ${stdout_server_path} to ${LOCAL_STDOUT_FILENAME}\n"
else
logMessage="View job's stdout log at ${LOCAL_STDOUT_FILENAME}\n"
fi
stderr_server_path="${SS_SPARK_WORK_DIR}/${submissionId}/stderr"
server2local ${stderr_server_path} ${LOCAL_STDERR_FILENAME}
if [ "$?" != 0 ]
then
console "Failed to download from ${stderr_server_path} to ${LOCAL_STDERR_FILENAME}\n"
else
logMessage="${logMessage}View job's stderr log at ${LOCAL_STDERR_FILENAME}\n"
fi
# model_path="${SS_SPARK_WORK_DIR}/${submissionId}/model.zip"
# server2local ${model_path} ${MODEL_FILENAME}
# if [ "$?" != 0 ]
# then
# console "Failed to download from ${model_path} to ${MODEL_FILENAME}\n"
# else
# logMessage="${logMessage}View job's stderr log at ${MODEL_FILENAME}\n"
# fi
fi
# -- Delete transient files on spark cluster ----------------------------------
if [ "${SS_APP_MAIN_UPLOAD}" = "true" ] || [ "${SS_JARS_UPLOAD}" = "true" ] || [ "${SS_FILES_UPLOAD}" = "true" ]
then
if [ "${jobFinished}" = "true" ]
then
deleteFolderOnServer ${SERVER_SUB_DIR}
fi
fi
# -- Epilog -------------------------------------------------------------------
if [ "${IS_JOB_ERROR}" = "true" ]
then
console "\nERROR: Job failed.\n"
console "spark-submit log file: ${LOG_FILE}\n"
console "${logMessage}"
exit 1
else
endScript
console "${logMessage}"
fi
# -- --------------------------------------------------------------------------
| 8,140 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/spark/train_spark | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/spark/train_spark/src/wrapper.py | import os
from shutil import copyfile
import sys
import json
import re
os.system('pip install Minio --user')
from minio import Minio
# Load Credential file
copyfile('../tmp/creds.json', './creds.json')
with open('creds.json') as f:
creds = json.load(f)
f.close()
# Remove possible http scheme for Minio
url = re.compile(r"https?://")
cos_endpoint = url.sub('', creds['cos_endpoint'])
# Download the data and model file from the object storage.
cos = Minio(cos_endpoint,
access_key=creds['cos_access_key'],
secret_key=creds['cos_secret_key'],
secure=True)
cos.fget_object(creds['bucket_name'], creds['data_filename'], creds['data_filename'])
cos.fget_object(creds['bucket_name'], creds['model_filename'], creds['model_filename'])
os.system('chmod 755 %s' % creds['model_filename'])
os.system(creds['spark_entrypoint'])
os.system('zip -r model.zip model')
os.system('zip -r train_data.zip train_data')
cos.fput_object(creds['bucket_name'], 'model.zip', 'model.zip')
cos.fput_object(creds['bucket_name'], 'train_data.zip', 'train_data.zip')
cos.fput_object(creds['bucket_name'], 'evaluation.json', 'evaluation.json')
print('Trained model and train_data are uploaded.')
| 8,141 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/spark/train_spark | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/spark/train_spark/src/train_spark.py | import os
import argparse
import json
def get_secret_creds(path):
with open(path, 'r') as f:
cred = f.readline().strip('\'')
f.close()
return cred
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--bucket_name', type=str, help='Object storage bucket name', default="dummy-bucket-name")
parser.add_argument('--data_filename', type=str, help='Name of the data binary', default="")
parser.add_argument('--model_filename', type=str, help='Name of the training model file', default="model.py")
parser.add_argument('--spark_entrypoint', type=str, help='Entrypoint command for training the spark model', default="python model.py")
args = parser.parse_args()
cos_bucket_name = args.bucket_name
data_filename = args.data_filename
model_filename = args.model_filename
spark_entrypoint = args.spark_entrypoint
cos_endpoint = get_secret_creds("/app/secrets/cos_endpoint")
cos_access_key = get_secret_creds("/app/secrets/cos_access_key")
cos_secret_key = get_secret_creds("/app/secrets/cos_secret_key")
tenant_id = get_secret_creds("/app/secrets/spark_tenant_id")
cluster_master_url = get_secret_creds("/app/secrets/spark_cluster_master_url")
tenant_secret = get_secret_creds("/app/secrets/spark_tenant_secret")
instance_id = get_secret_creds("/app/secrets/spark_instance_id")
''' Create credentials and vcap files for spark submit'''
creds = {
"cos_endpoint": cos_endpoint,
"cos_access_key": cos_access_key,
"cos_secret_key": cos_secret_key,
"bucket_name": cos_bucket_name,
"data_filename": data_filename,
"model_filename": model_filename,
"spark_entrypoint": spark_entrypoint
}
with open('creds.json', 'w') as f:
json.dump(creds, f)
f.close()
spark_vcap = {
"tenant_id": tenant_id,
"cluster_master_url": cluster_master_url,
"tenant_secret": tenant_secret,
"instance_id": instance_id
}
with open('vcap.json', 'w') as f:
json.dump(spark_vcap, f, indent=2)
f.close()
os.system('chmod 777 spark-submit.sh')
os.system('./spark-submit.sh --vcap ./vcap.json --deploy-mode cluster --conf spark.service.spark_version=2.1 --files creds.json wrapper.py')
os.system('cat stdout')
with open("/tmp/model_filepath", "w") as report:
report.write("model.zip")
with open("/tmp/train_data_filepath", "w") as report:
report.write("train_data.zip")
| 8,142 |
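train_spark.py reads its credentials from one file per key under /app/secrets (see get_secret_creds), a convention shared by the other Spark components in this listing. A minimal sketch of staging placeholder files in that layout for a local dry run; the file names mirror the get_secret_creds() calls above, while every value is an invented placeholder:

# Sketch: write placeholder secret files in the layout train_spark.py expects.
# File names come from the get_secret_creds() calls above; all values are fake.
import os

SECRETS_DIR = '/app/secrets'
placeholders = {
    'cos_endpoint': 'https://s3.example.cloud-object-storage.appdomain.cloud',
    'cos_access_key': 'PLACEHOLDER_ACCESS_KEY',
    'cos_secret_key': 'PLACEHOLDER_SECRET_KEY',
    'spark_tenant_id': 'PLACEHOLDER_TENANT_ID',
    'spark_cluster_master_url': 'https://spark.example.com',
    'spark_tenant_secret': 'PLACEHOLDER_TENANT_SECRET',
    'spark_instance_id': 'PLACEHOLDER_INSTANCE_ID',
}

os.makedirs(SECRETS_DIR, exist_ok=True)
for name, value in placeholders.items():
    with open(os.path.join(SECRETS_DIR, name), 'w') as f:
        f.write(value)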
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/spark | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/spark/store_spark_model/component.yaml | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: 'Store Spark Model - Watson Machine Learning'
description: |
Store any trained Spark Model using IBM Watson Machine Learning Service
metadata:
annotations: {platform: 'IBM Watson Machine Learning Service'}
inputs:
- {name: bucket_name, description: 'Required. Object storage bucket name'}
- {name: aios_manifest_path, description: 'Required. Object storage file path for the aios manifest file'}
- {name: problem_type, description: 'Required. Model problem type'}
- {name: model_name, description: 'Required. Model name for the trained model'}
- {name: deployment_name, description: 'Required. Deployment name for the trained model'}
- {name: model_filepath, description: 'Required. Name of the trained model zip'}
- {name: train_data_filepath, description: 'Required. Name of the training data zip'}
outputs:
- {name: model_uid, description: 'Stored model UID'}
implementation:
container:
image: docker.io/aipipeline/store_spark_model:latest
command: ['python']
args: [
-u, store_spark_model.py,
--bucket_name, {inputValue: bucket_name},
--aios_manifest_path, {inputValue: aios_manifest_path},
--problem_type, {inputValue: problem_type},
--model_name, {inputValue: model_name},
--deployment_name, {inputValue: deployment_name},
--model_filepath, {inputValue: model_filepath},
--train_data_filepath, {inputValue: train_data_filepath}
]
fileOutputs:
model_uid: /tmp/model_uid
| 8,143 |
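A minimal sketch, assuming the component.yaml above has been saved locally, of loading it with the KFP SDK and wiring its inputs inside a pipeline; the local file path, bucket, and manifest name are placeholders rather than values taken from this repository:

# Sketch only: load the 'Store Spark Model' component above and call it in a
# pipeline. The local path and all argument values are illustrative.
from kfp import dsl
from kfp.components import load_component_from_file

store_spark_model_op = load_component_from_file('store_spark_model/component.yaml')

@dsl.pipeline(name='store-spark-model-demo')
def store_model_pipeline(bucket_name='my-bucket'):
    store_task = store_spark_model_op(
        bucket_name=bucket_name,
        aios_manifest_path='aios_manifest.json',
        problem_type='BINARY_CLASSIFICATION',
        model_name='Spark German Risk Model - Final',
        deployment_name='Spark German Risk Deployment - Final',
        model_filepath='model.zip',
        train_data_filepath='train_data.zip',
    )
    # store_task.outputs['model_uid'] can be fed to a downstream deployment step.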
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/spark | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/spark/store_spark_model/Dockerfile | FROM aipipeline/pyspark:spark-2.1
RUN pip install --upgrade pip
RUN pip install --upgrade watson-machine-learning-client ibm-ai-openscale Minio --no-cache | tail -n 1
RUN pip install psycopg2-binary | tail -n 1
ENV APP_HOME /app
COPY src $APP_HOME
WORKDIR $APP_HOME
USER root
ENTRYPOINT ["python"]
CMD ["store_spark_model.py"]
| 8,144 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/spark/store_spark_model | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/spark/store_spark_model/src/store_spark_model.py | import argparse
import json
import os
import re
from pyspark.sql import SparkSession
from pyspark.ml.pipeline import PipelineModel
from pyspark import SparkConf, SparkContext
from pyspark.ml import Pipeline, Model
from watson_machine_learning_client import WatsonMachineLearningAPIClient
from minio import Minio
def get_secret_creds(path):
with open(path, 'r') as f:
cred = f.readline().strip('\'')
f.close()
return cred
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--bucket_name', type=str, help='Object storage bucket name', default="dummy-bucket-name")
parser.add_argument('--model_filepath', type=str, help='Name of the trained spark model packaged as zip', default="model.zip")
parser.add_argument('--train_data_filepath', type=str, help='Name of the train_data zip', default="train_data.zip")
parser.add_argument('--aios_manifest_path', type=str, help='Object storage file path for the aios manifest file', default="")
parser.add_argument('--problem_type', type=str, help='Model problem type', default="BINARY_CLASSIFICATION")
parser.add_argument('--model_name', type=str, help='model name for the trained model', default="Spark German Risk Model - Final")
parser.add_argument('--deployment_name', type=str, help='deployment name for the trained model', default="Spark German Risk Deployment - Final")
args = parser.parse_args()
cos_bucket_name = args.bucket_name
model_filepath = args.model_filepath
aios_manifest_path = args.aios_manifest_path
train_data_filepath = args.train_data_filepath
problem_type = args.problem_type
MODEL_NAME = args.model_name
DEPLOYMENT_NAME = args.deployment_name
wml_url = get_secret_creds("/app/secrets/wml_url")
wml_instance_id = get_secret_creds("/app/secrets/wml_instance_id")
wml_apikey = get_secret_creds("/app/secrets/wml_apikey")
cos_endpoint = get_secret_creds("/app/secrets/cos_endpoint")
cos_access_key = get_secret_creds("/app/secrets/cos_access_key")
cos_secret_key = get_secret_creds("/app/secrets/cos_secret_key")
''' Remove possible http scheme for Minio '''
url = re.compile(r"https?://")
cos_endpoint = url.sub('', cos_endpoint)
WML_CREDENTIALS = {
"url": wml_url,
"instance_id": wml_instance_id,
"apikey": wml_apikey
}
''' Load Spark model '''
cos = Minio(cos_endpoint,
access_key=cos_access_key,
secret_key=cos_secret_key,
secure=True)
cos.fget_object(cos_bucket_name, model_filepath, model_filepath)
cos.fget_object(cos_bucket_name, train_data_filepath, train_data_filepath)
cos.fget_object(cos_bucket_name, 'evaluation.json', 'evaluation.json')
if aios_manifest_path:
cos.fget_object(cos_bucket_name, aios_manifest_path, aios_manifest_path)
os.system('unzip %s' % model_filepath)
print('model ' + model_filepath + ' is downloaded')
os.system('unzip %s' % train_data_filepath)
print('train_data ' + train_data_filepath + ' is downloaded')
sc = SparkContext()
model = PipelineModel.load(model_filepath.split('.')[0])
pipeline = Pipeline(stages=model.stages)
spark = SparkSession.builder.getOrCreate()
train_data = spark.read.csv(path=train_data_filepath.split('.')[0], sep=",", header=True, inferSchema=True)
''' Remove previous deployed model '''
wml_client = WatsonMachineLearningAPIClient(WML_CREDENTIALS)
model_deployment_ids = wml_client.deployments.get_uids()
deleted_model_id = None
for deployment_id in model_deployment_ids:
deployment = wml_client.deployments.get_details(deployment_id)
model_id = deployment['entity']['deployable_asset']['guid']
if deployment['entity']['name'] == DEPLOYMENT_NAME:
print('Deleting deployment id', deployment_id)
wml_client.deployments.delete(deployment_id)
print('Deleting model id', model_id)
wml_client.repository.delete(model_id)
deleted_model_id = model_id
wml_client.repository.list_models()
''' Save and Deploy model '''
if aios_manifest_path:
with open(aios_manifest_path) as f:
aios_manifest = json.load(f)
OUTPUT_DATA_SCHEMA = {'fields': aios_manifest['model_schema'], 'type': 'struct'}
f.close()
else:
OUTPUT_DATA_SCHEMA = None
with open('evaluation.json') as f:
evaluation = json.load(f)
f.close()
if problem_type == 'BINARY_CLASSIFICATION':
EVALUATION_METHOD = 'binary'
else:
EVALUATION_METHOD = 'multiclass'
''' Define evaluation threshold '''
model_props = {
wml_client.repository.ModelMetaNames.NAME: "{}".format(MODEL_NAME),
wml_client.repository.ModelMetaNames.EVALUATION_METHOD: EVALUATION_METHOD,
wml_client.repository.ModelMetaNames.EVALUATION_METRICS: evaluation['metrics']
}
if aios_manifest_path:
model_props[wml_client.repository.ModelMetaNames.OUTPUT_DATA_SCHEMA] = OUTPUT_DATA_SCHEMA
wml_models = wml_client.repository.get_details()
model_uid = None
for model_in in wml_models['models']['resources']:
if MODEL_NAME == model_in['entity']['name']:
model_uid = model_in['metadata']['guid']
break
if model_uid is None:
print("Storing model ...")
published_model_details = wml_client.repository.store_model(model=model, meta_props=model_props, training_data=train_data, pipeline=pipeline)
model_uid = wml_client.repository.get_model_uid(published_model_details)
print("Done")
else:
print("Model already exist")
with open("/tmp/model_uid", "w") as report:
report.write(model_uid)
| 8,145 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/spark | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/spark/data_preprocess_spark/component.yaml | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: 'Preprocess Data using Spark - IBM Cloud'
description: |
Preprocess data using IBM Cloud Spark Service
metadata:
annotations: {platform: 'IBM Cloud Spark Service'}
inputs:
- {name: bucket_name, description: 'Required. Object storage bucket name'}
- {name: data_url, description: 'Required. URL of the data source'}
outputs:
- {name: output, description: 'Data Filename'}
implementation:
container:
image: docker.io/aipipeline/data_preprocess_spark:latest
command: ['python']
args: [
-u, data_preprocess_spark.py,
--bucket_name, {inputValue: bucket_name},
--data_url, {inputValue: data_url}
]
fileOutputs:
output: /tmp/filename
| 8,146 |
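As in the sketch after the Store Spark Model component above, this definition can be loaded with the KFP SDK; the extra point illustrated here is that the component's single file output (the uploaded data file name) can be passed straight to a downstream task. The train_spark component path and its input names are assumptions that mirror the argparse flags in train_spark.py earlier in this listing:

# Sketch: chain the preprocess component's output into a training step.
# Component file paths are assumed; train_op's inputs mirror train_spark.py.
from kfp import dsl
from kfp.components import load_component_from_file

preprocess_op = load_component_from_file('data_preprocess_spark/component.yaml')
train_op = load_component_from_file('train_spark/component.yaml')  # assumed path

@dsl.pipeline(name='spark-preprocess-and-train')
def spark_pipeline(bucket_name='my-bucket',
                   data_url='https://example.com/german_credit.csv'):
    preprocess_task = preprocess_op(bucket_name=bucket_name, data_url=data_url)
    train_task = train_op(
        bucket_name=bucket_name,
        data_filename=preprocess_task.output,  # filename written to /tmp/filename
        model_filename='model.py',
        spark_entrypoint='python model.py',
    )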
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/spark | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/spark/data_preprocess_spark/Dockerfile | FROM aipipeline/pyspark:spark-2.1
RUN pip install --upgrade pip
RUN pip install --upgrade Minio --no-cache | tail -n 1
RUN pip install psycopg2-binary | tail -n 1
ENV APP_HOME /app
COPY src $APP_HOME
WORKDIR $APP_HOME
USER root
ENTRYPOINT ["python"]
CMD ["data_preprocess_spark.py"]
| 8,147 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/spark/data_preprocess_spark | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/spark/data_preprocess_spark/src/data_preprocess_spark.py | import argparse
import requests
from pyspark.sql import SparkSession
from minio import Minio
from minio.error import ResponseError
import re
def get_secret_creds(path):
with open(path, 'r') as f:
cred = f.readline().strip('\'')
f.close()
return cred
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--bucket_name', type=str, help='Object storage bucket name', default="dummy-bucket-name")
parser.add_argument('--data_url', type=str, help='URL of the data source', required=True)
args = parser.parse_args()
cos_bucket_name = args.bucket_name
data_url = args.data_url
cos_endpoint = get_secret_creds("/app/secrets/cos_endpoint")
cos_access_key = get_secret_creds("/app/secrets/cos_access_key")
cos_secret_key = get_secret_creds("/app/secrets/cos_secret_key")
''' Remove possible http scheme for Minio '''
url = re.compile(r"https?://")
cos_endpoint = url.sub('', cos_endpoint)
''' Download data from data source '''
filename = data_url
response = requests.get(data_url, allow_redirects=True)
    if '/' in data_url:
        filename = data_url.rsplit('/', 1)[1]
    with open(filename, 'wb') as f:
        f.write(response.content)
''' Read data with Spark SQL '''
spark = SparkSession.builder.getOrCreate()
df_data = spark.read.csv(path=filename, sep=",", header=True, inferSchema=True)
df_data.head()
''' Upload data to Cloud object storage '''
cos = Minio(cos_endpoint,
access_key=cos_access_key,
secret_key=cos_secret_key,
secure=True)
if not cos.bucket_exists(cos_bucket_name):
try:
cos.make_bucket(cos_bucket_name)
except ResponseError as err:
print(err)
cos.fput_object(cos_bucket_name, filename, filename)
print('Data ' + filename + ' is uploaded to bucket at ' + cos_bucket_name)
with open("/tmp/filename", "w") as report:
report.write(filename)
df_data.printSchema()
print("Number of records: " + str(df_data.count()))
| 8,148 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/commons | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/commons/config/component.yaml | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: 'Create Secret - Kubernetes Cluster'
description: |
Create secret to store pipeline credentials on Kubernetes Cluster
inputs:
- {name: token, description: 'Required. GitHub token for accessing private repository'}
- {name: url, description: 'Required. GitHub raw path for accessing the credential file'}
- {name: name, description: 'Required. Secret Name to be stored in Kubernetes'}
outputs:
- {name: secret_name, description: 'Kubernetes secret name'}
implementation:
container:
image: docker.io/aipipeline/wml-config:latest
command: ['python3']
args: [
/app/config.py,
--token, {inputValue: token},
--url, {inputValue: url},
--name, {inputValue: name},
--output-secret-name-file, {outputPath: secret_name},
]
| 8,149 |
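A short sketch of how this 'Create Secret' component could be used; the component path, config file URL, token, and secret name are placeholders, and the secret_name output is what downstream components would reference when mounting the credentials:

# Sketch: create the credentials secret first, then let later steps consume it.
# The component path, URL, token and secret name are illustrative placeholders.
from kfp import dsl
from kfp.components import load_component_from_file

config_op = load_component_from_file('commons/config/component.yaml')

@dsl.pipeline(name='create-secret-demo')
def secret_pipeline(
    github_token='',
    config_url='https://raw.githubusercontent.com/example/repo/master/creds.ini',
    secret_name='kfp-creds',
):
    config_task = config_op(token=github_token, url=config_url, name=secret_name)
    # config_task.outputs['secret_name'] holds the created secret's name; later
    # steps can use it when attaching the secret as a volume or env source.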
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/commons | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/commons/config/Dockerfile | FROM python:3.6-slim
# Directories for model codes and secrets
RUN mkdir /app
# Install curl and kubectl
RUN apt-get update
RUN apt-get install -y curl gnupg
RUN apt-get install -y apt-transport-https
RUN curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
RUN echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | tee -a /etc/apt/sources.list.d/kubernetes.list
RUN apt-get update
RUN apt-get install -y kubectl
# Copy the config script
COPY src/config.py /app
| 8,150 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/commons/config | kubeflow_public_repos/kfp-tekton-backend/components/ibm-components/commons/config/src/config.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--token', type=str, required=True)
parser.add_argument('--url', type=str, required=True)
parser.add_argument('--name', type=str)
parser.add_argument('--output-secret-name-file', type=str)
args = parser.parse_args()
access_token = args.token
config_file_path = args.url
# download config file
    # the default creds.ini is in the publicly accessible GitHub repo
import subprocess
import os
config_file = os.path.basename(config_file_path)
config_local_path = os.path.join('/tmp', config_file)
command = ['curl', '-H', 'Authorization: token %s' % access_token, '-L', '-o', config_local_path, config_file_path]
subprocess.run(command, check=True)
secret_name = args.name
if (not secret_name):
secret_name = 'ai-pipeline-' + os.path.splitext(config_file)[0]
try:
command = ['kubectl', 'delete', 'secret', secret_name]
subprocess.run(command, check=True)
except Exception as e:
print('No previous secret: ' + secret_name + '. Secret deletion is not performed.')
# gather all secrets
command = ['kubectl', 'create', 'secret', 'generic', secret_name]
import configparser
config = configparser.ConfigParser()
config.read(config_local_path)
for section in config.sections():
for key in config[section]:
command.append('--from-literal=%s=\'%s\'' % (key, config[section][key]))
# create the secret
subprocess.run(command, check=True)
# verify secret is created
subprocess.run(['kubectl', 'describe', 'secret', secret_name], check=True)
# indicate that secret is created and pass the secret name forward
from pathlib import Path
Path(args.output_secret_name_file).parent.mkdir(parents=True, exist_ok=True)
Path(args.output_secret_name_file).write_text(secret_name)
| 8,151 |
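config.py feeds the downloaded file to configparser and turns every key of every section into one --from-literal argument. A hypothetical example of that mapping, run entirely in memory; the section and key names below are invented for illustration only:

# Hypothetical example of the INI layout config.py consumes.
import configparser

sample_ini = """
[credentials]
cos_endpoint = https://s3.example.cloud-object-storage.appdomain.cloud
cos_access_key = PLACEHOLDER
cos_secret_key = PLACEHOLDER
"""

config = configparser.ConfigParser()
config.read_string(sample_ini)

# Mirrors how config.py builds the kubectl command: one --from-literal per key.
args = ['kubectl', 'create', 'secret', 'generic', 'ai-pipeline-creds']
for section in config.sections():
    for key in config[section]:
        args.append("--from-literal=%s='%s'" % (key, config[section][key]))
print(args)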
0 | kubeflow_public_repos/kfp-tekton-backend/components/local | kubeflow_public_repos/kfp-tekton-backend/components/local/confusion_matrix/build_image.sh | #!/bin/bash -e
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# build base image
pushd ../base
./build_image.sh
popd
../../build_image.sh -l ml-pipeline-local-confusion-matrix "$@"
| 8,152 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/local | kubeflow_public_repos/kfp-tekton-backend/components/local/confusion_matrix/component.yaml | name: Confusion matrix
description: Calculates confusion matrix
inputs:
- {name: Predictions, type: GCSPath, description: 'GCS path of prediction file pattern.'} # type: {GCSPath: {data_type: CSV}}
- {name: Target lambda, type: String, default: '', description: 'Text of Python lambda function which computes target value. For example, "lambda x: x[''a''] + x[''b'']". If not set, the input must include a "target" column.'}
- {name: Output dir, type: GCSPath, description: 'GCS path of the output directory.'} # type: {GCSPath: {path_type: Directory}}
outputs:
- {name: MLPipeline UI metadata, type: UI metadata}
- {name: MLPipeline Metrics, type: Metrics}
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-local-confusion-matrix:ad9bd5648dd0453005225779f25d8cebebc7ca00
command: [python2, /ml/confusion_matrix.py]
args: [
--predictions, {inputValue: Predictions},
--target_lambda, {inputValue: Target lambda},
--output, {inputValue: Output dir},
]
fileOutputs:
MLPipeline UI metadata: /mlpipeline-ui-metadata.json
MLPipeline Metrics: /mlpipeline-metrics.json
| 8,153 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/local | kubeflow_public_repos/kfp-tekton-backend/components/local/confusion_matrix/Dockerfile | # Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ml-pipeline-local-base
RUN mkdir /usr/licenses && \
/ml/license.sh /ml/third_party_licenses.csv /usr/licenses
ENTRYPOINT ["python", "/ml/confusion_matrix.py"]
| 8,154 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/local/confusion_matrix | kubeflow_public_repos/kfp-tekton-backend/components/local/confusion_matrix/src/confusion_matrix.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A program to generate confusion matrix data out of prediction results.
# Usage:
# python confusion_matrix.py \
# --predictions=gs://bradley-playground/sfpd/predictions/part-* \
# --output=gs://bradley-playground/sfpd/cm/ \
# --target=resolution \
# --analysis=gs://bradley-playground/sfpd/analysis \
import argparse
import json
import os
import urlparse
import pandas as pd
from sklearn.metrics import confusion_matrix, accuracy_score
from tensorflow.python.lib.io import file_io
def main(argv=None):
  parser = argparse.ArgumentParser(description='Confusion matrix generator')
parser.add_argument('--predictions', type=str, help='GCS path of prediction file pattern.')
parser.add_argument('--output', type=str, help='GCS path of the output directory.')
parser.add_argument('--target_lambda', type=str,
                      help='a lambda function as a string to compute target. ' +
                           'For example, "lambda x: x[\'a\'] + x[\'b\']". ' +
'If not set, the input must include a "target" column.')
args = parser.parse_args()
storage_service_scheme = urlparse.urlparse(args.output).scheme
on_cloud = True if storage_service_scheme else False
if not on_cloud and not os.path.exists(args.output):
os.makedirs(args.output)
schema_file = os.path.join(os.path.dirname(args.predictions), 'schema.json')
schema = json.loads(file_io.read_file_to_string(schema_file))
names = [x['name'] for x in schema]
dfs = []
files = file_io.get_matching_files(args.predictions)
for file in files:
with file_io.FileIO(file, 'r') as f:
dfs.append(pd.read_csv(f, names=names))
df = pd.concat(dfs)
if args.target_lambda:
df['target'] = df.apply(eval(args.target_lambda), axis=1)
vocab = list(df['target'].unique())
cm = confusion_matrix(df['target'], df['predicted'], labels=vocab)
data = []
for target_index, target_row in enumerate(cm):
for predicted_index, count in enumerate(target_row):
data.append((vocab[target_index], vocab[predicted_index], count))
df_cm = pd.DataFrame(data, columns=['target', 'predicted', 'count'])
cm_file = os.path.join(args.output, 'confusion_matrix.csv')
with file_io.FileIO(cm_file, 'w') as f:
df_cm.to_csv(f, columns=['target', 'predicted', 'count'], header=False, index=False)
metadata = {
'outputs' : [{
'type': 'confusion_matrix',
'format': 'csv',
'schema': [
{'name': 'target', 'type': 'CATEGORY'},
{'name': 'predicted', 'type': 'CATEGORY'},
{'name': 'count', 'type': 'NUMBER'},
],
'source': cm_file,
      # Convert vocab to string because for boolean values we want "True|False" to match csv data.
'labels': list(map(str, vocab)),
}]
}
with file_io.FileIO('/mlpipeline-ui-metadata.json', 'w') as f:
json.dump(metadata, f)
accuracy = accuracy_score(df['target'], df['predicted'])
metrics = {
'metrics': [{
'name': 'accuracy-score',
'numberValue': accuracy,
'format': "PERCENTAGE",
}]
}
with file_io.FileIO('/mlpipeline-metrics.json', 'w') as f:
json.dump(metrics, f)
if __name__== "__main__":
main()
| 8,155 |
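The --target_lambda flag is evaluated with eval and applied row by row via DataFrame.apply(..., axis=1), so the lambda receives one prediction row at a time. A small self-contained illustration of that mechanism with made-up column names:

# Illustrates how a --target_lambda string is evaluated against prediction rows,
# mirroring df.apply(eval(args.target_lambda), axis=1) above. Column names are
# made up for the example.
import pandas as pd

df = pd.DataFrame({'a': [0, 1, 1], 'b': [1, 0, 1], 'predicted': [1, 0, 1]})
target_lambda = "lambda x: x['a'] + x['b']"   # same format as the CLI flag
df['target'] = df.apply(eval(target_lambda), axis=1)
print(df[['target', 'predicted']])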
0 | kubeflow_public_repos/kfp-tekton-backend/components/local | kubeflow_public_repos/kfp-tekton-backend/components/local/base/build_image.sh | #!/bin/bash -e
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
mkdir -p ./build
rsync -arvp "../confusion_matrix/src"/ ./build/
rsync -arvp "../roc/src"/ ./build/
cp ../../license.sh ./build
cp ../../third_party_licenses.csv ./build
docker build -t ml-pipeline-local-base .
rm -rf ./build
| 8,156 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/local | kubeflow_public_repos/kfp-tekton-backend/components/local/base/Dockerfile | # Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ubuntu:16.04
RUN apt-get update -y && apt-get install --no-install-recommends -y -q ca-certificates python-dev python-setuptools wget unzip
RUN easy_install pip
RUN pip install google-api-python-client==1.6.2
RUN pip install pandas==0.18.1
RUN pip install scikit-learn==0.19.1
RUN pip install scipy==1.0.0
RUN pip install tensorflow==1.5
RUN wget -nv https://dl.google.com/dl/cloudsdk/release/google-cloud-sdk.zip && \
unzip -qq google-cloud-sdk.zip -d tools && \
rm google-cloud-sdk.zip && \
tools/google-cloud-sdk/install.sh --usage-reporting=false \
--path-update=false --bash-completion=false \
--disable-installation-options && \
tools/google-cloud-sdk/bin/gcloud -q components update \
gcloud core gsutil && \
tools/google-cloud-sdk/bin/gcloud config set component_manager/disable_update_check true && \
touch /tools/google-cloud-sdk/lib/third_party/google.py
ADD build /ml
ENV PATH $PATH:/tools/node/bin:/tools/google-cloud-sdk/bin
| 8,157 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/local | kubeflow_public_repos/kfp-tekton-backend/components/local/roc/build_image.sh | #!/bin/bash -e
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# build base image
pushd ../base
./build_image.sh
popd
../../build_image.sh -l ml-pipeline-local-roc "$@"
| 8,158 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/local | kubeflow_public_repos/kfp-tekton-backend/components/local/roc/component.yaml | name: ROC curve
description: Calculates Receiver Operating Characteristic curve. See https://en.wikipedia.org/wiki/Receiver_operating_characteristic
inputs:
- {name: Predictions dir, type: GCSPath, description: 'GCS path of prediction file pattern.'} #TODO: Replace dir data + schema files # type: {GCSPath: {path_type: Directory}}
- {name: True class, type: String, default: 'true', description: 'The true class label for the sample. Default is "true".'}
- {name: True score column, type: String, default: 'true', description: 'The name of the column for positive probability.'}
- {name: Target lambda, type: String, default: '', description: 'Text of Python lambda function which returns boolean value indicating whether the classification result is correct.\nFor example, "lambda x: x[''a''] and x[''b'']". If missing, input must have a "target" column.'}
- {name: Output dir, type: GCSPath, description: 'GCS path of the output directory.'} #TODO: Replace dir with single file # type: {GCSPath: {path_type: Directory}}
outputs:
- {name: MLPipeline UI metadata, type: UI metadata}
- {name: MLPipeline Metrics, type: Metrics}
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-local-confusion-matrix:ad9bd5648dd0453005225779f25d8cebebc7ca00
command: [python2, /ml/roc.py]
args: [
--predictions, {inputValue: Predictions dir},
--trueclass, {inputValue: True class},
--true_score_column, {inputValue: True score column},
--target_lambda, {inputValue: Target lambda},
--output, {inputValue: Output dir},
]
fileOutputs:
MLPipeline UI metadata: /mlpipeline-ui-metadata.json
MLPipeline Metrics: /mlpipeline-metrics.json
| 8,159 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/local | kubeflow_public_repos/kfp-tekton-backend/components/local/roc/Dockerfile | # Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ml-pipeline-local-base
RUN mkdir /usr/licenses && \
/ml/license.sh /ml/third_party_licenses.csv /usr/licenses
ENTRYPOINT ["python", "/ml/roc.py"]
| 8,160 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/local/roc | kubeflow_public_repos/kfp-tekton-backend/components/local/roc/src/roc.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A program to generate ROC data out of prediction results.
# Usage:
# python roc.py \
# --predictions=gs://bradley-playground/sfpd/predictions/part-* \
# --trueclass=ACTION \
# --output=gs://bradley-playground/sfpd/roc/ \
import argparse
import json
import os
import urlparse
import pandas as pd
from sklearn.metrics import roc_curve, roc_auc_score
from tensorflow.python.lib.io import file_io
def main(argv=None):
  parser = argparse.ArgumentParser(description='ROC curve generator')
parser.add_argument('--predictions', type=str, help='GCS path of prediction file pattern.')
parser.add_argument('--trueclass', type=str, default='true',
help='The name of the class as true value. If missing, assuming it is ' +
                           'binary classification and defaults to "true".')
parser.add_argument('--true_score_column', type=str, default='true',
help='The name of the column for positive prob. If missing, assuming it is ' +
'binary classification and defaults to "true".')
parser.add_argument('--target_lambda', type=str,
                      help='a lambda function as a string to determine positive or negative. ' +
'For example, "lambda x: x[\'a\'] and x[\'b\']". If missing, ' +
'input must have a "target" column.')
parser.add_argument('--output', type=str, help='GCS path of the output directory.')
args = parser.parse_args()
storage_service_scheme = urlparse.urlparse(args.output).scheme
on_cloud = True if storage_service_scheme else False
if not on_cloud and not os.path.exists(args.output):
os.makedirs(args.output)
schema_file = os.path.join(os.path.dirname(args.predictions), 'schema.json')
schema = json.loads(file_io.read_file_to_string(schema_file))
names = [x['name'] for x in schema]
if not args.target_lambda and 'target' not in names:
raise ValueError('There is no "target" column, and target_lambda is not provided.')
if args.true_score_column not in names:
raise ValueError('Cannot find column name "%s"' % args.true_score_column)
dfs = []
files = file_io.get_matching_files(args.predictions)
for file in files:
with file_io.FileIO(file, 'r') as f:
dfs.append(pd.read_csv(f, names=names))
df = pd.concat(dfs)
if args.target_lambda:
df['target'] = df.apply(eval(args.target_lambda), axis=1)
else:
df['target'] = df['target'].apply(lambda x: 1 if x == args.trueclass else 0)
fpr, tpr, thresholds = roc_curve(df['target'], df[args.true_score_column])
roc_auc = roc_auc_score(df['target'], df[args.true_score_column])
df_roc = pd.DataFrame({'fpr': fpr, 'tpr': tpr, 'thresholds': thresholds})
roc_file = os.path.join(args.output, 'roc.csv')
with file_io.FileIO(roc_file, 'w') as f:
df_roc.to_csv(f, columns=['fpr', 'tpr', 'thresholds'], header=False, index=False)
metadata = {
'outputs': [{
'type': 'roc',
'format': 'csv',
'schema': [
{'name': 'fpr', 'type': 'NUMBER'},
{'name': 'tpr', 'type': 'NUMBER'},
{'name': 'thresholds', 'type': 'NUMBER'},
],
'source': roc_file
}]
}
with file_io.FileIO('/mlpipeline-ui-metadata.json', 'w') as f:
json.dump(metadata, f)
metrics = {
'metrics': [{
'name': 'roc-auc-score',
'numberValue': roc_auc,
}]
}
with file_io.FileIO('/mlpipeline-metrics.json', 'w') as f:
json.dump(metrics, f)
if __name__== "__main__":
main()
| 8,161 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/diagnostics | kubeflow_public_repos/kfp-tekton-backend/components/diagnostics/diagnose_me/component.py | # Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, NamedTuple, Optional
def run_diagnose_me(
bucket: str,
execution_mode: str,
project_id: str,
target_apis: str,
quota_check: list = None,
) -> NamedTuple('Outputs', [('bucket', str), ('project_id', str)]):
""" Performs environment verification specific to this pipeline.
args:
bucket:
string name of the bucket to be checked. Must be of the format
gs://bucket_root/any/path/here/is/ignored where any path beyond root
is ignored.
execution_mode:
              If set to HALT_ON_ERROR will cause any error to raise an exception.
This is intended to stop the data processing of a pipeline. Can set
to False to only report Errors/Warnings.
project_id:
GCP project ID which is assumed to be the project under which
current pod is executing.
target_apis:
String consisting of a comma separated list of apis to be verified.
quota_check:
List of entries describing how much quota is required. Each entry
has three fields: region, metric and quota_needed. All
string-typed.
Raises:
RuntimeError: If configuration is not setup properly and
HALT_ON_ERROR flag is set.
"""
# Installing pip3 and kfp, since the base image 'google/cloud-sdk:279.0.0'
# does not come with pip3 pre-installed.
import subprocess
subprocess.run([
'curl', 'https://bootstrap.pypa.io/get-pip.py', '-o', 'get-pip.py'
],
capture_output=True)
subprocess.run(['apt-get', 'install', 'python3-distutils', '--yes'],
capture_output=True)
subprocess.run(['python3', 'get-pip.py'], capture_output=True)
subprocess.run(['python3', '-m', 'pip', 'install', 'kfp>=0.1.31', '--quiet'],
capture_output=True)
import sys
from kfp.cli.diagnose_me import gcp
config_error_observed = False
quota_list = gcp.get_gcp_configuration(
gcp.Commands.GET_QUOTAS, human_readable=False
)
if quota_list.has_error:
print('Failed to retrieve project quota with error %s\n' % (quota_list.stderr))
config_error_observed = True
else:
# Check quota.
quota_dict = {} # Mapping from region to dict[metric, available]
for region_quota in quota_list.json_output:
quota_dict[region_quota['name']] = {}
for quota in region_quota['quotas']:
quota_dict[region_quota['name']][quota['metric']
] = quota['limit'] - quota['usage']
    quota_check = quota_check or []
for single_check in quota_check:
if single_check['region'] not in quota_dict:
print(
'Regional quota for %s does not exist in current project.\n' %
(single_check['region'])
)
config_error_observed = True
else:
if quota_dict[single_check['region']][single_check['metric']
] < single_check['quota_needed']:
print(
'Insufficient quota observed for %s at %s: %s is needed but only %s is available.\n'
% (
single_check['metric'], single_check['region'],
str(single_check['quota_needed']
), str(quota_dict[single_check['region']][single_check['metric']])
)
)
config_error_observed = True
# Get the project ID
# from project configuration
project_config = gcp.get_gcp_configuration(
gcp.Commands.GET_GCLOUD_DEFAULT, human_readable=False
)
if not project_config.has_error:
auth_project_id = project_config.parsed_output['core']['project']
print(
'GCP credentials are configured with access to project: %s ...\n' %
(project_id)
)
print('Following account(s) are active under this pipeline:\n')
subprocess.run(['gcloud', 'auth', 'list', '--format', 'json'])
print('\n')
else:
print(
'Project configuration is not accessible with error %s\n' %
(project_config.stderr),
file=sys.stderr
)
config_error_observed = True
if auth_project_id != project_id:
print(
'User provided project ID %s does not match the configuration %s\n' %
(project_id, auth_project_id),
file=sys.stderr
)
config_error_observed = True
# Get project buckets
get_project_bucket_results = gcp.get_gcp_configuration(
gcp.Commands.GET_STORAGE_BUCKETS, human_readable=False
)
if get_project_bucket_results.has_error:
print(
'could not retrieve project buckets with error: %s' %
(get_project_bucket_results.stderr),
file=sys.stderr
)
config_error_observed = True
# Get the root of the user provided bucket i.e. gs://root.
bucket_root = '/'.join(bucket.split('/')[0:3])
print(
'Checking to see if the provided GCS bucket\n %s\nis accessible ...\n' %
(bucket)
)
if bucket_root in get_project_bucket_results.json_output:
print(
'Provided bucket \n %s\nis accessible within the project\n %s\n' %
(bucket, project_id)
)
else:
print(
        'Could not find the bucket %s in project %s. ' % (bucket, project_id) +
'Please verify that you have provided the correct GCS bucket name.\n' +
'Only the following buckets are visible in this project:\n%s' %
(get_project_bucket_results.parsed_output),
file=sys.stderr
)
config_error_observed = True
# Verify APIs that are required are enabled
api_config_results = gcp.get_gcp_configuration(gcp.Commands.GET_APIS)
api_status = {}
if api_config_results.has_error:
print(
'could not retrieve API status with error: %s' %
(api_config_results.stderr),
file=sys.stderr
)
config_error_observed = True
print('Checking APIs status ...')
for item in api_config_results.parsed_output:
api_status[item['config']['name']] = item['state']
# printing the results in stdout for logging purposes
print('%s %s' % (item['config']['name'], item['state']))
# Check if target apis are enabled
api_check_results = True
for api in target_apis.replace(' ', '').split(','):
if 'ENABLED' != api_status.get(api, 'DISABLED'):
api_check_results = False
print(
'API \"%s\" is not accessible or not enabled. To enable this api go to '
% (api) +
'https://console.cloud.google.com/apis/library/%s?project=%s' %
(api, project_id),
file=sys.stderr
)
config_error_observed = True
if 'HALT_ON_ERROR' in execution_mode and config_error_observed:
raise RuntimeError(
'There was an error in your environment configuration.\n' +
'Note that resolving such issues generally require a deep knowledge of Kubernetes.\n'
+ '\n' +
'We highly recommend that you recreate the cluster and check "Allow access ..." \n'
+
'checkbox during cluster creation to have the cluster configured automatically.\n'
+
'For more information on this and other troubleshooting instructions refer to\n'
+ 'our troubleshooting guide.\n' + '\n' +
'If you have intentionally modified the cluster configuration, you may\n'
+
'bypass this error by removing the execution_mode HALT_ON_ERROR flag.\n'
)
return (project_id, bucket)
if __name__ == '__main__':
import kfp.components as comp
comp.func_to_container_op(
run_diagnose_me,
base_image='google/cloud-sdk:279.0.0',
output_component_file='component.yaml',
)
| 8,162 |
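A sketch of calling run_diagnose_me directly, assuming the module above is importable and gcloud is configured in the environment; the project, bucket, API list, and quota entry are placeholders. Note that the code above compares quota_needed against limit minus usage, so a numeric value is used here, and the returned tuple follows the order of the return statement above:

# Sketch: direct invocation with an illustrative quota_check entry.
# from component import run_diagnose_me  # assuming the module above is importable
quota_check = [
    {'region': 'us-central1', 'metric': 'CPUS', 'quota_needed': 12.0},
]

returned_project_id, returned_bucket = run_diagnose_me(
    bucket='gs://my-bucket/some/prefix',
    execution_mode='HALT_ON_ERROR',
    project_id='my-gcp-project',
    target_apis='stackdriver.googleapis.com, storage-api.googleapis.com',
    quota_check=quota_check,
)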
0 | kubeflow_public_repos/kfp-tekton-backend/components/diagnostics | kubeflow_public_repos/kfp-tekton-backend/components/diagnostics/diagnose_me/component.yaml | name: Run diagnose me
description: |-
Performs environment verification specific to this pipeline.
args:
bucket:
string name of the bucket to be checked. Must be of the format
gs://bucket_root/any/path/here/is/ignored where any path beyond root
is ignored.
execution_mode:
    If set to HALT_ON_ERROR will cause any error to raise an exception.
This is intended to stop the data processing of a pipeline. Can set
to False to only report Errors/Warnings.
project_id:
GCP project ID which is assumed to be the project under which
current pod is executing.
target_apis:
String consisting of a comma separated list of apis to be verified.
quota_check:
List of entries describing how much quota is required. Each entry
has three fields: region, metric and quota_needed. All
string-typed.
Raises:
RuntimeError: If configuration is not setup properly and
HALT_ON_ERROR flag is set.
inputs:
- name: bucket
type: String
- name: execution_mode
type: String
- name: project_id
type: String
- name: target_apis
type: String
- name: quota_check
type: JsonArray
optional: true
outputs:
- name: bucket
type: String
- name: project_id
type: String
implementation:
container:
image: google/cloud-sdk:279.0.0
command:
- python3
- -u
- -c
- |
from typing import NamedTuple
def run_diagnose_me(
bucket: str,
execution_mode: str,
project_id: str,
target_apis: str,
quota_check: list = None,
) -> NamedTuple('Outputs', [('bucket', str), ('project_id', str)]):
""" Performs environment verification specific to this pipeline.
args:
bucket:
string name of the bucket to be checked. Must be of the format
gs://bucket_root/any/path/here/is/ignored where any path beyond root
is ignored.
execution_mode:
              If set to HALT_ON_ERROR will cause any error to raise an exception.
This is intended to stop the data processing of a pipeline. Can set
to False to only report Errors/Warnings.
project_id:
GCP project ID which is assumed to be the project under which
current pod is executing.
target_apis:
String consisting of a comma separated list of apis to be verified.
quota_check:
List of entries describing how much quota is required. Each entry
has three fields: region, metric and quota_needed. All
string-typed.
Raises:
RuntimeError: If configuration is not setup properly and
HALT_ON_ERROR flag is set.
"""
# Installing pip3 and kfp, since the base image 'google/cloud-sdk:279.0.0'
# does not come with pip3 pre-installed.
import subprocess
subprocess.run([
'curl', 'https://bootstrap.pypa.io/get-pip.py', '-o', 'get-pip.py'
],
capture_output=True)
subprocess.run(['apt-get', 'install', 'python3-distutils', '--yes'],
capture_output=True)
subprocess.run(['python3', 'get-pip.py'], capture_output=True)
subprocess.run(['python3', '-m', 'pip', 'install', 'kfp>=0.1.31', '--quiet'],
capture_output=True)
import sys
from kfp.cli.diagnose_me import gcp
config_error_observed = False
quota_list = gcp.get_gcp_configuration(
gcp.Commands.GET_QUOTAS, human_readable=False
)
if quota_list.has_error:
print('Failed to retrieve project quota with error %s\n' % (quota_list.stderr))
config_error_observed = True
else:
# Check quota.
quota_dict = {} # Mapping from region to dict[metric, available]
for region_quota in quota_list.json_output:
quota_dict[region_quota['name']] = {}
for quota in region_quota['quotas']:
quota_dict[region_quota['name']][quota['metric']
] = quota['limit'] - quota['usage']
          quota_check = quota_check or []
for single_check in quota_check:
if single_check['region'] not in quota_dict:
print(
'Regional quota for %s does not exist in current project.\n' %
(single_check['region'])
)
config_error_observed = True
else:
if quota_dict[single_check['region']][single_check['metric']
] < single_check['quota_needed']:
print(
'Insufficient quota observed for %s at %s: %s is needed but only %s is available.\n'
% (
single_check['metric'], single_check['region'],
str(single_check['quota_needed']
), str(quota_dict[single_check['region']][single_check['metric']])
)
)
config_error_observed = True
# Get the project ID
# from project configuration
project_config = gcp.get_gcp_configuration(
gcp.Commands.GET_GCLOUD_DEFAULT, human_readable=False
)
if not project_config.has_error:
auth_project_id = project_config.parsed_output['core']['project']
print(
'GCP credentials are configured with access to project: %s ...\n' %
(project_id)
)
print('Following account(s) are active under this pipeline:\n')
subprocess.run(['gcloud', 'auth', 'list', '--format', 'json'])
print('\n')
else:
print(
'Project configuration is not accessible with error %s\n' %
(project_config.stderr),
file=sys.stderr
)
config_error_observed = True
if auth_project_id != project_id:
print(
'User provided project ID %s does not match the configuration %s\n' %
(project_id, auth_project_id),
file=sys.stderr
)
config_error_observed = True
# Get project buckets
get_project_bucket_results = gcp.get_gcp_configuration(
gcp.Commands.GET_STORAGE_BUCKETS, human_readable=False
)
if get_project_bucket_results.has_error:
print(
'could not retrieve project buckets with error: %s' %
(get_project_bucket_results.stderr),
file=sys.stderr
)
config_error_observed = True
# Get the root of the user provided bucket i.e. gs://root.
bucket_root = '/'.join(bucket.split('/')[0:3])
print(
'Checking to see if the provided GCS bucket\n %s\nis accessible ...\n' %
(bucket)
)
if bucket_root in get_project_bucket_results.json_output:
print(
'Provided bucket \n %s\nis accessible within the project\n %s\n' %
(bucket, project_id)
)
else:
print(
              'Could not find the bucket %s in project %s. ' % (bucket, project_id) +
'Please verify that you have provided the correct GCS bucket name.\n' +
'Only the following buckets are visible in this project:\n%s' %
(get_project_bucket_results.parsed_output),
file=sys.stderr
)
config_error_observed = True
# Verify APIs that are required are enabled
api_config_results = gcp.get_gcp_configuration(gcp.Commands.GET_APIS)
api_status = {}
if api_config_results.has_error:
print(
'could not retrieve API status with error: %s' %
(api_config_results.stderr),
file=sys.stderr
)
config_error_observed = True
print('Checking APIs status ...')
for item in api_config_results.parsed_output:
api_status[item['config']['name']] = item['state']
# printing the results in stdout for logging purposes
print('%s %s' % (item['config']['name'], item['state']))
# Check if target apis are enabled
api_check_results = True
for api in target_apis.replace(' ', '').split(','):
if 'ENABLED' != api_status.get(api, 'DISABLED'):
api_check_results = False
print(
'API \"%s\" is not accessible or not enabled. To enable this api go to '
% (api) +
'https://console.cloud.google.com/apis/library/%s?project=%s' %
(api, project_id),
file=sys.stderr
)
config_error_observed = True
if 'HALT_ON_ERROR' in execution_mode and config_error_observed:
raise RuntimeError(
'There was an error in your environment configuration.\n' +
'Note that resolving such issues generally require a deep knowledge of Kubernetes.\n'
+ '\n' +
'We highly recommend that you recreate the cluster and check "Allow access ..." \n'
+
'checkbox during cluster creation to have the cluster configured automatically.\n'
+
'For more information on this and other troubleshooting instructions refer to\n'
+ 'our troubleshooting guide.\n' + '\n' +
'If you have intentionally modified the cluster configuration, you may\n'
+
'bypass this error by removing the execution_mode HALT_ON_ERROR flag.\n'
)
return (project_id, bucket)
def _serialize_str(str_value: str) -> str:
if not isinstance(str_value, str):
raise TypeError('Value "{}" has type "{}" instead of str.'.format(str(str_value), str(type(str_value))))
return str_value
import json
import argparse
_parser = argparse.ArgumentParser(prog='Run diagnose me', description='Performs environment verification specific to this pipeline.\n\n args:\n bucket:\n string name of the bucket to be checked. Must be of the format\n gs://bucket_root/any/path/here/is/ignored where any path beyond root\n is ignored.\n execution_mode:\n If set to HALT_ON_ERROR will case any error to raise an exception.\n This is intended to stop the data processing of a pipeline. Can set\n to False to only report Errors/Warnings.\n project_id:\n GCP project ID which is assumed to be the project under which\n current pod is executing.\n target_apis:\n String consisting of a comma separated list of apis to be verified.\n quota_check:\n List of entries describing how much quota is required. Each entry\n has three fields: region, metric and quota_needed. All\n string-typed.\n Raises:\n RuntimeError: If configuration is not setup properly and\n HALT_ON_ERROR flag is set.')
_parser.add_argument("--bucket", dest="bucket", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--execution-mode", dest="execution_mode", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--project-id", dest="project_id", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--target-apis", dest="target_apis", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--quota-check", dest="quota_check", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("----output-paths", dest="_output_paths", type=str, nargs=2)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = run_diagnose_me(**_parsed_args)
if not hasattr(_outputs, '__getitem__') or isinstance(_outputs, str):
_outputs = [_outputs]
_output_serializers = [
_serialize_str,
_serialize_str,
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- --bucket
- inputValue: bucket
- --execution-mode
- inputValue: execution_mode
- --project-id
- inputValue: project_id
- --target-apis
- inputValue: target_apis
- if:
cond:
isPresent: quota_check
then:
- --quota-check
- inputValue: quota_check
- '----output-paths'
- outputPath: bucket
- outputPath: project_id
| 8,163 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataflow | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataflow/tfma/component.yaml | name: TFX - Analyze model
description: |
Runs Tensorflow Model Analysis. https://www.tensorflow.org/tfx/model_analysis/get_started
TensorFlow Model Analysis allows you to perform model evaluations in the TFX pipeline, and view resultant metrics and plots in a Jupyter notebook. Specifically, it can provide:
* metrics computed on entire training and holdout dataset, as well as next-day evaluations
* tracking metrics over time
* model quality performance on different feature slices
inputs:
- {name: Model, type: GCSPath, description: GCS path to the model which will be evaluated.} # type: {GCSPath: {path_type: Directory, data_type: Exported TensorFlow models dir}}
- {name: Evaluation data, type: GCSPath, description: GCS path of eval files.} # type: {GCSPath: {data_type: CSV}}
- {name: Schema, type: GCSPath, description: GCS json schema file path.} # type: {GCSPath: {data_type: TFDV schema JSON}}
- {name: Run mode, type: String, default: local, description: whether to run the job locally or in Cloud Dataflow.}
- {name: GCP project, type: GCPProjectID, default: '', description: 'The GCP project to run the dataflow job, if running in the `cloud` mode.'}
- {name: Slice columns, type: String, description: Comma-separated list of columns on which to slice for analysis.}
- {name: Analysis results dir, type: GCSPath, description: GCS or local directory where the analysis results should be written.} # type: {GCSPath: {path_type: Directory}}
outputs:
- {name: Analysis results dir, type: GCSPath, description: GCS or local directory where the analysis results should were written.} # type: {GCSPath: {path_type: Directory}}
- {name: MLPipeline UI metadata, type: UI metadata}
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-dataflow-tfma:57d9f7f1cfd458e945d297957621716062d89a49
command: [python2, /ml/model_analysis.py]
args: [
--model, {inputValue: Model},
--eval, {inputValue: Evaluation data},
--schema, {inputValue: Schema},
--mode, {inputValue: Run mode},
--project, {inputValue: GCP project},
--slice-columns, {inputValue: Slice columns},
--output, {inputValue: Analysis results dir},
]
fileOutputs:
Analysis results dir: /output.txt
MLPipeline UI metadata: /mlpipeline-ui-metadata.json
| 8,164 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataflow | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataflow/predict/component.yaml | name: Predict using TF on Dataflow
description: |
Runs TensorFlow prediction on Google Cloud Dataflow
Input and output data is in GCS
inputs:
- {name: Data file pattern, type: GCSPath, description: 'GCS or local path of test file patterns.'} # type: {GCSPath: {data_type: CSV}}
- {name: Schema, type: GCSPath, description: 'GCS json schema file path.'} # type: {GCSPath: {data_type: TFDV schema JSON}}
- {name: Target column, type: String, description: 'Name of the column for prediction target.'}
- {name: Model, type: GCSPath, description: 'GCS or local path of model trained with tft preprocessed data.'} # Models trained with estimator are exported to base/export/export/123456781 directory. # Our trainer export only one model. #TODO: Output single model from trainer # type: {GCSPath: {path_type: Directory, data_type: Exported TensorFlow models dir}}
- {name: Batch size, type: Integer, default: '32', description: 'Batch size used in prediction.'}
- {name: Run mode, type: String, default: local, description: 'Whether to run the job locally or in Cloud Dataflow. Valid values are "local" and "cloud".'}
- {name: GCP project, type: GCPProjectID, description: 'The GCP project to run the dataflow job.'}
- {name: Predictions dir, type: GCSPath, description: 'GCS or local directory.'} #Will contain prediction_results-* and schema.json files; TODO: Split outputs and replace dir with single file # type: {GCSPath: {path_type: Directory}}
outputs:
- {name: Predictions dir, type: GCSPath, description: 'GCS or local directory.'} #Will contain prediction_results-* and schema.json files; TODO: Split outputs and replace dir with single file # type: {GCSPath: {path_type: Directory}}
- {name: MLPipeline UI metadata, type: UI metadata}
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-dataflow-tf-predict:57d9f7f1cfd458e945d297957621716062d89a49
command: [python2, /ml/predict.py]
args: [
--data, {inputValue: Data file pattern},
--schema, {inputValue: Schema},
--target, {inputValue: Target column},
--model, {inputValue: Model},
--mode, {inputValue: Run mode},
--project, {inputValue: GCP project},
--batchsize, {inputValue: Batch size},
--output, {inputValue: Predictions dir},
]
fileOutputs:
Predictions dir: /output.txt
MLPipeline UI metadata: /mlpipeline-ui-metadata.json
| 8,165 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataflow | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataflow/tfdv/component.yaml | name: TFX - Data Validation
description: |
Runs Tensorflow Data Validation. https://www.tensorflow.org/tfx/data_validation/get_started
Tensorflow Data Validation (TFDV) can analyze training and serving data to:
* compute descriptive statistics,
* infer a schema,
* detect data anomalies.
inputs:
- {name: Inference data, type: GCSPath, description: GCS path of the CSV file from which to infer the schema.} # type: {GCSPath: {data_type: CSV}}
- {name: Validation data, type: GCSPath, description: GCS path of the CSV file whose contents should be validated.} # type: {GCSPath: {data_type: CSV}}
- {name: Column names, type: GCSPath, description: GCS json file containing a list of column names.} # type: {GCSPath: {data_type: JSON}}
- {name: Key columns, type: String, description: Comma separated list of columns to treat as keys.}
- {name: GCP project, type: GCPProjectID, default: '', description: The GCP project to run the dataflow job.}
- {name: Run mode, type: String, default: local, description: Whether to run the job locally or in Cloud Dataflow. Valid values are "local" and "cloud". }
- {name: Validation output, type: GCSPath, description: GCS or local directory.} # type: {GCSPath: {path_type: Directory}}
outputs:
- {name: Schema, type: GCSPath, description: GCS path of the inferred schema JSON.} # type: {GCSPath: {data_type: TFDV schema JSON}}
- {name: Validation result, type: String, description: Indicates whether anomalies were detected or not.}
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-dataflow-tfdv:57d9f7f1cfd458e945d297957621716062d89a49
command: [python2, /ml/validate.py]
args: [
--csv-data-for-inference, {inputValue: Inference data},
--csv-data-to-validate, {inputValue: Validation data},
--column-names, {inputValue: Column names},
--key-columns, {inputValue: Key columns},
--project, {inputValue: GCP project},
--mode, {inputValue: Run mode},
--output, {inputValue: Validation output},
]
fileOutputs:
Schema: /schema.txt
Validation result: /output_validation_result.txt | 8,166 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataflow | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataflow/tft/component.yaml | name: Transform using TF on Dataflow
description: Runs TensorFlow Transform on Google Cloud Dataflow
inputs:
- {name: Training data file pattern, type: GCSPath, description: 'GCS path of train file patterns.'} #Also supports local CSV # type: {GCSPath: {data_type: CSV}}
- {name: Evaluation data file pattern, type: GCSPath, description: 'GCS path of eval file patterns.'} #Also supports local CSV # type: {GCSPath: {data_type: CSV}}
- {name: Schema, type: GCSPath, description: 'GCS json schema file path.'} # type: {GCSPath: {data_type: JSON}}
- {name: GCP project, type: GCPProjectID, description: 'The GCP project to run the dataflow job.'}
- {name: Run mode, type: String, default: local, description: 'Whether to run the job locally or in Cloud Dataflow. Valid values are "local" and "cloud".' }
- {name: Preprocessing module, type: GCSPath, default: '', description: 'GCS path to a python file defining "preprocess" and "get_feature_columns" functions.'} # type: {GCSPath: {data_type: Python}}
- {name: Transformed data dir, type: GCSPath, description: 'GCS or local directory'} #Also supports local paths # type: {GCSPath: {path_type: Directory}}
outputs:
- {name: Transformed data dir, type: GCSPath} # type: {GCSPath: {path_type: Directory}}
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-dataflow-tft:57d9f7f1cfd458e945d297957621716062d89a49
command: [python2, /ml/transform.py]
args: [
--train, {inputValue: Training data file pattern},
--eval, {inputValue: Evaluation data file pattern},
--schema, {inputValue: Schema},
--project, {inputValue: GCP project},
--mode, {inputValue: Run mode},
--preprocessing-module, {inputValue: Preprocessing module},
--output, {inputValue: Transformed data dir},
]
fileOutputs:
Transformed data dir: /output.txt
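The Data Validation and Transform components above are designed to be chained: the schema that TFDV infers feeds the Schema input of the Transform step. A hedged sketch with the KFP v1 SDK; the component paths, GCS locations, key column, and preprocessing module are placeholders:

# Hedged example: chaining the TFDV and TFT components above (placeholder values).
import kfp
from kfp.components import load_component_from_file

dataflow_tfdv_op = load_component_from_file(
    'components/deprecated/dataflow/tfdv/component.yaml')
dataflow_tft_op = load_component_from_file(
    'components/deprecated/dataflow/tft/component.yaml')

@kfp.dsl.pipeline(name='tfdv-tft-example')
def validate_then_transform(project='my-gcp-project'):
  validation = dataflow_tfdv_op(
      inference_data='gs://my-bucket/taxi/train.csv',
      validation_data='gs://my-bucket/taxi/eval.csv',
      column_names='gs://my-bucket/taxi/column-names.json',
      key_columns='trip_start_timestamp',            # placeholder key column
      gcp_project=project,
      run_mode='local',
      validation_output='gs://my-bucket/taxi/validation',
  )
  # The inferred schema output of TFDV feeds the Schema input of TFT.
  transform = dataflow_tft_op(
      training_data_file_pattern='gs://my-bucket/taxi/train.csv',
      evaluation_data_file_pattern='gs://my-bucket/taxi/eval.csv',
      schema=validation.outputs['schema'],
      gcp_project=project,
      run_mode='local',
      preprocessing_module='gs://my-bucket/taxi/preprocessing.py',  # placeholder module
      transformed_data_dir='gs://my-bucket/taxi/transformed',
  )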
| 8,167 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/analyze/build_image.sh | #!/bin/bash -e
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# build base image
pushd ../base
./build_image.sh
popd
../../../build_image.sh -l ml-pipeline-dataproc-analyze "$@"
| 8,168 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/analyze/Dockerfile | # Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ml-pipeline-dataproc-base
RUN mkdir /usr/licenses && \
/ml/license.sh /ml/third_party_licenses.csv /usr/licenses
ENTRYPOINT ["python", "/ml/analyze.py"]
| 8,169 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/analyze | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/analyze/src/analyze.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Usage:
# python analyze.py \
# --project bradley-playground \
# --region us-central1 \
# --cluster ten4 \
# --output gs://bradley-playground/analysis \
# --train gs://bradley-playground/sfpd/train.csv \
# --schema gs://bradley-playground/schema.json \
import argparse
import os
from common import _utils
def main(argv=None):
parser = argparse.ArgumentParser(description='ML Analyzer')
parser.add_argument('--project', type=str, help='Google Cloud project ID to use.')
  parser.add_argument('--region', type=str, help='Which region to run the analyzer in.')
  parser.add_argument('--cluster', type=str, help='The name of the cluster to run the job on.')
parser.add_argument('--output', type=str, help='GCS path to use for output.')
parser.add_argument('--train', type=str, help='GCS path of the training csv file.')
parser.add_argument('--schema', type=str, help='GCS path of the json schema file.')
args = parser.parse_args()
code_path = os.path.dirname(os.path.realpath(__file__))
runfile_source = os.path.join(code_path, 'analyze_run.py')
dest_files = _utils.copy_resources_to_gcs([runfile_source], args.output)
try:
api = _utils.get_client()
print('Submitting job...')
spark_args = ['--output', args.output, '--train', args.train, '--schema', args.schema]
job_id = _utils.submit_pyspark_job(
api, args.project, args.region, args.cluster, dest_files[0], spark_args)
print('Job request submitted. Waiting for completion...')
_utils.wait_for_job(api, args.project, args.region, job_id)
with open('/output.txt', 'w') as f:
f.write(args.output)
print('Job completed.')
finally:
_utils.remove_resources_from_gcs(dest_files)
if __name__== "__main__":
main()
| 8,170 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/analyze | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/analyze/src/analyze_run.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from pyspark.sql.types import StructType, StructField
from pyspark.sql.types import DoubleType, IntegerType, StringType
import pandas as pd
from tensorflow.python.lib.io import file_io
from pyspark.sql.session import SparkSession
import json
import os
VOCAB_ANALYSIS_FILE = 'vocab_%s.csv'
STATS_FILE = 'stats.json'
def load_schema(schema_file):
type_map = {
'KEY': StringType(),
'NUMBER': DoubleType(),
'CATEGORY': StringType(),
'TEXT': StringType(),
'IMAGE_URL': StringType()
}
schema_json = json.loads(file_io.read_file_to_string(schema_file))
fields = [StructField(x['name'], type_map[x['type']]) for x in schema_json]
return schema_json, StructType(fields)
def get_columns_of_type(datatype, schema_json):
return [x['name'] for x in schema_json if x['type'] == datatype]
parser = argparse.ArgumentParser(description='ML')
parser.add_argument('--output', type=str)
parser.add_argument('--train', type=str)
parser.add_argument('--schema', type=str)
args = parser.parse_args()
schema_json, schema = load_schema(args.schema)
text_columns = get_columns_of_type('TEXT', schema_json)
category_columns = get_columns_of_type('CATEGORY', schema_json)
number_columns = get_columns_of_type('NUMBER', schema_json)
spark = SparkSession.builder.appName("MLAnalyzer").getOrCreate()
df = spark.read.schema(schema).csv(args.train)
df.createOrReplaceTempView("train")
num_examples = df.sql_ctx.sql(
'SELECT COUNT(*) AS num_examples FROM train').collect()[0].num_examples
stats = {'column_stats': {}, 'num_examples': num_examples}
for col in text_columns:
col_data = df.sql_ctx.sql("""
SELECT token, COUNT(token) AS token_count
FROM (SELECT EXPLODE(SPLIT({name}, \' \')) AS token FROM train)
GROUP BY token
ORDER BY token_count DESC, token ASC""".format(name=col))
token_counts = [(r.token, r.token_count) for r in col_data.collect()]
csv_string = pd.DataFrame(token_counts).to_csv(index=False, header=False)
file_io.write_string_to_file(os.path.join(args.output, VOCAB_ANALYSIS_FILE % col), csv_string)
stats['column_stats'][col] = {'vocab_size': len(token_counts)}
for col in category_columns:
col_data = df.sql_ctx.sql("""
SELECT {name} as token, COUNT({name}) AS token_count
FROM train
GROUP BY token
ORDER BY token_count DESC, token ASC
""".format(name=col))
token_counts = [(r.token, r.token_count) for r in col_data.collect()]
csv_string = pd.DataFrame(token_counts).to_csv(index=False, header=False)
file_io.write_string_to_file(os.path.join(args.output, VOCAB_ANALYSIS_FILE % col), csv_string)
stats['column_stats'][col] = {'vocab_size': len(token_counts)}
for col in number_columns:
col_stats = df.sql_ctx.sql("""
SELECT MAX({name}) AS max_value, MIN({name}) AS min_value, AVG({name}) AS mean_value
FROM train""".format(name=col)).collect()
stats['column_stats'][col] = {'min': col_stats[0].min_value, 'max': col_stats[0].max_value, 'mean': col_stats[0].mean_value}
file_io.write_string_to_file(os.path.join(args.output, STATS_FILE), json.dumps(stats, indent=2, separators=(',', ': ')))
file_io.write_string_to_file(os.path.join(args.output, 'schema.json'), json.dumps(schema_json, indent=2, separators=(',', ': ')))
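For reference, a sketch of the shape of the stats.json written above; the keys mirror the code, while the column names and numbers are made up:

# Illustrative shape of stats.json (keys match the code above; values are invented).
example_stats = {
  'num_examples': 10000,
  'column_stats': {
    'company': {'vocab_size': 54},                         # a TEXT or CATEGORY column
    'trip_miles': {'min': 0.0, 'max': 90.0, 'mean': 3.2},  # a NUMBER column
  },
}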
| 8,171 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/common/__init__.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 8,172 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/common/_utils.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import googleapiclient.discovery as discovery
import os
import subprocess
import time
def get_client():
"""Builds a client to the dataproc API."""
dataproc = discovery.build('dataproc', 'v1')
return dataproc
def create_cluster(api, project, region, cluster_name, init_file_url):
"""Create a DataProc clsuter."""
cluster_data = {
'projectId': project,
'clusterName': cluster_name,
'config': {
'gceClusterConfig': {
},
'softwareConfig': {
'imageVersion': '1.2'
},
      'initializationActions': [{
        'executableFile': init_file_url
      }]
}
}
result = api.projects().regions().clusters().create(
projectId=project,
region=region,
body=cluster_data).execute()
return result
def delete_cluster(api, project, region, cluster):
result = api.projects().regions().clusters().delete(
projectId=project,
region=region,
clusterName=cluster).execute()
return result
def wait_for_operation(api, job_name):
"""Waiting for a long running operation by polling it."""
while True:
result = api.projects().regions().operations().get(name=job_name).execute()
if result.get('done'):
if result['metadata']['status']['state'] == 'DONE':
return result
else:
raise Exception(result)
time.sleep(5)
def wait_for_job(api, project, region, job_id):
"""Waiting for a job by polling it."""
while True:
result = api.projects().regions().jobs().get(
projectId=project,
region=region,
jobId=job_id).execute()
if result['status']['state'] == 'ERROR':
raise Exception(result['status']['details'])
elif result['status']['state'] == 'DONE':
return result
time.sleep(5)
def submit_pyspark_job(api, project, region, cluster_name, filepath, args):
"""Submits the Pyspark job to the cluster"""
job_details = {
'projectId': project,
'job': {
'placement': {
'clusterName': cluster_name
},
'pysparkJob': {
'mainPythonFileUri': filepath,
'args': args
}
}
}
result = api.projects().regions().jobs().submit(
projectId=project,
region=region,
body=job_details).execute()
job_id = result['reference']['jobId']
return job_id
def submit_spark_job(api, project, region, cluster_name, jar_files, main_class, args):
"""Submits the spark job to the cluster"""
job_details = {
'projectId': project,
'job': {
'placement': {
'clusterName': cluster_name
},
'sparkJob': {
'jarFileUris': jar_files,
'mainClass': main_class,
'args': args,
}
}
}
result = api.projects().regions().jobs().submit(
projectId=project,
region=region,
body=job_details).execute()
job_id = result['reference']['jobId']
return job_id
def copy_resources_to_gcs(file_paths, gcs_path):
"""Copy a local resources to a GCS location."""
tmpdir = datetime.datetime.now().strftime('xgb_%y%m%d_%H%M%S')
dest_files = []
for file_name in file_paths:
dest_file = os.path.join(gcs_path, tmpdir, os.path.basename(file_name))
subprocess.call(['gcloud', 'auth', 'activate-service-account', '--key-file', os.environ['GOOGLE_APPLICATION_CREDENTIALS']])
subprocess.call(['gsutil', 'cp', file_name, dest_file])
dest_files.append(dest_file)
return dest_files
def remove_resources_from_gcs(file_paths):
"""Remove staged resources from a GCS location."""
subprocess.call(['gsutil', '-m', 'rm'] + file_paths)
def delete_directory_from_gcs(dir_path):
"""Delete a GCS dir recursively. Ignore errors."""
try:
subprocess.call(['gsutil', '-m', 'rm', '-r', dir_path])
except:
pass
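Taken together, these helpers cover the lifecycle a Dataproc-backed component goes through: stage resources, create a cluster, submit a job, wait, and clean up. A condensed sketch with placeholder project, region, cluster, and GCS paths:

# Hedged sketch of the lifecycle built from the helpers above (all values are placeholders).
from common import _utils

def run_one_off_pyspark_job():
  project, region, cluster = 'my-gcp-project', 'us-central1', 'my-cluster'
  api = _utils.get_client()
  staged = _utils.copy_resources_to_gcs(
      ['initialization_actions.sh'], 'gs://my-bucket/staging')
  try:
    create_op = _utils.create_cluster(api, project, region, cluster, staged[0])
    _utils.wait_for_operation(api, create_op['name'])
    job_id = _utils.submit_pyspark_job(
        api, project, region, cluster, 'gs://my-bucket/code/analyze_run.py',
        ['--output', 'gs://my-bucket/analysis',
         '--train', 'gs://my-bucket/data/train.csv',
         '--schema', 'gs://my-bucket/data/schema.json'])
    _utils.wait_for_job(api, project, region, job_id)
  finally:
    _utils.remove_resources_from_gcs(staged)
    _utils.delete_cluster(api, project, region, cluster)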
| 8,173 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/train/build_image.sh | #!/bin/bash -e
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# build base image
pushd ../base
./build_image.sh
popd
../../../build_image.sh -l ml-pipeline-dataproc-train "$@"
| 8,174 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/train/Dockerfile | # Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ml-pipeline-dataproc-base
RUN mkdir /usr/licenses && \
/ml/license.sh /ml/third_party_licenses.csv /usr/licenses
ENTRYPOINT ["python", "/ml/train.py"]
| 8,175 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/train | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/train/src/xgb4j_build.sh | #!/bin/bash -e
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script sets up the build environment for xgboost4j and builds the distributed
# XGBoost training package.
# It needs to run on a Debian 8 GCE VM to match the Dataproc workers.
# Steps:
# 1. Create a Debian GCE VM.
# 2. On the VM, under the ~ directory, run
#      xgb4j_build.sh gs://b/path/to/XGBoost*.scala gs://b/o/path/to/hold/package
# The generated package (jar) will be copied to gs://b/o/path/to/hold/package.
sudo apt-get update
sudo apt install -t jessie-backports build-essential git maven openjdk-8-jre-headless \
openjdk-8-jre openjdk-8-jdk-headless openjdk-8-jdk ca-certificates-java -y
wget --no-verbose http://www.cmake.org/files/v3.5/cmake-3.5.2.tar.gz
tar xf cmake-3.5.2.tar.gz
cd cmake-3.5.2
./configure
make
sudo make install
cd ..
export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH
sudo git clone --recursive https://github.com/dmlc/xgboost
cd xgboost/
sudo chmod -R 777 .
sudo make -j4
sudo chmod -R 777 .
cd jvm-packages
gsutil cp $1 ./xgboost4j-example/src/main/scala/ml/dmlc/xgboost4j/scala/example/spark/
mvn -DskipTests=true package
gsutil cp xgboost4j-example/target/xgboost4j-example-0.8-SNAPSHOT-jar-with-dependencies.jar $2
rm -rf cmake-3.5.2
rm -rf xgboost
| 8,176 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/train | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/train/src/XGBoostPredictor.scala | /*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ml.dmlc.xgboost4j.scala.example.spark
import com.google.gson.Gson
import java.io._
import ml.dmlc.xgboost4j.scala.spark.XGBoost
import org.apache.spark.ml.linalg.Vector
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.functions._
import org.apache.spark.sql.SparkSession
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import scala.sys.process.Process
import scala.util.parsing.json.JSON
/** A distributed XGBoost predictor program running in spark cluster.
* Args:
* model-path: GCS path of the trained xgboost model.
* predict-data-path: GCS path of the prediction libsvm file pattern.
* analysis-path: GCS path of analysis results directory.
* target-name: column name of the prediction target.
* output-path: GCS path to store the prediction results.
*/
case class SchemaEntry(name: String, `type`: String)
object XGBoostPredictor {
// TODO: create a common class for the util functions.
def column_feature_size(stats: (String, Any), target: String): Double = {
if (stats._1 == target) 0.0
val statsMap = stats._2.asInstanceOf[Map[String, Any]]
if (statsMap.keys.exists(_ == "vocab_size")) statsMap("vocab_size").asInstanceOf[Double]
else if (statsMap.keys.exists(_ == "max")) 1.0
else 0.0
}
def get_feature_size(statsPath: String, target: String): Int = {
val sparkSession = SparkSession.builder().getOrCreate()
val schema_string = sparkSession.sparkContext.wholeTextFiles(
statsPath).map(tuple => tuple._2).collect()(0)
val column_stats = JSON.parseFull(schema_string).get.asInstanceOf[Map[String, Any]](
"column_stats").asInstanceOf[Map[String, Any]]
var sum = 0.0
for (stats <- column_stats) sum = sum + column_feature_size(stats, target)
sum.toInt
}
def isClassificationTask(schemaFile: String, targetName: String): Boolean = {
val sparkSession = SparkSession.builder().getOrCreate()
val schemaString = sparkSession.sparkContext.wholeTextFiles(
schemaFile).map(tuple => tuple._2).collect()(0)
val schema = JSON.parseFull(schemaString).get.asInstanceOf[List[Map[String, String]]]
val targetList = schema.filter(x => x("name") == targetName)
if (targetList.isEmpty) {
throw new IllegalArgumentException("target cannot be found.")
}
val targetType = targetList(0)("type")
if (targetType == "CATEGORY") true
else if (targetType == "NUMBER") false
else throw new IllegalArgumentException("invalid target type.")
}
def getVocab(vocabFile: String): Array[String] = {
val sparkSession = SparkSession.builder().getOrCreate()
val vocabContent = sparkSession.sparkContext.wholeTextFiles(vocabFile).map(
tuple => tuple._2).collect()(0)
val vocabFreq = vocabContent.split("\n")
val vocab = for (e <- vocabFreq) yield e.split(",")(0)
vocab
}
def labelIndexToStringUdf(vocab: Array[String]): UserDefinedFunction = {
val lookup: (Double => String) = (label: Double) => (vocab(label.toInt))
udf(lookup)
}
def probsToPredictionUdf(vocab: Array[String]): UserDefinedFunction = {
val convert: (Double => String) = (prob: Double) => (if (prob >= 0.5) vocab(1) else vocab(0))
udf(convert)
}
def writeSchemaFile(output: String, schema: Any): Unit = {
val gson = new Gson
val content = gson.toJson(schema)
val pw = new PrintWriter(new File("schema.json" ))
pw.write(content)
pw.close()
Process("gsutil cp schema.json " + output + "/schema.json").run
}
def main(args: Array[String]): Unit = {
if (args.length != 5) {
println(
"usage: program model-path predict-data-path analysis-path " +
"target-name, output-path")
sys.exit(1)
}
val sparkSession = SparkSession.builder().getOrCreate()
val modelPath = args(0)
val inputPredictPath = args(1)
val analysisPath = args(2)
val targetName = args(3)
val outputPath = args(4)
// build dataset
val feature_size = get_feature_size(analysisPath + "/stats.json", targetName)
val predictDF = (sparkSession.sqlContext.read.format("libsvm")
.option("numFeatures", feature_size.toString).load(inputPredictPath))
println("start prediction -------\n")
implicit val sc = SparkContext.getOrCreate()
val xgbModel = XGBoost.loadModelFromHadoopFile(modelPath)
val predictResultsDF = xgbModel.transform(predictDF)
val isClassification = isClassificationTask(analysisPath + "/schema.json", targetName)
if (isClassification) {
val targetVocab = getVocab(analysisPath + "/vocab_" + targetName + ".csv")
val lookupUdf = labelIndexToStringUdf(targetVocab)
var processedDF = (predictResultsDF.withColumn("target", lookupUdf(col("label")))
.withColumn("predicted", lookupUdf(col("prediction"))))
var schema = Array(SchemaEntry("target", "CATEGORY"),
SchemaEntry("predicted", "CATEGORY"))
var columns = Array("target", "predicted")
if (predictResultsDF.columns.contains("probabilities")) {
// probabilities column includes array of probs for each class. Need to expand it.
var classIndex = 0
for (classname <- targetVocab) {
// Need to make a val copy because the value of "classIndex" is evaluated at "run"
// later when the resulting dataframe is used.
val classIndexCopy = classIndex
val extractValue = udf((arr: Vector) => arr(classIndexCopy))
processedDF = processedDF.withColumn(classname, extractValue(col("probabilities")))
schema :+= SchemaEntry(classname, "NUMBER")
classIndex += 1
}
columns = columns ++ (for (e <- targetVocab) yield "`" + e + "`")
}
processedDF.select(columns.map(col): _*).write.option("header", "false").csv(outputPath)
writeSchemaFile(outputPath, schema)
} else {
// regression
val processedDF = (predictResultsDF.withColumnRenamed("prediction", "predicted")
.withColumnRenamed("label", "target"))
processedDF.select("target", "predicted").write.option("header", "false").csv(outputPath)
val schema = Array(SchemaEntry("target", "NUMBER"),
SchemaEntry("predicted", "NUMBER"))
writeSchemaFile(outputPath, schema)
}
}
}
| 8,177 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/train | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/train/src/README.md | # XGBoost Distributed Trainer and Predictor Package
This directory contains the code and the build script for training a generic XGBoost model and
performing predictions with it.
The XGBoost4j package currently has to be built from source
(https://github.com/dmlc/xgboost/issues/1807), along with the Spark layer and the user code on top
of it. To do so, create a GCE VM (Debian 8) and run
[xgb4j_build.sh](xgb4j_build.sh) on it. The script compiles and installs cmake, clones the xgboost
repo, copies in the sources, and builds a jar package that can run in a Spark environment.
This has only been tested on a Google Cloud Dataproc cluster.
0 | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/train | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/train/src/XGBoostTrainer.scala | /*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ml.dmlc.xgboost4j.scala.example.spark
import ml.dmlc.xgboost4j.scala.Booster
import ml.dmlc.xgboost4j.scala.spark.XGBoost
import org.apache.spark.sql.SparkSession
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.sql.functions.col
import scala.util.parsing.json.JSON
/** A distributed XGBoost trainer program running in spark cluster.
* Args:
* train-conf: GCS path of the training config json file for xgboost training.
* num-of-rounds: number of rounds to train.
 * num-workers: number of spark worker nodes used for training.
* analysis-path: GCS path of analysis results directory.
* target-name: column name of the prediction target.
* training-path: GCS path of training libsvm file patterns.
* eval-path: GCS path of eval libsvm file patterns.
* output-path: GCS path to store the trained model.
*/
object XGBoostTrainer {
def column_feature_size(stats: (String, Any), target: String): Double = {
if (stats._1 == target) 0.0
val statsMap = stats._2.asInstanceOf[Map[String, Any]]
if (statsMap.keys.exists(_ == "vocab_size")) statsMap("vocab_size").asInstanceOf[Double]
else if (statsMap.keys.exists(_ == "max")) 1.0
else 0.0
}
def get_feature_size(statsPath: String, target: String): Int = {
val sparkSession = SparkSession.builder().getOrCreate()
val schema_string = sparkSession.sparkContext.wholeTextFiles(
statsPath).map(tuple => tuple._2).collect()(0)
val column_stats = JSON.parseFull(schema_string).get.asInstanceOf[Map[String, Any]](
"column_stats").asInstanceOf[Map[String, Any]]
var sum = 0.0
for (stats <- column_stats) sum = sum + column_feature_size(stats, target)
sum.toInt
}
def read_config(configFile: String): Map[String, Any] = {
val sparkSession = SparkSession.builder().getOrCreate()
val confString = sparkSession.sparkContext.wholeTextFiles(
configFile).map(tuple => tuple._2).collect()(0)
// Avoid parsing "500" to "500.0"
val originNumberParser = JSON.perThreadNumberParser
JSON.perThreadNumberParser = {
in => try in.toInt catch { case _: NumberFormatException => in.toDouble}
}
try JSON.parseFull(confString).get.asInstanceOf[Map[String, Any]] finally {
JSON.perThreadNumberParser = originNumberParser
}
}
def isClassificationTask(schemaFile: String, targetName: String): Boolean = {
val sparkSession = SparkSession.builder().getOrCreate()
val schemaString = sparkSession.sparkContext.wholeTextFiles(
schemaFile).map(tuple => tuple._2).collect()(0)
val schema = JSON.parseFull(schemaString).get.asInstanceOf[List[Map[String, String]]]
val targetList = schema.filter(x => x("name") == targetName)
if (targetList.isEmpty) {
throw new IllegalArgumentException("target cannot be found.")
}
val targetType = targetList(0)("type")
if (targetType == "CATEGORY") true
else if (targetType == "NUMBER") false
else throw new IllegalArgumentException("invalid target type.")
}
def main(args: Array[String]): Unit = {
if (args.length != 8) {
println(
"usage: program train-conf num-of-rounds num-workers analysis-path " +
"target-name training-path eval-path output-path")
sys.exit(1)
}
val sparkSession = SparkSession.builder().getOrCreate()
val trainConf = args(0)
val numRounds = args(1).toInt
val numWorkers = args(2).toInt
val analysisPath = args(3)
val targetName = args(4)
val inputTrainPath = args(5)
val inputTestPath = args(6)
val outputPath = args(7)
// build dataset
val feature_size = get_feature_size(analysisPath + "/stats.json", targetName)
val trainDF = sparkSession.sqlContext.read.format("libsvm").option(
"numFeatures", feature_size.toString).load(inputTrainPath)
val testDF = sparkSession.sqlContext.read.format("libsvm").option(
"numFeatures", feature_size.toString).load(inputTestPath)
// start training
val paramMap = read_config(trainConf)
val xgboostModel = XGBoost.trainWithDataFrame(
trainDF, paramMap, numRounds, nWorkers = numWorkers, useExternalMemory = true)
println("training summary -------\n")
println(xgboostModel.summary)
// xgboost-spark appends the column containing prediction results
val predictionDF = xgboostModel.transform(testDF)
val classification = isClassificationTask(analysisPath + "/schema.json", targetName)
implicit val sc = SparkContext.getOrCreate()
if (classification) {
val correctCounts = predictionDF.filter(
col("prediction") === col("label")).groupBy(col("label")).count.collect
val totalCounts = predictionDF.groupBy(col("label")).count.collect
val accuracyAll = (predictionDF.filter(col("prediction") === col("label")).count /
predictionDF.count.toDouble)
print("\naccuracy: " + accuracyAll + "\n")
} else {
predictionDF.createOrReplaceTempView("prediction")
val rmseDF = sparkSession.sql(
"SELECT SQRT(AVG((prediction - label) * (prediction - label))) FROM prediction")
val rmse = rmseDF.collect()(0).getDouble(0)
print("RMSE: " + rmse + "\n")
}
xgboostModel.saveModelAsHadoopFile(outputPath)
print("Done")
}
}
| 8,179 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/train | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/train/src/train.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A program to perform training of an XGBoost model through a dataproc cluster.
# Usage:
# python train.py \
# --project bradley-playground \
# --region us-central1 \
# --cluster ten4 \
# --package gs://bradley-playground/xgboost4j-example-0.8-SNAPSHOT-jar-with-dependencies.jar \
# --output gs://bradley-playground/train/model \
# --conf gs://bradley-playground/trainconf.json \
# --rounds 300 \
# --workers 2 \
# --train gs://bradley-playground/transform/train/part-* \
# --eval gs://bradley-playground/transform/eval/part-* \
# --analysis gs://bradley-playground/analysis \
# --target resolution
import argparse
import logging
from common import _utils
def main(argv=None):
parser = argparse.ArgumentParser(description='ML Trainer')
parser.add_argument('--project', type=str, help='Google Cloud project ID to use.')
  parser.add_argument('--region', type=str, help='Which region to run the trainer in.')
  parser.add_argument('--cluster', type=str, help='The name of the cluster to run the job on.')
parser.add_argument('--package', type=str,
help='GCS Path of XGBoost distributed trainer package.')
parser.add_argument('--output', type=str, help='GCS path to use for output.')
parser.add_argument('--conf', type=str, help='GCS path of the training json config file.')
parser.add_argument('--rounds', type=int, help='Number of rounds to train.')
parser.add_argument('--workers', type=int, help='Number of workers to use for training.')
parser.add_argument('--train', type=str, help='GCS path of the training libsvm file pattern.')
parser.add_argument('--eval', type=str, help='GCS path of the eval libsvm file pattern.')
parser.add_argument('--analysis', type=str, help='GCS path of the analysis input.')
parser.add_argument('--target', type=str, help='Target column name.')
args = parser.parse_args()
logging.getLogger().setLevel(logging.INFO)
api = _utils.get_client()
logging.info('Submitting job...')
spark_args = [args.conf, str(args.rounds), str(args.workers), args.analysis, args.target,
args.train, args.eval, args.output]
job_id = _utils.submit_spark_job(
api, args.project, args.region, args.cluster, [args.package],
'ml.dmlc.xgboost4j.scala.example.spark.XGBoostTrainer', spark_args)
logging.info('Job request submitted. Waiting for completion...')
_utils.wait_for_job(api, args.project, args.region, job_id)
with open('/output.txt', 'w') as f:
f.write(args.output)
logging.info('Job completed.')
if __name__== "__main__":
main()
| 8,180 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/create_cluster/build_image.sh | #!/bin/bash -e
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# build base image
pushd ../base
./build_image.sh
popd
../../../build_image.sh -l ml-pipeline-dataproc-create-cluster "$@"
| 8,181 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/create_cluster/Dockerfile | # Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ml-pipeline-dataproc-base
RUN mkdir /usr/licenses && \
/ml/license.sh /ml/third_party_licenses.csv /usr/licenses
ENTRYPOINT ["python", "/ml/create_cluster.py"]
| 8,182 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/create_cluster | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/create_cluster/src/initialization_actions.sh | #!/bin/bash -e
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Initialization actions to run in dataproc setup.
# The script will be run on each node in a dataproc cluster.
easy_install pip
pip install tensorflow==1.4.1
pip install pandas==0.18.1
| 8,183 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/create_cluster | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/create_cluster/src/create_cluster.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Usage:
# python create_cluster.py \
# --project bradley-playground \
# --zone us-central1-a \
# --name ten4 \
# --staging gs://bradley-playground
import argparse
import os
from common import _utils
def main(argv=None):
parser = argparse.ArgumentParser(description='ML DataProc Setup')
parser.add_argument('--project', type=str, help='Google Cloud project ID to use.')
  parser.add_argument('--region', type=str, help='Which region to create the cluster in.')
parser.add_argument('--name', type=str, help='The name of the cluster to create.')
parser.add_argument('--staging', type=str, help='GCS path to use for staging.')
args = parser.parse_args()
code_path = os.path.dirname(os.path.realpath(__file__))
init_file_source = os.path.join(code_path, 'initialization_actions.sh')
dest_files = _utils.copy_resources_to_gcs([init_file_source], args.staging)
try:
api = _utils.get_client()
print('Creating cluster...')
create_response = _utils.create_cluster(api, args.project, args.region, args.name, dest_files[0])
print('Cluster creation request submitted. Waiting for completion...')
_utils.wait_for_operation(api, create_response['name'])
with open('/output.txt', 'w') as f:
f.write(args.name)
print('Cluster created.')
finally:
_utils.remove_resources_from_gcs(dest_files)
if __name__== "__main__":
main()
| 8,184 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/base/build_image.sh | #!/bin/bash -e
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
mkdir -p ./build/common
rsync -arvp "../analyze/src"/ ./build/
rsync -arvp "../train/src"/ ./build/
rsync -arvp "../predict/src"/ ./build/
rsync -arvp "../create_cluster/src"/ ./build/
rsync -arvp "../delete_cluster/src"/ ./build/
rsync -arvp "../transform/src"/ ./build/
rsync -arvp "../common"/ ./build/common/
cp ../../../license.sh ./build
cp ../../../third_party_licenses.csv ./build
docker build -t ml-pipeline-dataproc-base .
rm -rf ./build
| 8,185 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/base/Dockerfile | # Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ubuntu:16.04
RUN apt-get update -y && apt-get install --no-install-recommends -y -q ca-certificates python-dev python-setuptools wget unzip
RUN easy_install pip
RUN pip install google-api-python-client==1.6.2
RUN pip install tensorflow==1.6.0
RUN wget -nv https://dl.google.com/dl/cloudsdk/release/google-cloud-sdk.zip && \
unzip -qq google-cloud-sdk.zip -d tools && \
rm google-cloud-sdk.zip && \
tools/google-cloud-sdk/install.sh --usage-reporting=false \
--path-update=false --bash-completion=false \
--disable-installation-options && \
tools/google-cloud-sdk/bin/gcloud -q components update \
gcloud core gsutil && \
tools/google-cloud-sdk/bin/gcloud config set component_manager/disable_update_check true && \
touch /tools/google-cloud-sdk/lib/third_party/google.py
ADD build /ml
ENV PATH $PATH:/tools/node/bin:/tools/google-cloud-sdk/bin
| 8,186 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/predict/build_image.sh | #!/bin/bash -e
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# build base image
pushd ../base
./build_image.sh
popd
../../../build_image.sh -l ml-pipeline-dataproc-predict "$@"
| 8,187 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/predict/Dockerfile | # Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ml-pipeline-dataproc-base
RUN mkdir /usr/licenses && \
/ml/license.sh /ml/third_party_licenses.csv /usr/licenses
ENTRYPOINT ["python", "/ml/predict.py"]
| 8,188 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/predict | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/predict/src/predict.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A program to perform prediction with an XGBoost model through a dataproc cluster.
#
# Usage:
# python predict.py \
# --project bradley-playground \
# --region us-central1 \
# --cluster my-cluster \
# --package gs://bradley-playground/xgboost4j-example-0.8-SNAPSHOT-jar-with-dependencies.jar \
# --model gs://bradley-playground/model \
# --output gs://bradley-playground/predict/ \
# --workers 2 \
# --predict gs://bradley-playground/transform/eval/part-* \
# --analysis gs://bradley-playground/analysis \
# --target resolution
import argparse
import json
import os
from common import _utils
import logging
from tensorflow.python.lib.io import file_io
def main(argv=None):
parser = argparse.ArgumentParser(description='ML Predictor')
parser.add_argument('--project', type=str, help='Google Cloud project ID to use.')
  parser.add_argument('--region', type=str, help='Which region to run the predictor in.')
  parser.add_argument('--cluster', type=str, help='The name of the cluster to run the job on.')
parser.add_argument('--package', type=str,
help='GCS Path of XGBoost distributed trainer package.')
parser.add_argument('--model', type=str, help='GCS path of the model file.')
parser.add_argument('--output', type=str, help='GCS path to use for output.')
parser.add_argument('--predict', type=str, help='GCS path of prediction libsvm file.')
parser.add_argument('--analysis', type=str, help='GCS path of the analysis input.')
parser.add_argument('--target', type=str, help='Target column name.')
args = parser.parse_args()
logging.getLogger().setLevel(logging.INFO)
api = _utils.get_client()
logging.info('Submitting job...')
spark_args = [args.model, args.predict, args.analysis, args.target, args.output]
job_id = _utils.submit_spark_job(
api, args.project, args.region, args.cluster, [args.package],
'ml.dmlc.xgboost4j.scala.example.spark.XGBoostPredictor', spark_args)
logging.info('Job request submitted. Waiting for completion...')
_utils.wait_for_job(api, args.project, args.region, job_id)
prediction_results = os.path.join(args.output, 'part-*.csv')
with open('/output.txt', 'w') as f:
f.write(prediction_results)
with file_io.FileIO(os.path.join(args.output, 'schema.json'), 'r') as f:
schema = json.load(f)
metadata = {
'outputs' : [{
'type': 'table',
'storage': 'gcs',
'format': 'csv',
'header': [x['name'] for x in schema],
'source': prediction_results
}]
}
with open('/mlpipeline-ui-metadata.json', 'w') as f:
json.dump(metadata, f)
logging.info('Job completed.')
if __name__== "__main__":
main()
| 8,189 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/transform/build_image.sh | #!/bin/bash -e
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# build base image
pushd ../base
./build_image.sh
popd
../../../build_image.sh -l ml-pipeline-dataproc-transform "$@"
| 8,190 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/transform/Dockerfile | # Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ml-pipeline-dataproc-base
RUN mkdir /usr/licenses && \
/ml/license.sh /ml/third_party_licenses.csv /usr/licenses
ENTRYPOINT ["python", "/ml/transform.py"]
| 8,191 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/transform | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/transform/src/transform_run.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
from collections import Counter
import six
from pyspark.sql import Row
from pyspark.mllib.linalg import SparseVector
from pyspark.mllib.util import MLUtils
from pyspark.mllib.regression import LabeledPoint
from pyspark.sql.types import StructType, StructField
from pyspark.sql.types import DoubleType, IntegerType, StringType
import pandas as pd
from tensorflow.python.lib.io import file_io
from pyspark.sql.session import SparkSession
VOCAB_ANALYSIS_FILE = "vocab_%s.csv"
STATS_FILE = 'stats.json'
def load_schema(analysis_path):
type_map = {
'KEY': StringType(),
'NUMBER': DoubleType(),
'CATEGORY': StringType(),
'TEXT': StringType(),
'IMAGE_URL': StringType()
}
schema_file = os.path.join(analysis_path, 'schema.json')
schema_json = json.loads(file_io.read_file_to_string(schema_file))
fields = [StructField(x['name'], type_map[x['type']]) for x in schema_json]
return schema_json, StructType(fields)
def get_columns_of_type(datatype, schema_json):
return [x['name'] for x in schema_json if x['type'] == datatype]
parser = argparse.ArgumentParser(description='ML')
parser.add_argument('--output', type=str)
parser.add_argument('--train', type=str)
parser.add_argument('--eval', type=str)
parser.add_argument('--target', type=str)
parser.add_argument('--analysis', type=str)
args = parser.parse_args()
schema_json, schema = load_schema(args.analysis)
text_columns = get_columns_of_type('TEXT', schema_json)
category_columns = get_columns_of_type('CATEGORY', schema_json)
number_columns = get_columns_of_type('NUMBER', schema_json)
classification = False
if args.target in number_columns:
number_columns.remove(args.target)
elif args.target in category_columns:
category_columns.remove(args.target)
classification = True
else:
raise ValueError(
'Specified target "%s" is neither in numeric or categorical columns' % args.target)
stats = json.loads(file_io.read_file_to_string(os.path.join(args.analysis, STATS_FILE)))
# Load vocab
vocab = {}
columns_require_vocab = text_columns + category_columns
if classification:
columns_require_vocab.append(args.target)
for col in columns_require_vocab:
with file_io.FileIO(os.path.join(args.analysis, VOCAB_ANALYSIS_FILE % col), 'r') as f:
vocab_df = pd.read_csv(f, header=None, names=['vocab', 'count'], dtype=str, na_filter=False)
vocab[col] = list(vocab_df.vocab)
# Calculate the feature size
feature_size = 0
for col in text_columns + category_columns:
feature_size += stats['column_stats'][col]['vocab_size']
for col in number_columns:
feature_size += 1
spark = SparkSession.builder.appName("ML Transformer").getOrCreate()
def make_process_rows_fn(
classification, target_col, text_cols, category_cols, number_cols, vocab, stats):
def process_rows(row):
feature_indices = []
feature_values = []
start_index = 0
for col in schema.names:
col_value = getattr(row, col)
if col in number_cols:
v_max = stats['column_stats'][col]['max']
v_min = stats['column_stats'][col]['min']
value = -1 + (col_value - v_min) * 2 / (v_max - v_min)
feature_indices.append(start_index)
feature_values.append(value)
start_index += 1
if col in category_cols:
if col_value in vocab[col]:
value_index = vocab[col].index(col_value)
feature_indices.append(start_index + value_index)
feature_values.append(1.0)
start_index += len(vocab[col])
if col in text_cols:
if col_value is not None:
values = col_value.split()
word_indices = []
for v in values:
if v in vocab[col]:
word_indices.append(start_index + vocab[col].index(v))
for k, v in sorted(six.iteritems(Counter(word_indices))):
feature_indices.append(k)
feature_values.append(float(v))
start_index += len(vocab[col])
if col == target_col:
label = vocab[col].index(col_value) if classification else col_value
return {"label": label, "indices": feature_indices, "values": feature_values}
return process_rows
process_row_fn = make_process_rows_fn(
classification, args.target, text_columns, category_columns, number_columns, vocab, stats)
dfs = []
if args.train:
dfTrain = spark.read.schema(schema).csv(args.train)
dfs.append(("train", dfTrain))
if args.eval:
dfEval = spark.read.schema(schema).csv(args.eval)
dfs.append(("eval", dfEval))
for name, df in dfs:
rdd = df.rdd.map(process_row_fn).map(
lambda row: LabeledPoint(row["label"],
SparseVector(feature_size, row["indices"], row["values"])))
MLUtils.saveAsLibSVMFile(rdd, os.path.join(args.output, name))
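# Example submission (a sketch; the bucket paths are hypothetical). Inside this
# component the script is copied to GCS and submitted to Dataproc by transform.py,
# but it can also be run directly against a Spark cluster:
#
#   spark-submit transform_run.py \
#     --output gs://my-bucket/transform \
#     --train gs://my-bucket/train.csv \
#     --eval gs://my-bucket/eval.csv \
#     --analysis gs://my-bucket/analysis \
#     --target target_column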
| 8,192 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/transform | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/transform/src/transform.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Usage:
# python transform.py \
# --project bradley-playground \
# --region us-central1 \
# --cluster ten4 \
# --output gs://bradley-playground/transform \
# --train gs://bradley-playground/sfpd/train.csv \
# --eval gs://bradley-playground/sfpd/eval.csv \
# --analysis gs://bradley-playground/analysis \
# --target resolution
import argparse
import os
import subprocess
from common import _utils
def main(argv=None):
  parser = argparse.ArgumentParser(description='ML Transformer')
parser.add_argument('--project', type=str, help='Google Cloud project ID to use.')
  parser.add_argument('--region', type=str, help='The Dataproc region in which to run the job.')
  parser.add_argument('--cluster', type=str, help='The name of the cluster to run the job on.')
parser.add_argument('--output', type=str, help='GCS path to use for output.')
parser.add_argument('--train', type=str, help='GCS path of the training csv file.')
parser.add_argument('--eval', type=str, help='GCS path of the eval csv file.')
parser.add_argument('--analysis', type=str, help='GCS path of the analysis results.')
parser.add_argument('--target', type=str, help='Target column name.')
args = parser.parse_args()
  # Remove existing [output]/train and [output]/eval if they exist.
  # This is done here rather than in the runtime code because the runtime code
  # should stay portable to on-prem environments, while the deletion relies on gsutil.
_utils.delete_directory_from_gcs(os.path.join(args.output, 'train'))
_utils.delete_directory_from_gcs(os.path.join(args.output, 'eval'))
code_path = os.path.dirname(os.path.realpath(__file__))
runfile_source = os.path.join(code_path, 'transform_run.py')
dest_files = _utils.copy_resources_to_gcs([runfile_source], args.output)
try:
api = _utils.get_client()
print('Submitting job...')
spark_args = ['--output', args.output, '--analysis', args.analysis,
'--target', args.target]
if args.train:
spark_args.extend(['--train', args.train])
if args.eval:
spark_args.extend(['--eval', args.eval])
job_id = _utils.submit_pyspark_job(
api, args.project, args.region, args.cluster, dest_files[0], spark_args)
print('Job request submitted. Waiting for completion...')
_utils.wait_for_job(api, args.project, args.region, job_id)
with open('/output_train.txt', 'w') as f:
f.write(os.path.join(args.output, 'train', 'part-*'))
with open('/output_eval.txt', 'w') as f:
f.write(os.path.join(args.output, 'eval', 'part-*'))
print('Job completed.')
finally:
_utils.remove_resources_from_gcs(dest_files)
if __name__== "__main__":
main()
| 8,193 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/delete_cluster/build_image.sh | #!/bin/bash -e
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# build base image
pushd ../base
./build_image.sh
popd
../../../build_image.sh -l ml-pipeline-dataproc-delete-cluster "$@"
| 8,194 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/delete_cluster/Dockerfile | # Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ml-pipeline-dataproc-base
RUN mkdir /usr/licenses && \
/ml/license.sh /ml/third_party_licenses.csv /usr/licenses
ENTRYPOINT ["python", "/ml/delete_cluster.py"]
| 8,195 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/delete_cluster | kubeflow_public_repos/kfp-tekton-backend/components/deprecated/dataproc/delete_cluster/src/delete_cluster.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Usage:
# python delete_cluster.py \
# --project bradley-playground \
# --region us-central1 \
# --name ten4
import argparse
from common import _utils
def main(argv=None):
parser = argparse.ArgumentParser(description='ML DataProc Deletion')
parser.add_argument('--project', type=str, help='Google Cloud project ID to use.')
  parser.add_argument('--region', type=str, help='The Dataproc region of the cluster.')
  parser.add_argument('--name', type=str, help='The name of the cluster to delete.')
args = parser.parse_args()
api = _utils.get_client()
print('Tearing down cluster...')
delete_response = _utils.delete_cluster(api, args.project, args.region, args.name)
print('Cluster deletion request submitted. Waiting for completion...')
_utils.wait_for_operation(api, delete_response['name'])
print('Cluster deleted.')
if __name__== "__main__":
main()
| 8,196 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/sample/keras | kubeflow_public_repos/kfp-tekton-backend/components/sample/keras/train_classifier/README.md | # Keras - Train classifier
### Trains classifier using Keras sequential model
## Inputs
|Name|Type|Default|Description|
|---|---|---|---|
|training_set_features_path|GcsPath: {data_type: TSV}||Local or GCS path to the training set features table.|
|training_set_labels_path|GcsPath: {data_type: TSV}||Local or GCS path to the training set labels (each label is a class index from 0 to num-classes - 1).|
|output_model_uri|GcsPath: {data_type: Keras model}||Local or GCS path specifying where to save the trained model. The model (topology + weights + optimizer state) is saved in HDF5 format and can be loaded back by calling keras.models.load_model|
|model_config|GcsPath: {data_type: Keras model config json}||JSON string containing the serialized model structure. Can be obtained by calling model.to_json() on a Keras model (see the example below).|
|number_of_classes|Integer||Number of classifier classes.|
|number_of_epochs|Integer|100|Number of epochs to train the model. An epoch is an iteration over the entire `x` and `y` data provided.|
|batch_size|Integer|32|Number of samples per gradient update|
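For example, the serialized model structure can be produced from any Keras model. A minimal sketch (the layer sizes and the two-feature input shape are arbitrary and only for illustration):
```python
from tensorflow import keras

# Build a small two-class classifier and serialize its structure to JSON.
model = keras.Sequential([
    keras.layers.Dense(8, activation='relu', input_shape=(2,)),
    keras.layers.Dense(2, activation='softmax'),
])
model_config = model.to_json()  # JSON string to pass as the model_config input
```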
## Outputs
|Name|Type|Default|Description|
|---|---|---|---|
|output_model_uri|GcsPath: {data_type: Keras model}||GCS path where the trained model has been saved. The model (topology + weights + optimizer state) is saved in HDF5 format and can be loaded back by calling keras.models.load_model|
## Container image
gcr.io/ml-pipeline/components/sample/keras/train_classifier
## Usage:
```python
import os
from pathlib import Path
import requests
import kfp
from tensorflow.python.lib.io import gfile
component_url_prefix = 'https://raw.githubusercontent.com/kubeflow/pipelines/master/components/sample/keras/train_classifier/'
test_data_url_prefix = component_url_prefix + 'tests/testdata/'
#Prepare input/output paths and data
input_data_gcs_dir = 'gs://<my bucket>/<path>/'
output_data_gcs_dir = 'gs://<my bucket>/<path>/'
#Downloading the training set (to upload to GCS later)
training_set_features_local_path = os.path.join('.', 'training_set_features.tsv')
training_set_labels_local_path = os.path.join('.', 'training_set_labels.tsv')
training_set_features_url = test_data_url_prefix + 'training_set_features.tsv'
training_set_labels_url = test_data_url_prefix + 'training_set_labels.tsv'
Path(training_set_features_local_path).write_bytes(requests.get(training_set_features_url).content)
Path(training_set_labels_local_path).write_bytes(requests.get(training_set_labels_url).content)
#Uploading the data to GCS where it can be read by the trainer
training_set_features_gcs_path = os.path.join(input_data_gcs_dir, 'training_set_features.tsv')
training_set_labels_gcs_path = os.path.join(input_data_gcs_dir, 'training_set_labels.tsv')
gfile.Copy(training_set_features_local_path, training_set_features_gcs_path)
gfile.Copy(training_set_labels_local_path, training_set_labels_gcs_path)
output_model_uri_template = os.path.join(output_data_gcs_dir, kfp.dsl.EXECUTION_ID_PLACEHOLDER, 'output_model_uri', 'data')
xor_model_config = requests.get(test_data_url_prefix + 'model_config.json').text
#Load the component
train_op = kfp.components.load_component_from_url(component_url_prefix + 'component.yaml')
#Use the component as part of the pipeline
@kfp.dsl.pipeline(name='Test keras/train_classifier', description='Pipeline to test keras/train_classifier component')
def pipeline_to_test_keras_train_classifier():
train_task = train_op(
training_set_features_path=training_set_features_gcs_path,
training_set_labels_path=training_set_labels_gcs_path,
output_model_uri=output_model_uri_template,
model_config=xor_model_config,
number_of_classes=2,
number_of_epochs=10,
batch_size=32,
)
    #Use train_task.outputs['output_model_uri'] to obtain the reference to the trained model URI that can be passed to other pipeline tasks (e.g. for prediction or analysis)
```
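To actually execute the pipeline, it can be submitted with the KFP client. A minimal sketch, assuming a reachable Kubeflow Pipelines endpoint (the host URL below is a placeholder):
```python
# Submit the pipeline for execution.
client = kfp.Client(host='https://<your-kfp-endpoint>')
client.create_run_from_pipeline_func(
    pipeline_to_test_keras_train_classifier,
    arguments={},
)
```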
| 8,197 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/sample/keras | kubeflow_public_repos/kfp-tekton-backend/components/sample/keras/train_classifier/build_image.sh | #!/bin/bash -e
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
image_name=gcr.io/ml-pipeline/sample/keras/train_classifier
image_tag=latest
full_image_name=${image_name}:${image_tag}
base_image_tag=1.12.0-py3
cd "$(dirname "$0")"
docker build --build-arg BASE_IMAGE_TAG=$base_image_tag -t "$full_image_name" .
docker push "$full_image_name"
#Output the strict image name (which contains the sha256 image digest)
#This name can be used by the subsequent steps to refer to the exact image that was built even if another image with the same name was pushed.
image_name_with_digest=$(docker inspect --format="{{index .RepoDigests 0}}" "$full_image_name")
strict_image_name_output_file=./versions/image_digests_for_tags/$image_tag
mkdir -p "$(dirname "$strict_image_name_output_file")"
echo $image_name_with_digest | tee "$strict_image_name_output_file"
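# Example: a subsequent build or deploy step can pin to the exact image that was
# just pushed by reading the digest file written above (the path assumes the
# default "latest" tag used in this script):
#   strict_image=$(cat ./versions/image_digests_for_tags/latest)
#   docker pull "$strict_image"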
| 8,198 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/sample/keras | kubeflow_public_repos/kfp-tekton-backend/components/sample/keras/train_classifier/run_tests.sh | #!/bin/bash -e
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
cd "$(dirname "$0")"
python3 -m unittest discover --verbose --start-dir tests --top-level-directory=..
| 8,199 |